diff --git a/Cargo.lock b/Cargo.lock index e6ca24864b2f6..4711e7a62f927 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5738,6 +5738,84 @@ dependencies = [ "kitchensink-runtime", ] +[[package]] +name = "node-sassafras" +version = "0.3.4-dev" +dependencies = [ + "clap 4.3.2", + "frame-benchmarking", + "frame-benchmarking-cli", + "frame-system", + "futures", + "jsonrpsee", + "node-sassafras-runtime", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc", + "sc-basic-authorship", + "sc-cli", + "sc-client-api", + "sc-consensus", + "sc-consensus-grandpa", + "sc-consensus-sassafras", + "sc-executor", + "sc-keystore", + "sc-network", + "sc-offchain", + "sc-rpc", + "sc-rpc-api", + "sc-service", + "sc-telemetry", + "sc-transaction-pool", + "sc-transaction-pool-api", + "sp-api", + "sp-block-builder", + "sp-blockchain", + "sp-consensus", + "sp-consensus-grandpa", + "sp-consensus-sassafras", + "sp-core", + "sp-inherents", + "sp-keyring", + "sp-runtime", + "sp-timestamp", + "substrate-build-script-utils", + "substrate-frame-rpc-system", +] + +[[package]] +name = "node-sassafras-runtime" +version = "0.3.4-dev" +dependencies = [ + "frame-benchmarking", + "frame-executive", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "pallet-balances", + "pallet-grandpa", + "pallet-sassafras", + "pallet-session", + "pallet-sudo", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "parity-scale-codec", + "scale-info", + "sp-api", + "sp-block-builder", + "sp-consensus-sassafras", + "sp-core", + "sp-inherents", + "sp-offchain", + "sp-runtime", + "sp-session", + "sp-std", + "sp-transaction-pool", + "sp-version", + "substrate-wasm-builder", +] + [[package]] name = "node-template" version = "4.0.0-dev" @@ -7429,6 +7507,27 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-sassafras" +version = "0.3.4-dev" +dependencies = [ + "array-bytes", + "env_logger 0.10.0", + "frame-benchmarking", + "frame-support", + "frame-system", + "log", + "pallet-session", + "pallet-timestamp", + "parity-scale-codec", + "scale-info", + "sp-consensus-sassafras", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", +] + [[package]] name = "pallet-scheduler" version = "4.0.0-dev" @@ -9702,6 +9801,45 @@ dependencies = [ "thiserror", ] +[[package]] +name = "sc-consensus-sassafras" +version = "0.3.4-dev" +dependencies = [ + "async-trait", + "env_logger 0.10.0", + "fork-tree", + "futures", + "log", + "parity-scale-codec", + "parking_lot 0.12.1", + "sc-block-builder", + "sc-client-api", + "sc-consensus", + "sc-consensus-epochs", + "sc-consensus-slots", + "sc-keystore", + "sc-network-test", + "sc-telemetry", + "sc-transaction-pool-api", + "sp-api", + "sp-application-crypto", + "sp-block-builder", + "sp-blockchain", + "sp-consensus", + "sp-consensus-sassafras", + "sp-consensus-slots", + "sp-core", + "sp-inherents", + "sp-keyring", + "sp-keystore", + "sp-runtime", + "sp-timestamp", + "substrate-prometheus-endpoint", + "substrate-test-runtime-client", + "thiserror", + "tokio", +] + [[package]] name = "sc-consensus-slots" version = "0.10.0-dev" @@ -11336,6 +11474,21 @@ dependencies = [ "sp-std", ] +[[package]] +name = "sp-consensus-sassafras" +version = "0.3.4-dev" +dependencies = [ + "parity-scale-codec", + "scale-info", + "serde", + "sp-api", + "sp-application-crypto", + "sp-consensus-slots", + "sp-core", + "sp-runtime", + "sp-std", +] + [[package]] name = "sp-consensus-slots" version = "0.10.0-dev" @@ -12299,6 +12452,7 @@ 
dependencies = [ "log", "pallet-babe", "pallet-balances", + "pallet-sassafras", "pallet-timestamp", "parity-scale-codec", "sc-block-builder", @@ -12315,6 +12469,7 @@ dependencies = [ "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-grandpa", + "sp-consensus-sassafras", "sp-core", "sp-externalities", "sp-genesis-builder", diff --git a/Cargo.toml b/Cargo.toml index 9ee8142e23e76..da15cf4380ca1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -55,6 +55,8 @@ members = [ "bin/node/rpc", "bin/node/runtime", "bin/node/testing", + "bin/node-sassafras/node", + "bin/node-sassafras/runtime", "bin/utils/chain-spec-builder", "bin/utils/subkey", "client/api", @@ -75,6 +77,7 @@ members = [ "client/consensus/grandpa/rpc", "client/consensus/manual-seal", "client/consensus/pow", + "client/consensus/sassafras", "client/consensus/slots", "client/db", "client/executor", @@ -188,6 +191,7 @@ members = [ "frame/referenda", "frame/remark", "frame/salary", + "frame/sassafras", "frame/scheduler", "frame/scored-pool", "frame/session", @@ -243,6 +247,7 @@ members = [ "primitives/consensus/common", "primitives/consensus/grandpa", "primitives/consensus/pow", + "primitives/consensus/sassafras", "primitives/consensus/slots", "primitives/core", "primitives/core/hashing", diff --git a/bin/node-sassafras/.editorconfig b/bin/node-sassafras/.editorconfig new file mode 100644 index 0000000000000..5adac74ca24b3 --- /dev/null +++ b/bin/node-sassafras/.editorconfig @@ -0,0 +1,16 @@ +root = true + +[*] +indent_style=space +indent_size=2 +tab_width=2 +end_of_line=lf +charset=utf-8 +trim_trailing_whitespace=true +insert_final_newline = true + +[*.{rs,toml}] +indent_style=tab +indent_size=tab +tab_width=4 +max_line_length=100 diff --git a/bin/node-sassafras/node/Cargo.toml b/bin/node-sassafras/node/Cargo.toml new file mode 100644 index 0000000000000..f476d589f274e --- /dev/null +++ b/bin/node-sassafras/node/Cargo.toml @@ -0,0 +1,74 @@ +[package] +name = "node-sassafras" +version = "0.3.4-dev" +authors = ["Parity Technologies ", "Davide Galassi "] +description = "Node testbed for Sassafras consensus." 
+homepage = "https://substrate.io/" +edition = "2021" +license = "Unlicense" +publish = false +build = "build.rs" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[[bin]] +name = "node-sassafras" + +[dependencies] +clap = { version = "4.0.9", features = ["derive"] } +futures = { version = "0.3.21", features = ["thread-pool"]} + +sc-cli = { version = "0.10.0-dev", path = "../../../client/cli" } +sp-core = { version = "21.0.0", path = "../../../primitives/core" } +sc-executor = { version = "0.10.0-dev", path = "../../../client/executor" } +sc-network = { version = "0.10.0-dev", path = "../../../client/network" } +sc-service = { version = "0.10.0-dev", path = "../../../client/service" } +sc-telemetry = { version = "4.0.0-dev", path = "../../../client/telemetry" } +sc-keystore = { version = "4.0.0-dev", path = "../../../client/keystore" } +sc-transaction-pool = { version = "4.0.0-dev", path = "../../../client/transaction-pool" } +sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } +sc-offchain = { version = "4.0.0-dev", path = "../../../client/offchain" } +sc-consensus-sassafras = { version = "0.3.4-dev", path = "../../../client/consensus/sassafras" } +sp-consensus-sassafras = { version = "0.3.4-dev", path = "../../../primitives/consensus/sassafras" } +sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } +sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } +sc-consensus-grandpa = { version = "0.10.0-dev", path = "../../../client/consensus/grandpa" } +sp-consensus-grandpa = { version = "4.0.0-dev", path = "../../../primitives/consensus/grandpa" } +sc-client-api = { version = "4.0.0-dev", path = "../../../client/api" } +sp-runtime = { version = "24.0.0", path = "../../../primitives/runtime" } +sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" } +sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } +sp-keyring = { version = "24.0.0", path = "../../../primitives/keyring" } +frame-system = { version = "4.0.0-dev", path = "../../../frame/system" } +pallet-transaction-payment = { version = "4.0.0-dev", default-features = false, path = "../../../frame/transaction-payment" } + +# These dependencies are used for the node template's RPCs +jsonrpsee = { version = "0.16.2", features = ["server"] } +sc-rpc = { version = "4.0.0-dev", path = "../../../client/rpc" } +sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } +sc-rpc-api = { version = "0.10.0-dev", path = "../../../client/rpc-api" } +sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } +sp-block-builder = { version = "4.0.0-dev", path = "../../../primitives/block-builder" } +sc-basic-authorship = { version = "0.10.0-dev", path = "../../../client/basic-authorship" } +substrate-frame-rpc-system = { version = "4.0.0-dev", path = "../../../utils/frame/rpc/system" } +pallet-transaction-payment-rpc = { version = "4.0.0-dev", path = "../../../frame/transaction-payment/rpc/" } + +# These dependencies are used for runtime benchmarking +frame-benchmarking = { version = "4.0.0-dev", path = "../../../frame/benchmarking" } +frame-benchmarking-cli = { version = "4.0.0-dev", path = "../../../utils/frame/benchmarking-cli" } + +# Local Dependencies +node-sassafras-runtime = { version = "0.3.4-dev", path = "../runtime" } + +[build-dependencies] +substrate-build-script-utils = { version = "3.0.0", path = 
"../../../utils/build-script-utils" } + +[features] +default = [] +runtime-benchmarks = [ + "node-sassafras-runtime/runtime-benchmarks" +] +use-session-pallet = [ + "node-sassafras-runtime/use-session-pallet" +] diff --git a/bin/node-sassafras/node/build.rs b/bin/node-sassafras/node/build.rs new file mode 100644 index 0000000000000..e3bfe3116bf28 --- /dev/null +++ b/bin/node-sassafras/node/build.rs @@ -0,0 +1,7 @@ +use substrate_build_script_utils::{generate_cargo_keys, rerun_if_git_head_changed}; + +fn main() { + generate_cargo_keys(); + + rerun_if_git_head_changed(); +} diff --git a/bin/node-sassafras/node/src/chain_spec.rs b/bin/node-sassafras/node/src/chain_spec.rs new file mode 100644 index 0000000000000..bbdcb9de414e8 --- /dev/null +++ b/bin/node-sassafras/node/src/chain_spec.rs @@ -0,0 +1,168 @@ +use node_sassafras_runtime::{ + AccountId, BalancesConfig, GrandpaConfig, RuntimeGenesisConfig, SassafrasConfig, Signature, + SudoConfig, SystemConfig, WASM_BINARY, +}; +#[cfg(feature = "use-session-pallet")] +use node_sassafras_runtime::{SessionConfig, SessionKeys}; +use sc_service::ChainType; +use sp_consensus_grandpa::AuthorityId as GrandpaId; +use sp_consensus_sassafras::{ + AuthorityId as SassafrasId, EpochConfiguration as SassafrasEpochConfig, +}; +use sp_core::{sr25519, Pair, Public}; +use sp_runtime::traits::{IdentifyAccount, Verify}; + +// Genesis constants for Sassafras parameters configuration. +const SASSAFRAS_TICKETS_MAX_ATTEMPTS_NUMBER: u32 = 8; +const SASSAFRAS_TICKETS_REDUNDANCY_FACTOR: u32 = 1; + +/// Specialized `ChainSpec`. This is a specialization of the general Substrate ChainSpec type. +pub type ChainSpec = sc_service::GenericChainSpec; + +/// Generate a crypto pair from seed. +pub fn get_from_seed(seed: &str) -> ::Public { + TPublic::Pair::from_string(&format!("//{}", seed), None) + .expect("static values are valid; qed") + .public() +} + +type AccountPublic = ::Signer; + +/// Generate an account id from seed. +pub fn get_account_id_from_seed(seed: &str) -> AccountId +where + AccountPublic: From<::Public>, +{ + AccountPublic::from(get_from_seed::(seed)).into_account() +} + +/// Generate authority account id and keys from seed. 
+pub fn authority_keys_from_seed(seed: &str) -> (AccountId, SassafrasId, GrandpaId) { + ( + get_account_id_from_seed::(seed), + get_from_seed::(seed), + get_from_seed::(seed), + ) +} + +pub fn development_config() -> Result { + let wasm_binary = WASM_BINARY.ok_or_else(|| "Development wasm not available".to_string())?; + + Ok(ChainSpec::from_genesis( + "Development", + "dev", + ChainType::Development, + move || { + testnet_genesis( + wasm_binary, + vec![authority_keys_from_seed("Alice")], + get_account_id_from_seed::("Alice"), + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + ], + ) + }, + vec![], + None, + None, + None, + None, + None, + )) +} + +pub fn local_testnet_config() -> Result { + let wasm_binary = WASM_BINARY.ok_or_else(|| "Development wasm not available".to_string())?; + + Ok(ChainSpec::from_genesis( + "Local Testnet", + "local_testnet", + ChainType::Local, + move || { + testnet_genesis( + wasm_binary, + vec![authority_keys_from_seed("Alice"), authority_keys_from_seed("Bob")], + get_account_id_from_seed::("Alice"), + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Charlie"), + get_account_id_from_seed::("Dave"), + get_account_id_from_seed::("Eve"), + get_account_id_from_seed::("Ferdie"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + get_account_id_from_seed::("Charlie//stash"), + get_account_id_from_seed::("Dave//stash"), + get_account_id_from_seed::("Eve//stash"), + get_account_id_from_seed::("Ferdie//stash"), + ], + ) + }, + vec![], + None, + None, + None, + None, + None, + )) +} + +/// Configure initial storage state for FRAME modules. +fn testnet_genesis( + wasm_binary: &[u8], + initial_authorities: Vec<(AccountId, SassafrasId, GrandpaId)>, + root_key: AccountId, + endowed_accounts: Vec, +) -> RuntimeGenesisConfig { + RuntimeGenesisConfig { + system: SystemConfig { + // Add Wasm runtime to storage. + code: wasm_binary.to_vec(), + ..Default::default() + }, + balances: BalancesConfig { + // Configure endowed accounts with initial balance of 1 << 60. + balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 60)).collect(), + }, + sassafras: SassafrasConfig { + #[cfg(feature = "use-session-pallet")] + authorities: Vec::new(), + #[cfg(not(feature = "use-session-pallet"))] + authorities: initial_authorities.iter().map(|x| x.1.clone()).collect(), + epoch_config: SassafrasEpochConfig { + attempts_number: SASSAFRAS_TICKETS_MAX_ATTEMPTS_NUMBER, + redundancy_factor: SASSAFRAS_TICKETS_REDUNDANCY_FACTOR, + }, + ..Default::default() + }, + grandpa: GrandpaConfig { + #[cfg(feature = "use-session-pallet")] + authorities: vec![], + #[cfg(not(feature = "use-session-pallet"))] + authorities: initial_authorities.iter().map(|x| (x.2.clone(), 1)).collect(), + ..Default::default() + }, + sudo: SudoConfig { + // Assign network admin rights. 
+ key: Some(root_key), + }, + transaction_payment: Default::default(), + #[cfg(feature = "use-session-pallet")] + session: SessionConfig { + keys: initial_authorities + .iter() + .map(|x| { + ( + x.0.clone(), + x.0.clone(), + SessionKeys { sassafras: x.1.clone(), grandpa: x.2.clone() }, + ) + }) + .collect::>(), + }, + } +} diff --git a/bin/node-sassafras/node/src/cli.rs b/bin/node-sassafras/node/src/cli.rs new file mode 100644 index 0000000000000..5bc6c9b102aaf --- /dev/null +++ b/bin/node-sassafras/node/src/cli.rs @@ -0,0 +1,45 @@ +use sc_cli::RunCmd; + +#[derive(Debug, clap::Parser)] +pub struct Cli { + #[clap(subcommand)] + pub subcommand: Option, + + #[clap(flatten)] + pub run: RunCmd, +} + +#[derive(Debug, clap::Subcommand)] +pub enum Subcommand { + /// Key management cli utilities + #[clap(subcommand)] + Key(sc_cli::KeySubcommand), + + /// Build a chain specification. + BuildSpec(sc_cli::BuildSpecCmd), + + /// Validate blocks. + CheckBlock(sc_cli::CheckBlockCmd), + + /// Export blocks. + ExportBlocks(sc_cli::ExportBlocksCmd), + + /// Export the state of a given block into a chain spec. + ExportState(sc_cli::ExportStateCmd), + + /// Import blocks. + ImportBlocks(sc_cli::ImportBlocksCmd), + + /// Remove the whole chain. + PurgeChain(sc_cli::PurgeChainCmd), + + /// Revert the chain to a previous state. + Revert(sc_cli::RevertCmd), + + /// Sub-commands concerned with benchmarking. + #[clap(subcommand)] + Benchmark(frame_benchmarking_cli::BenchmarkCmd), + + /// Db meta columns information. + ChainInfo(sc_cli::ChainInfoCmd), +} diff --git a/bin/node-sassafras/node/src/command.rs b/bin/node-sassafras/node/src/command.rs new file mode 100644 index 0000000000000..187b266f9c4d4 --- /dev/null +++ b/bin/node-sassafras/node/src/command.rs @@ -0,0 +1,139 @@ +use crate::{ + chain_spec, + cli::{Cli, Subcommand}, + service, +}; +use frame_benchmarking_cli::BenchmarkCmd; +use node_sassafras_runtime::Block; +use sc_cli::SubstrateCli; +use sc_service::PartialComponents; + +impl SubstrateCli for Cli { + fn impl_name() -> String { + "Sassafras Node".into() + } + + fn impl_version() -> String { + env!("SUBSTRATE_CLI_IMPL_VERSION").into() + } + + fn description() -> String { + env!("CARGO_PKG_DESCRIPTION").into() + } + + fn author() -> String { + env!("CARGO_PKG_AUTHORS").into() + } + + fn support_url() -> String { + "support.anonymous.an".into() + } + + fn copyright_start_year() -> i32 { + 2023 + } + + fn load_spec(&self, id: &str) -> Result, String> { + Ok(match id { + "dev" => Box::new(chain_spec::development_config()?), + "" | "local" => Box::new(chain_spec::local_testnet_config()?), + path => + Box::new(chain_spec::ChainSpec::from_json_file(std::path::PathBuf::from(path))?), + }) + } +} + +/// Parse and run command line arguments +pub fn run() -> sc_cli::Result<()> { + let cli = Cli::from_args(); + + match &cli.subcommand { + Some(Subcommand::Key(cmd)) => cmd.run(&cli), + Some(Subcommand::BuildSpec(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.sync_run(|config| cmd.run(config.chain_spec, config.network)) + }, + Some(Subcommand::CheckBlock(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { client, task_manager, import_queue, .. } = + service::new_partial(&config)?; + Ok((cmd.run(client, import_queue), task_manager)) + }) + }, + Some(Subcommand::ExportBlocks(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { client, task_manager, .. 
} = service::new_partial(&config)?; + Ok((cmd.run(client, config.database), task_manager)) + }) + }, + Some(Subcommand::ExportState(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { client, task_manager, .. } = service::new_partial(&config)?; + Ok((cmd.run(client, config.chain_spec), task_manager)) + }) + }, + Some(Subcommand::ImportBlocks(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { client, task_manager, import_queue, .. } = + service::new_partial(&config)?; + Ok((cmd.run(client, import_queue), task_manager)) + }) + }, + Some(Subcommand::PurgeChain(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.sync_run(|config| cmd.run(config.database)) + }, + Some(Subcommand::Revert(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { client, task_manager, backend, .. } = + service::new_partial(&config)?; + let aux_revert = Box::new(|client, backend, blocks| { + sc_consensus_sassafras::revert(backend, blocks)?; + sc_consensus_grandpa::revert(client, blocks)?; + Ok(()) + }); + Ok((cmd.run(client, backend, Some(aux_revert)), task_manager)) + }) + }, + Some(Subcommand::Benchmark(cmd)) => { + let runner = cli.create_runner(cmd)?; + + runner.sync_run(|config| { + // This switch needs to be in the client, since the client decides + // which sub-commands it wants to support. + match cmd { + BenchmarkCmd::Pallet(cmd) => { + if !cfg!(feature = "runtime-benchmarks") { + return Err( + "Runtime benchmarking wasn't enabled when building the node. \ + You can enable it with `--features runtime-benchmarks`." + .into(), + ) + } + + cmd.run::(config) + }, + _ => { + eprintln!("Not implemented..."); + Ok(()) + }, + } + }) + }, + Some(Subcommand::ChainInfo(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.sync_run(|config| cmd.run::(&config)) + }, + None => { + let runner = cli.create_runner(&cli.run)?; + runner.run_node_until_exit(|config| async move { + service::new_full(config).map_err(sc_cli::Error::Service) + }) + }, + } +} diff --git a/bin/node-sassafras/node/src/main.rs b/bin/node-sassafras/node/src/main.rs new file mode 100644 index 0000000000000..4449d28b9fa41 --- /dev/null +++ b/bin/node-sassafras/node/src/main.rs @@ -0,0 +1,13 @@ +//! Substrate Node Template CLI library. +#![warn(missing_docs)] + +mod chain_spec; +#[macro_use] +mod service; +mod cli; +mod command; +mod rpc; + +fn main() -> sc_cli::Result<()> { + command::run() +} diff --git a/bin/node-sassafras/node/src/rpc.rs b/bin/node-sassafras/node/src/rpc.rs new file mode 100644 index 0000000000000..72c7b3d69ba12 --- /dev/null +++ b/bin/node-sassafras/node/src/rpc.rs @@ -0,0 +1,57 @@ +//! A collection of node-specific RPC methods. +//! Substrate provides the `sc-rpc` crate, which defines the core RPC layer +//! used by Substrate nodes. This file extends those RPC definitions with +//! capabilities that are specific to this project's runtime configuration. + +#![warn(missing_docs)] + +use std::sync::Arc; + +use jsonrpsee::RpcModule; +use node_sassafras_runtime::{opaque::Block, AccountId, Balance, Nonce}; +use sc_transaction_pool_api::TransactionPool; +use sp_api::ProvideRuntimeApi; +use sp_block_builder::BlockBuilder; +use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; + +pub use sc_rpc_api::DenyUnsafe; + +/// Full client dependencies. +pub struct FullDeps { + /// The client instance to use. 
+ pub client: Arc<C>, + /// Transaction pool instance. + pub pool: Arc<P>
, + /// Whether to deny unsafe calls + pub deny_unsafe: DenyUnsafe, +} + +/// Instantiate all full RPC extensions. +pub fn create_full( + deps: FullDeps, +) -> Result, Box> +where + C: ProvideRuntimeApi, + C: HeaderBackend + HeaderMetadata + 'static, + C: Send + Sync + 'static, + C::Api: substrate_frame_rpc_system::AccountNonceApi, + C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, + C::Api: BlockBuilder, + P: TransactionPool + 'static, +{ + use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}; + use substrate_frame_rpc_system::{System, SystemApiServer}; + + let mut module = RpcModule::new(()); + let FullDeps { client, pool, deny_unsafe } = deps; + + module.merge(System::new(client.clone(), pool.clone(), deny_unsafe).into_rpc())?; + module.merge(TransactionPayment::new(client).into_rpc())?; + + // Extend this RPC with a custom API by using the following syntax. + // `YourRpcStruct` should have a reference to a client, which is needed + // to call into the runtime. + // `module.merge(YourRpcTrait::into_rpc(YourRpcStruct::new(ReferenceToClient, ...)))?;` + + Ok(module) +} diff --git a/bin/node-sassafras/node/src/service.rs b/bin/node-sassafras/node/src/service.rs new file mode 100644 index 0000000000000..0dfc72edb9fcf --- /dev/null +++ b/bin/node-sassafras/node/src/service.rs @@ -0,0 +1,331 @@ +//! Service and ServiceFactory implementation. Specialized wrapper over substrate service. + +use futures::FutureExt; +use node_sassafras_runtime::{self, opaque::Block, RuntimeApi}; +use sc_client_api::{Backend, BlockBackend}; +use sc_consensus_grandpa::SharedVoterState; +pub use sc_executor::NativeElseWasmExecutor; +use sc_service::{error::Error as ServiceError, Configuration, TaskManager, WarpSyncParams}; +use sc_telemetry::{Telemetry, TelemetryWorker}; +use sc_transaction_pool_api::OffchainTransactionPoolFactory; +use std::{sync::Arc, time::Duration}; + +// Our native executor instance. +pub struct ExecutorDispatch; + +impl sc_executor::NativeExecutionDispatch for ExecutorDispatch { + /// Only enable the benchmarking host functions when we actually want to benchmark. + #[cfg(feature = "runtime-benchmarks")] + type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; + /// Otherwise we only use the default Substrate host functions. + #[cfg(not(feature = "runtime-benchmarks"))] + type ExtendHostFunctions = (); + + fn dispatch(method: &str, data: &[u8]) -> Option> { + node_sassafras_runtime::api::dispatch(method, data) + } + + fn native_version() -> sc_executor::NativeVersion { + node_sassafras_runtime::native_version() + } +} + +pub type FullClient = + sc_service::TFullClient>; +type FullBackend = sc_service::TFullBackend; +type FullSelectChain = sc_consensus::LongestChain; + +type FullGrandpaBlockImport = + sc_consensus_grandpa::GrandpaBlockImport; + +/// The minimum period of blocks on which justifications will be +/// imported and generated. 
+const GRANDPA_JUSTIFICATION_PERIOD: u32 = 512; + +pub fn new_partial( + config: &Configuration, +) -> Result< + sc_service::PartialComponents< + FullClient, + FullBackend, + FullSelectChain, + sc_consensus::DefaultImportQueue, + sc_transaction_pool::FullPool, + ( + sc_consensus_sassafras::SassafrasBlockImport, + sc_consensus_sassafras::SassafrasLink, + sc_consensus_grandpa::LinkHalf, + Option, + ), + >, + ServiceError, +> { + let telemetry = config + .telemetry_endpoints + .clone() + .filter(|x| !x.is_empty()) + .map(|endpoints| -> Result<_, sc_telemetry::Error> { + let worker = TelemetryWorker::new(16)?; + let telemetry = worker.handle().new_telemetry(endpoints); + Ok((worker, telemetry)) + }) + .transpose()?; + + let executor = sc_service::new_native_or_wasm_executor(&config); + + let (client, backend, keystore_container, task_manager) = + sc_service::new_full_parts::( + config, + telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), + executor, + )?; + let client = Arc::new(client); + + let telemetry = telemetry.map(|(worker, telemetry)| { + task_manager.spawn_handle().spawn("telemetry", None, worker.run()); + telemetry + }); + + let select_chain = sc_consensus::LongestChain::new(backend.clone()); + + let transaction_pool = sc_transaction_pool::BasicPool::new_full( + config.transaction_pool.clone(), + config.role.is_authority().into(), + config.prometheus_registry(), + task_manager.spawn_essential_handle(), + client.clone(), + ); + + let (grandpa_block_import, grandpa_link) = sc_consensus_grandpa::block_import( + client.clone(), + GRANDPA_JUSTIFICATION_PERIOD, + &client, + select_chain.clone(), + telemetry.as_ref().map(|x| x.handle()), + )?; + + let justification_import = grandpa_block_import.clone(); + + let (sassafras_block_import, sassafras_link) = sc_consensus_sassafras::block_import( + sc_consensus_sassafras::finalized_configuration(&*client)?, + grandpa_block_import, + client.clone(), + )?; + + let slot_duration = sassafras_link.genesis_config().slot_duration; + + let import_queue = sc_consensus_sassafras::import_queue( + sassafras_link.clone(), + sassafras_block_import.clone(), + Some(Box::new(justification_import)), + client.clone(), + select_chain.clone(), + move |_, ()| async move { + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + let slot = sc_consensus_sassafras::InherentDataProvider::from_timestamp( + *timestamp, + slot_duration, + ); + Ok((slot, timestamp)) + }, + &task_manager.spawn_essential_handle(), + config.prometheus_registry(), + telemetry.as_ref().map(|x| x.handle()), + )?; + + Ok(sc_service::PartialComponents { + client, + backend, + task_manager, + import_queue, + keystore_container, + select_chain, + transaction_pool, + other: (sassafras_block_import, sassafras_link, grandpa_link, telemetry), + }) +} + +/// Builds a new service for a full client. 
+pub fn new_full(config: Configuration) -> Result { + let sc_service::PartialComponents { + client, + backend, + mut task_manager, + import_queue, + keystore_container, + select_chain, + transaction_pool, + other: (block_import, sassafras_link, grandpa_link, mut telemetry), + } = new_partial(&config)?; + + let mut net_config = sc_network::config::FullNetworkConfiguration::new(&config.network); + + let grandpa_protocol_name = sc_consensus_grandpa::protocol_standard_name( + &client.block_hash(0).ok().flatten().expect("Genesis block exists; qed"), + &config.chain_spec, + ); + + net_config.add_notification_protocol(sc_consensus_grandpa::grandpa_peers_set_config( + grandpa_protocol_name.clone(), + )); + + let warp_sync = Arc::new(sc_consensus_grandpa::warp_proof::NetworkProvider::new( + backend.clone(), + grandpa_link.shared_authority_set().clone(), + Vec::default(), + )); + + let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) = + sc_service::build_network(sc_service::BuildNetworkParams { + config: &config, + net_config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + spawn_handle: task_manager.spawn_handle(), + import_queue, + block_announce_validator_builder: None, + warp_sync_params: Some(WarpSyncParams::WithProvider(warp_sync)), + })?; + + if config.offchain_worker.enabled { + task_manager.spawn_handle().spawn( + "offchain-workers-runner", + "offchain-worker", + sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions { + runtime_api_provider: client.clone(), + is_validator: config.role.is_authority(), + keystore: Some(keystore_container.keystore()), + offchain_db: backend.offchain_storage(), + transaction_pool: Some(OffchainTransactionPoolFactory::new( + transaction_pool.clone(), + )), + network_provider: network.clone(), + enable_http_requests: true, + custom_extensions: |_| vec![], + }) + .run(client.clone(), task_manager.spawn_handle()) + .boxed(), + ); + } + + let role = config.role.clone(); + let force_authoring = config.force_authoring; + let name = config.network.node_name.clone(); + let enable_grandpa = !config.disable_grandpa; + let prometheus_registry = config.prometheus_registry().cloned(); + + let rpc_extensions_builder = { + let client = client.clone(); + let pool = transaction_pool.clone(); + + Box::new(move |deny_unsafe, _| { + let deps = + crate::rpc::FullDeps { client: client.clone(), pool: pool.clone(), deny_unsafe }; + crate::rpc::create_full(deps).map_err(Into::into) + }) + }; + + sc_service::spawn_tasks(sc_service::SpawnTasksParams { + network: network.clone(), + client: client.clone(), + keystore: keystore_container.keystore(), + task_manager: &mut task_manager, + transaction_pool: transaction_pool.clone(), + rpc_builder: rpc_extensions_builder, + backend, + system_rpc_tx, + tx_handler_controller, + sync_service: sync_service.clone(), + config, + telemetry: telemetry.as_mut(), + })?; + + if role.is_authority() { + let proposer = sc_basic_authorship::ProposerFactory::new( + task_manager.spawn_handle(), + client.clone(), + transaction_pool.clone(), + prometheus_registry.as_ref(), + telemetry.as_ref().map(|x| x.handle()), + ); + + let slot_duration = sassafras_link.genesis_config().slot_duration; + + let sassafras_params = sc_consensus_sassafras::SassafrasWorkerParams { + client: client.clone(), + keystore: keystore_container.keystore(), + select_chain, + env: proposer, + block_import, + sassafras_link, + sync_oracle: sync_service.clone(), + justification_sync_link: sync_service.clone(), + 
force_authoring, + create_inherent_data_providers: move |_, _| async move { + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + let slot = sc_consensus_sassafras::InherentDataProvider::from_timestamp( + *timestamp, + slot_duration, + ); + Ok((slot, timestamp)) + }, + offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool.clone()), + }; + + let sassafras = sc_consensus_sassafras::start_sassafras(sassafras_params)?; + + // the Sassafras authoring task is considered essential, i.e. if it + // fails we take down the service with it. + task_manager.spawn_essential_handle().spawn_blocking( + "sassafras", + Some("block-authoring"), + sassafras, + ); + } + + if enable_grandpa { + // if the node isn't actively participating in consensus then it doesn't + // need a keystore, regardless of which protocol we use below. + let keystore = role.is_authority().then(|| keystore_container.keystore()); + + let grandpa_config = sc_consensus_grandpa::Config { + gossip_duration: Duration::from_millis(333), + justification_generation_period: GRANDPA_JUSTIFICATION_PERIOD, + name: Some(name), + observer_enabled: false, + keystore, + local_role: role, + telemetry: telemetry.as_ref().map(|x| x.handle()), + protocol_name: grandpa_protocol_name, + }; + + // start the full GRANDPA voter + // NOTE: non-authorities could run the GRANDPA observer protocol, but at + // this point the full voter should provide better guarantees of block + // and vote data availability than the observer. The observer has not + // been tested extensively yet and having most nodes in a network run it + // could lead to finality stalls. + let grandpa_params = sc_consensus_grandpa::GrandpaParams { + config: grandpa_config, + link: grandpa_link, + network, + sync: Arc::new(sync_service), + voting_rule: sc_consensus_grandpa::VotingRulesBuilder::default().build(), + prometheus_registry, + shared_voter_state: SharedVoterState::empty(), + telemetry: telemetry.as_ref().map(|x| x.handle()), + offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool), + }; + + // the GRANDPA voter task is considered infallible, i.e. + // if it fails we take down the service with it. + task_manager.spawn_essential_handle().spawn_blocking( + "grandpa-voter", + None, + sc_consensus_grandpa::run_grandpa_voter(grandpa_params)?, + ); + } + + network_starter.start_network(); + Ok(task_manager) +} diff --git a/bin/node-sassafras/runtime/Cargo.toml b/bin/node-sassafras/runtime/Cargo.toml new file mode 100644 index 0000000000000..6ec2b0aa45574 --- /dev/null +++ b/bin/node-sassafras/runtime/Cargo.toml @@ -0,0 +1,91 @@ +[package] +name = "node-sassafras-runtime" +version = "0.3.4-dev" +authors = ["Parity Technologies ","Davide Galassi "] +description = "Runtime testbed for Sassafras consensus." 
+homepage = "https://substrate.io/" +edition = "2021" +license = "Unlicense" +publish = false + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } + +pallet-sassafras = { version = "0.3.4-dev", default-features = false, path = "../../../frame/sassafras" } +pallet-balances = { version = "4.0.0-dev", default-features = false, path = "../../../frame/balances" } +pallet-session = { version = "4.0.0-dev", default-features = false, path = "../../../frame/session" } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../../../frame/support" } +pallet-grandpa = { version = "4.0.0-dev", default-features = false, path = "../../../frame/grandpa" } +pallet-sudo = { version = "4.0.0-dev", default-features = false, path = "../../../frame/sudo" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../../../frame/system" } +pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = "../../../frame/timestamp" } +pallet-transaction-payment = { version = "4.0.0-dev", default-features = false, path = "../../../frame/transaction-payment" } +frame-executive = { version = "4.0.0-dev", default-features = false, path = "../../../frame/executive" } +sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/api" } +sp-block-builder = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/block-builder"} +sp-consensus-sassafras = { version = "0.3.4-dev", default-features = false, path = "../../../primitives/consensus/sassafras" } +sp-core = { version = "21.0.0", default-features = false, path = "../../../primitives/core" } +sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/inherents"} +sp-offchain = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/offchain" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-session = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/session" } +sp-std = { version = "8.0.0", default-features = false, path = "../../../primitives/std" } +sp-transaction-pool = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/transaction-pool" } +sp-version = { version = "22.0.0", default-features = false, path = "../../../primitives/version" } + +# Used for the node template's RPCs +frame-system-rpc-runtime-api = { version = "4.0.0-dev", default-features = false, path = "../../../frame/system/rpc/runtime-api/" } +pallet-transaction-payment-rpc-runtime-api = { version = "4.0.0-dev", default-features = false, path = "../../../frame/transaction-payment/rpc/runtime-api/" } + +# Used for runtime benchmarking +frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../../../frame/benchmarking", optional = true } +frame-system-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../../../frame/system/benchmarking", optional = true } + +[build-dependencies] +substrate-wasm-builder = { version = "5.0.0-dev", path = "../../../utils/wasm-builder" } + +[features] +default = ["std"] +std = [ + "codec/std", + "scale-info/std", + "frame-executive/std", + "frame-support/std", + "frame-system-rpc-runtime-api/std", + "frame-system/std", + 
"pallet-sassafras/std", + "pallet-balances/std", + "pallet-grandpa/std", + "pallet-sudo/std", + "pallet-session/std", + "pallet-timestamp/std", + "pallet-transaction-payment-rpc-runtime-api/std", + "pallet-transaction-payment/std", + "sp-api/std", + "sp-block-builder/std", + "sp-consensus-sassafras/std", + "sp-core/std", + "sp-inherents/std", + "sp-offchain/std", + "sp-runtime/std", + "sp-session/std", + "sp-std/std", + "sp-transaction-pool/std", + "sp-version/std", +] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system-benchmarking/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-grandpa/runtime-benchmarks", + "pallet-sassafras/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] +use-session-pallet = [] diff --git a/bin/node-sassafras/runtime/build.rs b/bin/node-sassafras/runtime/build.rs new file mode 100644 index 0000000000000..9b53d2457dffd --- /dev/null +++ b/bin/node-sassafras/runtime/build.rs @@ -0,0 +1,9 @@ +use substrate_wasm_builder::WasmBuilder; + +fn main() { + WasmBuilder::new() + .with_current_project() + .export_heap_base() + .import_memory() + .build() +} diff --git a/bin/node-sassafras/runtime/src/lib.rs b/bin/node-sassafras/runtime/src/lib.rs new file mode 100644 index 0000000000000..f80bf71d2bb96 --- /dev/null +++ b/bin/node-sassafras/runtime/src/lib.rs @@ -0,0 +1,560 @@ +#![cfg_attr(not(feature = "std"), no_std)] +// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. +#![recursion_limit = "256"] + +// Make the WASM binary available. +#[cfg(feature = "std")] +include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); + +use sp_api::impl_runtime_apis; +use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; +#[cfg(feature = "use-session-pallet")] +use sp_runtime::traits::OpaqueKeys; +use sp_runtime::{ + create_runtime_str, generic, impl_opaque_keys, + traits::{AccountIdLookup, BlakeTwo256, Block as BlockT, IdentifyAccount, NumberFor, Verify}, + transaction_validity::{TransactionSource, TransactionValidity}, + ApplyExtrinsicResult, MultiSignature, Perbill, +}; +use sp_std::prelude::*; +#[cfg(feature = "std")] +use sp_version::NativeVersion; +use sp_version::RuntimeVersion; + +use pallet_grandpa::{ + fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList, +}; +use pallet_transaction_payment::CurrencyAdapter; + +use frame_support::{ + construct_runtime, parameter_types, + traits::{ConstU128, ConstU32, ConstU64, ConstU8}, + weights::{ + constants::{RocksDbWeight, WEIGHT_REF_TIME_PER_SECOND}, + IdentityFee, Weight, + }, +}; + +/// An index to a block. +pub type BlockNumber = u32; + +/// Alias to 512-bit hash when used in the context of a transaction signature on the chain. +pub type Signature = MultiSignature; + +/// A hash of some data used by the chain. +pub type Hash = sp_core::H256; + +/// Block header type as expected by this runtime. +pub type Header = generic::Header; + +/// The SignedExtension to the basic transaction logic. +pub type SignedExtra = ( + frame_system::CheckNonZeroSender, + frame_system::CheckSpecVersion, + frame_system::CheckTxVersion, + frame_system::CheckGenesis, + frame_system::CheckEra, + frame_system::CheckNonce, + frame_system::CheckWeight, + pallet_transaction_payment::ChargeTransactionPayment, +); + +/// Unchecked extrinsic type as expected by this runtime. 
+pub type UncheckedExtrinsic = + generic::UncheckedExtrinsic; + +/// Block type as expected by this runtime. +pub type Block = generic::Block; + +/// Some way of identifying an account on the chain. We intentionally make it equivalent +/// to the public key of our transaction signing scheme. +pub type AccountId = <::Signer as IdentifyAccount>::AccountId; + +/// The address format for describing accounts. +pub type Address = sp_runtime::MultiAddress; + +/// Balance of an account. +pub type Balance = u128; + +/// Index of a transaction in the chain. +pub type Nonce = u32; + +/// The payload being signed in transactions. +pub type SignedPayload = generic::SignedPayload; + +/// Executive: handles dispatch to the various modules. +pub type Executive = frame_executive::Executive< + Runtime, + Block, + frame_system::ChainContext, + Runtime, + AllPalletsWithSystem, +>; + +/// Opaque types. These are used by the CLI to instantiate machinery that don't need to know +/// the specifics of the runtime. They can then be made to be agnostic over specific formats +/// of data like extrinsics, allowing for them to continue syncing the network through upgrades +/// to even the core data structures. +pub mod opaque { + use super::*; + + pub use sp_runtime::OpaqueExtrinsic as UncheckedExtrinsic; + /// Opaque block header type. + pub type Header = generic::Header; + /// Opaque block type. + pub type Block = generic::Block; + /// Opaque block identifier type. + pub type BlockId = generic::BlockId; +} + +impl_opaque_keys! { + pub struct SessionKeys { + pub sassafras: Sassafras, + pub grandpa: Grandpa, + } +} + +#[sp_version::runtime_version] +pub const VERSION: RuntimeVersion = RuntimeVersion { + spec_name: create_runtime_str!("node-sassafras"), + impl_name: create_runtime_str!("node-sassafras"), + authoring_version: 1, + spec_version: 100, + impl_version: 1, + apis: RUNTIME_API_VERSIONS, + transaction_version: 1, + state_version: 1, +}; + +/// Sassafras slot duration in milliseconds +pub const SLOT_DURATION_IN_MILLISECONDS: u64 = 3000; + +/// Sassafras epoch duration in slots. +pub const EPOCH_DURATION_IN_SLOTS: u64 = 10; + +/// Max authorities for both Sassafras and Grandpa. +pub const MAX_AUTHORITIES: u32 = 32; + +/// The version information used to identify this runtime when compiled natively. +#[cfg(feature = "std")] +pub fn native_version() -> NativeVersion { + NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } +} + +// Required to send unsigned transactoins from Sassafras pallet +// TODO-SASS-P2 double check (isn't grandpa requiring the same thing??? +impl frame_system::offchain::SendTransactionTypes for Runtime +where + RuntimeCall: From, +{ + type Extrinsic = UncheckedExtrinsic; + type OverarchingCall = RuntimeCall; +} + +const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); + +parameter_types! { + pub const BlockHashCount: BlockNumber = 2400; + pub const Version: RuntimeVersion = VERSION; + /// We allow for 2 seconds of compute with a 3 second average block time. + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::with_sensible_defaults( + Weight::from_parts(2u64 * WEIGHT_REF_TIME_PER_SECOND, u64::MAX), + NORMAL_DISPATCH_RATIO, + ); + pub BlockLength: frame_system::limits::BlockLength = frame_system::limits::BlockLength + ::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); + pub const SS58Prefix: u8 = 42; +} + +// Configure FRAME pallets to include in runtime. 
+ +impl frame_system::Config for Runtime { + type BaseCallFilter = frame_support::traits::Everything; + type Block = Block; + type BlockWeights = BlockWeights; + type BlockLength = BlockLength; + type AccountId = AccountId; + type RuntimeCall = RuntimeCall; + type Lookup = AccountIdLookup; + type Nonce = Nonce; + type Hash = Hash; + type Hashing = BlakeTwo256; + type RuntimeEvent = RuntimeEvent; + type RuntimeOrigin = RuntimeOrigin; + type BlockHashCount = BlockHashCount; + type DbWeight = RocksDbWeight; + type Version = Version; + type PalletInfo = PalletInfo; + type OnNewAccount = (); + type OnKilledAccount = (); + type AccountData = pallet_balances::AccountData; + type SystemWeightInfo = (); + type SS58Prefix = SS58Prefix; + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +impl pallet_sassafras::Config for Runtime { + type SlotDuration = ConstU64; + type EpochDuration = ConstU64; + type MaxAuthorities = ConstU32; + type MaxTickets = ConstU32<{ EPOCH_DURATION_IN_SLOTS as u32 }>; + #[cfg(feature = "use-session-pallet")] + type EpochChangeTrigger = pallet_sassafras::ExternalTrigger; + #[cfg(not(feature = "use-session-pallet"))] + type EpochChangeTrigger = pallet_sassafras::SameAuthoritiesForever; +} + +impl pallet_grandpa::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = (); + type MaxAuthorities = ConstU32; + type MaxNominators = ConstU32<0>; + type MaxSetIdSessionEntries = ConstU64<0>; + type KeyOwnerProof = sp_core::Void; + type EquivocationReportSystem = (); +} + +impl pallet_timestamp::Config for Runtime { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = ConstU64<{ SLOT_DURATION_IN_MILLISECONDS / 2 }>; + type WeightInfo = (); +} + +impl pallet_balances::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type MaxLocks = ConstU32<50>; + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; + type Balance = Balance; + type DustRemoval = (); + type ExistentialDeposit = ConstU128<500>; + type AccountStore = System; + type WeightInfo = pallet_balances::weights::SubstrateWeight; + type FreezeIdentifier = (); + type MaxFreezes = (); + type RuntimeHoldReason = (); + type MaxHolds = (); +} + +impl pallet_transaction_payment::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type OnChargeTransaction = CurrencyAdapter; + type OperationalFeeMultiplier = ConstU8<5>; + type WeightToFee = IdentityFee; + type LengthToFee = IdentityFee; + type FeeMultiplierUpdate = (); +} + +impl pallet_sudo::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type WeightInfo = pallet_sudo::weights::SubstrateWeight; +} + +#[cfg(feature = "use-session-pallet")] +impl pallet_session::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type ValidatorId = ::AccountId; + type ValidatorIdOf = (); //pallet_staking::StashOf; + type ShouldEndSession = Sassafras; + type NextSessionRotation = Sassafras; + type SessionManager = (); //pallet_session::historical::NoteHistoricalRoot; + type SessionHandler = ::KeyTypeIdProviders; + type Keys = SessionKeys; + type WeightInfo = pallet_session::weights::SubstrateWeight; +} + +// Create a runtime using session pallet +#[cfg(feature = "use-session-pallet")] +construct_runtime!( + pub enum Runtime + { + System: frame_system, + Timestamp: pallet_timestamp, + Sassafras: pallet_sassafras, + Grandpa: pallet_grandpa, + Balances: pallet_balances, + TransactionPayment: pallet_transaction_payment, + Sudo: pallet_sudo, + Session: 
pallet_session, + } +); + +// Create a runtime NOT using session pallet +#[cfg(not(feature = "use-session-pallet"))] +construct_runtime!( + pub enum Runtime + { + System: frame_system, + Timestamp: pallet_timestamp, + Sassafras: pallet_sassafras, + Grandpa: pallet_grandpa, + Balances: pallet_balances, + TransactionPayment: pallet_transaction_payment, + Sudo: pallet_sudo, + } +); + +#[cfg(feature = "runtime-benchmarks")] +#[macro_use] +extern crate frame_benchmarking; + +#[cfg(feature = "runtime-benchmarks")] +mod benches { + define_benchmarks!( + [frame_benchmarking, BaselineBench::] + [frame_system, SystemBench::] + [pallet_balances, Balances] + [pallet_timestamp, Timestamp] + [pallet_grandpa, Grandpa] + [pallet_sassafras, Sassafras] + ); +} + +impl_runtime_apis! { + impl sp_api::Core for Runtime { + fn version() -> RuntimeVersion { + VERSION + } + + fn execute_block(block: Block) { + Executive::execute_block(block); + } + + fn initialize_block(header: &::Header) { + Executive::initialize_block(header) + } + } + + impl sp_api::Metadata for Runtime { + fn metadata() -> OpaqueMetadata { + OpaqueMetadata::new(Runtime::metadata().into()) + } + + fn metadata_at_version(version: u32) -> Option { + Runtime::metadata_at_version(version) + } + + fn metadata_versions() -> sp_std::vec::Vec { + Runtime::metadata_versions() + } + } + + impl sp_block_builder::BlockBuilder for Runtime { + fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { + Executive::apply_extrinsic(extrinsic) + } + + fn finalize_block() -> ::Header { + Executive::finalize_block() + } + + fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { + data.create_extrinsics() + } + + fn check_inherents( + block: Block, + data: sp_inherents::InherentData, + ) -> sp_inherents::CheckInherentsResult { + data.check_extrinsics(&block) + } + } + + impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { + fn validate_transaction( + source: TransactionSource, + tx: ::Extrinsic, + block_hash: ::Hash, + ) -> TransactionValidity { + Executive::validate_transaction(source, tx, block_hash) + } + } + + impl sp_offchain::OffchainWorkerApi for Runtime { + fn offchain_worker(header: &::Header) { + Executive::offchain_worker(header) + } + } + + impl sp_consensus_sassafras::SassafrasApi for Runtime { + fn ring_context() -> Option { + Sassafras::ring_context() + } + + fn submit_tickets_unsigned_extrinsic( + tickets: Vec + ) -> bool { + Sassafras::submit_tickets_unsigned_extrinsic(tickets) + } + + fn slot_ticket_id(slot: sp_consensus_sassafras::Slot) -> Option { + Sassafras::slot_ticket_id(slot) + } + + fn slot_ticket(slot: sp_consensus_sassafras::Slot) -> Option<(sp_consensus_sassafras::TicketId, sp_consensus_sassafras::TicketBody)> { + Sassafras::slot_ticket(slot) + } + + fn current_epoch() -> sp_consensus_sassafras::Epoch { + Sassafras::current_epoch() + } + + fn next_epoch() -> sp_consensus_sassafras::Epoch { + Sassafras::next_epoch() + } + + fn generate_key_ownership_proof( + _slot: sp_consensus_sassafras::Slot, + _authority_id: sp_consensus_sassafras::AuthorityId, + ) -> Option { + None + } + + fn submit_report_equivocation_unsigned_extrinsic( + equivocation_proof: sp_consensus_sassafras::EquivocationProof<::Header>, + _key_owner_proof: sp_consensus_sassafras::OpaqueKeyOwnershipProof, + ) -> bool { + //let key_owner_proof = key_owner_proof.decode()?; + Sassafras::submit_unsigned_equivocation_report(equivocation_proof) + } + } + + impl sp_session::SessionKeys for Runtime { + fn 
generate_session_keys(seed: Option>) -> Vec { + SessionKeys::generate(seed) + } + + fn decode_session_keys(encoded: Vec) -> Option, KeyTypeId)>> { + SessionKeys::decode_into_raw_public_keys(&encoded) + } + } + + impl fg_primitives::GrandpaApi for Runtime { + fn grandpa_authorities() -> GrandpaAuthorityList { + Grandpa::grandpa_authorities() + } + + fn current_set_id() -> fg_primitives::SetId { + Grandpa::current_set_id() + } + + fn submit_report_equivocation_unsigned_extrinsic( + _equivocation_proof: fg_primitives::EquivocationProof< + ::Hash, + NumberFor, + >, + _key_owner_proof: fg_primitives::OpaqueKeyOwnershipProof, + ) -> Option<()> { + None + } + + fn generate_key_ownership_proof( + _set_id: fg_primitives::SetId, + _authority_id: GrandpaId, + ) -> Option { + // NOTE: this is the only implementation possible since we've + // defined our key owner proof type as a bottom type (i.e. a type + // with no values). + None + } + } + + impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { + fn account_nonce(account: AccountId) -> Nonce { + System::account_nonce(account) + } + } + + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi for Runtime { + fn query_info( + uxt: ::Extrinsic, + len: u32, + ) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo { + TransactionPayment::query_info(uxt, len) + } + + fn query_fee_details( + uxt: ::Extrinsic, + len: u32, + ) -> pallet_transaction_payment::FeeDetails { + TransactionPayment::query_fee_details(uxt, len) + } + + fn query_weight_to_fee(weight: Weight) -> Balance { + TransactionPayment::weight_to_fee(weight) + } + + fn query_length_to_fee(length: u32) -> Balance { + TransactionPayment::length_to_fee(length) + } + } + + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi for Runtime { + fn query_call_info( + call: RuntimeCall, + len: u32, + ) -> pallet_transaction_payment::RuntimeDispatchInfo { + TransactionPayment::query_call_info(call, len) + } + + fn query_call_fee_details( + call: RuntimeCall, + len: u32, + ) -> pallet_transaction_payment::FeeDetails { + TransactionPayment::query_call_fee_details(call, len) + } + + fn query_weight_to_fee(weight: Weight) -> Balance { + TransactionPayment::weight_to_fee(weight) + } + + fn query_length_to_fee(length: u32) -> Balance { + TransactionPayment::length_to_fee(length) + } + } + + #[cfg(feature = "runtime-benchmarks")] + impl frame_benchmarking::Benchmark for Runtime { + fn benchmark_metadata(extra: bool) -> ( + Vec, + Vec, + ) { + use frame_benchmarking::{baseline, Benchmarking, BenchmarkList}; + use frame_support::traits::StorageInfoTrait; + use frame_system_benchmarking::Pallet as SystemBench; + use baseline::Pallet as BaselineBench; + + let mut list = Vec::::new(); + list_benchmarks!(list, extra); + + let storage_info = AllPalletsWithSystem::storage_info(); + + (list, storage_info) + } + + fn dispatch_benchmark( + config: frame_benchmarking::BenchmarkConfig + ) -> Result, sp_runtime::RuntimeString> { + use frame_benchmarking::{baseline, Benchmarking, BenchmarkBatch, TrackedStorageKey}; + + use frame_system_benchmarking::Pallet as SystemBench; + use baseline::Pallet as BaselineBench; + + impl frame_system_benchmarking::Config for Runtime {} + impl baseline::Config for Runtime {} + + use frame_support::traits::WhitelistedStorageKeys; + let whitelist: Vec = AllPalletsWithSystem::whitelisted_storage_keys(); + + let mut batches = Vec::::new(); + let params = (&config, &whitelist); + add_benchmarks!(params, batches); + + Ok(batches) + } + } 
+} diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index b3843f8acfa0a..58d0b1c73216a 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -33,7 +33,6 @@ use sp_consensus_babe::{ inherents::InherentDataProvider, make_vrf_sign_data, AllowedSlots, AuthorityId, AuthorityPair, Slot, }; -use sp_consensus_slots::SlotDuration; use sp_core::crypto::Pair; use sp_keyring::Sr25519Keyring; use sp_keystore::{testing::MemoryKeystore, Keystore}; @@ -66,8 +65,6 @@ type Mutator = Arc; type BabeBlockImport = PanickingBlockImport>>; -const SLOT_DURATION_MS: u64 = 1000; - #[derive(Clone)] struct DummyFactory { client: Arc, @@ -250,14 +247,15 @@ impl TestNetFactory for BabeTestNet { let (_, longest_chain) = TestClientBuilder::new().build_with_longest_chain(); + let slot_duration = data.link.config.slot_duration(); TestVerifier { inner: BabeVerifier { client: client.clone(), select_chain: longest_chain, - create_inherent_data_providers: Box::new(|_, _| async { + create_inherent_data_providers: Box::new(move |_, _| async move { let slot = InherentDataProvider::from_timestamp_and_slot_duration( Timestamp::current(), - SlotDuration::from_millis(SLOT_DURATION_MS), + slot_duration, ); Ok((slot,)) }), @@ -1009,7 +1007,7 @@ async fn obsolete_blocks_aux_data_cleanup() { let data = peer.data.as_ref().expect("babe link set up during initialization"); let client = peer.client().as_client(); - // Register the handler (as done by `babe_start`) + // Register the handler (as done by Babe's `block_import` method) let client_clone = client.clone(); let on_finality = move |summary: &FinalityNotification| { aux_storage_cleanup(client_clone.as_ref(), summary) diff --git a/client/consensus/sassafras/Cargo.toml b/client/consensus/sassafras/Cargo.toml new file mode 100644 index 0000000000000..2ecd08ccd2a4b --- /dev/null +++ b/client/consensus/sassafras/Cargo.toml @@ -0,0 +1,52 @@ +[package] +name = "sc-consensus-sassafras" +version = "0.3.4-dev" +authors = ["Parity Technologies "] +description = "Sassafras consensus algorithm for substrate" +edition = "2021" +license = "Apache 2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +documentation = "https://docs.rs/sc-consensus-sassafras" +readme = "README.md" +publish = false + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +async-trait = "0.1.50" +scale-codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } +futures = "0.3.21" +log = "0.4.16" +parking_lot = "0.12.0" +thiserror = "1.0" +fork-tree = { version = "3.0.0", path = "../../../utils/fork-tree" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../../utils/prometheus" } +sc-client-api = { version = "4.0.0-dev", path = "../../api" } +sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } +sc-consensus-epochs = { version = "0.10.0-dev", path = "../epochs" } +sc-consensus-slots = { version = "0.10.0-dev", path = "../slots" } +sc-telemetry = { version = "4.0.0-dev", path = "../../telemetry" } +sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../transaction-pool/api" } +sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } +sp-application-crypto = { version = "23.0.0", path = "../../../primitives/application-crypto" } +sp-block-builder = { version = "4.0.0-dev", path = "../../../primitives/block-builder" } +sp-blockchain = { version = 
"4.0.0-dev", path = "../../../primitives/blockchain" } +sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } +sp-consensus-sassafras = { version = "0.3.4-dev", path = "../../../primitives/consensus/sassafras" } +sp-consensus-slots = { version = "0.10.0-dev", path = "../../../primitives/consensus/slots" } +sp-core = { version = "21.0.0", path = "../../../primitives/core" } +sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } +sp-keystore = { version = "0.27.0", path = "../../../primitives/keystore" } +sp-runtime = { version = "24.0.0", path = "../../../primitives/runtime" } +sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" } +env_logger = "0.10.0" + +[dev-dependencies] +sc-block-builder = { version = "0.10.0-dev", path = "../../block-builder" } +sc-keystore = { version = "4.0.0-dev", path = "../../keystore" } +sc-network-test = { version = "0.8.0", path = "../../network/test" } +sp-keyring = { version = "24.0.0", path = "../../../primitives/keyring" } +substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } +tokio = "1.22.0" diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs new file mode 100644 index 0000000000000..6c6f6e54d37ce --- /dev/null +++ b/client/consensus/sassafras/src/authorship.rs @@ -0,0 +1,641 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Types and functions related to authority selection and slot claiming. + +use super::*; + +use sc_transaction_pool_api::OffchainTransactionPoolFactory; +use sp_consensus_sassafras::{ + digests::SlotClaim, ticket_id_threshold, AuthorityId, Slot, TicketBody, TicketClaim, + TicketEnvelope, TicketId, +}; +use sp_core::{ + bandersnatch::ring_vrf::RingContext, ed25519::Pair as EphemeralPair, twox_64, ByteArray, +}; +use std::pin::Pin; + +/// Get secondary authority index for the given epoch and slot. +pub(crate) fn secondary_authority_index(slot: Slot, epoch: &Epoch) -> AuthorityIndex { + u64::from_le_bytes((epoch.randomness, slot).using_encoded(twox_64)) as AuthorityIndex % + epoch.authorities.len() as AuthorityIndex +} + +/// Try to claim an epoch slot. +/// If ticket is `None`, then the slot should be claimed using the fallback mechanism. 
+pub(crate) fn claim_slot( + slot: Slot, + epoch: &mut Epoch, + maybe_ticket: Option<(TicketId, TicketBody)>, + keystore: &KeystorePtr, +) -> Option<(SlotClaim, AuthorityId)> { + if epoch.authorities.is_empty() { + return None + } + + let mut vrf_sign_data = vrf::slot_claim_sign_data(&epoch.randomness, slot, epoch.epoch_idx); + + let (authority_idx, ticket_claim) = match maybe_ticket { + Some((ticket_id, ticket_body)) => { + debug!(target: LOG_TARGET, "[TRY PRIMARY (slot {slot}, tkt = {ticket_id:016x})]"); + + // TODO @davxy... this is annoying. + // If we lose the secret cache then to know if we are the ticket owner then looks + // like we need to regenerate the ticket-id using all our keys and check if the + // output matches with the onchain one... + // Is there a better way??? + let (authority_idx, ticket_secret) = epoch.tickets_aux.remove(&ticket_id)?; + debug!( + target: LOG_TARGET, + " got ticket: auth: {}, attempt: {}", + authority_idx, + ticket_body.attempt_idx + ); + + vrf_sign_data.push_transcript_data(&ticket_body.encode()); + + let reveal_vrf_input = vrf::revealed_key_input( + &epoch.randomness, + ticket_body.attempt_idx, + epoch.epoch_idx, + ); + vrf_sign_data + .push_vrf_input(reveal_vrf_input) + .expect("Sign data has enough space; qed"); + + // Sign some data using the erased key to enforce our ownership + let data = vrf_sign_data.challenge::<32>(); + let erased_pair = EphemeralPair::from_seed(&ticket_secret.seed); + let erased_signature = erased_pair.sign(&data); + + let claim = TicketClaim { erased_signature }; + (authority_idx, Some(claim)) + }, + None => { + debug!(target: LOG_TARGET, "[TRY SECONDARY (slot {slot})]"); + (secondary_authority_index(slot, epoch), None) + }, + }; + + let authority_id = epoch.authorities.get(authority_idx as usize)?; + + let vrf_signature = keystore + .bandersnatch_vrf_sign(AuthorityId::ID, authority_id.as_ref(), &vrf_sign_data) + .ok() + .flatten()?; + + if let Some(output) = vrf_signature.outputs.get(1) { + warn!(target: LOG_TARGET, "{:?}", output); + } + + let claim = SlotClaim { authority_idx, slot, vrf_signature, ticket_claim }; + + Some((claim, authority_id.clone())) +} + +/// Generate the tickets for the given epoch. +/// +/// Tickets additional information will be stored within the `Epoch` structure. +/// The additional information will be used later during the epoch to claim slots. +fn generate_epoch_tickets( + epoch: &mut Epoch, + keystore: &KeystorePtr, + ring_ctx: &RingContext, +) -> Vec { + let mut tickets = Vec::new(); + + let threshold = ticket_id_threshold( + epoch.config.redundancy_factor, + epoch.epoch_duration as u32, + epoch.config.attempts_number, + epoch.authorities.len() as u32, + ); + // TODO-SASS-P4 remove me + debug!(target: LOG_TARGET, "Generating tickets for epoch {} @ slot {}", epoch.epoch_idx, epoch.start_slot); + debug!(target: LOG_TARGET, " threshold: {threshold:016x}"); + + // We need a list of raw unwrapped keys + let pks: Vec<_> = epoch.authorities.iter().map(|a| *a.as_ref()).collect(); + + let tickets_aux = &mut epoch.tickets_aux; + let epoch = &epoch.inner; + + for (authority_idx, authority_id) in epoch.authorities.iter().enumerate() { + if !keystore.has_keys(&[(authority_id.to_raw_vec(), AuthorityId::ID)]) { + continue + } + + debug!(target: LOG_TARGET, ">>> Generating new ring prover key..."); + let prover = ring_ctx.prover(&pks, authority_idx).unwrap(); + debug!(target: LOG_TARGET, ">>> ...done"); + + let make_ticket = |attempt_idx| { + // Ticket id and threshold check. 
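+ // A ticket is published only if its VRF-derived id falls below the threshold computed
+ // above from the redundancy factor, epoch length, attempts per authority and number of
+ // authorities; candidates at or above the threshold are discarded immediately.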
+ let ticket_id_input = + vrf::ticket_id_input(&epoch.randomness, attempt_idx, epoch.epoch_idx); + let ticket_id_output = keystore + .bandersnatch_vrf_output(AuthorityId::ID, authority_id.as_ref(), &ticket_id_input) + .ok()??; + let ticket_id = vrf::make_ticket_id(&ticket_id_input, &ticket_id_output); + if ticket_id >= threshold { + return None + } + + // Erased key. + // TODO: @davxy maybe we can we make this as: + // part1 = OsRand() // stored in memory + // part2 = make_erased_seed(&seed_vrf_input, seed_vrf_output) // reproducible from auth + // erased_seed = hash(part1 ++ part2) + // In this way is not reproducible and not full secret is in memory + let (erased_pair, erased_seed) = EphemeralPair::generate(); + let erased_public = erased_pair.public(); + + // Revealed key. + let revealed_input = + vrf::revealed_key_input(&epoch.randomness, attempt_idx, epoch.epoch_idx); + let revealed_output = keystore + .bandersnatch_vrf_output(AuthorityId::ID, authority_id.as_ref(), &revealed_input) + .ok()??; + let revealed_seed = vrf::make_revealed_key_seed(&revealed_input, &revealed_output); + let revealed_public = EphemeralPair::from_seed(&revealed_seed).public(); + + let body = TicketBody { attempt_idx, erased_public, revealed_public }; + + debug!(target: LOG_TARGET, ">>> Creating ring proof for attempt {}", attempt_idx); + let sign_data = vrf::ticket_body_sign_data(&body, ticket_id_input); + + let signature = keystore + .bandersnatch_ring_vrf_sign( + AuthorityId::ID, + authority_id.as_ref(), + &sign_data, + &prover, + ) + .ok()??; + debug!(target: LOG_TARGET, ">>> ...done"); + + debug_assert_eq!(ticket_id_output, signature.outputs[0]); + + let ticket_envelope = TicketEnvelope { body, signature }; + let ticket_secret = TicketSecret { attempt_idx, seed: erased_seed }; + Some((ticket_id, ticket_envelope, ticket_secret)) + }; + + for attempt in 0..epoch.config.attempts_number { + if let Some((ticket_id, ticket_envelope, ticket_secret)) = make_ticket(attempt) { + debug!(target: LOG_TARGET, " → {ticket_id:016x}"); + tickets.push(ticket_envelope); + tickets_aux.insert(ticket_id, (authority_idx as u32, ticket_secret)); + } + } + } + + tickets +} + +struct SlotWorker { + client: Arc, + block_import: I, + env: E, + sync_oracle: SO, + justification_sync_link: L, + force_authoring: bool, + keystore: KeystorePtr, + epoch_changes: SharedEpochChanges, + slot_notification_sinks: SlotNotificationSinks, + genesis_config: Epoch, +} + +#[async_trait::async_trait] +impl sc_consensus_slots::SimpleSlotWorker + for SlotWorker +where + B: BlockT, + C: ProvideRuntimeApi + HeaderBackend + HeaderMetadata, + C::Api: SassafrasApi, + E: Environment + Sync, + E::Proposer: Proposer, + I: BlockImport + Send + Sync + 'static, + SO: SyncOracle + Send + Clone + Sync, + L: sc_consensus::JustificationSyncLink, + ER: std::error::Error + Send + 'static, +{ + type Claim = (SlotClaim, AuthorityId); + type SyncOracle = SO; + type JustificationSyncLink = L; + type CreateProposer = + Pin> + Send + 'static>>; + type Proposer = E::Proposer; + type BlockImport = I; + type AuxData = ViableEpochDescriptor, Epoch>; + + fn logging_target(&self) -> &'static str { + LOG_TARGET + } + + fn block_import(&mut self) -> &mut Self::BlockImport { + &mut self.block_import + } + + fn aux_data(&self, parent: &B::Header, slot: Slot) -> Result { + self.epoch_changes + .shared_data() + .epoch_descriptor_for_child_of( + descendent_query(&*self.client), + &parent.hash(), + *parent.number(), + slot, + ) + .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? 
+ .ok_or(ConsensusError::InvalidAuthoritiesSet) + } + + fn authorities_len(&self, epoch_descriptor: &Self::AuxData) -> Option { + self.epoch_changes + .shared_data() + .viable_epoch(epoch_descriptor, |slot| Epoch::genesis(&self.genesis_config, slot)) + .map(|epoch| epoch.as_ref().authorities.len()) + } + + async fn claim_slot( + &self, + parent_header: &B::Header, + slot: Slot, + epoch_descriptor: &ViableEpochDescriptor, Epoch>, + ) -> Option { + // Get the next slot ticket from the runtime. + let maybe_ticket = + self.client.runtime_api().slot_ticket(parent_header.hash(), slot).ok()?; + + let mut epoch_changes = self.epoch_changes.shared_data_locked(); + let mut epoch = epoch_changes.viable_epoch_mut(epoch_descriptor, |slot| { + Epoch::genesis(&self.genesis_config, slot) + })?; + + let claim = authorship::claim_slot(slot, &mut epoch.as_mut(), maybe_ticket, &self.keystore); + if claim.is_some() { + debug!(target: LOG_TARGET, "Claimed slot {}", slot); + } + claim + } + + fn notify_slot( + &self, + _parent_header: &B::Header, + slot: Slot, + epoch_descriptor: &ViableEpochDescriptor, Epoch>, + ) { + let sinks = &mut self.slot_notification_sinks.lock(); + sinks.retain_mut(|sink| match sink.try_send((slot, epoch_descriptor.clone())) { + Ok(()) => true, + Err(e) => + if e.is_full() { + warn!(target: LOG_TARGET, "Trying to notify a slot but the channel is full"); + true + } else { + false + }, + }); + } + + fn pre_digest_data(&self, _slot: Slot, claim: &Self::Claim) -> Vec { + vec![DigestItem::from(&claim.0)] + } + + async fn block_import_params( + &self, + header: B::Header, + header_hash: &B::Hash, + body: Vec, + storage_changes: StorageChanges, + (_, public): Self::Claim, + epoch_descriptor: Self::AuxData, + ) -> Result, ConsensusError> { + let signature = self + .keystore + .bandersnatch_sign( + ::ID, + public.as_ref(), + header_hash.as_ref(), + ) + .map_err(|e| ConsensusError::CannotSign(format!("{}. Key {:?}", e, public)))? + .map(|sig| AuthoritySignature::from(sig)) + .ok_or_else(|| { + ConsensusError::CannotSign(format!( + "Could not find key in keystore. Key {:?}", + public + )) + })?; + + let mut block = BlockImportParams::new(BlockOrigin::Own, header); + block.post_digests.push(DigestItem::from(&signature)); + block.body = Some(body); + block.state_action = + StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(storage_changes)); + block + .insert_intermediate(INTERMEDIATE_KEY, SassafrasIntermediate:: { epoch_descriptor }); + + Ok(block) + } + + fn force_authoring(&self) -> bool { + self.force_authoring + } + + fn should_backoff(&self, _slot: Slot, _chain_head: &B::Header) -> bool { + // TODO-SASS-P3 + false + } + + fn sync_oracle(&mut self) -> &mut Self::SyncOracle { + &mut self.sync_oracle + } + + fn justification_sync_link(&mut self) -> &mut Self::JustificationSyncLink { + &mut self.justification_sync_link + } + + fn proposer(&mut self, block: &B::Header) -> Self::CreateProposer { + Box::pin( + self.env + .init(block) + .map_err(|e| ConsensusError::ClientImport(format!("{:?}", e))), + ) + } + + fn telemetry(&self) -> Option { + // TODO-SASS-P4 + None + } + + fn proposing_remaining_duration(&self, slot_info: &SlotInfo) -> Duration { + let parent_slot = find_slot_claim::(&slot_info.chain_head).ok().map(|d| d.slot); + + // TODO-SASS-P2 : clarify this field. 
In Sassafras this is part of 'self' + let block_proposal_slot_portion = sc_consensus_slots::SlotProportion::new(0.5); + + sc_consensus_slots::proposing_remaining_duration( + parent_slot, + slot_info, + &block_proposal_slot_portion, + None, + sc_consensus_slots::SlotLenienceType::Exponential, + self.logging_target(), + ) + } +} + +/// Authoring tickets generation worker. +/// +/// Listens on the client's import notification stream for blocks which contains new epoch +/// information, that is blocks that signals the begin of a new epoch. +/// This event here triggers the begin of the generation of tickets for the next epoch. +/// The tickets generated by the worker are saved within the epoch changes tree +/// and are volatile. +async fn start_tickets_worker( + client: Arc, + keystore: KeystorePtr, + epoch_changes: SharedEpochChanges, + select_chain: SC, + offchain_tx_pool_factory: OffchainTransactionPoolFactory, +) where + B: BlockT, + C: BlockchainEvents + ProvideRuntimeApi, + C::Api: SassafrasApi, + SC: SelectChain + 'static, +{ + let mut notifications = client.import_notification_stream(); + + while let Some(notification) = notifications.next().await { + let epoch_desc = match find_next_epoch_digest::(¬ification.header) { + Ok(Some(epoch_desc)) => epoch_desc, + Err(err) => { + warn!(target: LOG_TARGET, "Error fetching next epoch digest: {}", err); + continue + }, + _ => continue, + }; + + debug!(target: LOG_TARGET, "New epoch announced {:x?}", epoch_desc); + + let number = *notification.header.number(); + let position = if number == One::one() { + EpochIdentifierPosition::Genesis1 + } else { + EpochIdentifierPosition::Regular + }; + let epoch_identifier = EpochIdentifier { position, hash: notification.hash, number }; + + let mut epoch = match epoch_changes.shared_data().epoch(&epoch_identifier).cloned() { + Some(epoch) => epoch, + None => { + warn!( + target: LOG_TARGET, + "Unexpected missing epoch data for {:?}", epoch_identifier + ); + continue + }, + }; + + // Get the best block on which we will publish the tickets. + let best_hash = match select_chain.best_chain().await { + Ok(header) => header.hash(), + Err(err) => { + error!(target: LOG_TARGET, "Error fetching best chain block id: {}", err); + continue + }, + }; + + let ring_ctx = match client.runtime_api().ring_context(best_hash) { + Ok(Some(ctx)) => ctx, + Ok(None) => { + info!(target: LOG_TARGET, "Ring context not initialized yet"); + continue + }, + Err(err) => { + error!(target: LOG_TARGET, "Unable to read ring context: {}", err); + continue + }, + }; + + let tickets = generate_epoch_tickets(&mut epoch, &keystore, &ring_ctx); + if tickets.is_empty() { + continue + } + + // Register the offchain tx pool to be able to use it from the runtime. + let mut runtime_api = client.runtime_api(); + runtime_api + .register_extension(offchain_tx_pool_factory.offchain_transaction_pool(best_hash)); + + let err = match runtime_api.submit_tickets_unsigned_extrinsic(best_hash, tickets) { + Err(err) => Some(err.to_string()), + Ok(false) => Some("Unknown reason".to_string()), + _ => None, + }; + + match err { + None => { + // Cache tickets secret in the epoch changes tree (TODO: @davxy use the keystre) + epoch_changes + .shared_data() + .epoch_mut(&epoch_identifier) + .map(|target_epoch| target_epoch.tickets_aux = epoch.tickets_aux); + }, + Some(err) => { + error!(target: LOG_TARGET, "Unable to submit tickets: {}", err); + }, + } + } +} + +/// Worker for Sassafras which implements `Future`. This must be polled. 
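+/// The future drives both block authoring and ticket generation; the node service is
+/// expected to keep polling it for its whole lifetime. A typical (hypothetical) way to do
+/// that is to spawn it as an essential task, e.g.:
+///
+/// ```ignore
+/// task_manager.spawn_essential_handle().spawn_blocking("sassafras", None, worker);
+/// ```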
+pub struct SassafrasWorker { + inner: Pin + Send + 'static>>, + slot_notification_sinks: SlotNotificationSinks, +} + +impl SassafrasWorker { + /// Return an event stream of notifications for when new slot happens, and the corresponding + /// epoch descriptor. + pub fn slot_notification_stream( + &self, + ) -> Receiver<(Slot, ViableEpochDescriptor, Epoch>)> { + const CHANNEL_BUFFER_SIZE: usize = 1024; + + let (sink, stream) = channel(CHANNEL_BUFFER_SIZE); + self.slot_notification_sinks.lock().push(sink); + stream + } +} + +impl Future for SassafrasWorker { + type Output = (); + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { + self.inner.as_mut().poll(cx) + } +} + +/// Slot notification sinks. +type SlotNotificationSinks = Arc< + Mutex::Hash, NumberFor, Epoch>)>>>, +>; + +/// Parameters for Sassafras. +pub struct SassafrasWorkerParams { + /// The client to use + pub client: Arc, + /// The keystore that manages the keys of the node. + pub keystore: KeystorePtr, + /// The chain selection strategy + pub select_chain: SC, + /// The environment we are producing blocks for. + pub env: EN, + /// The underlying block-import object to supply our produced blocks to. + /// This must be a `SassafrasBlockImport` or a wrapper of it, otherwise + /// critical consensus logic will be omitted. + pub block_import: I, + /// A sync oracle + pub sync_oracle: SO, + /// Hook into the sync module to control the justification sync process. + pub justification_sync_link: L, + /// Something that can create the inherent data providers. + pub create_inherent_data_providers: CIDP, + /// Force authoring of blocks even if we are offline. + pub force_authoring: bool, + /// State shared between import queue and authoring worker. + pub sassafras_link: SassafrasLink, + /// The offchain transaction pool factory used for tickets submission. + pub offchain_tx_pool_factory: OffchainTransactionPoolFactory, +} + +/// Start the Sassafras worker. 
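+///
+/// Builds the slot-claiming worker and the ticket-generation worker and returns a single
+/// `SassafrasWorker` future that drives both until either of them terminates.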
+pub fn start_sassafras( + SassafrasWorkerParams { + client, + keystore, + select_chain, + env, + block_import, + sync_oracle, + justification_sync_link, + create_inherent_data_providers, + force_authoring, + sassafras_link, + offchain_tx_pool_factory, + }: SassafrasWorkerParams, +) -> Result, ConsensusError> +where + B: BlockT, + C: ProvideRuntimeApi + + ProvideUncles + + BlockchainEvents + + HeaderBackend + + HeaderMetadata + + Send + + Sync + + 'static, + C::Api: SassafrasApi, + SC: SelectChain + 'static, + EN: Environment + Send + Sync + 'static, + EN::Proposer: Proposer, + I: BlockImport + Send + Sync + 'static, + SO: SyncOracle + Send + Sync + Clone + 'static, + L: sc_consensus::JustificationSyncLink + 'static, + CIDP: CreateInherentDataProviders + Send + Sync + 'static, + CIDP::InherentDataProviders: InherentDataProviderExt + Send, + ER: std::error::Error + Send + From + From + 'static, +{ + info!(target: LOG_TARGET, "🍁 Starting authorship worker"); + + let slot_notification_sinks = Arc::new(Mutex::new(Vec::new())); + + let slot_worker = SlotWorker { + client: client.clone(), + block_import, + env, + sync_oracle: sync_oracle.clone(), + justification_sync_link, + force_authoring, + keystore: keystore.clone(), + epoch_changes: sassafras_link.epoch_changes.clone(), + slot_notification_sinks: slot_notification_sinks.clone(), + genesis_config: sassafras_link.genesis_config.clone(), + }; + + let slot_worker = sc_consensus_slots::start_slot_worker( + sassafras_link.genesis_config.slot_duration, + select_chain.clone(), + sc_consensus_slots::SimpleSlotWorkerToSlotWorker(slot_worker), + sync_oracle, + create_inherent_data_providers, + ); + + let tickets_worker = start_tickets_worker( + client.clone(), + keystore, + sassafras_link.epoch_changes.clone(), + select_chain, + offchain_tx_pool_factory, + ); + + let inner = future::select(Box::pin(slot_worker), Box::pin(tickets_worker)); + + Ok(SassafrasWorker { inner: Box::pin(inner.map(|_| ())), slot_notification_sinks }) +} diff --git a/client/consensus/sassafras/src/aux_schema.rs b/client/consensus/sassafras/src/aux_schema.rs new file mode 100644 index 0000000000000..6b56011632671 --- /dev/null +++ b/client/consensus/sassafras/src/aux_schema.rs @@ -0,0 +1,172 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Schema for auxiliary data persistence. +//! +//! 
TODO-SASS-P2 : RENAME FROM aux_schema.rs => aux_data.rs + +use std::{collections::HashSet, sync::Arc}; + +use scale_codec::{Decode, Encode}; + +use sc_client_api::backend::AuxStore; +use sc_consensus_epochs::{EpochChangesFor, SharedEpochChanges}; + +use sc_client_api::{blockchain::Backend as _, Backend as BackendT}; +use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata, Result as ClientResult}; +use sp_consensus_sassafras::SassafrasBlockWeight; +use sp_runtime::traits::{Block as BlockT, NumberFor, SaturatedConversion, Zero}; + +use crate::Epoch; + +const SASSAFRAS_EPOCH_CHANGES_KEY: &[u8] = b"sassafras_epoch_changes"; + +/// The aux storage key used to store the block weight of the given block hash. +fn block_weight_key(block_hash: H) -> Vec { + (b"sassafras_block_weight", block_hash).encode() +} + +fn load_decode(backend: &B, key: &[u8]) -> ClientResult> +where + B: AuxStore, + T: Decode, +{ + match backend.get_aux(key)? { + Some(t) => T::decode(&mut &t[..]).map(Some).map_err(|e| { + ClientError::Backend(format!("Sassafras db is corrupted, Decode error: {}", e)) + }), + None => Ok(None), + } +} + +/// Update the epoch changes to persist after a change. +pub fn write_epoch_changes( + epoch_changes: &EpochChangesFor, + write_aux: F, +) -> R +where + F: FnOnce(&[(&'static [u8], &[u8])]) -> R, +{ + epoch_changes.using_encoded(|s| write_aux(&[(SASSAFRAS_EPOCH_CHANGES_KEY, s)])) +} + +/// Load or initialize persistent epoch change data from backend. +pub fn load_epoch_changes( + backend: &AS, +) -> ClientResult> { + let maybe_epoch_changes = + load_decode::<_, EpochChangesFor>(backend, SASSAFRAS_EPOCH_CHANGES_KEY)?; + + let epoch_changes = SharedEpochChanges::::new( + maybe_epoch_changes.unwrap_or_else(|| EpochChangesFor::::default()), + ); + + // Rebalance the tree after deserialization. this isn't strictly necessary + // since the tree is now rebalanced on every update operation. but since the + // tree wasn't rebalanced initially it's useful to temporarily leave it here + // to avoid having to wait until an import for rebalancing. + epoch_changes.shared_data().rebalance(); + + Ok(epoch_changes) +} + +/// Write the cumulative chain-weight of a block ot aux storage. +pub fn write_block_weight( + block_hash: H, + block_weight: SassafrasBlockWeight, + write_aux: F, +) -> R +where + F: FnOnce(&[(Vec, &[u8])]) -> R, +{ + let key = block_weight_key(block_hash); + block_weight.using_encoded(|s| write_aux(&[(key, s)])) +} + +/// Load the cumulative chain-weight associated with a block. +pub fn load_block_weight( + backend: &B, + block_hash: H, +) -> ClientResult> { + load_decode(backend, block_weight_key(block_hash).as_slice()) +} + +/// Reverts protocol aux data from the best block to at most the last finalized block. +/// +/// Epoch-changes and block weights announced after the revert point are removed. +pub fn revert(backend: Arc, blocks: NumberFor) -> ClientResult<()> +where + Block: BlockT, + Backend: BackendT, +{ + let blockchain = backend.blockchain(); + let best_number = blockchain.info().best_number; + let finalized = blockchain.info().finalized_number; + + let revertible = blocks.min(best_number - finalized); + if revertible == Zero::zero() { + return Ok(()) + } + + let revert_up_to_number = best_number - revertible; + let revert_up_to_hash = blockchain.hash(revert_up_to_number)?.ok_or(ClientError::Backend( + format!("Unexpected hash lookup failure for block number: {}", revert_up_to_number), + ))?; + + // Revert epoch changes tree. 
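+ // If the revert target is the genesis block the tree is simply reset to an empty one,
+ // otherwise it is reverted using the ancestry information provided by the backend.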
+ + // This config is only used on-genesis. + let epoch_changes = load_epoch_changes::(&*backend)?; + let mut epoch_changes = epoch_changes.shared_data(); + + if revert_up_to_number == Zero::zero() { + // Special case, no epoch changes data were present on genesis. + *epoch_changes = EpochChangesFor::::new(); + } else { + let descendent_query = sc_consensus_epochs::descendent_query(blockchain); + epoch_changes.revert(descendent_query, revert_up_to_hash, revert_up_to_number); + } + + // Remove block weights added after the revert point. + + let mut weight_keys = HashSet::with_capacity(revertible.saturated_into()); + + let leaves = backend.blockchain().leaves()?.into_iter().filter(|&leaf| { + sp_blockchain::tree_route(blockchain, revert_up_to_hash, leaf) + .map(|route| route.retracted().is_empty()) + .unwrap_or_default() + }); + + for mut hash in leaves { + loop { + let meta = blockchain.header_metadata(hash)?; + if meta.number <= revert_up_to_number || !weight_keys.insert(block_weight_key(hash)) { + // We've reached the revert point or an already processed branch, stop here. + break + } + hash = meta.parent; + } + } + + let weight_keys: Vec<_> = weight_keys.iter().map(|val| val.as_slice()).collect(); + + // Write epoch changes and remove weights in one shot. + write_epoch_changes::(&epoch_changes, |values| { + AuxStore::insert_aux(&*backend, values, weight_keys.iter()) + }) +} diff --git a/client/consensus/sassafras/src/block_import.rs b/client/consensus/sassafras/src/block_import.rs new file mode 100644 index 0000000000000..1980d8243a5ff --- /dev/null +++ b/client/consensus/sassafras/src/block_import.rs @@ -0,0 +1,530 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Types and functions related to block import. + +use super::*; +use sc_client_api::{AuxDataOperations, FinalityNotification, PreCommitActions}; +use sp_blockchain::BlockStatus; + +/// Block-import handler for Sassafras. +/// +/// This scans each imported block for epoch change announcements. The announcements are +/// tracked in a tree (of all forks), and the import logic validates all epoch change +/// transitions, i.e. whether a given epoch change is expected or whether it is missing. +/// +/// The epoch change tree should be pruned as blocks are finalized. 
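+///
+/// Block import also computes and persists the cumulative chain weight of each imported
+/// block, i.e. the number of blocks in its chain claimed via the primary (ticket-based)
+/// mechanism, which is what the fork-choice rule later compares.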
+pub struct SassafrasBlockImport { + inner: I, + client: Arc, + epoch_changes: SharedEpochChanges, + genesis_config: Epoch, +} + +impl Clone for SassafrasBlockImport { + fn clone(&self) -> Self { + SassafrasBlockImport { + inner: self.inner.clone(), + client: self.client.clone(), + epoch_changes: self.epoch_changes.clone(), + genesis_config: self.genesis_config.clone(), + } + } +} + +fn aux_storage_cleanup( + _client: &C, + _notification: &FinalityNotification, +) -> AuxDataOperations +where + B: BlockT, + C: HeaderMetadata + HeaderBackend, +{ + // TODO-SASS-P3 + Default::default() +} + +impl SassafrasBlockImport +where + C: AuxStore + + HeaderBackend + + HeaderMetadata + + PreCommitActions + + 'static, +{ + /// Constructor. + pub fn new( + inner: I, + client: Arc, + epoch_changes: SharedEpochChanges, + genesis_config: Epoch, + ) -> Self { + let client_weak = Arc::downgrade(&client); + let on_finality = move |notification: &FinalityNotification| { + if let Some(client) = client_weak.upgrade() { + aux_storage_cleanup(client.as_ref(), notification) + } else { + Default::default() + } + }; + client.register_finality_action(Box::new(on_finality)); + + SassafrasBlockImport { inner, client, epoch_changes, genesis_config } + } +} + +struct RecoverableEpochChanges { + old_epoch_changes: EpochChangesFor, + weak_lock: sc_consensus::shared_data::SharedDataLockedUpgradable>, +} + +impl RecoverableEpochChanges { + fn rollback(mut self) { + *self.weak_lock.upgrade() = self.old_epoch_changes; + } +} + +impl SassafrasBlockImport +where + C: AuxStore + HeaderBackend + HeaderMetadata, +{ + // The fork choice rule is that we pick the heaviest chain (i.e. more blocks built + // using primary mechanism), if there's a tie we go with the longest chain. + fn is_new_best( + &self, + curr_weight: u32, + curr_number: NumberFor, + parent_hash: B::Hash, + ) -> Result { + let info = self.client.info(); + + let new_best = if info.best_hash == parent_hash { + true + } else { + let best_weight = aux_schema::load_block_weight(&*self.client, &info.best_hash) + .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? + .ok_or_else(|| { + ConsensusError::ChainLookup("No block weight for best header.".into()) + })?; + curr_weight > best_weight || + (curr_weight == best_weight && curr_number > info.best_number) + }; + + Ok(new_best) + } + + fn import_epoch( + &mut self, + viable_epoch_desc: ViableEpochDescriptor, Epoch>, + next_epoch_desc: NextEpochDescriptor, + slot: Slot, + number: NumberFor, + hash: B::Hash, + parent_hash: B::Hash, + verbose: bool, + auxiliary: &mut Vec<(Vec, Option>)>, + ) -> Result, ConsensusError> { + let mut epoch_changes = self.epoch_changes.shared_data_locked(); + + let log_level = if verbose { log::Level::Debug } else { log::Level::Info }; + + let mut viable_epoch = epoch_changes + .viable_epoch(&viable_epoch_desc, |slot| Epoch::genesis(&self.genesis_config, slot)) + .ok_or_else(|| { + ConsensusError::ClientImport(Error::::FetchEpoch(parent_hash).into()) + })? + .into_cloned(); + + if viable_epoch.as_ref().end_slot() <= slot { + // Some epochs must have been skipped as our current slot fits outside the + // current epoch. We will figure out which is the first skipped epoch and we + // will partially re-use its data for this "recovery" epoch. 
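+ // Illustrative figures: with an epoch starting at slot 100 and an epoch duration of
+ // 10 slots, a block claiming slot 127 gives skipped_epochs = (127 - 100) / 10 = 2, so
+ // the recovery epoch gets index `original + 2` and start slot 100 + 2 * 10 = 120.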
+ let epoch_data = viable_epoch.as_mut(); + let skipped_epochs = (*slot - *epoch_data.start_slot) / epoch_data.epoch_duration; + let original_epoch_idx = epoch_data.epoch_idx; + + // NOTE: notice that we are only updating a local copy of the `Epoch`, this + // makes it so that when we insert the next epoch into `EpochChanges` below + // (after incrementing it), it will use the correct epoch index and start slot. + // We do not update the original epoch that may be reused because there may be + // some other forks where the epoch isn't skipped. + // Not updating the original epoch works because when we search the tree for + // which epoch to use for a given slot, we will search in-depth with the + // predicate `epoch.start_slot <= slot` which will still match correctly without + // requiring to update `start_slot` to the correct value. + epoch_data.epoch_idx += skipped_epochs; + epoch_data.start_slot = + Slot::from(*epoch_data.start_slot + skipped_epochs * epoch_data.epoch_duration); + warn!( + target: LOG_TARGET, + "Epoch(s) skipped from {} to {}", + original_epoch_idx, + epoch_data.epoch_idx + ); + } + + log!( + target: LOG_TARGET, + log_level, + "New epoch {} launching at block {} (block slot {} >= start slot {}).", + viable_epoch.as_ref().epoch_idx, + hash, + slot, + viable_epoch.as_ref().start_slot, + ); + + let next_epoch = viable_epoch.increment(next_epoch_desc); + + log!( + target: LOG_TARGET, + log_level, + "Next epoch starts at slot {}", + next_epoch.as_ref().start_slot, + ); + + let old_epoch_changes = (*epoch_changes).clone(); + + // Prune the tree of epochs not part of the finalized chain or + // that are not live anymore, and then track the given epoch change + // in the tree. + // NOTE: it is important that these operations are done in this + // order, otherwise if pruning after import the `is_descendent_of` + // used by pruning may not know about the block that is being + // imported. + let prune_and_import = || { + prune_finalized(self.client.clone(), &mut epoch_changes)?; + + epoch_changes + .import(descendent_query(&*self.client), hash, number, parent_hash, next_epoch) + .map_err(|e| { + ConsensusError::ClientImport(format!("Error importing epoch changes: {}", e)) + })?; + + Ok(()) + }; + + if let Err(e) = prune_and_import() { + warn!(target: LOG_TARGET, "Failed to launch next epoch: {}", e); + *epoch_changes = old_epoch_changes; + return Err(e) + } + + aux_schema::write_epoch_changes::(&*epoch_changes, |insert| { + auxiliary.extend(insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) + }); + + Ok(RecoverableEpochChanges { old_epoch_changes, weak_lock: epoch_changes.release_mutex() }) + } +} + +impl SassafrasBlockImport +where + Block: BlockT, + Inner: BlockImport + Send + Sync, + Inner::Error: Into, + Client: HeaderBackend + + HeaderMetadata + + AuxStore + + ProvideRuntimeApi + + Send + + Sync, + Client::Api: SassafrasApi + ApiExt, +{ + /// Import whole state after a warp sync. + /// + /// This function makes multiple transactions to the DB. If one of them fails we may + /// end up in an inconsistent state and have to resync + async fn import_state( + &mut self, + mut block: BlockImportParams, + ) -> Result { + let hash = block.post_hash(); + let parent_hash = *block.header.parent_hash(); + let number = *block.header.number(); + + // Check for the unit tag. 
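+ // Blocks imported together with their state (e.g. after a warp sync) are expected to
+ // carry a plain `()` intermediate rather than a full `SassafrasIntermediate`, so only
+ // its presence is checked here.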
+ block.remove_intermediate::<()>(INTERMEDIATE_KEY)?; + + // Import as best + block.fork_choice = Some(ForkChoiceStrategy::Custom(true)); + + // Reset block weight + aux_schema::write_block_weight(hash, 0, |values| { + block + .auxiliary + .extend(values.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) + }); + + // First make the client import the state + let aux = match self.inner.import_block(block).await { + Ok(ImportResult::Imported(aux)) => aux, + Ok(r) => + return Err(ConsensusError::ClientImport(format!( + "Unexpected import result: {:?}", + r + ))), + Err(e) => return Err(e.into()), + }; + + // Read epoch info from the imported state + let curr_epoch = self.client.runtime_api().current_epoch(hash).map_err(|e| { + ConsensusError::ClientImport(sassafras_err::(Error::RuntimeApi(e)).into()) + })?; + let next_epoch = self.client.runtime_api().next_epoch(hash).map_err(|e| { + ConsensusError::ClientImport(sassafras_err::(Error::RuntimeApi(e)).into()) + })?; + + let mut epoch_changes = self.epoch_changes.shared_data(); + epoch_changes.reset(parent_hash, hash, number, curr_epoch.into(), next_epoch.into()); + + aux_schema::write_epoch_changes::(&*epoch_changes, |insert| { + self.client.insert_aux(insert, []) + }) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + + Ok(ImportResult::Imported(aux)) + } +} + +#[async_trait::async_trait] +impl BlockImport for SassafrasBlockImport +where + Block: BlockT, + Inner: BlockImport + Send + Sync, + Inner::Error: Into, + Client: HeaderBackend + + HeaderMetadata + + AuxStore + + ProvideRuntimeApi + + Send + + Sync, + Client::Api: SassafrasApi + ApiExt, +{ + type Error = ConsensusError; + + async fn import_block( + &mut self, + mut block: BlockImportParams, + ) -> Result { + let hash = block.post_hash(); + let number = *block.header.number(); + let info = self.client.info(); + + let block_status = self + .client + .status(hash) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + + // Skip protocol-specific logic if block already on-chain or importing blocks + // during initial sync, otherwise the check for epoch changes will error + // because trying to re-import an epoch change entry or because of missing epoch + // data in the tree, respectivelly. + if info.block_gap.map_or(false, |(s, e)| s <= number && number <= e) || + block_status == BlockStatus::InChain + { + // When re-importing existing block strip away intermediates. + // In case of initial sync intermediates should not be present... + let _ = block.remove_intermediate::>(INTERMEDIATE_KEY); + block.fork_choice = Some(ForkChoiceStrategy::Custom(false)); + return self.inner.import_block(block).await.map_err(Into::into) + } + + if block.with_state() { + return self.import_state(block).await + } + + let viable_epoch_desc = block + .remove_intermediate::>(INTERMEDIATE_KEY)? + .epoch_descriptor; + + let claim = find_slot_claim::(&block.header) + .map_err(|e| ConsensusError::ClientImport(e.into()))?; + let slot = claim.slot; + + let parent_hash = *block.header.parent_hash(); + let parent_header = self + .client + .header(parent_hash) + .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? 
+ .ok_or_else(|| { + ConsensusError::ChainLookup( + sassafras_err(Error::::ParentUnavailable(parent_hash, hash)).into(), + ) + })?; + let parent_slot = find_slot_claim::(&parent_header) + .map(|claim| claim.slot) + .map_err(|e| ConsensusError::ClientImport(e.into()))?; + + // Make sure that slot number is strictly increasing + if slot <= parent_slot { + return Err(ConsensusError::ClientImport( + sassafras_err(Error::::SlotMustIncrease(parent_slot, slot)).into(), + )) + } + + // Check if there's any epoch change expected to happen at this slot. + // `epoch` is the epoch to verify the block under, and `first_in_epoch` is true + // if this is the first block in its chain for that epoch. + + let first_in_epoch = parent_slot < viable_epoch_desc.start_slot(); + + let next_epoch_digest = find_next_epoch_digest::(&block.header) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + + match (first_in_epoch, next_epoch_digest.is_some()) { + (true, false) => + return Err(ConsensusError::ClientImport( + sassafras_err(Error::::ExpectedEpochChange(hash, slot)).into(), + )), + (false, true) => + return Err(ConsensusError::ClientImport( + sassafras_err(Error::::UnexpectedEpochChange).into(), + )), + _ => (), + } + + // Compute the total weight of the chain, including the imported block. + + let parent_weight = aux_schema::load_block_weight(&*self.client, parent_hash) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))? + .or_else(|| (*parent_header.number() == Zero::zero()).then(|| 0)) + .ok_or_else(|| { + ConsensusError::ClientImport( + sassafras_err(Error::::ParentBlockNoAssociatedWeight(hash)).into(), + ) + })?; + + let total_weight = parent_weight + claim.ticket_claim.is_some() as u32; + + aux_schema::write_block_weight(hash, total_weight, |values| { + block + .auxiliary + .extend(values.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) + }); + + // If there's a pending epoch we'll try to update all the involved data while + // saving the previous epoch changes as well. In this way we can revert it if + // there's any error. + let epoch_changes_data = next_epoch_digest + .map(|next_epoch_desc| { + self.import_epoch( + viable_epoch_desc, + next_epoch_desc, + slot, + number, + hash, + parent_hash, + block.origin != BlockOrigin::NetworkInitialSync, + &mut block.auxiliary, + ) + }) + .transpose()?; + + // The fork choice rule is intentionally changed within the context of the + // epoch changes lock to avoid annoying race conditions on what is the current + // best block. That is, the best may be changed by the inner block import. + let is_new_best = self.is_new_best(total_weight, number, parent_hash)?; + block.fork_choice = Some(ForkChoiceStrategy::Custom(is_new_best)); + + let import_result = self.inner.import_block(block).await; + + // Revert to the original epoch changes in case there's an error + // importing the block + // TODO-SASS-P3: shouldn't we check for Ok(Imported(_))? + if import_result.is_err() { + if let Some(data) = epoch_changes_data { + data.rollback(); + } + } + + import_result.map_err(Into::into) + } + + async fn check_block( + &mut self, + block: BlockCheckParams, + ) -> Result { + self.inner.check_block(block).await.map_err(Into::into) + } +} + +/// Gets the best finalized block and its slot, and prunes the given epoch tree. 
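+///
+/// The finalized block's slot is read from its slot-claim digest and passed to the epoch
+/// changes tree so that entries no longer relevant to the finalized chain can be dropped.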
+fn prune_finalized( + client: Arc, + epoch_changes: &mut EpochChangesFor, +) -> Result<(), ConsensusError> +where + B: BlockT, + C: HeaderBackend + HeaderMetadata, +{ + let info = client.info(); + + let finalized_slot = { + let finalized_header = client + .header(info.finalized_hash) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))? + .expect("finalized headers must exist in storage; qed"); + + find_slot_claim::(&finalized_header) + .expect("valid block header have a slot-claim; qed") + .slot + }; + + epoch_changes + .prune_finalized( + descendent_query(&*client), + &info.finalized_hash, + info.finalized_number, + finalized_slot, + ) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + + Ok(()) +} + +/// Produce a Sassafras block-import object to be used later on in the construction of +/// an import-queue. +/// +/// Also returns a link object used to correctly instantiate the import queue +/// and authoring worker. +pub fn block_import( + genesis_config: Epoch, + inner_block_import: I, + client: Arc, +) -> ClientResult<(SassafrasBlockImport, SassafrasLink)> +where + C: AuxStore + + HeaderBackend + + HeaderMetadata + + PreCommitActions + + 'static, +{ + let epoch_changes = aux_schema::load_epoch_changes::(&*client)?; + + prune_finalized(client.clone(), &mut epoch_changes.shared_data())?; + + let link = SassafrasLink { + epoch_changes: epoch_changes.clone(), + genesis_config: genesis_config.clone(), + }; + + let block_import = + SassafrasBlockImport::new(inner_block_import, client, epoch_changes, genesis_config); + + Ok((block_import, link)) +} diff --git a/client/consensus/sassafras/src/inherents.rs b/client/consensus/sassafras/src/inherents.rs new file mode 100644 index 0000000000000..372a7a85eedf2 --- /dev/null +++ b/client/consensus/sassafras/src/inherents.rs @@ -0,0 +1,85 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Sassafras inherents structures and helpers. + +use sp_inherents::{Error, InherentData, InherentIdentifier}; +use std::ops::Deref; + +/// Inherent identifier. +pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"sassslot"; + +/// The type of inherent. +pub type InherentType = sp_consensus_slots::Slot; + +/// Auxiliary trait to extract inherent data. +pub trait SassafrasInherentData { + /// Get inherent data. + fn sassafras_get_inherent_data(&self) -> Result, Error>; + /// Put inherent data. + fn sassafras_put_inherent_data(&mut self, data: &InherentType) -> Result<(), Error>; + /// Replace inherent data. 
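+ /// Unlike the `put` variant, replacing overwrites any value already registered under
+ /// the same identifier instead of returning an error.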
+ fn sassafras_replace_inherent_data(&mut self, data: &InherentType); +} + +impl SassafrasInherentData for InherentData { + fn sassafras_get_inherent_data(&self) -> Result, Error> { + self.get_data(&INHERENT_IDENTIFIER) + } + + fn sassafras_put_inherent_data(&mut self, data: &InherentType) -> Result<(), Error> { + self.put_data(INHERENT_IDENTIFIER, data) + } + + fn sassafras_replace_inherent_data(&mut self, data: &InherentType) { + self.replace_data(INHERENT_IDENTIFIER, data); + } +} + +/// Provides the slot duration inherent data. +pub struct InherentDataProvider(InherentType); + +impl InherentDataProvider { + /// Create new inherent data provider from the given `slot`. + pub fn new(slot: InherentType) -> Self { + Self(slot) + } + + /// Creates the inherent data provider by calculating the slot from the given + /// `timestamp` and `duration`. + pub fn from_timestamp( + timestamp: sp_timestamp::Timestamp, + slot_duration: sp_consensus_slots::SlotDuration, + ) -> Self { + Self(InherentType::from_timestamp(timestamp, slot_duration)) + } +} + +impl Deref for InherentDataProvider { + type Target = InherentType; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +#[async_trait::async_trait] +impl sp_inherents::InherentDataProvider for InherentDataProvider { + async fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), Error> { + inherent_data.sassafras_put_inherent_data(&self.0) + } +} diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs new file mode 100644 index 0000000000000..5138985a9ea98 --- /dev/null +++ b/client/consensus/sassafras/src/lib.rs @@ -0,0 +1,401 @@ +// This file is part of Substrate. + +// This file is part of SubstrateNonepyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! # Sassafras +//! +//! 
TODO-SASS-P2: documentation + +// TODO-SASS-P2: remove this +//#![deny(warnings)] +#![forbid(unsafe_code, missing_docs)] + +use std::{ + collections::BTreeMap, + future::Future, + sync::Arc, + task::{Context, Poll}, + time::Duration, +}; + +use futures::{ + channel::mpsc::{channel, Receiver, Sender}, + prelude::*, +}; +use log::{debug, error, info, log, trace, warn}; +use parking_lot::Mutex; +use prometheus_endpoint::Registry; +use scale_codec::{Decode, Encode}; + +use sc_client_api::{backend::AuxStore, BlockchainEvents, ProvideUncles, UsageProvider}; +use sc_consensus::{ + block_import::{ + BlockCheckParams, BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, + StateAction, + }, + import_queue::{BasicQueue, BoxJustificationImport, DefaultImportQueue}, + Verifier, +}; +use sc_consensus_epochs::{ + descendent_query, Epoch as EpochT, EpochChangesFor, EpochIdentifier, EpochIdentifierPosition, + SharedEpochChanges, ViableEpochDescriptor, +}; +use sc_consensus_slots::{CheckedHeader, InherentDataProviderExt, SlotInfo, StorageChanges}; +use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_TRACE}; +use sp_api::{ApiExt, ProvideRuntimeApi}; +use sp_application_crypto::AppCrypto; +use sp_block_builder::BlockBuilder as BlockBuilderApi; +use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata, Result as ClientResult}; +use sp_consensus::{ + BlockOrigin, Environment, Error as ConsensusError, Proposer, SelectChain, SyncOracle, +}; +use sp_consensus_slots::Slot; +use sp_core::Pair; +use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider as _}; +use sp_keystore::KeystorePtr; +use sp_runtime::{ + generic::OpaqueDigestItemId, + traits::{Block as BlockT, Header, NumberFor, One, Zero}, + DigestItem, +}; + +// Re-export some primitives. +pub use sp_consensus_sassafras::{ + digests::{ConsensusLog, NextEpochDescriptor, SlotClaim}, + vrf, AuthorityId, AuthorityIndex, AuthorityPair, AuthoritySignature, EpochConfiguration, + SassafrasApi, TicketBody, TicketClaim, TicketEnvelope, TicketId, RANDOMNESS_LENGTH, + SASSAFRAS_ENGINE_ID, +}; + +mod authorship; +mod aux_schema; +mod block_import; +mod inherents; +#[cfg(test)] +mod tests; +mod verification; + +// Export core components. +pub use authorship::{start_sassafras, SassafrasWorker, SassafrasWorkerParams}; +pub use aux_schema::revert; +pub use block_import::{block_import, SassafrasBlockImport}; +pub use inherents::{InherentDataProvider, InherentType}; +pub use verification::SassafrasVerifier; + +const LOG_TARGET: &str = "sassafras 🌳"; + +/// Intermediate key for Babe engine. +pub const INTERMEDIATE_KEY: &[u8] = b"sass1"; + +/// Errors encountered by the Sassafras routines. +#[derive(Debug, thiserror::Error)] +pub enum Error { + /// Multiple slot claim digests + #[error("Multiple slot-claim digests")] + MultipleSlotClaimDigests, + /// Missing slot claim digest + #[error("No slot-claim digest found")] + MissingSlotClaimDigest, + /// Multiple epoch change digests + #[error("Multiple epoch change digests")] + MultipleEpochChangeDigests, + /// Could not fetch epoch + #[error("Could not fetch epoch at {0:?}")] + FetchEpoch(B::Hash), + /// Header rejected: too far in the future + #[error("Header {0:?} rejected: too far in the future")] + TooFarInFuture(B::Hash), + /// Parent unavailable. Cannot import + #[error("Parent ({0}) of {1} unavailable. 
Cannot import")] + ParentUnavailable(B::Hash, B::Hash), + /// Slot number must increase + #[error("Slot number must increase: parent slot: {0}, this slot: {1}")] + SlotMustIncrease(Slot, Slot), + /// Header has a bad seal + #[error("Header {0:?} has a bad seal")] + HeaderBadSeal(B::Hash), + /// Header is unsealed + #[error("Header {0:?} is unsealed")] + HeaderUnsealed(B::Hash), + /// Slot author not found + #[error("Slot author not found")] + SlotAuthorNotFound, + /// Bad signature + #[error("Bad signature on {0:?}")] + BadSignature(B::Hash), + /// VRF verification failed + #[error("VRF verification failed")] + VrfVerificationFailed, + /// Missing VRF output entry in the signature + #[error("Missing signed VRF output")] + MissingSignedVrfOutput, + /// Mismatch during verification of reveal public + #[error("Reveal public mismatch")] + RevealPublicMismatch, + /// Unexpected authoring mechanism + #[error("Unexpected authoring mechanism")] + UnexpectedAuthoringMechanism, + /// Could not fetch parent header + #[error("Could not fetch parent header: {0}")] + FetchParentHeader(sp_blockchain::Error), + /// Expected epoch change to happen. + #[error("Expected epoch change to happen at {0:?}, s{1}")] + ExpectedEpochChange(B::Hash, Slot), + /// Unexpected epoch change + #[error("Unexpected epoch change")] + UnexpectedEpochChange, + /// Parent block has no associated weight + #[error("Parent block of {0} has no associated weight")] + ParentBlockNoAssociatedWeight(B::Hash), + /// Check inherents error + #[error("Checking inherents failed: {0}")] + CheckInherents(sp_inherents::Error), + /// Unhandled check inherents error + #[error("Checking inherents unhandled error: {}", String::from_utf8_lossy(.0))] + CheckInherentsUnhandled(sp_inherents::InherentIdentifier), + /// Create inherents error. + #[error("Creating inherents failed: {0}")] + CreateInherents(sp_inherents::Error), + /// Client error + #[error(transparent)] + Client(sp_blockchain::Error), + /// Runtime Api error. + #[error(transparent)] + RuntimeApi(sp_api::ApiError), + /// Fork tree error + #[error(transparent)] + ForkTree(Box>), +} + +impl From> for String { + fn from(error: Error) -> String { + error.to_string() + } +} + +// Convenience function for error logging +fn sassafras_err(err: Error) -> Error { + error!(target: LOG_TARGET, "{}", err); + err +} + +/// Secret seed +#[derive(Debug, Clone, Encode, Decode, PartialEq)] +pub struct TicketSecret { + /// Attempt index + pub(crate) attempt_idx: u32, + /// Secret seed + pub(crate) seed: [u8; 32], +} + +/// Primitive epoch newtype. 
+#[derive(Debug, Clone, Encode, Decode, PartialEq)] +pub struct Epoch { + pub(crate) inner: sp_consensus_sassafras::Epoch, + pub(crate) tickets_aux: BTreeMap, +} + +use std::ops::{Deref, DerefMut}; + +impl Deref for Epoch { + type Target = sp_consensus_sassafras::Epoch; + + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +impl DerefMut for Epoch { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} + +impl From for Epoch { + fn from(epoch: sp_consensus_sassafras::Epoch) -> Self { + Epoch { inner: epoch, tickets_aux: Default::default() } + } +} + +impl EpochT for Epoch { + type NextEpochDescriptor = NextEpochDescriptor; + type Slot = Slot; + + fn increment(&self, descriptor: NextEpochDescriptor) -> Epoch { + sp_consensus_sassafras::Epoch { + epoch_idx: self.epoch_idx + 1, + start_slot: self.start_slot + self.epoch_duration, + slot_duration: self.slot_duration, + epoch_duration: self.epoch_duration, + authorities: descriptor.authorities, + randomness: descriptor.randomness, + config: descriptor.config.unwrap_or(self.config), + } + .into() + } + + fn start_slot(&self) -> Slot { + self.start_slot + } + + fn end_slot(&self) -> Slot { + self.start_slot + self.epoch_duration + } +} + +impl Epoch { + /// Create the genesis epoch (epoch #0). This is defined to start at the slot of + /// the first block, so that has to be provided. + pub fn genesis(config: &Epoch, slot: Slot) -> Epoch { + let mut epoch = config.clone(); + epoch.epoch_idx = 0; + epoch.start_slot = slot; + epoch + } +} + +/// Read protocol configuration from the blockchain state corresponding +/// to the last finalized block +pub fn finalized_configuration(client: &C) -> ClientResult +where + B: BlockT, + C: ProvideRuntimeApi + UsageProvider, + C::Api: SassafrasApi, +{ + let info = client.usage_info().chain; + let hash = info.finalized_state.map(|(hash, _)| hash).unwrap_or_else(|| { + debug!(target: LOG_TARGET, "Reading config from genesis"); + info.genesis_hash + }); + + let epoch = client.runtime_api().current_epoch(hash)?; + Ok(epoch.into()) +} + +/// Intermediate value passed to block importer from authoring or validation logic. +pub struct SassafrasIntermediate { + /// The epoch descriptor. + pub epoch_descriptor: ViableEpochDescriptor, Epoch>, +} + +/// Extract the Sassafras slot claim from the given header. +/// +/// Slot claim digest is mandatory, the function will return `Err` if none is found. +fn find_slot_claim(header: &B::Header) -> Result> { + if header.number().is_zero() { + // Genesis block doesn't contain a slot-claim so let's generate a + // dummy one jyst to not break any invariant in the rest of the code. + use sp_core::crypto::VrfSecret; + let pair = sp_consensus_sassafras::AuthorityPair::from_seed(&[0u8; 32]); + let data = vrf::slot_claim_sign_data(&Default::default(), 0.into(), 0); + return Ok(SlotClaim { + authority_idx: 0, + slot: 0.into(), + ticket_claim: None, + vrf_signature: pair.as_ref().vrf_sign(&data), + }) + } + + let mut claim: Option<_> = None; + for log in header.digest().logs() { + match (log.try_into(), claim.is_some()) { + (Ok(_), true) => return Err(sassafras_err(Error::MultipleSlotClaimDigests)), + (Err(_), _) => trace!(target: LOG_TARGET, "Ignoring digest not meant for us"), + (Ok(c), false) => claim = Some(c), + } + } + claim.ok_or_else(|| sassafras_err(Error::MissingSlotClaimDigest)) +} + +/// Extract the Sassafras epoch change digest from the given header, if it exists. 
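+///
+/// Returns an error if more than one epoch-change digest is present in the header.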
+fn find_next_epoch_digest( + header: &B::Header, +) -> Result, Error> { + let mut epoch_digest: Option<_> = None; + for log in header.digest().logs() { + let log = log.try_to::(OpaqueDigestItemId::Consensus(&SASSAFRAS_ENGINE_ID)); + match (log, epoch_digest.is_some()) { + (Some(ConsensusLog::NextEpochData(_)), true) => + return Err(sassafras_err(Error::MultipleEpochChangeDigests)), + (Some(ConsensusLog::NextEpochData(epoch)), false) => epoch_digest = Some(epoch), + _ => trace!(target: LOG_TARGET, "Ignoring digest not meant for us"), + } + } + + Ok(epoch_digest) +} + +/// State that must be shared between the import queue and the authoring logic. +#[derive(Clone)] +pub struct SassafrasLink { + /// Epoch changes tree + epoch_changes: SharedEpochChanges, + /// Startup configuration. Read from runtime at last finalized block. + genesis_config: Epoch, +} + +impl SassafrasLink { + /// Get the config of this link. + pub fn genesis_config(&self) -> &Epoch { + &self.genesis_config + } +} + +/// Start an import queue for the Sassafras consensus algorithm. +/// +/// This method returns the import queue, some data that needs to be passed to the block authoring +/// logic (`SassafrasLink`), and a future that must be run to completion and is responsible for +/// listening to finality notifications and pruning the epoch changes tree. +/// +/// The block import object provided must be the `SassafrasBlockImport` or a wrapper of it, +/// otherwise crucial import logic will be omitted. +pub fn import_queue( + sassafras_link: SassafrasLink, + block_import: BI, + justification_import: Option>, + client: Arc, + select_chain: SelectChain, + create_inherent_data_providers: CIDP, + spawner: &impl sp_core::traits::SpawnEssentialNamed, + registry: Option<&Registry>, + telemetry: Option, +) -> ClientResult> +where + Client: ProvideRuntimeApi + + HeaderBackend + + HeaderMetadata + + AuxStore + + Send + + Sync + + 'static, + Client::Api: BlockBuilderApi + SassafrasApi + ApiExt, + BI: BlockImport + Send + Sync + 'static, + SelectChain: sp_consensus::SelectChain + 'static, + CIDP: CreateInherentDataProviders + Send + Sync + 'static, + CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, +{ + let verifier = SassafrasVerifier::new( + client, + select_chain, + create_inherent_data_providers, + sassafras_link.epoch_changes, + sassafras_link.genesis_config, + telemetry, + ); + + Ok(BasicQueue::new(verifier, Box::new(block_import), justification_import, spawner, registry)) +} diff --git a/client/consensus/sassafras/src/tests.rs b/client/consensus/sassafras/src/tests.rs new file mode 100644 index 0000000000000..7aadfd96458d5 --- /dev/null +++ b/client/consensus/sassafras/src/tests.rs @@ -0,0 +1,935 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! 
Sassafras client tests + +// TODO @davxy +// Missing tests +// - verify block claimed via primary method +// - tests using tickets to claim slots. Curret tests just doesn't register any on-chain ticket +use super::*; + +use futures::executor::block_on; +use std::sync::Arc; + +use sc_block_builder::BlockBuilderProvider; +use sc_client_api::Finalizer; +use sc_consensus::{BlockImport, BoxJustificationImport}; +use sc_network_test::*; +use sc_transaction_pool_api::{OffchainTransactionPoolFactory, RejectAllTxPool}; +use sp_application_crypto::key_types::SASSAFRAS; +use sp_blockchain::Error as TestError; +use sp_consensus::{DisableProofRecording, NoNetwork as DummyOracle, Proposal}; +use sp_consensus_sassafras::{EphemeralPublic, SlotDuration}; +use sp_core::crypto::UncheckedFrom; +use sp_keyring::BandersnatchKeyring as Keyring; +use sp_keystore::{testing::MemoryKeystore, Keystore}; +use sp_runtime::{Digest, DigestItem}; +use sp_timestamp::Timestamp; + +use substrate_test_runtime_client::{runtime::Block as TestBlock, Backend as TestBackend}; + +// Specialization of generic structures for test context. + +type TestHeader = ::Header; + +type TestClient = substrate_test_runtime_client::client::Client< + TestBackend, + substrate_test_runtime_client::ExecutorDispatch, + TestBlock, + substrate_test_runtime_client::runtime::RuntimeApi, +>; + +type TestSelectChain = + substrate_test_runtime_client::LongestChain; + +type TestBlockImportParams = BlockImportParams; + +type TestViableEpochDescriptor = sc_consensus_epochs::ViableEpochDescriptor; + +// Monomorphization of Sassafras structures for test context. + +type SassafrasIntermediate = crate::SassafrasIntermediate; + +type SassafrasBlockImport = crate::SassafrasBlockImport>; + +type SassafrasVerifier = crate::SassafrasVerifier< + TestBlock, + PeersFullClient, + TestSelectChain, + Box< + dyn CreateInherentDataProviders< + TestBlock, + (), + InherentDataProviders = (InherentDataProvider,), + >, + >, +>; + +type SassafrasLink = crate::SassafrasLink; + +// Epoch duration is slots +const EPOCH_DURATION: u64 = 6; +// Slot duration is milliseconds +const SLOT_DURATION: u64 = 1000; + +struct TestProposer { + client: Arc, + parent_hash: Hash, +} + +impl TestProposer { + fn propose_block(self, digest: Digest) -> TestBlock { + block_on(self.propose(InherentData::default(), digest, Duration::default(), None)) + .expect("Proposing block") + .block + } +} + +impl Proposer for TestProposer { + type Error = TestError; + type Proposal = future::Ready, Self::Error>>; + type ProofRecording = DisableProofRecording; + type Proof = (); + + fn propose( + self, + _: InherentData, + inherent_digests: Digest, + _: Duration, + _: Option, + ) -> Self::Proposal { + let block_builder = + self.client.new_block_at(self.parent_hash, inherent_digests, false).unwrap(); + + let block = match block_builder.build().map_err(|e| e.into()) { + Ok(b) => b.block, + Err(e) => return future::ready(Err(e)), + }; + + future::ready(Ok(Proposal { block, proof: (), storage_changes: Default::default() })) + } +} + +struct TestContext { + client: Arc, + backend: Arc, + link: SassafrasLink, + block_import: SassafrasBlockImport, + verifier: SassafrasVerifier, + keystore: KeystorePtr, +} + +fn create_test_verifier( + client: Arc, + link: &SassafrasLink, + config: Epoch, +) -> SassafrasVerifier { + let slot_duration = config.slot_duration; + let create_inherent_data_providers = Box::new(move |_, _| async move { + let slot = InherentDataProvider::from_timestamp(Timestamp::current(), slot_duration); + 
Ok((slot,)) + }); + + let (_, longest_chain) = TestClientBuilder::with_default_backend().build_with_longest_chain(); + + SassafrasVerifier::new( + client.clone(), + longest_chain, + create_inherent_data_providers, + link.epoch_changes.clone(), + config, + None, + ) +} + +fn create_test_block_import( + client: Arc, + config: Epoch, +) -> (SassafrasBlockImport, SassafrasLink) { + crate::block_import(config, client.clone(), client.clone()) + .expect("can initialize block-import") +} + +fn create_test_keystore(authority: Keyring) -> KeystorePtr { + let keystore = MemoryKeystore::new(); + keystore + .bandersnatch_generate_new(SASSAFRAS, Some(&authority.to_seed())) + .unwrap(); + keystore.into() +} + +fn create_test_epoch() -> Epoch { + sp_consensus_sassafras::Epoch { + epoch_idx: 0, + start_slot: 0.into(), + slot_duration: SlotDuration::from_millis(SLOT_DURATION), + epoch_duration: EPOCH_DURATION, + authorities: vec![ + Keyring::Alice.public().into(), + Keyring::Bob.public().into(), + Keyring::Charlie.public().into(), + ], + randomness: [0; 32], + config: EpochConfiguration { redundancy_factor: 1, attempts_number: 32 }, + } + .into() +} + +impl TestContext { + fn new() -> Self { + let (client, backend) = TestClientBuilder::with_default_backend().build_with_backend(); + let client = Arc::new(client); + + // Note: configuration is loaded using the `TestClient` instance as the runtime-api + // provider. In practice this will use the values defined within the test runtime + // defined in the `substrate_test_runtime` crate. + let config = crate::finalized_configuration(&*client).expect("config available"); + + let (block_import, link) = create_test_block_import(client.clone(), config.clone()); + + // Create a keystore with default testing key + let keystore = create_test_keystore(Keyring::Alice); + + let verifier = create_test_verifier(client.clone(), &link, config.clone()); + + Self { client, backend, link, block_import, verifier, keystore } + } + + // This is a bit hacky solution to use `TestContext` as an `Environment` implementation + fn new_with_pre_built_components( + client: Arc, + backend: Arc, + link: SassafrasLink, + block_import: SassafrasBlockImport, + keystore: KeystorePtr, + ) -> Self { + let verifier = create_test_verifier(client.clone(), &link, link.genesis_config.clone()); + Self { client, backend, link, block_import, verifier, keystore } + } + + fn import_block(&mut self, mut params: TestBlockImportParams) -> Result { + let post_hash = params.post_hash(); + + if params.post_digests.is_empty() { + // Assume that the seal has not been removed yet. Remove it here... + // NOTE: digest may be empty because of some test intentionally clearing up + // the whole digest logs. 
+ if let Some(seal) = params.header.digest_mut().pop() { + params.post_digests.push(seal); + } + } + + block_on(self.block_import.import_block(params)).map(|ir| match ir { + ImportResult::Imported(_) => post_hash, + _ => panic!("Unexpected outcome"), + }) + } + + fn verify_block(&mut self, params: TestBlockImportParams) -> TestBlockImportParams { + block_on(self.verifier.verify(params)).unwrap() + } + + fn epoch_data(&self, parent_hash: &Hash, parent_number: u64, slot: Slot) -> Epoch { + self.link + .epoch_changes + .shared_data() + .epoch_data_for_child_of( + descendent_query(&*self.client), + &parent_hash, + parent_number, + slot, + |slot| Epoch::genesis(&self.link.genesis_config, slot), + ) + .unwrap() + .unwrap() + } + + fn epoch_descriptor( + &self, + parent_hash: &Hash, + parent_number: u64, + slot: Slot, + ) -> TestViableEpochDescriptor { + self.link + .epoch_changes + .shared_data() + .epoch_descriptor_for_child_of( + descendent_query(&*self.client), + &parent_hash, + parent_number, + slot, + ) + .unwrap() + .unwrap() + } + + // Propose a block + fn propose_block(&mut self, parent_hash: Hash, slot: Option) -> TestBlockImportParams { + let parent_header = self.client.header(parent_hash).unwrap().unwrap(); + let parent_number = *parent_header.number(); + + let public = self.keystore.bandersnatch_public_keys(SASSAFRAS)[0]; + + let proposer = block_on(self.init(&parent_header)).unwrap(); + + let slot = slot.unwrap_or_else(|| { + let parent_claim = find_slot_claim::(&parent_header).unwrap(); + parent_claim.slot + 1 + }); + + // TODO DAVXY: here maybe we can use the epoch.randomness??? + let epoch = self.epoch_data(&parent_hash, parent_number, slot); + let sign_data = + vrf::slot_claim_sign_data(&self.link.genesis_config.randomness, slot, epoch.epoch_idx); + let vrf_signature = self + .keystore + .bandersnatch_vrf_sign(SASSAFRAS, &public, &sign_data) + .unwrap() + .unwrap(); + + let claim = SlotClaim { slot, authority_idx: 0, vrf_signature, ticket_claim: None }; + let digest = sp_runtime::generic::Digest { logs: vec![DigestItem::from(&claim)] }; + + let mut block = proposer.propose_block(digest); + + let epoch_descriptor = self.epoch_descriptor(&parent_hash, parent_number, slot); + + // Sign the pre-sealed hash of the block and then add it to the digest. + let hash = block.header.hash(); + let signature: AuthoritySignature = self + .keystore + .bandersnatch_sign(SASSAFRAS, &public, hash.as_ref()) + .unwrap() + .unwrap() + .into(); + let seal = DigestItem::from(&signature); + block.header.digest_mut().push(seal); + + let mut params = BlockImportParams::new(BlockOrigin::Own, block.header); + params.fork_choice = Some(ForkChoiceStrategy::LongestChain); + params.body = Some(block.extrinsics); + params.insert_intermediate(INTERMEDIATE_KEY, SassafrasIntermediate { epoch_descriptor }); + + params + } + + // Propose and import a new block on top of the given parent. + // This skips verification. + fn propose_and_import_block(&mut self, parent_hash: Hash, slot: Option) -> Hash { + let params = self.propose_block(parent_hash, slot); + self.import_block(params).unwrap() + } + + // Propose and import n valid blocks that are built on top of the given parent. + // The proposer takes care of producing epoch change digests according to the epoch + // duration (which is set by the test runtime). 
+	fn propose_and_import_blocks(&mut self, mut parent_hash: Hash, n: usize) -> Vec<Hash> {
+		let mut hashes = Vec::with_capacity(n);
+		for _ in 0..n {
+			let hash = self.propose_and_import_block(parent_hash, None);
+			hashes.push(hash);
+			parent_hash = hash;
+		}
+		hashes
+	}
+}
+
+// Check that the protocol config returned by the runtime interface is equal to the expected one.
+#[test]
+fn tests_assumptions_sanity_check() {
+	let env = TestContext::new();
+	assert_eq!(env.link.genesis_config, create_test_epoch());
+	// The protocol needs at least two VRF IOs.
+	assert!(sp_core::bandersnatch::vrf::MAX_VRF_IOS >= 2);
+}
+
+#[test]
+fn claim_secondary_slots_works() {
+	let mut epoch = create_test_epoch();
+	epoch.epoch_idx = 1;
+	epoch.start_slot = 6.into();
+	epoch.randomness = [2; 32];
+
+	let authorities = [Keyring::Alice, Keyring::Bob, Keyring::Charlie];
+
+	let mut assignments = vec![usize::MAX; epoch.epoch_duration as usize];
+
+	for (auth_idx, auth_id) in authorities.iter().enumerate() {
+		let keystore = create_test_keystore(*auth_id);
+
+		for slot in 0..epoch.epoch_duration {
+			if let Some((claim, auth_id2)) =
+				authorship::claim_slot(slot.into(), &mut epoch, None, &keystore)
+			{
+				assert_eq!(claim.authority_idx as usize, auth_idx);
+				assert_eq!(claim.slot, Slot::from(slot));
+				assert_eq!(claim.ticket_claim, None);
+				assert_eq!(auth_id.public(), auth_id2.into());
+
+				// Check that this slot has not been assigned before.
+				assert_eq!(assignments[slot as usize], usize::MAX);
+				assignments[slot as usize] = auth_idx;
+			}
+		}
+	}
+	// Check that every slot has been assigned.
+	assert!(assignments.iter().all(|v| *v != usize::MAX));
+	println!("secondary slots assignments: {:?}", assignments);
+}
+
+#[test]
+fn claim_primary_slots_works() {
+	// Here the test is deterministic: if a node's epoch `tickets_aux` contains the entry
+	// corresponding to the presented ticket, then claiming the slot should simply return
+	// that ticket's auxiliary information.
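As a self-contained restatement of the cases exercised below: a primary claim needs both the node-local auxiliary data for the presented ticket and the matching authority key in the keystore, and the auxiliary entry is consumed in the process. The names (`LocalState`, `try_primary_claim`) are illustrative, not the crate's API.

use std::collections::{BTreeMap, BTreeSet};

type TicketId = u128;
type AuthorityIdx = u32;

/// Illustrative stand-ins for the node-local state involved in a primary claim.
struct LocalState {
	/// Ticket auxiliary data collected while generating tickets: id -> owning authority.
	tickets_aux: BTreeMap<TicketId, AuthorityIdx>,
	/// Indices of the epoch authorities whose keys are present in the local keystore.
	local_authorities: BTreeSet<AuthorityIdx>,
}

/// Claim a slot via the primary (ticket) method, mirroring the cases in the test:
/// - no aux data for the ticket              => None
/// - aux data + local authority key          => Some(authority index)
/// - aux data but key owned by someone else  => None (aux entry still consumed)
fn try_primary_claim(state: &mut LocalState, ticket_id: TicketId) -> Option<AuthorityIdx> {
	let authority_idx = state.tickets_aux.remove(&ticket_id)?;
	state.local_authorities.contains(&authority_idx).then_some(authority_idx)
}

fn main() {
	let mut state = LocalState {
		tickets_aux: BTreeMap::from([(123, 0)]),
		local_authorities: BTreeSet::from([0]),
	};
	assert_eq!(try_primary_claim(&mut state, 999), None); // unknown ticket
	assert_eq!(try_primary_claim(&mut state, 123), Some(0)); // known ticket, local key
	assert!(state.tickets_aux.is_empty()); // aux entry has been consumed
}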
+ let mut epoch = create_test_epoch(); + epoch.randomness = [2; 32]; + epoch.epoch_idx = 1; + epoch.start_slot = 6.into(); + + let keystore = create_test_keystore(Keyring::Alice); + let alice_authority_idx = 0_u32; + + let ticket_id = 123; + let erased_public = EphemeralPublic::unchecked_from([0; 32]); + let revealed_public = erased_public.clone(); + let ticket_body = TicketBody { attempt_idx: 0, erased_public, revealed_public }; + let ticket_secret = TicketSecret { attempt_idx: 0, seed: [0; 32] }; + + // Fail if we have authority key in our keystore but not ticket aux data + // ticket-aux = None && authority-key = Some => claim = None + + let claim = authorship::claim_slot( + 0.into(), + &mut epoch, + Some((ticket_id, ticket_body.clone())), + &keystore, + ); + + assert!(claim.is_none()); + assert!(epoch.tickets_aux.is_empty()); + + // Success if we have ticket aux data and the authority key in our keystore + // ticket-aux = Some && authority-key = Some => claim = Some + + epoch + .tickets_aux + .insert(ticket_id, (alice_authority_idx, ticket_secret.clone())); + + let (claim, auth_id) = authorship::claim_slot( + 0.into(), + &mut epoch, + Some((ticket_id, ticket_body.clone())), + &keystore, + ) + .unwrap(); + + assert!(epoch.tickets_aux.is_empty()); + assert_eq!(claim.authority_idx, alice_authority_idx); + assert_eq!(auth_id, Keyring::Alice.public().into()); + + // Fail if we have ticket aux data but not the authority key in out keystore + // ticket-aux = Some && authority-key = None => claim = None + + epoch.tickets_aux.insert(ticket_id, (alice_authority_idx + 1, ticket_secret)); + + let claim = + authorship::claim_slot(0.into(), &mut epoch, Some((ticket_id, ticket_body)), &keystore); + assert!(claim.is_none()); + assert!(epoch.tickets_aux.is_empty()); +} + +#[test] +fn import_rejects_block_without_slot_claim() { + let mut env = TestContext::new(); + + let mut import_params = env.propose_block(env.client.info().genesis_hash, Some(999.into())); + // Remove logs from the header + import_params.header.digest_mut().logs.clear(); + + let res = env.import_block(import_params); + + assert_eq!(res.unwrap_err().to_string(), "Import failed: No slot-claim digest found"); +} + +#[test] +fn import_rejects_block_with_unexpected_epoch_changes() { + let mut env = TestContext::new(); + + let hash1 = env.propose_and_import_block(env.client.info().genesis_hash, None); + + let mut import_params = env.propose_block(hash1, None); + // Insert an epoch change announcement when it is not required. + let digest_data = ConsensusLog::NextEpochData(NextEpochDescriptor { + authorities: env.link.genesis_config.authorities.clone(), + randomness: env.link.genesis_config.randomness, + config: None, + }) + .encode(); + let digest_item = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, digest_data); + let digest = import_params.header.digest_mut(); + digest.logs.insert(digest.logs.len() - 1, digest_item); + + let res = env.import_block(import_params); + + assert_eq!(res.unwrap_err().to_string(), "Import failed: Unexpected epoch change"); +} + +#[test] +fn import_rejects_block_with_missing_epoch_changes() { + let mut env = TestContext::new(); + + let blocks = + env.propose_and_import_blocks(env.client.info().genesis_hash, EPOCH_DURATION as usize); + + let mut import_params = env.propose_block(blocks[EPOCH_DURATION as usize - 1], None); + + let digest = import_params.header.digest_mut(); + // Remove the epoch change announcement. 
+ // (Implementation detail: should be the second to last entry, just before the seal) + digest.logs.remove(digest.logs.len() - 2); + + let res = env.import_block(import_params); + + assert!(res + .unwrap_err() + .to_string() + .contains("Import failed: Expected epoch change to happen")); +} + +#[test] +fn importing_block_one_sets_genesis_epoch() { + let mut env = TestContext::new(); + + let block_hash = env.propose_and_import_block(env.client.info().genesis_hash, Some(999.into())); + + let epoch_for_second_block = env.epoch_data(&block_hash, 1, 1000.into()); + let genesis_epoch = Epoch::genesis(&env.link.genesis_config, 999.into()); + assert_eq!(epoch_for_second_block, genesis_epoch); +} + +#[test] +fn allows_to_skip_epochs() { + // Test scenario. + // Epoch lenght: 6 slots + // + // Block# : [ 1 2 3 4 5 6 ][ 7 - - - - - ][ - - - - - - ][ 8 ... ] + // Slot# : [ 1 2 3 4 5 6 ][ 7 8 9 10 11 12 ][ 13 14 15 16 17 18 ][ 19 ... ] + // Epoch# : [ 0 ][ 1 ][ skipped ][ 3 ] + // + // As a recovery strategy, a fallback epoch 3 is created by reusing part of the + // configuration created for epoch 2. + let mut env = TestContext::new(); + + let blocks = env.propose_and_import_blocks(env.client.info().genesis_hash, 7); + + // First block after the a skipped epoch (block #8 @ slot #19) + let block = env.propose_and_import_block(*blocks.last().unwrap(), Some(19.into())); + + let epoch_changes = env.link.epoch_changes.shared_data(); + let epochs: Vec<_> = epoch_changes.tree().iter().collect(); + assert_eq!(epochs.len(), 3); + assert_eq!(*epochs[0].0, blocks[0]); + assert_eq!(*epochs[0].1, 1); + assert_eq!(*epochs[1].0, blocks[6]); + assert_eq!(*epochs[1].1, 7); + assert_eq!(*epochs[2].0, block); + assert_eq!(*epochs[2].1, 8); + + // Fist block in E0 (B1)) announces E0 (this is special) + let data = epoch_changes + .epoch(&EpochIdentifier { + position: EpochIdentifierPosition::Genesis0, + hash: blocks[0], + number: 1, + }) + .unwrap(); + assert_eq!(data.epoch_idx, 0); + assert_eq!(data.start_slot, Slot::from(1)); + + // First block in E0 (B1) also announces E1 + let data = epoch_changes + .epoch(&EpochIdentifier { + position: EpochIdentifierPosition::Genesis1, + hash: blocks[0], + number: 1, + }) + .unwrap(); + assert_eq!(data.epoch_idx, 1); + assert_eq!(data.start_slot, Slot::from(7)); + + // First block in E1 (B7) announces E2 + // NOTE: config is used by E3 without altering epoch node values. + // This will break as soon as our assumptions about how fork-tree traversal works + // are not met anymore (this is a good thing) + let data = epoch_changes + .epoch(&EpochIdentifier { + position: EpochIdentifierPosition::Regular, + hash: blocks[6], + number: 7, + }) + .unwrap(); + assert_eq!(data.epoch_idx, 2); + assert_eq!(data.start_slot, Slot::from(13)); + + // First block in E3 (B8) announced E4. 
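The slot arithmetic behind this scenario can be double-checked in isolation. This is a simplified model assuming plain integer slots and an epoch index derived directly from the genesis slot; it is not the epoch-changes tree logic itself.

/// Epoch index a slot falls into, given the slot the chain started at and the epoch duration.
fn epoch_idx(slot: u64, genesis_slot: u64, epoch_duration: u64) -> u64 {
	(slot - genesis_slot) / epoch_duration
}

/// First slot of a given epoch index.
fn epoch_start_slot(epoch_idx: u64, genesis_slot: u64, epoch_duration: u64) -> u64 {
	genesis_slot + epoch_idx * epoch_duration
}

fn main() {
	let (genesis_slot, epoch_duration) = (1, 6);
	// Block #7 at slot 7 opens epoch 1 ...
	assert_eq!(epoch_idx(7, genesis_slot, epoch_duration), 1);
	// ... epoch 2 (slots 13..18) produced no blocks ...
	assert_eq!(epoch_idx(13, genesis_slot, epoch_duration), 2);
	// ... so the block at slot 19 lands directly in epoch 3,
	assert_eq!(epoch_idx(19, genesis_slot, epoch_duration), 3);
	// and the epoch it announces (epoch 4) starts at slot 25, as asserted just below.
	assert_eq!(epoch_start_slot(4, genesis_slot, epoch_duration), 25);
}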
+ let data = epoch_changes + .epoch(&EpochIdentifier { + position: EpochIdentifierPosition::Regular, + hash: block, + number: 8, + }) + .unwrap(); + assert_eq!(data.epoch_idx, 4); + assert_eq!(data.start_slot, Slot::from(25)); +} + +#[test] +fn finalization_prunes_epoch_changes_and_removes_weights() { + let mut env = TestContext::new(); + + let canon = env.propose_and_import_blocks(env.client.info().genesis_hash, 21); + + let _fork1 = env.propose_and_import_blocks(canon[0], 10); + let _fork2 = env.propose_and_import_blocks(canon[7], 10); + let _fork3 = env.propose_and_import_blocks(canon[11], 8); + + let epoch_changes = env.link.epoch_changes.clone(); + + // We should be tracking a total of 9 epochs in the fork tree + assert_eq!(epoch_changes.shared_data().tree().iter().count(), 8); + // And only one root + assert_eq!(epoch_changes.shared_data().tree().roots().count(), 1); + + // Pre-finalize scenario. + // + // X(#y): a block (number y) announcing the next epoch data. + // Information for epoch starting at block #19 is produced on three different forks + // at block #13. + // + // Finalize block #14 + // + // *---------------- F(#13) --#18 < fork #2 + // / + // A(#1) ---- B(#7) ----#8----------#12---- C(#13) ---- D(#19) ------#21 < canon + // \ \ + // \ *---- G(#13) ---- H(#19) ---#20 < fork #3 + // \ + // *-----E(#7)---#11 < fork #1 + + // Finalize block #10 so that on next epoch change the tree is pruned + env.client.finalize_block(canon[13], None, true).unwrap(); + let canon_tail = env.propose_and_import_blocks(*canon.last().unwrap(), 4); + + // Post-finalize scenario. + // + // B(#7)------ C(#13) ---- D(#19) ------Z(#25) + + let epoch_changes = epoch_changes.shared_data(); + let epoch_changes: Vec<_> = epoch_changes.tree().iter().map(|(h, _, _)| *h).collect(); + + assert_eq!(epoch_changes, vec![canon[6], canon[12], canon[18], canon_tail[3]]); + + // TODO-SASS-P3 + //todo!("Requires aux_storage_cleanup"); +} + +#[test] +fn revert_prunes_epoch_changes_and_removes_weights() { + let mut env = TestContext::new(); + + let canon = env.propose_and_import_blocks(env.client.info().genesis_hash, 21); + let fork1 = env.propose_and_import_blocks(canon[0], 10); + let fork2 = env.propose_and_import_blocks(canon[7], 10); + let fork3 = env.propose_and_import_blocks(canon[11], 8); + + let epoch_changes = env.link.epoch_changes.clone(); + + // We should be tracking a total of 9 epochs in the fork tree + assert_eq!(epoch_changes.shared_data().tree().iter().count(), 8); + // And only one root + assert_eq!(epoch_changes.shared_data().tree().roots().count(), 1); + + // Pre-revert scenario. + // + // X(#y): a block (number y) announcing the next epoch data. + // Information for epoch starting at block #19 is produced on three different forks + // at block #13. + // One branch starts before the revert point (epoch data should be maintained). + // One branch starts after the revert point (epoch data should be removed). + // + // *----------------- F(#13) --#18 < fork #2 + // / + // A(#1) ---- B(#7) ----#8----+-----#12----- C(#13) ---- D(#19) ------#21 < canon + // \ ^ \ + // \ revert *---- G(#13) ---- H(#19) ---#20 < fork #3 + // \ to #10 + // *-----E(#7)---#11 < fork #1 + + // Revert canon chain to block #10 (best(21) - 11) + crate::revert(env.backend.clone(), 11).unwrap(); + + // Post-revert expected scenario. + // + // + // *----------------- F(#13) --#18 + // / + // A(#1) ---- B(#7) ----#8----#10 + // \ + // *------ E(#7)---#11 + + // Load and check epoch changes. 
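The expected outcome boils down to one rule: an epoch-change announcement survives the revert exactly when the block carrying it does not descend from a reverted canonical block. Below is a toy model using a plain parent map with the names from the diagram above; it is not the real fork-tree implementation.

use std::collections::HashMap;

type Name = &'static str;

/// Toy parent map with the same shape as the scenario above
/// (uninteresting intermediate blocks are collapsed).
fn parent_of() -> HashMap<Name, Name> {
	HashMap::from([
		("B", "A"), ("c8", "B"), ("c12", "c8"), ("C", "c12"), ("D", "C"), // canonical chain
		("E", "A"),               // fork #1, branches at block #1
		("F", "c8"),              // fork #2, branches at block #8
		("G", "c12"), ("H", "G"), // fork #3, branches at block #12
	])
}

/// True if `block` is `ancestor` or one of its descendants.
fn descends_from(parents: &HashMap<Name, Name>, mut block: Name, ancestor: Name) -> bool {
	loop {
		if block == ancestor {
			return true
		}
		match parents.get(block) {
			Some(&parent) => block = parent,
			None => return false,
		}
	}
}

fn main() {
	let parents = parent_of();
	// Canonical blocks above the revert point (#10): these get reverted.
	let reverted = ["c12", "C", "D"];
	// Blocks carrying an epoch-change announcement in the scenario above.
	let announcing = ["A", "B", "C", "D", "E", "F", "G", "H"];
	let kept: Vec<Name> = announcing
		.into_iter()
		.filter(|&b| !reverted.into_iter().any(|r| descends_from(&parents, b, r)))
		.collect();
	// The same four survivors the test expects: canon[0], canon[6], fork1[5], fork2[4].
	assert_eq!(kept, ["A", "B", "E", "F"]);
}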
+ + let actual_nodes = aux_schema::load_epoch_changes::(&*env.client) + .unwrap() + .shared_data() + .tree() + .iter() + .map(|(h, _, _)| *h) + .collect::>(); + + let expected_nodes = vec![ + canon[0], // A + canon[6], // B + fork2[4], // F + fork1[5], // E + ]; + + assert_eq!(actual_nodes, expected_nodes); + + let weight_data_check = |hashes: &[Hash], expected: bool| { + hashes.iter().all(|hash| { + aux_schema::load_block_weight(&*env.client, hash).unwrap().is_some() == expected + }) + }; + assert!(weight_data_check(&canon[..10], true)); + assert!(weight_data_check(&canon[10..], false)); + assert!(weight_data_check(&fork1, true)); + assert!(weight_data_check(&fork2, true)); + assert!(weight_data_check(&fork3, false)); +} + +#[test] +fn revert_stops_at_last_finalized() { + let mut env = TestContext::new(); + + let canon = env.propose_and_import_blocks(env.client.info().genesis_hash, 3); + + // Finalize best block + env.client.finalize_block(canon[2], None, false).unwrap(); + + // Reverts canon chain down to last finalized block + crate::revert(env.backend.clone(), 100).expect("revert should work for baked test scenario"); + + let weight_data_check = |hashes: &[Hash], expected: bool| { + hashes.iter().all(|hash| { + aux_schema::load_block_weight(&*env.client, hash).unwrap().is_some() == expected + }) + }; + assert!(weight_data_check(&canon, true)); +} + +#[test] +fn verify_block_claimed_via_secondary_method() { + let mut env = TestContext::new(); + + let blocks = env.propose_and_import_blocks(env.client.info().genesis_hash, 7); + + let in_params = env.propose_block(blocks[6], Some(9.into())); + + let _out_params = env.verify_block(in_params); +} + +// //================================================================================================= +// // More complex tests involving communication between multiple nodes. +// // +// // These tests are performed via a specially crafted test network. +// // Closer to integration test than unit tests... 
+// //================================================================================================= + +impl Environment for TestContext { + type CreateProposer = future::Ready>; + type Proposer = TestProposer; + type Error = TestError; + + fn init(&mut self, parent_header: &TestHeader) -> Self::CreateProposer { + future::ready(Ok(TestProposer { + client: self.client.clone(), + parent_hash: parent_header.hash(), + })) + } +} + +struct PeerData { + link: SassafrasLink, + block_import: SassafrasBlockImport, +} + +type SassafrasPeer = Peer, SassafrasBlockImport>; + +#[derive(Default)] +struct SassafrasTestNet { + peers: Vec, +} + +impl TestNetFactory for SassafrasTestNet { + type BlockImport = SassafrasBlockImport; + type Verifier = SassafrasVerifier; + type PeerData = Option; + + fn make_block_import( + &self, + client: PeersClient, + ) -> ( + BlockImportAdapter, + Option>, + Option, + ) { + let client = client.as_client(); + + let config = crate::finalized_configuration(&*client).expect("config available"); + let (block_import, link) = create_test_block_import(client.clone(), config); + + (BlockImportAdapter::new(block_import.clone()), None, Some(PeerData { link, block_import })) + } + + fn make_verifier(&self, client: PeersClient, maybe_link: &Option) -> Self::Verifier { + let client = client.as_client(); + + let data = maybe_link.as_ref().expect("data provided to verifier instantiation"); + + let config = crate::finalized_configuration(&*client).expect("config available"); + create_test_verifier(client.clone(), &data.link, config) + } + + fn peer(&mut self, i: usize) -> &mut SassafrasPeer { + &mut self.peers[i] + } + + fn peers(&self) -> &Vec { + &self.peers + } + + fn peers_mut(&mut self) -> &mut Vec { + &mut self.peers + } + + fn mut_peers)>(&mut self, closure: F) { + closure(&mut self.peers); + } +} + +// Multiple nodes authoring and validating blocks +#[tokio::test] +async fn sassafras_network_progress() { + env_logger::init(); + let net = SassafrasTestNet::new(3); + let net = Arc::new(Mutex::new(net)); + + let peers = [Keyring::Alice, Keyring::Bob, Keyring::Charlie]; + + let mut import_notifications = Vec::new(); + let mut sassafras_workers = Vec::new(); + + for (peer_id, auth_id) in peers.iter().enumerate() { + let mut net = net.lock(); + let peer = net.peer(peer_id); + let client = peer.client().as_client(); + let backend = peer.client().as_backend(); + let select_chain = peer.select_chain().expect("Full client has select_chain"); + let keystore = create_test_keystore(*auth_id); + let data = peer.data.as_ref().expect("sassafras link set up during initialization"); + + let env = TestContext::new_with_pre_built_components( + client.clone(), + backend.clone(), + data.link.clone(), + data.block_import.clone(), + keystore.clone(), + ); + + // Run the imported block number is less than five and we don't receive a block produced + // by us and one produced by another peer. 
+ let mut got_own = false; + let mut got_other = false; + let import_futures = client + .import_notification_stream() + .take_while(move |n| { + future::ready( + n.header.number() < &5 || { + if n.origin == BlockOrigin::Own { + got_own = true; + } else { + got_other = true; + } + !(got_own && got_other) + }, + ) + }) + .for_each(|_| future::ready(())); + import_notifications.push(import_futures); + + //let slot_duration = data.link.genesis_config.slot_duration(); + let client_clone = client.clone(); + let create_inherent_data_providers = Box::new(move |parent, _| { + // Get the slot of the parent header and just increase this slot. + // + // Below we will running everything in one big future. If we would use + // time based slot, it can happen that on babe instance imports a block from + // another babe instance and then tries to build a block in the same slot making + // this test fail. + let parent_header = client_clone.header(parent).ok().flatten().unwrap(); + let slot = Slot::from(find_slot_claim::(&parent_header).unwrap().slot + 1); + async move { Ok((InherentDataProvider::new(slot),)) } + }); + let sassafras_params = SassafrasWorkerParams { + client: client.clone(), + keystore, + select_chain, + env, + block_import: data.block_import.clone(), + sassafras_link: data.link.clone(), + sync_oracle: DummyOracle, + justification_sync_link: (), + force_authoring: false, + create_inherent_data_providers, + offchain_tx_pool_factory: OffchainTransactionPoolFactory::new( + RejectAllTxPool::default(), + ), + }; + let sassafras_worker = start_sassafras(sassafras_params).unwrap(); + sassafras_workers.push(sassafras_worker); + } + + future::select( + futures::future::poll_fn(move |cx| { + let mut net = net.lock(); + net.poll(cx); + net.peers().iter().for_each(|peer| { + peer.failed_verifications().iter().next().map(|(h, e)| { + panic!("Verification failed for {:?}: {}", h, e); + }); + }); + Poll::<()>::Pending + }), + future::select(future::join_all(import_notifications), future::join_all(sassafras_workers)), + ) + .await; +} diff --git a/client/consensus/sassafras/src/verification.rs b/client/consensus/sassafras/src/verification.rs new file mode 100644 index 0000000000000..08679331a70e1 --- /dev/null +++ b/client/consensus/sassafras/src/verification.rs @@ -0,0 +1,467 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Types and functions related to block verification. + +use super::*; +use crate::inherents::SassafrasInherentData; +use sp_core::{ + crypto::{VrfPublic, Wraps}, + ed25519::Pair as EphemeralPair, +}; + +// Allowed slot drift. +const MAX_SLOT_DRIFT: u64 = 1; + +/// Verified information +struct VerifiedHeaderInfo { + /// Authority index. + authority_id: AuthorityId, + /// Seal digest found within the header. 
+ seal_digest: DigestItem, +} + +/// Check a header has been signed by the right key. If the slot is too far in +/// the future, an error will be returned. If successful, returns the pre-header +/// and the digest item containing the seal. +/// +/// The seal must be the last digest. Otherwise, the whole header is considered +/// unsigned. This is required for security and must not be changed. +/// +/// The given header can either be from a primary or secondary slot assignment, +/// with each having different validation logic. +fn check_header( + mut header: B::Header, + claim: &SlotClaim, + slot_now: Slot, + epoch: &Epoch, + origin: BlockOrigin, + maybe_ticket: Option<(TicketId, TicketBody)>, +) -> Result, Error> { + // Check that the slot is not in the future, with some drift being allowed. + if claim.slot > slot_now + MAX_SLOT_DRIFT { + // header.digest_mut().push(seal); + return Ok(CheckedHeader::Deferred(header, claim.slot)) + } + + let Some(authority_id) = epoch.authorities.get(claim.authority_idx as usize) else { + return Err(sassafras_err(Error::SlotAuthorNotFound)) + }; + + // Check header signature (aka the Seal) + + let seal_digest = header + .digest_mut() + .pop() + .ok_or_else(|| sassafras_err(Error::HeaderUnsealed(header.hash())))?; + + let signature = AuthoritySignature::try_from(&seal_digest) + .map_err(|_| sassafras_err(Error::HeaderBadSeal(header.hash())))?; + + let pre_hash = header.hash(); + if !AuthorityPair::verify(&signature, &pre_hash, authority_id) { + return Err(sassafras_err(Error::BadSignature(pre_hash))) + } + + // Optionally check ticket ownership + + let mut sign_data = vrf::slot_claim_sign_data(&epoch.randomness, claim.slot, epoch.epoch_idx); + + match (&maybe_ticket, &claim.ticket_claim) { + (Some((_ticket_id, ticket_body)), ticket_claim) => { + debug!(target: LOG_TARGET, "checking primary"); + + sign_data.push_transcript_data(&ticket_body.encode()); + + // Revealed key check + let revealed_input = vrf::revealed_key_input( + &epoch.randomness, + ticket_body.attempt_idx, + epoch.epoch_idx, + ); + let revealed_output = claim + .vrf_signature + .outputs + .get(1) + .ok_or_else(|| sassafras_err(Error::MissingSignedVrfOutput))?; + let revealed_seed = vrf::make_revealed_key_seed(&revealed_input, &revealed_output); + let revealed_public = EphemeralPair::from_seed(&revealed_seed).public(); + if revealed_public != ticket_body.revealed_public { + return Err(sassafras_err(Error::RevealPublicMismatch)) + } + sign_data.push_vrf_input(revealed_input).expect("Can't fail; qed"); + + if let Some(ticket_claim) = ticket_claim { + // Optional check, increases some score... 
+ let challenge = sign_data.challenge::<32>(); + if !EphemeralPair::verify( + &ticket_claim.erased_signature, + &challenge, + &ticket_body.erased_public, + ) { + return Err(sassafras_err(Error::BadSignature(pre_hash))) + } + } + }, + (None, None) => { + debug!(target: LOG_TARGET, "checking secondary"); + let idx = authorship::secondary_authority_index(claim.slot, epoch); + if idx != claim.authority_idx { + error!(target: LOG_TARGET, "Bad secondary authority index"); + return Err(Error::SlotAuthorNotFound) + } + }, + (None, Some(_)) => + if origin != BlockOrigin::NetworkInitialSync { + warn!(target: LOG_TARGET, "Unexpected primary authoring mechanism"); + return Err(Error::UnexpectedAuthoringMechanism) + }, + } + + // Check per-slot vrf proof + if !authority_id.as_inner_ref().vrf_verify(&sign_data, &claim.vrf_signature) { + warn!(target: LOG_TARGET, ">>> VERIFICATION FAILED (pri = {})!!!", maybe_ticket.is_some()); + return Err(sassafras_err(Error::VrfVerificationFailed)) + } + warn!(target: LOG_TARGET, ">>> VERIFICATION OK (pri = {})!!!", maybe_ticket.is_some()); + + let info = VerifiedHeaderInfo { authority_id: authority_id.clone(), seal_digest }; + + Ok(CheckedHeader::Checked(header, info)) +} + +/// A verifier for Sassafras blocks. +pub struct SassafrasVerifier { + client: Arc, + select_chain: SelectChain, + create_inherent_data_providers: CIDP, + epoch_changes: SharedEpochChanges, + genesis_config: Epoch, + telemetry: Option, +} + +impl SassafrasVerifier { + /// Constructor. + pub fn new( + client: Arc, + select_chain: SelectChain, + create_inherent_data_providers: CIDP, + epoch_changes: SharedEpochChanges, + genesis_config: Epoch, + telemetry: Option, + ) -> Self { + SassafrasVerifier { + client, + select_chain, + create_inherent_data_providers, + epoch_changes, + genesis_config, + telemetry, + } + } +} + +impl SassafrasVerifier +where + Block: BlockT, + Client: AuxStore + HeaderBackend + HeaderMetadata + ProvideRuntimeApi, + Client::Api: BlockBuilderApi + SassafrasApi, + SelectChain: sp_consensus::SelectChain, + CIDP: CreateInherentDataProviders, +{ + async fn check_inherents( + &self, + block: Block, + at_hash: Block::Hash, + inherent_data: InherentData, + create_inherent_data_providers: CIDP::InherentDataProviders, + ) -> Result<(), Error> { + let inherent_res = self + .client + .runtime_api() + .check_inherents(at_hash, block, inherent_data) + .map_err(Error::RuntimeApi)?; + + if !inherent_res.ok() { + for (i, e) in inherent_res.into_errors() { + match create_inherent_data_providers.try_handle_error(&i, &e).await { + Some(res) => res.map_err(|e| Error::CheckInherents(e))?, + None => return Err(Error::CheckInherentsUnhandled(i)), + } + } + } + + Ok(()) + } + + async fn check_and_report_equivocation( + &self, + slot_now: Slot, + slot: Slot, + header: &Block::Header, + author: &AuthorityId, + origin: &BlockOrigin, + ) -> Result<(), Error> { + // Don't report any equivocations during initial sync as they are most likely stale. + if *origin == BlockOrigin::NetworkInitialSync { + return Ok(()) + } + + // Check if authorship of this header is an equivocation and return a proof if so. + let equivocation_proof = match sc_consensus_slots::check_equivocation( + &*self.client, + slot_now, + slot, + header, + author, + ) + .map_err(Error::Client)? 
+ { + Some(proof) => proof, + None => return Ok(()), + }; + + info!( + target: LOG_TARGET, + "🌳 Slot author {:?} is equivocating at slot {} with headers {:?} and {:?}", + author, + slot, + equivocation_proof.first_header.hash(), + equivocation_proof.second_header.hash(), + ); + + // Get the best block on which we will build and send the equivocation report. + let best_hash = self + .select_chain + .best_chain() + .await + .map(|h| h.hash()) + .map_err(|e| Error::Client(e.into()))?; + + // Generate a key ownership proof. We start by trying to generate the key owernship proof + // at the parent of the equivocating header, this will make sure that proof generation is + // successful since it happens during the on-going session (i.e. session keys are available + // in the state to be able to generate the proof). This might fail if the equivocation + // happens on the first block of the session, in which case its parent would be on the + // previous session. If generation on the parent header fails we try with best block as + // well. + let generate_key_owner_proof = |at_hash: Block::Hash| { + self.client + .runtime_api() + .generate_key_ownership_proof(at_hash, slot, equivocation_proof.offender.clone()) + .map_err(Error::RuntimeApi) + }; + + let parent_hash = *header.parent_hash(); + let key_owner_proof = match generate_key_owner_proof(parent_hash)? { + Some(proof) => proof, + None => match generate_key_owner_proof(best_hash)? { + Some(proof) => proof, + None => { + debug!(target: "babe", "Equivocation offender is not part of the authority set."); + return Ok(()) + }, + }, + }; + + // submit equivocation report at best block. + self.client + .runtime_api() + .submit_report_equivocation_unsigned_extrinsic( + best_hash, + equivocation_proof, + key_owner_proof, + ) + .map_err(Error::RuntimeApi)?; + + info!(target: LOG_TARGET, "Submitted equivocation report for author {:?}", author); + + Ok(()) + } +} + +#[async_trait::async_trait] +impl Verifier + for SassafrasVerifier +where + Block: BlockT, + Client: HeaderMetadata + + HeaderBackend + + ProvideRuntimeApi + + Send + + Sync + + AuxStore, + Client::Api: BlockBuilderApi + SassafrasApi, + SelectChain: sp_consensus::SelectChain, + CIDP: CreateInherentDataProviders + Send + Sync, + CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, +{ + async fn verify( + &mut self, + mut block: BlockImportParams, + ) -> Result, String> { + trace!( + target: LOG_TARGET, + "🌳 Verifying origin: {:?} header: {:?} justification(s): {:?} body: {:?}", + block.origin, + block.header, + block.justifications, + block.body, + ); + + if block.with_state() { + // When importing whole state we don't calculate epoch descriptor, but rather + // read it from the state after import. We also skip all verifications + // because there's no parent state and we trust the sync module to verify + // that the state is correct and finalized. + // Just insert a tag to notify that this is indeed a Sassafras block to the + // `BlockImport` implementation. 
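The proof-generation fallback in `check_and_report_equivocation` above (first the equivocating header's parent, then the best block) can be condensed into a tiny helper. The closure-based `prove` argument is a stand-in for the runtime's `generate_key_ownership_proof` call, not the real API.

/// Try to generate a key-ownership proof at the parent of the equivocating header first,
/// falling back to the best block; return `None` if neither block can prove ownership.
fn key_owner_proof<H, P>(parent: H, best: H, mut prove: impl FnMut(H) -> Option<P>) -> Option<P> {
	prove(parent).or_else(|| prove(best))
}

fn main() {
	// Pretend the proof is only obtainable at the best block, e.g. because the equivocation
	// happened on the first block of a session.
	let proof = key_owner_proof("parent", "best", |at| (at == "best").then_some(vec![0u8; 4]));
	assert!(proof.is_some());

	// If neither attempt succeeds, the offender is not part of the authority set and, as in
	// the code above, the report is simply dropped.
	let proof = key_owner_proof("parent", "best", |_| None::<Vec<u8>>);
	assert!(proof.is_none());
}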
+ block.insert_intermediate(INTERMEDIATE_KEY, ()); + return Ok(block) + } + + let hash = block.header.hash(); + let parent_hash = *block.header.parent_hash(); + + let create_inherent_data_providers = self + .create_inherent_data_providers + .create_inherent_data_providers(parent_hash, ()) + .await + .map_err(|e| Error::::Client(sp_consensus::Error::from(e).into()))?; + + let slot_now = create_inherent_data_providers.slot(); + + let parent_header_metadata = self + .client + .header_metadata(parent_hash) + .map_err(Error::::FetchParentHeader)?; + + let claim = find_slot_claim::(&block.header)?; + + let (checked_header, epoch_descriptor) = { + let epoch_changes = self.epoch_changes.shared_data(); + let epoch_descriptor = epoch_changes + .epoch_descriptor_for_child_of( + descendent_query(&*self.client), + &parent_hash, + parent_header_metadata.number, + claim.slot, + ) + .map_err(|e| Error::::ForkTree(Box::new(e)))? + .ok_or(Error::::FetchEpoch(parent_hash))?; + let viable_epoch = epoch_changes + .viable_epoch(&epoch_descriptor, |slot| Epoch::genesis(&self.genesis_config, slot)) + .ok_or(Error::::FetchEpoch(parent_hash))?; + + let maybe_ticket = self + .client + .runtime_api() + .slot_ticket(parent_hash, claim.slot) + .ok() + .unwrap_or_else(|| None); + + let checked_header = check_header::( + block.header.clone(), + &claim, + slot_now, + viable_epoch.as_ref(), + block.origin, + maybe_ticket, + )?; + + (checked_header, epoch_descriptor) + }; + + match checked_header { + CheckedHeader::Checked(pre_header, verified_info) => { + // The header is valid but let's check if there was something else already + // proposed at the same slot by the given author. If there was, we will + // report the equivocation to the runtime. + if let Err(err) = self + .check_and_report_equivocation( + slot_now, + claim.slot, + &block.header, + &verified_info.authority_id, + &block.origin, + ) + .await + { + warn!( + target: LOG_TARGET, + "Error checking/reporting equivocation: {}", err + ); + } + + // If the body is passed through, we need to use the runtime to check that the + // internally-set timestamp in the inherents actually matches the slot set in the + // seal. + if let Some(inner_body) = block.body { + let new_block = Block::new(pre_header.clone(), inner_body); + + if !block.state_action.skip_execution_checks() { + // TODO-SASS-P3 : @davxy??? 
DOC + let mut inherent_data = create_inherent_data_providers + .create_inherent_data() + .await + .map_err(Error::::CreateInherents)?; + inherent_data.sassafras_replace_inherent_data(&claim.slot); + self.check_inherents( + new_block.clone(), + parent_hash, + inherent_data, + create_inherent_data_providers, + ) + .await?; + } + + let (_, inner_body) = new_block.deconstruct(); + block.body = Some(inner_body); + } + + trace!(target: LOG_TARGET, "Checked {:?}; importing.", pre_header); + telemetry!( + self.telemetry; + CONSENSUS_TRACE; + "sassafras.checked_and_importing"; + "pre_header" => ?pre_header, + ); + + block.header = pre_header; + block.post_hash = Some(hash); + block.post_digests.push(verified_info.seal_digest); + block.insert_intermediate( + INTERMEDIATE_KEY, + SassafrasIntermediate:: { epoch_descriptor }, + ); + + Ok(block) + }, + CheckedHeader::Deferred(a, b) => { + debug!(target: LOG_TARGET, "Checking {:?} failed; {:?}, {:?}.", hash, a, b); + telemetry!( + self.telemetry; + CONSENSUS_DEBUG; + "sassafras.header_too_far_in_future"; + "hash" => ?hash, "a" => ?a, "b" => ?b + ); + Err(Error::::TooFarInFuture(hash).into()) + }, + } + } +} diff --git a/client/keystore/src/local.rs b/client/keystore/src/local.rs index 97bc7c71a4a58..9fe69113aae22 100644 --- a/client/keystore/src/local.rs +++ b/client/keystore/src/local.rs @@ -23,6 +23,8 @@ use sp_application_crypto::{AppCrypto, AppPair, IsWrappedBy}; use sp_core::bandersnatch; #[cfg(feature = "bls-experimental")] use sp_core::{bls377, bls381}; +#[cfg(feature = "bls-experimental")] +use sp_core::{bls377, bls381}; use sp_core::{ crypto::{ByteArray, ExposeSecret, KeyTypeId, Pair as CorePair, SecretString, VrfSecret}, ecdsa, ed25519, sr25519, diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 6dcd8b8e4bace..9c81421f0516d 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -36,7 +36,7 @@ exit-future = "0.2.0" pin-project = "1.0.12" serde = "1.0.163" serde_json = "1.0.85" -sc-keystore = { version = "4.0.0-dev", path = "../keystore" } +sc-keystore = { version = "4.0.0-dev", path = "../keystore", features = ["bandersnatch-experimental"] } sp-runtime = { version = "24.0.0", path = "../../primitives/runtime" } sp-trie = { version = "22.0.0", path = "../../primitives/trie" } sp-externalities = { version = "0.19.0", path = "../../primitives/externalities" } diff --git a/frame/sassafras/Cargo.toml b/frame/sassafras/Cargo.toml new file mode 100644 index 0000000000000..ea25275ba5c91 --- /dev/null +++ b/frame/sassafras/Cargo.toml @@ -0,0 +1,52 @@ +[package] +name = "pallet-sassafras" +version = "0.3.4-dev" +authors = ["Parity Technologies "] +edition = "2021" +license = "Apache-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +description = "Consensus extension module for Sassafras consensus." 
+readme = "README.md" +publish = false + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +scale-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +log = { version = "0.4.17", default-features = false } +pallet-session = { version = "4.0.0-dev", default-features = false, path = "../session" } +pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = "../timestamp" } +sp-consensus-sassafras = { version = "0.3.4-dev", default-features = false, path = "../../primitives/consensus/sassafras", features = ["serde"] } +sp-io = { version = "23.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } + +[dev-dependencies] +array-bytes = "6.1" +sp-core = { version = "21.0.0", path = "../../primitives/core" } +env_logger = "0.10" + +[features] +default = ["std"] +std = [ + "scale-codec/std", + "frame-benchmarking/std", + "frame-support/std", + "frame-system/std", + "log/std", + "pallet-session/std", + "pallet-timestamp/std", + "scale-info/std", + "sp-consensus-sassafras/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", +] +runtime-benchmarks = ["frame-benchmarking/runtime-benchmarks"] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/sassafras/README.md b/frame/sassafras/README.md new file mode 100644 index 0000000000000..b14c7045f6001 --- /dev/null +++ b/frame/sassafras/README.md @@ -0,0 +1,16 @@ +Sassafras +========= + +Consensus extension module for Sassafras consensus. + +TODO: protocol description + +### References + +* [w3f introduction](https://research.web3.foundation/en/latest/polkadot/block-production/SASSAFRAS.html): + a fairly friendly overview to the protocol building blocks; +* [research paper](https://github.com/w3f/research/tree/master/docs/papers/sass) + from Web3 foundation; +* [ring-vrg paper](https://github.com/w3f/ring-vrf/papers/ring_vrf) + from Web3 foundation; +* [zcash zk-snarks](https://arxiv.org/pdf/2008.00881.pdf); diff --git a/frame/sassafras/src/benchmarking.rs b/frame/sassafras/src/benchmarking.rs new file mode 100644 index 0000000000000..f4788b06b46fd --- /dev/null +++ b/frame/sassafras/src/benchmarking.rs @@ -0,0 +1,53 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Benchmarks for the Sassafras pallet. 
+ +use super::*; + +use frame_benchmarking::benchmarks; +use frame_system::RawOrigin; +use sp_consensus_sassafras::VrfOutput; + +// Makes a dummy ticket envelope. +// The resulting ticket-id is not very important and is expected to be below the +// configured threshold (which is guaranteed because we are using mock::TEST_EPOCH_CONFIGURATION). +fn make_dummy_ticket(attempt_idx: u32) -> TicketEnvelope { + let mut output_enc: &[u8] = &[ + 0x0c, 0x1a, 0x83, 0x5e, 0x56, 0x9b, 0x18, 0xa0, 0xd9, 0x13, 0x39, 0x7e, 0xb9, 0x5a, 0x39, + 0x83, 0xf3, 0xc5, 0x73, 0xf6, 0xb1, 0x35, 0xa6, 0x48, 0xa3, 0x83, 0xac, 0x3b, 0xb8, 0x43, + 0xa7, 0x3d, + ]; + let output = VrfOutput::decode(&mut output_enc).unwrap(); + let data = TicketData { attempt_idx, erased_public: Default::default() }; + TicketEnvelope { data, vrf_preout: output, ring_proof: () } +} + +benchmarks! { + submit_tickets { + let x in 0 .. ::MaxTickets::get(); + + let tickets: BoundedVec::MaxTickets> = + (0..x).map(make_dummy_ticket).collect::>().try_into().unwrap(); + }: _(RawOrigin::None, tickets) + + impl_benchmark_test_suite!( + Pallet, + crate::mock::new_test_ext(1), + crate::mock::Test, + ) +} diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs new file mode 100644 index 0000000000000..e54fc52918446 --- /dev/null +++ b/frame/sassafras/src/lib.rs @@ -0,0 +1,1011 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Extension module for Sassafras consensus. +//! +//! Sassafras is a constant-time block production protocol that aims to ensure that +//! there is exactly one block produced with constant time intervals rather multiple +//! or none. +//! +//! We run a lottery to distribute block production slots in an epoch and to fix the +//! order validators produce blocks by the beginning of an epoch. +//! +//! Each validator signs the same VRF input and publish the output onchain. This +//! value is their lottery ticket that can be validated against their public key. +//! +//! We want to keep lottery winners secret, i.e. do not publish their public keys. +//! At the begin of the epoch all the validators tickets are published but not their +//! public keys. +//! +//! A valid tickets are validated when an honest validator reclaims it on block +//! production. +//! +//! To prevent submission of fake tickets, resulting in empty slots, the validator +//! when submitting the ticket accompanies it with a SNARK of the statement: "Here's +//! my VRF output that has been generated using the given VRF input and my secret +//! key. I'm not telling you my keys, but my public key is among those of the +//! nominated validators", that is validated before the lottery. +//! +//! To anonymously publish the ticket to the chain a validator sends their tickets +//! to a random validator who later puts it on-chain as a transaction. 
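The lottery described above can be pictured with a deliberately simplified, self-contained simulation: a plain hash stands in for the bandersnatch VRF, there is no ring proof, and the threshold is arbitrary, but the generate, filter-by-threshold, publish and sort flow is the same.

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

/// Toy "VRF": a plain hash of (secret, randomness, attempt). The real protocol uses a
/// bandersnatch VRF plus a ring proof; this only illustrates the data flow.
fn ticket_id(secret: u64, randomness: u64, attempt: u32) -> u64 {
	let mut h = DefaultHasher::new();
	(secret, randomness, attempt).hash(&mut h);
	h.finish()
}

fn main() {
	let randomness = 42_u64;
	let attempts = 4_u32;
	// Hypothetical per-validator secrets; in the real protocol these never leave the node.
	let secrets = [11_u64, 22, 33];
	// Accept roughly half of the id space (the real threshold depends on the epoch config).
	let threshold = u64::MAX / 2;

	// Off-chain: every validator generates `attempts` candidate tickets and keeps the winners.
	let mut tickets: Vec<(u64, usize)> = Vec::new();
	for (validator, secret) in secrets.iter().enumerate() {
		for attempt in 0..attempts {
			let id = ticket_id(*secret, randomness, attempt);
			if id < threshold {
				tickets.push((id, validator));
			}
		}
	}

	// On-chain: tickets are published (without revealing who owns them), then sorted so the
	// upcoming epoch has a fixed per-slot assignment known in advance.
	tickets.sort_unstable();
	for (slot, (id, owner)) in tickets.iter().enumerate() {
		// Only the owner can later claim the slot by re-deriving `id` from its secret.
		println!("slot {slot}: ticket {id:016x} (owned by validator {owner})");
	}
}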
+ +#![deny(warnings)] +#![warn(unused_must_use, unsafe_code, unused_variables, unused_imports, missing_docs)] +#![cfg_attr(not(feature = "std"), no_std)] + +use log::{debug, error, warn}; +use scale_codec::{Decode, Encode, MaxEncodedLen}; +use scale_info::TypeInfo; + +use frame_support::{traits::Get, weights::Weight, BoundedVec, WeakBoundedVec}; +use frame_system::{ + offchain::{SendTransactionTypes, SubmitTransaction}, + pallet_prelude::{BlockNumberFor, HeaderFor}, +}; +use sp_consensus_sassafras::{ + digests::{ConsensusLog, NextEpochDescriptor, SlotClaim}, + vrf, AuthorityId, Epoch, EpochConfiguration, EquivocationProof, Randomness, Slot, SlotDuration, + TicketBody, TicketEnvelope, TicketId, RANDOMNESS_LENGTH, SASSAFRAS_ENGINE_ID, +}; +use sp_io::hashing; +use sp_runtime::{ + generic::DigestItem, + traits::{One, Saturating}, + BoundToRuntimeAppPublic, +}; +use sp_std::prelude::Vec; + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; +#[cfg(all(feature = "std", test))] +mod mock; +#[cfg(all(feature = "std", test))] +mod tests; + +// To manage epoch changes via session pallet instead of the built-in method +// method (`SameAuthoritiesForever`). +pub mod session; + +// Re-export pallet symbols. +pub use pallet::*; + +const LOG_TARGET: &str = "sassafras::runtime 🌳"; + +const RANDOMNESS_VRF_CONTEXT: &[u8] = b"SassafrasRandomness"; + +/// Tickets related metadata that is commonly used together. +#[derive(Debug, Default, PartialEq, Encode, Decode, TypeInfo, MaxEncodedLen, Clone, Copy)] +pub struct TicketsMetadata { + /// Number of tickets available into the tickets buffers. + /// The array index is computed as epoch index modulo 2. + pub tickets_count: [u32; 2], + /// Number of outstanding tickets segments requiring to be sorted and stored + /// in one of the epochs tickets buffer + pub segments_count: u32, +} + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + /// The Sassafras pallet. + #[pallet::pallet] + pub struct Pallet(_); + + /// Configuration parameters. + #[pallet::config] + pub trait Config: frame_system::Config + SendTransactionTypes> { + /// The amount of time, in milliseconds, that each slot should last. + #[pallet::constant] + type SlotDuration: Get; + + /// The amount of time, in slots, that each epoch should last. + #[pallet::constant] + type EpochDuration: Get; + + /// Sassafras requires some logic to be triggered on every block to query for whether an + /// epoch has ended and to perform the transition to the next epoch. + /// + /// Typically, the `ExternalTrigger` type should be used. An internal trigger should only + /// be used when no other module is responsible for changing authority set. + type EpochChangeTrigger: EpochChangeTrigger; + + /// Max number of authorities allowed + #[pallet::constant] + type MaxAuthorities: Get; + + /// Max number of tickets that are considered for each epoch. + #[pallet::constant] + type MaxTickets: Get; + } + + /// Sassafras runtime errors. + #[pallet::error] + pub enum Error { + /// Submitted configuration is invalid. + InvalidConfiguration, + } + + /// Current epoch index. + #[pallet::storage] + #[pallet::getter(fn epoch_index)] + pub type EpochIndex = StorageValue<_, u64, ValueQuery>; + + /// Current epoch authorities. + #[pallet::storage] + #[pallet::getter(fn authorities)] + pub type Authorities = + StorageValue<_, WeakBoundedVec, ValueQuery>; + + /// Next epoch authorities. 
+ #[pallet::storage] + #[pallet::getter(fn next_authorities)] + pub type NextAuthorities = + StorageValue<_, WeakBoundedVec, ValueQuery>; + + /// The slot at which the first epoch started. + /// This is `None` until the first block is imported on chain. + #[pallet::storage] + #[pallet::getter(fn genesis_slot)] + pub type GenesisSlot = StorageValue<_, Slot, ValueQuery>; + + /// Current slot number. + #[pallet::storage] + #[pallet::getter(fn current_slot)] + pub type CurrentSlot = StorageValue<_, Slot, ValueQuery>; + + /// Current epoch randomness. + #[pallet::storage] + #[pallet::getter(fn randomness)] + pub type CurrentRandomness = StorageValue<_, Randomness, ValueQuery>; + + /// Next epoch randomness. + #[pallet::storage] + #[pallet::getter(fn next_randomness)] + pub type NextRandomness = StorageValue<_, Randomness, ValueQuery>; + + /// Randomness accumulator. + #[pallet::storage] + pub type RandomnessAccumulator = StorageValue<_, Randomness, ValueQuery>; + + /// Temporary value (cleared at block finalization) which is `Some` + /// if per-block initialization has already been called for current block. + #[pallet::storage] + #[pallet::getter(fn initialized)] + pub type Initialized = StorageValue<_, SlotClaim>; + + /// The configuration for the current epoch. + #[pallet::storage] + #[pallet::getter(fn config)] + pub type EpochConfig = StorageValue<_, EpochConfiguration, ValueQuery>; + + /// The configuration for the next epoch. + #[pallet::storage] + #[pallet::getter(fn next_config)] + pub type NextEpochConfig = StorageValue<_, EpochConfiguration>; + + /// Pending epoch configuration change that will be set as `NextEpochConfig` when the next + /// epoch is enacted. + /// In other words, a config change submitted during epoch N will be enacted on epoch N+2. + /// This is to maintain coherence for already submitted tickets for epoch N+1 that where + /// computed using configuration parameters stored for epoch N+1. + #[pallet::storage] + pub(super) type PendingEpochConfigChange = StorageValue<_, EpochConfiguration>; + + /// Stored tickets metadata. + #[pallet::storage] + pub type TicketsMeta = StorageValue<_, TicketsMetadata, ValueQuery>; + + /// Tickets identifiers. + /// The key is a tuple composed by: + /// - `u8` equal to epoch-index mod 2 + /// - `u32` equal to the slot-index. + #[pallet::storage] + pub type TicketsIds = StorageMap<_, Identity, (u8, u32), TicketId>; + + /// Tickets to be used for current and next epoch. + #[pallet::storage] + pub type TicketsData = StorageMap<_, Identity, TicketId, TicketBody>; + + /// Next epoch tickets accumulator. + /// Special `u32::MAX` key is reserved for a partially sorted segment. + // This bound is set as `MaxTickets` in the unlucky case where we receive one Ticket at a time. + // The max capacity is thus MaxTickets^2. Not much, given that we save `TicketIds` here. + #[pallet::storage] + pub type NextTicketsSegments = + StorageMap<_, Identity, u32, BoundedVec, ValueQuery>; + + /// Parameters used to verify tickets validity via ring-proof + /// In practice: Updatable Universal Reference String and the seed. + #[pallet::storage] + #[pallet::getter(fn ring_context)] + pub type RingContext = StorageValue<_, vrf::RingContext>; + + /// Genesis configuration for Sassafras protocol. + #[pallet::genesis_config] + #[derive(frame_support::DefaultNoBound)] + pub struct GenesisConfig { + /// Genesis authorities. + pub authorities: Vec, + /// Genesis epoch configuration. 
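A small sketch of the double buffering implied by `TicketsMetadata::tickets_count` and the `(u8, u32)` key of `TicketsIds` above: tickets for the current and the next epoch live side by side, addressed by the epoch index modulo 2, so enacting an epoch change simply flips the tag and reuses the stale buffer. The map-based storage below is illustrative, not the pallet's actual storage types.

use std::collections::HashMap;

type TicketId = u128;

/// Illustrative stand-in for the on-chain tickets storage: one logical buffer per epoch parity.
#[derive(Default)]
struct Tickets {
	/// Number of tickets stored for each parity (mirrors `TicketsMetadata::tickets_count`).
	count: [u32; 2],
	/// Ticket ids keyed by (epoch parity, slot index), mirroring the `TicketsIds` map.
	ids: HashMap<(u8, u32), TicketId>,
}

impl Tickets {
	fn epoch_tag(epoch_idx: u64) -> u8 {
		(epoch_idx & 1) as u8
	}

	/// Store the (already sorted) tickets for the given epoch.
	fn store(&mut self, epoch_idx: u64, tickets: &[TicketId]) {
		let tag = Self::epoch_tag(epoch_idx) as usize;
		// Drop whatever the epoch two periods ago left in this buffer ...
		for slot_idx in 0..self.count[tag] {
			self.ids.remove(&(tag as u8, slot_idx));
		}
		// ... then store the new ticket ids, one per slot index.
		self.count[tag] = tickets.len() as u32;
		for (slot_idx, id) in tickets.iter().enumerate() {
			self.ids.insert((tag as u8, slot_idx as u32), *id);
		}
	}

	/// Ticket assigned to a slot of the given epoch, if any.
	fn slot_ticket(&self, epoch_idx: u64, slot_idx: u32) -> Option<TicketId> {
		self.ids.get(&(Self::epoch_tag(epoch_idx), slot_idx)).copied()
	}
}

fn main() {
	let mut tickets = Tickets::default();
	tickets.store(7, &[1, 2, 3]); // next epoch (odd parity)
	tickets.store(8, &[4, 5]);    // the epoch after (even parity), does not clobber epoch 7
	assert_eq!(tickets.slot_ticket(7, 0), Some(1));
	assert_eq!(tickets.slot_ticket(8, 1), Some(5));
}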
+ pub epoch_config: EpochConfiguration, + /// Phantom config + #[serde(skip)] + pub _phantom: sp_std::marker::PhantomData, + } + + #[pallet::genesis_build] + impl BuildGenesisConfig for GenesisConfig { + fn build(&self) { + Pallet::::initialize_genesis_authorities(&self.authorities); + EpochConfig::::put(self.epoch_config.clone()); + + // TODO: davxy... remove for pallet tests + warn!(target: LOG_TARGET, "Constructing testing ring context (in build)"); + let ring_ctx = vrf::RingContext::new_testing(); + warn!(target: LOG_TARGET, "... done"); + RingContext::::set(Some(ring_ctx.clone())); + } + } + + #[pallet::hooks] + impl Hooks> for Pallet { + /// Block initialization + fn on_initialize(now: BlockNumberFor) -> Weight { + // Since `initialize` can be called twice (e.g. if session pallet is used) + // let's ensure that we only do the initialization once per block. + if Self::initialized().is_some() { + return Weight::zero() + } + + let claim = >::digest() + .logs + .iter() + .filter_map(|item| item.pre_runtime_try_to::(&SASSAFRAS_ENGINE_ID)) + .next() + .expect("Valid block must have a slot claim. qed"); + + CurrentSlot::::put(claim.slot); + + // On the first non-zero block (i.e. block #1) this is where the first epoch + // (epoch #0) actually starts. We need to adjust internal storage accordingly. + if *GenesisSlot::::get() == 0 { + debug!(target: LOG_TARGET, ">>> GENESIS SLOT: {:?}", claim.slot); + Self::initialize_genesis_epoch(claim.slot) + } + + Initialized::::put(claim); + + // Enact epoch change, if necessary. + T::EpochChangeTrigger::trigger::(now); + + Weight::zero() + } + + /// Block finalization + fn on_finalize(_now: BlockNumberFor) { + // TODO @davxy: check if is a disabled validator? + + // At the end of the block, we can safely include the new VRF output from + // this block into the randomness accumulator. If we've determined + // that this block was the first in a new epoch, the changeover logic has + // already occurred at this point. + let claim = Initialized::::take() + .expect("Finalization is called after initialization; qed."); + + let claim_input = vrf::slot_claim_input( + &Self::randomness(), + CurrentSlot::::get(), + EpochIndex::::get(), + ); + let claim_output = claim + .vrf_signature + .outputs + .get(0) + .expect("Presence should have been already checked by the client; qed"); + let randomness = + claim_output.make_bytes::(RANDOMNESS_VRF_CONTEXT, &claim_input); + + Self::deposit_randomness(&randomness); + + // If we are in the epoch's second half, we start sorting the next epoch tickets. + let epoch_duration = T::EpochDuration::get(); + let current_slot_idx = Self::slot_index(claim.slot); + if current_slot_idx >= epoch_duration / 2 { + let mut metadata = TicketsMeta::::get(); + if metadata.segments_count != 0 { + let epoch_idx = EpochIndex::::get() + 1; + let epoch_tag = (epoch_idx & 1) as u8; + let slots_left = epoch_duration.checked_sub(current_slot_idx).unwrap_or(1); + Self::sort_tickets( + u32::max(1, metadata.segments_count / slots_left as u32), + epoch_tag, + &mut metadata, + ); + TicketsMeta::::set(metadata); + } + } + } + } + + #[pallet::call] + impl Pallet { + /// Submit next epoch tickets. + /// + /// TODO-SASS-P3: this is an unsigned extrinsic. Can we remove the weight? 
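+ /// In short, the extrinsic builds a ring verifier from the on-chain ring context and the
+ /// authorities, recomputes each ticket id from the next-epoch randomness, discards the
+ /// entries over the ticket threshold or carrying an invalid ring signature, and stores the
+ /// survivors as a new unsorted segment in `NextTicketsSegments`.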
+ #[pallet::call_index(0)] + #[pallet::weight({0})] + pub fn submit_tickets( + origin: OriginFor, + tickets: BoundedVec, + ) -> DispatchResult { + ensure_none(origin)?; + + debug!(target: LOG_TARGET, "Received {} tickets", tickets.len()); + + debug!(target: LOG_TARGET, "LOADING RING CTX"); + let Some(ring_ctx) = RingContext::::get() else { + return Err("Ring context not initialized".into()) + }; + debug!(target: LOG_TARGET, "... Loaded"); + + // TODO @davxy this should be done once per epoch and with the NEXT EPOCH AUTHORITIES!!! + // For this we need the `ProofVerifier` to be serializable @svasilyev + let pks: Vec<_> = Self::authorities().iter().map(|auth| *auth.as_ref()).collect(); + debug!(target: LOG_TARGET, "Building verifier. Ring size {}", pks.len()); + let verifier = ring_ctx.verifier(pks.as_slice()).unwrap(); + debug!(target: LOG_TARGET, "... Built"); + + // Check tickets score + let next_auth = Self::next_authorities(); + let next_config = Self::next_config().unwrap_or_else(|| Self::config()); + // Current slot should be less than half of epoch duration. + let epoch_duration = T::EpochDuration::get(); + let ticket_threshold = sp_consensus_sassafras::ticket_id_threshold( + next_config.redundancy_factor, + epoch_duration as u32, + next_config.attempts_number, + next_auth.len() as u32, + ); + + // Get next epoch params + let randomness = NextRandomness::::get(); + let epoch_idx = EpochIndex::::get() + 1; + + let mut segment = BoundedVec::with_max_capacity(); + for ticket in tickets { + debug!(target: LOG_TARGET, "Checking ring proof"); + + let ticket_id_input = + vrf::ticket_id_input(&randomness, ticket.body.attempt_idx, epoch_idx); + let Some(ticket_id_output) = ticket.signature.outputs.get(0) else { + debug!(target: LOG_TARGET, "Missing ticket vrf output from ring signature"); + continue + }; + let ticket_id = vrf::make_ticket_id(&ticket_id_input, &ticket_id_output); + if ticket_id >= ticket_threshold { + debug!(target: LOG_TARGET, "Over threshold"); + continue + } + + let sign_data = vrf::ticket_body_sign_data(&ticket.body, ticket_id_input); + + if ticket.signature.verify(&sign_data, &verifier) { + TicketsData::::set(ticket_id, Some(ticket.body)); + segment + .try_push(ticket_id) + .expect("has same length as bounded input vector; qed"); + } else { + debug!(target: LOG_TARGET, "Proof verification failure"); + } + } + + if !segment.is_empty() { + debug!(target: LOG_TARGET, "Appending segment with {} tickets", segment.len()); + segment.iter().for_each(|t| debug!(target: LOG_TARGET, " + {t:16x}")); + let mut metadata = TicketsMeta::::get(); + NextTicketsSegments::::insert(metadata.segments_count, segment); + metadata.segments_count += 1; + TicketsMeta::::set(metadata); + } + + Ok(()) + } + + /// Plan an epoch config change. + /// + /// The epoch config change is recorded and will be announced at the begin of the + /// next epoch together with next epoch authorities information. + /// In other words the configuration will be activated one epoch after. + /// Multiple calls to this method will replace any existing planned config change that had + /// not been enacted yet. 
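+ /// Example timeline: a change submitted during epoch N is kept in
+ /// `PendingEpochConfigChange`, announced within the `NextEpochData` log at the transition
+ /// to epoch N+1, and finally becomes the active `EpochConfig` for epoch N+2.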
+ /// + /// TODO-SASS-P4: proper weight + #[pallet::call_index(1)] + #[pallet::weight({0})] + pub fn plan_config_change( + origin: OriginFor, + config: EpochConfiguration, + ) -> DispatchResult { + ensure_root(origin)?; + + ensure!( + config.redundancy_factor != 0 && config.attempts_number != 0, + Error::::InvalidConfiguration + ); + PendingEpochConfigChange::::put(config); + Ok(()) + } + + /// Report authority equivocation. + /// + /// This method will verify the equivocation proof and validate the given key ownership + /// proof against the extracted offender. If both are valid, the offence will be reported. + /// + /// This extrinsic must be called unsigned and it is expected that only block authors will + /// call it (validated in `ValidateUnsigned`), as such if the block author is defined it + /// will be defined as the equivocation reporter. + /// + /// TODO-SASS-P4: proper weight + #[pallet::call_index(2)] + #[pallet::weight({0})] + pub fn report_equivocation_unsigned( + origin: OriginFor, + _equivocation_proof: EquivocationProof>, + //key_owner_proof: T::KeyOwnerProof, + ) -> DispatchResult { + ensure_none(origin)?; + + // Self::do_report_equivocation( + // T::HandleEquivocation::block_author(), + // *equivocation_proof, + // key_owner_proof, + // ) + Ok(()) + } + } + + #[pallet::validate_unsigned] + impl ValidateUnsigned for Pallet { + type Call = Call; + + fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity { + if let Call::submit_tickets { tickets } = call { + // Discard tickets not coming from the local node or that are not + // yet included in a block + debug!( + target: LOG_TARGET, + "Validating unsigned from {} source", + match source { + TransactionSource::Local => "local", + TransactionSource::InBlock => "in-block", + TransactionSource::External => "external", + } + ); + + if source == TransactionSource::External { + // TODO-SASS-P2: double check this `Local` requirement... + // If we only allow these txs on block production, then there is less chance to + // submit our tickets if we don't have enough authoring slots. + // If we have 0 slots => we have zero chances. + // Maybe this is one valid reason to introduce proxies. + // In short the question is >>> WHO HAS THE RIGHT TO SUBMIT A TICKET? <<< + // A) The current epoch validators + // B) Doesn't matter as far as the tickets are good (i.e. RVRF verify is ok) + // TODO @davxy: maybe we also provide a signed extrinsic to submit tickets + // where the submitter doesn't pay if the tickets are good? + warn!( + target: LOG_TARGET, + "Rejecting unsigned transaction from external sources.", + ); + return InvalidTransaction::BadSigner.into() + } + + // Current slot should be less than half of epoch duration. 
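+ // Illustrative example: with an epoch of 10 slots, a submission at slot index 3
+ // passes the timing check below and gets a longevity of 10 / 2 - 3 = 2 slots,
+ // while anything submitted after slot index 5 is rejected as stale.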
+ let epoch_duration = T::EpochDuration::get(); + + let current_slot_idx = Self::current_slot_index(); + if current_slot_idx > epoch_duration / 2 { + warn!(target: LOG_TARGET, "Timeout to propose tickets, bailing out.",); + return InvalidTransaction::Stale.into() + } + + // This should be set such that it is discarded after the first epoch half + let tickets_longevity = epoch_duration / 2 - current_slot_idx; + let tickets_tag = tickets.using_encoded(|bytes| hashing::blake2_256(bytes)); + + ValidTransaction::with_tag_prefix("Sassafras") + .priority(TransactionPriority::max_value()) + .longevity(tickets_longevity) + .and_provides(tickets_tag) + .propagate(true) + .build() + } else { + InvalidTransaction::Call.into() + } + } + } +} + +// Inherent methods +impl Pallet { + /// Determine whether an epoch change should take place at this block. + /// Assumes that initialization has already taken place. + pub fn should_end_epoch(now: BlockNumberFor) -> bool { + // The epoch has technically ended during the passage of time between this block and the + // last, but we have to "end" the epoch now, since there is no earlier possible block we + // could have done it. + // + // The exception is for block 1: the genesis has slot 0, so we treat epoch 0 as having + // started at the slot of block 1. We want to use the same randomness and validator set as + // signalled in the genesis, so we don't rotate the epoch. + now != One::one() && Self::current_slot_index() >= T::EpochDuration::get() + } + + /// Current slot index with respect to current epoch. + fn current_slot_index() -> u64 { + Self::slot_index(CurrentSlot::::get()) + } + + /// Slot index with respect to current epoch. + fn slot_index(slot: Slot) -> u64 { + if *GenesisSlot::::get() == 0 { + return 0 + } + slot.checked_sub(Self::current_epoch_start().into()).unwrap_or(u64::MAX) + } + + /// Remove all tickets related data. + /// + /// May not be efficient as the calling places may repeat some of this operations + /// but is a very extraordinary operation (hopefully never happens in production) + /// and better safe than sorry. + fn reset_tickets_data() { + let tickets_metadata = TicketsMeta::::get(); + + // Remove even-epoch data. + let tickets_count = tickets_metadata.tickets_count[0]; + (0..tickets_count).into_iter().for_each(|idx| { + if let Some(id) = TicketsIds::::get((0, idx)) { + TicketsData::::remove(id); + } + }); + + // Remove odd-epoch data. + let tickets_count = tickets_metadata.tickets_count[1]; + (0..tickets_count).into_iter().for_each(|idx| { + if let Some(id) = TicketsIds::::get((1, idx)) { + TicketsData::::remove(id); + } + }); + + // Remove all outstanding tickets segments. + (0..tickets_metadata.segments_count).into_iter().for_each(|i| { + NextTicketsSegments::::remove(i); + }); + NextTicketsSegments::::remove(u32::MAX); + + // Reset tickets metadata + TicketsMeta::::set(Default::default()); + } + + /// Enact an epoch change. + /// + /// Should be done on every block where `should_end_epoch` has returned `true`, and the caller + /// is the only caller of this function. + /// + /// Typically, this is not handled directly, but by a higher-level component implementing the + /// `EpochChangeTrigger` or `OneSessionHandler` trait. + /// + /// If we detect one or more skipped epochs the policy is to use the authorities and values + /// from the first skipped epoch. The tickets are invalidated. 
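+ /// For instance, with 10-slot epochs, if the last enacted epoch was #0 and the incoming
+ /// block claims a slot 30 slots past the start of epoch #0, two epochs are detected as
+ /// skipped: all tickets data is dropped and the chain resumes from epoch #3.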
+ pub(crate) fn enact_epoch_change( + authorities: WeakBoundedVec, + next_authorities: WeakBoundedVec, + ) { + // PRECONDITION: caller has done initialization. + // If using the internal trigger or the session pallet then this is guaranteed. + debug_assert!(Self::initialized().is_some()); + + // Update authorities + Authorities::::put(authorities); + NextAuthorities::::put(&next_authorities); + + // Update epoch index + let mut epoch_idx = EpochIndex::::get() + .checked_add(1) + .expect("epoch indices will never reach 2^64 before the death of the universe; qed"); + + let slot_idx = CurrentSlot::::get().saturating_sub(Self::epoch_start(epoch_idx)); + if slot_idx >= T::EpochDuration::get() { + // Detected one or more skipped epochs, clear tickets data and recompute epoch index. + Self::reset_tickets_data(); + let skipped_epochs = u64::from(slot_idx) / T::EpochDuration::get(); + epoch_idx += skipped_epochs; + warn!(target: LOG_TARGET, "Detected {} skipped epochs, resuming from epoch {}", skipped_epochs, epoch_idx); + } + + let mut tickets_metadata = TicketsMeta::::get(); + + EpochIndex::::put(epoch_idx); + + let next_epoch_index = epoch_idx + .checked_add(1) + .expect("epoch indices will never reach 2^64 before the death of the universe; qed"); + + // Updates current epoch randomness and computes the *next* epoch randomness. + let next_randomness = Self::update_randomness(next_epoch_index); + + if let Some(config) = NextEpochConfig::::take() { + EpochConfig::::put(config); + } + + let next_config = PendingEpochConfigChange::::take(); + if let Some(next_config) = next_config.clone() { + NextEpochConfig::::put(next_config); + } + + // After we update the current epoch, we signal the *next* epoch change + // so that nodes can track changes. + let next_epoch = NextEpochDescriptor { + authorities: next_authorities.to_vec(), + randomness: next_randomness, + config: next_config, + }; + Self::deposit_consensus(ConsensusLog::NextEpochData(next_epoch)); + + let epoch_tag = (epoch_idx & 1) as u8; + // Optionally finish sorting + if tickets_metadata.segments_count != 0 { + Self::sort_tickets(tickets_metadata.segments_count, epoch_tag, &mut tickets_metadata); + } + + // Clear the "prev ≡ next (mod 2)" epoch tickets counter and bodies. + // Ids are left since are just cyclically overwritten on-the-go. + let next_epoch_tag = epoch_tag ^ 1; + let prev_epoch_tickets_count = &mut tickets_metadata.tickets_count[next_epoch_tag as usize]; + if *prev_epoch_tickets_count != 0 { + (0..*prev_epoch_tickets_count).into_iter().for_each(|idx| { + if let Some(id) = TicketsIds::::get((next_epoch_tag, idx)) { + TicketsData::::remove(id); + } + }); + *prev_epoch_tickets_count = 0; + TicketsMeta::::set(tickets_metadata); + } + } + + /// Call this function on epoch change to update the randomness. + /// Returns the next epoch randomness. + fn update_randomness(next_epoch_index: u64) -> Randomness { + let curr_randomness = NextRandomness::::get(); + CurrentRandomness::::put(curr_randomness); + + let accumulator = RandomnessAccumulator::::get(); + let mut s = Vec::with_capacity(2 * curr_randomness.len() + 8); + s.extend_from_slice(&curr_randomness); + s.extend_from_slice(&next_epoch_index.to_le_bytes()); + s.extend_from_slice(&accumulator); + + let next_randomness = hashing::blake2_256(&s); + NextRandomness::::put(&next_randomness); + + next_randomness + } + + /// Finds the start slot of the current epoch. 
Only guaranteed to give correct results after + /// `initialize` of the first block in the chain (as its result is based off of `GenesisSlot`). + pub fn current_epoch_start() -> Slot { + Self::epoch_start(EpochIndex::::get()) + } + + fn epoch_start(epoch_index: u64) -> Slot { + const PROOF: &str = "slot number is u64; it should relate in some way to wall clock time; \ + if u64 is not enough we should crash for safety; qed."; + + let epoch_start = epoch_index.checked_mul(T::EpochDuration::get()).expect(PROOF); + + epoch_start.checked_add(*GenesisSlot::::get()).expect(PROOF).into() + } + + fn deposit_consensus(new: U) { + let log = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, new.encode()); + >::deposit_log(log) + } + + fn deposit_randomness(randomness: &Randomness) { + let mut s = RandomnessAccumulator::::get().to_vec(); + s.extend_from_slice(randomness); + let accumulator = hashing::blake2_256(&s); + RandomnessAccumulator::::put(accumulator); + } + + // Initialize authorities on genesis phase. + fn initialize_genesis_authorities(authorities: &[AuthorityId]) { + // Genesis authorities may have been initialized via other means (e.g. via session pallet). + // If this function has already been called with some authorities, then the new list + // should be match the previously set one. + let prev_authorities = Authorities::::get(); + if !prev_authorities.is_empty() { + if prev_authorities.to_vec() == authorities { + return + } else { + panic!("Authorities already were already initialized"); + } + } + + let bounded_authorities = + WeakBoundedVec::<_, T::MaxAuthorities>::try_from(authorities.to_vec()) + .expect("Initial number of authorities should be lower than T::MaxAuthorities"); + Authorities::::put(&bounded_authorities); + NextAuthorities::::put(&bounded_authorities); + } + + fn initialize_genesis_epoch(genesis_slot: Slot) { + GenesisSlot::::put(genesis_slot); + + // Deposit a log because this is the first block in epoch #0. + // We use the same values as genesis because we haven't collected any randomness yet. + let next = NextEpochDescriptor { + authorities: Self::authorities().to_vec(), + randomness: Self::randomness(), + config: None, + }; + Self::deposit_consensus(ConsensusLog::NextEpochData(next)); + } + + /// Current epoch information. + pub fn current_epoch() -> Epoch { + let epoch_idx = EpochIndex::::get(); + Epoch { + epoch_idx, + start_slot: Self::epoch_start(epoch_idx), + slot_duration: SlotDuration::from_millis(T::SlotDuration::get()), + epoch_duration: T::EpochDuration::get(), + authorities: Self::authorities().to_vec(), + randomness: Self::randomness(), + config: Self::config(), + } + } + + /// Next epoch information. + pub fn next_epoch() -> Epoch { + let epoch_idx = EpochIndex::::get() + .checked_add(1) + .expect("epoch indices will never reach 2^64 before the death of the universe; qed"); + Epoch { + epoch_idx, + start_slot: Self::epoch_start(epoch_idx), + slot_duration: SlotDuration::from_millis(T::SlotDuration::get()), + epoch_duration: T::EpochDuration::get(), + authorities: Self::next_authorities().to_vec(), + randomness: Self::next_randomness(), + config: Self::next_config().unwrap_or_else(|| Self::config()), + } + } + + /// Fetch expected ticket-id for the given slot according to an "outside-in" sorting strategy. + /// + /// Given an ordered sequence of tickets [t0, t1, t2, ..., tk] to be assigned to n slots, + /// with n >= k, then the tickets are assigned to the slots according to the following + /// strategy: + /// + /// slot-index : [ 0, 1, 2, ............ 
, n ] + /// tickets : [ t1, t3, t5, ... , t4, t2, t0 ]. + /// + /// With slot-index computed as `epoch_start() - slot`. + /// + /// If `slot` value falls within the current epoch then we fetch tickets from the current epoch + /// tickets list. + /// + /// If `slot` value falls within the next epoch then we fetch tickets from the next epoch + /// tickets ids list. Note that in this case we may have not finished receiving all the tickets + /// for that epoch yet. The next epoch tickets should be considered "stable" only after the + /// current epoch first half slots were elapsed (see `submit_tickets_unsigned_extrinsic`). + /// + /// Returns `None` if, according to the sorting strategy, there is no ticket associated to the + /// specified slot-index (happend if a ticket falls in the middle of an epoch and n > k), + /// or if the slot falls beyond the next epoch. + pub fn slot_ticket_id(slot: Slot) -> Option { + let epoch_idx = EpochIndex::::get(); + let duration = T::EpochDuration::get(); + let mut slot_idx = Self::slot_index(slot); + let mut tickets_meta = TicketsMeta::::get(); + + let get_ticket_idx = |slot_idx| { + let ticket_idx = if slot_idx < duration / 2 { + 2 * slot_idx + 1 + } else { + 2 * (duration - (slot_idx + 1)) + }; + debug!( + target: LOG_TARGET, + ">>>>>>>> SLOT-IDX {} -> TICKET-IDX {}", + slot_idx, + ticket_idx + ); + ticket_idx as u32 + }; + + let mut epoch_tag = (epoch_idx & 1) as u8; + + if duration <= slot_idx && slot_idx < 2 * duration { + // Try to get a ticket for the next epoch. Since its state values were not enacted yet, + // we may have to finish sorting the tickets. + epoch_tag ^= 1; + slot_idx -= duration; + if tickets_meta.segments_count != 0 { + Self::sort_tickets(tickets_meta.segments_count, epoch_tag, &mut tickets_meta); + TicketsMeta::::set(tickets_meta); + } + } else if slot_idx >= 2 * duration { + return None + } + + let ticket_idx = get_ticket_idx(slot_idx); + if ticket_idx < tickets_meta.tickets_count[epoch_tag as usize] { + TicketsIds::::get((epoch_tag, ticket_idx)) + } else { + None + } + } + + /// Returns ticket id and data associated to the given `slot`. + /// + /// Refer to the `slot_ticket_id` documentation for the slot-ticket association + /// criteria. + pub fn slot_ticket(slot: Slot) -> Option<(TicketId, TicketBody)> { + Self::slot_ticket_id(slot).and_then(|id| TicketsData::::get(id).map(|body| (id, body))) + } + + // Lexicographically sort the tickets who belongs to the next epoch. + // + // Tickets are fetched from at most `max_segments` segments. + // + // The resulting sorted vector is optionally truncated to contain at most `MaxTickets` + // entries. If all the segments were consumed then the sorted vector is saved as the + // next epoch tickets, else it is saved to be used by next calls to this function. + fn sort_tickets(mut max_segments: u32, epoch_tag: u8, metadata: &mut TicketsMetadata) { + max_segments = max_segments.min(metadata.segments_count); + let max_tickets = T::MaxTickets::get() as usize; + + // Fetch the sorted result (if any). + let mut sorted_segment = NextTicketsSegments::::take(u32::MAX).into_inner(); + + let mut require_sort = max_segments != 0; + + // There is an upper bound to check only if we already sorted the max number + // of allowed tickets. + let mut upper_bound = *sorted_segment.get(max_tickets).unwrap_or(&TicketId::MAX); + + // Consume at most `max_iter` segments. + // During the process remove every stale ticket from `TicketsData` storage. 
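+ // Each iteration pops one segment and merges only the ids below the current upper
+ // bound; once more than `MaxTickets` ids are collected the buffer is sorted, truncated
+ // and the bound tightened, so the sorting cost is spread across multiple calls.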
+ for _ in 0..max_segments { + metadata.segments_count -= 1; + let segment = NextTicketsSegments::::take(metadata.segments_count); + + // Merge only elements below the current sorted segment sup. + segment.iter().for_each(|id| { + if id < &upper_bound { + sorted_segment.push(*id); + } else { + TicketsData::::remove(id); + } + }); + if sorted_segment.len() > max_tickets { + require_sort = false; + // Sort and truncate good tickets. + sorted_segment.sort_unstable(); + sorted_segment[max_tickets..].iter().for_each(|id| TicketsData::::remove(id)); + sorted_segment.truncate(max_tickets); + upper_bound = sorted_segment[max_tickets - 1]; + } + } + + if require_sort { + sorted_segment.sort_unstable(); + } + + if metadata.segments_count == 0 { + // Sorting is over, write to next epoch map. + sorted_segment.iter().enumerate().for_each(|(i, id)| { + TicketsIds::::insert((epoch_tag, i as u32), id); + }); + metadata.tickets_count[epoch_tag as usize] = sorted_segment.len() as u32; + } else { + // Keep the partial result for next calls. + NextTicketsSegments::::insert(u32::MAX, BoundedVec::truncate_from(sorted_segment)); + } + } + + /// Submit next epoch validator tickets via an unsigned extrinsic constructed with a call to + /// `submit_unsigned_transaction`. + /// + /// The submitted tickets are added to the next epoch outstanding tickets as long as the + /// extrinsic is called within the first half of the epoch. Tickets received during the + /// second half are dropped. + /// + /// TODO-SASS-P3: use pass a bounded vector??? + pub fn submit_tickets_unsigned_extrinsic(tickets: Vec) -> bool { + let tickets = BoundedVec::truncate_from(tickets); + let call = Call::submit_tickets { tickets }; + match SubmitTransaction::>::submit_unsigned_transaction(call.into()) { + Ok(_) => true, + Err(e) => { + error!(target: LOG_TARGET, "Error submitting tickets {:?}", e); + false + }, + } + } + + /// Submits an equivocation via an unsigned extrinsic. + /// + /// Unsigned extrinsic is created with a call to `report_equivocation_unsigned`. + pub fn submit_unsigned_equivocation_report( + equivocation_proof: EquivocationProof>, + //key_owner_proof: T::KeyOwnerProof, + ) -> bool { + let call = Call::report_equivocation_unsigned { + equivocation_proof, + // key_owner_proof, + }; + + match SubmitTransaction::>::submit_unsigned_transaction(call.into()) { + Ok(()) => true, + Err(e) => { + error!(target: LOG_TARGET, "Error submitting equivocation report: {:?}", e); + false + }, + } + } +} + +/// Trigger an epoch change, if any should take place. +pub trait EpochChangeTrigger { + /// Trigger an epoch change, if any should take place. This should be called + /// during every block, after initialization is done. + fn trigger(now: BlockNumberFor); +} + +/// A type signifying to Sassafras that an external trigger for epoch changes +/// (e.g. pallet-session) is used. +pub struct ExternalTrigger; + +impl EpochChangeTrigger for ExternalTrigger { + fn trigger(_: BlockNumberFor) {} // nothing - trigger is external. +} + +/// A type signifying to Sassafras that it should perform epoch changes with an internal +/// trigger, recycling the same authorities forever. 
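+/// Meant for setups where no external module (e.g. pallet-session) is responsible for
+/// rotating the authority set; it is also the trigger used by this pallet's mock runtime.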
+pub struct SameAuthoritiesForever; + +impl EpochChangeTrigger for SameAuthoritiesForever { + fn trigger(now: BlockNumberFor) { + if >::should_end_epoch(now) { + let authorities = >::authorities(); + let next_authorities = authorities.clone(); + + >::enact_epoch_change(authorities, next_authorities); + } + } +} + +impl BoundToRuntimeAppPublic for Pallet { + type Public = AuthorityId; +} diff --git a/frame/sassafras/src/mock.rs b/frame/sassafras/src/mock.rs new file mode 100644 index 0000000000000..7d177069849a8 --- /dev/null +++ b/frame/sassafras/src/mock.rs @@ -0,0 +1,358 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Test utilities for Sassafras pallet. + +use crate::{self as pallet_sassafras, SameAuthoritiesForever, *}; + +use frame_support::traits::{ConstU32, ConstU64, OnFinalize, OnInitialize}; +use sp_consensus_sassafras::{ + digests::SlotClaim, + vrf::{RingProver, VrfSignature}, + AuthorityIndex, AuthorityPair, EpochConfiguration, Slot, TicketBody, TicketEnvelope, TicketId, +}; +use sp_core::{ + crypto::{ByteArray, Pair, UncheckedFrom, VrfSecret, Wraps}, + ed25519::Public as EphemeralPublic, + H256, U256, +}; +use sp_runtime::{ + testing::{Digest, DigestItem, Header, TestXt}, + traits::IdentityLookup, + BuildStorage, +}; + +const SLOT_DURATION: u64 = 1000; +const EPOCH_DURATION: u64 = 10; +const MAX_TICKETS: u32 = 6; + +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type RuntimeOrigin = RuntimeOrigin; + type Nonce = u64; + type RuntimeCall = RuntimeCall; + type Hash = H256; + type Version = (); + type Hashing = sp_runtime::traits::BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Block = frame_system::mocking::MockBlock; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = ConstU64<250>; + type PalletInfo = PalletInfo; + type AccountData = u128; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +impl pallet_timestamp::Config for Test { + type Moment = u64; + type OnTimestampSet = (); //Sassafras; + type MinimumPeriod = ConstU64<1>; + type WeightInfo = (); +} + +impl frame_system::offchain::SendTransactionTypes for Test +where + RuntimeCall: From, +{ + type OverarchingCall = RuntimeCall; + type Extrinsic = TestXt; +} + +impl pallet_sassafras::Config for Test { + type SlotDuration = ConstU64; + type EpochDuration = ConstU64; + type EpochChangeTrigger = SameAuthoritiesForever; + type MaxAuthorities = ConstU32<10>; + type MaxTickets = ConstU32; +} + +frame_support::construct_runtime!( + pub enum Test + { + System: frame_system, + Sassafras: pallet_sassafras, + } +); + +// Default used for most of the tests and benchmarks. 
+// +// The redundancy factor has been set to max value to accept all submitted +// tickets without worrying about the threshold. +pub const TEST_EPOCH_CONFIGURATION: EpochConfiguration = + EpochConfiguration { redundancy_factor: u32::MAX, attempts_number: 32 }; + +/// Build and returns test storage externalities +pub fn new_test_ext(authorities_len: usize) -> sp_io::TestExternalities { + new_test_ext_with_pairs(authorities_len, false).1 +} + +/// Build and returns test storage externalities and authority set pairs used +/// by Sassafras genesis configuration. +pub fn new_test_ext_with_pairs( + authorities_len: usize, + with_ring_context: bool, +) -> (Vec, sp_io::TestExternalities) { + // @davxy temporary logging facility + // env_logger::init(); + + let pairs = (0..authorities_len) + .map(|i| AuthorityPair::from_seed(&U256::from(i).into())) + .collect::>(); + + let authorities = pairs.iter().map(|p| p.public()).collect(); + + let mut storage = frame_system::GenesisConfig::::default().build_storage().unwrap(); + + pallet_sassafras::GenesisConfig:: { + authorities, + epoch_config: TEST_EPOCH_CONFIGURATION, + _phantom: sp_std::marker::PhantomData, + } + .assimilate_storage(&mut storage) + .unwrap(); + + let mut ext: sp_io::TestExternalities = storage.into(); + + if with_ring_context { + ext.execute_with(|| { + log::debug!("Building new testing ring context"); + let ring_ctx = vrf::RingContext::new_testing(); + RingContext::::set(Some(ring_ctx.clone())); + }); + } + + (pairs, ext) +} + +fn make_ticket_with_prover( + attempt: u32, + pair: &AuthorityPair, + prover: &RingProver, +) -> TicketEnvelope { + log::debug!("attempt: {}", attempt); + + // Values are referring to the next epoch + let epoch = Sassafras::epoch_index() + 1; + let randomness = Sassafras::next_randomness(); + + // Make a dummy ephemeral public that hopefully is unique within one test instance. + // In the tests, the values within the erased public are just used to compare + // ticket bodies, so it is not important to be a valid key. + let mut raw: [u8; 32] = [0; 32]; + raw.copy_from_slice(&pair.public().as_slice()[0..32]); + let erased_public = EphemeralPublic::unchecked_from(raw); + let revealed_public = erased_public.clone(); + + let ticket_id_input = vrf::ticket_id_input(&randomness, attempt, epoch); + + let body = TicketBody { attempt_idx: attempt, erased_public, revealed_public }; + let sign_data = vrf::ticket_body_sign_data(&body, ticket_id_input); + + let signature = pair.as_ref().ring_vrf_sign(&sign_data, &prover); + + // Ticket-id can be generated via vrf-preout. + // We don't care that much about its value here. + TicketEnvelope { body, signature } +} + +pub fn make_prover(pair: &AuthorityPair) -> RingProver { + let public = pair.public(); + let mut prover_idx = None; + + let ring_ctx = Sassafras::ring_context().unwrap(); + + let pks: Vec = Sassafras::authorities() + .iter() + .enumerate() + .map(|(idx, auth)| { + if public == *auth { + prover_idx = Some(idx); + } + *auth.as_ref() + }) + .collect(); + + log::debug!("Building prover. Ring size: {}", pks.len()); + let prover = ring_ctx.prover(&pks, prover_idx.unwrap()).unwrap(); + log::debug!("Done"); + + prover +} + +/// Construct at most `attempts` tickets envelopes for the given `slot`. +/// TODO-SASS-P3: filter out invalid tickets according to test threshold. +/// E.g. 
by passing an optional threshold +pub fn make_tickets(attempts: u32, pair: &AuthorityPair) -> Vec { + let prover = make_prover(pair); + (0..attempts) + .into_iter() + .map(|attempt| make_ticket_with_prover(attempt, pair, &prover)) + .collect() +} + +pub fn make_ticket_body(attempt_idx: u32, pair: &AuthorityPair) -> (TicketId, TicketBody) { + // Values are referring to the next epoch + let epoch = Sassafras::epoch_index() + 1; + let randomness = Sassafras::next_randomness(); + + let ticket_id_input = vrf::ticket_id_input(&randomness, attempt_idx, epoch); + let ticket_id_output = pair.as_inner_ref().vrf_output(&ticket_id_input); + + let id = vrf::make_ticket_id(&ticket_id_input, &ticket_id_output); + + // Make a dummy ephemeral public that hopefully is unique within one test instance. + // In the tests, the values within the erased public are just used to compare + // ticket bodies, so it is not important to be a valid key. + let mut raw: [u8; 32] = [0; 32]; + raw[..16].copy_from_slice(&pair.public().as_slice()[0..16]); + raw[16..].copy_from_slice(&id.to_le_bytes()); + let erased_public = EphemeralPublic::unchecked_from(raw); + let revealed_public = erased_public.clone(); + + let body = TicketBody { attempt_idx, erased_public, revealed_public }; + + (id, body) +} + +pub fn make_ticket_bodies(number: u32, pair: &AuthorityPair) -> Vec<(TicketId, TicketBody)> { + (0..number).into_iter().map(|i| make_ticket_body(i, pair)).collect() +} + +/// Persist the given tickets in `segments_count` separated segments by appending +/// them to the storage segments list. +/// +/// If segments_count > tickets.len() => segments_count = tickets.len() +pub fn persist_next_epoch_tickets_as_segments( + tickets: &[(TicketId, TicketBody)], + mut segments_count: usize, +) { + if segments_count > tickets.len() { + segments_count = tickets.len(); + } + let segment_len = tickets.len() / segments_count; + + // Update metadata + let mut meta = TicketsMeta::::get(); + meta.segments_count += segments_count as u32; + TicketsMeta::::set(meta); + + for i in 0..segments_count { + let segment: Vec = tickets[i * segment_len..(i + 1) * segment_len] + .iter() + .map(|(id, body)| { + TicketsData::::set(id, Some(body.clone())); + *id + }) + .collect(); + let segment = BoundedVec::truncate_from(segment); + NextTicketsSegments::::insert(i as u32, segment); + } +} + +pub fn persist_next_epoch_tickets(tickets: &[(TicketId, TicketBody)]) { + persist_next_epoch_tickets_as_segments(tickets, 1); + // Force sorting of next epoch tickets (enactment) by explicitly querying the first of them. + let next_epoch = Sassafras::next_epoch(); + assert_eq!(TicketsMeta::::get().segments_count, 1); + Sassafras::slot_ticket(next_epoch.start_slot).unwrap(); + assert_eq!(TicketsMeta::::get().segments_count, 0); +} + +fn slot_claim_vrf_signature(slot: Slot, pair: &AuthorityPair) -> VrfSignature { + let mut epoch = Sassafras::epoch_index(); + let mut randomness = Sassafras::randomness(); + + // Check if epoch is going to change on initialization. + let epoch_start = Sassafras::current_epoch_start(); + if epoch_start != 0_u64 && slot >= epoch_start + EPOCH_DURATION { + epoch += slot.saturating_sub(epoch_start).saturating_div(EPOCH_DURATION); + randomness = crate::NextRandomness::::get(); + } + + let data = vrf::slot_claim_sign_data(&randomness, slot, epoch); + pair.as_ref().vrf_sign(&data) +} + +/// Construct a `PreDigest` instance for the given parameters. 
+pub fn make_slot_claim( + authority_idx: AuthorityIndex, + slot: Slot, + pair: &AuthorityPair, +) -> SlotClaim { + let vrf_signature = slot_claim_vrf_signature(slot, pair); + SlotClaim { authority_idx, slot, vrf_signature, ticket_claim: None } +} + +/// Construct a `Digest` with a `SlotClaim` item. +pub fn make_digest(authority_idx: AuthorityIndex, slot: Slot, pair: &AuthorityPair) -> Digest { + let claim = make_slot_claim(authority_idx, slot, pair); + Digest { logs: vec![DigestItem::from(&claim)] } +} + +pub fn initialize_block( + number: u64, + slot: Slot, + parent_hash: H256, + pair: &AuthorityPair, +) -> Digest { + let digest = make_digest(0, slot, pair); + System::reset_events(); + System::initialize(&number, &parent_hash, &digest); + Sassafras::on_initialize(number); + digest +} + +pub fn finalize_block(number: u64) -> Header { + Sassafras::on_finalize(number); + System::finalize() +} + +/// Progress the pallet state up to the given block `number` and `slot`. +pub fn go_to_block(number: u64, slot: Slot, pair: &AuthorityPair) -> Digest { + Sassafras::on_finalize(System::block_number()); + let parent_hash = System::finalize().hash(); + + let digest = make_digest(0, slot, pair); + + System::reset_events(); + System::initialize(&number, &parent_hash, &digest); + Sassafras::on_initialize(number); + + digest +} + +/// Progress the pallet state up to the given block `number`. +/// Slots will grow linearly accordingly to blocks. +pub fn progress_to_block(number: u64, pair: &AuthorityPair) -> Option { + let mut slot = Sassafras::current_slot() + 1; + let mut digest = None; + for i in System::block_number() + 1..=number { + let dig = go_to_block(i, slot, pair); + digest = Some(dig); + slot = slot + 1; + } + digest +} diff --git a/frame/sassafras/src/session.rs b/frame/sassafras/src/session.rs new file mode 100644 index 0000000000000..b70748a7f153c --- /dev/null +++ b/frame/sassafras/src/session.rs @@ -0,0 +1,116 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Sassafras implementation of traits required by session pallet. + +use super::*; +use frame_support::traits::{EstimateNextSessionRotation, Hooks, OneSessionHandler}; +use pallet_session::ShouldEndSession; +use sp_runtime::{traits::SaturatedConversion, Permill}; + +impl ShouldEndSession> for Pallet { + fn should_end_session(now: BlockNumberFor) -> bool { + // It might be (and it is in current implementation) that session module is calling + // `should_end_session` from it's own `on_initialize` handler, in which case it's + // possible that Sassafras's own `on_initialize` has not run yet, so let's ensure that we + // have initialized the pallet and updated the current slot. 
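+ // Calling `on_initialize` twice within the same block is harmless: the `Initialized`
+ // storage value acts as a guard and the second invocation returns immediately.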
+ Self::on_initialize(now); + Self::should_end_epoch(now) + } +} + +impl OneSessionHandler for Pallet { + type Key = AuthorityId; + + fn on_genesis_session<'a, I: 'a>(validators: I) + where + I: Iterator, + { + let authorities: Vec<_> = validators.map(|(_, k)| k).collect(); + Self::initialize_genesis_authorities(&authorities); + } + + fn on_new_session<'a, I: 'a>(_changed: bool, validators: I, queued_validators: I) + where + I: Iterator, + { + let authorities = validators.map(|(_account, k)| k).collect(); + let bounded_authorities = WeakBoundedVec::<_, T::MaxAuthorities>::force_from( + authorities, + Some( + "Warning: The session has more validators than expected. \ + A runtime configuration adjustment may be needed.", + ), + ); + + let next_authorities = queued_validators.map(|(_account, k)| k).collect(); + let next_bounded_authorities = WeakBoundedVec::<_, T::MaxAuthorities>::force_from( + next_authorities, + Some( + "Warning: The session has more queued validators than expected. \ + A runtime configuration adjustment may be needed.", + ), + ); + + Self::enact_epoch_change(bounded_authorities, next_bounded_authorities) + } + + fn on_disabled(i: u32) { + Self::deposit_consensus(ConsensusLog::OnDisabled(i)) + } +} + +impl EstimateNextSessionRotation> for Pallet { + fn average_session_length() -> BlockNumberFor { + T::EpochDuration::get().saturated_into() + } + + fn estimate_current_session_progress(_now: BlockNumberFor) -> (Option, Weight) { + let elapsed = CurrentSlot::::get().saturating_sub(Self::current_epoch_start()) + 1; + let progress = Permill::from_rational(*elapsed, T::EpochDuration::get()); + + // TODO-SASS-P2: Read: Current Slot, Epoch Index, Genesis Slot + (Some(progress), T::DbWeight::get().reads(3)) + } + + /// Return the _best guess_ block number, at which the next epoch change is predicted to happen. + /// + /// Returns None if the prediction is in the past; This implies an internal error and should + /// not happen under normal circumstances. + /// + /// In other word, this is only accurate if no slots are missed. Given missed slots, the slot + /// number will grow while the block number will not. Hence, the result can be interpreted as an + /// upper bound. + // + // ## IMPORTANT NOTE + // + // This implementation is linked to how [`should_session_change`] is working. This might need + // to be updated accordingly, if the underlying mechanics of slot and epochs change. + fn estimate_next_session_rotation( + now: BlockNumberFor, + ) -> (Option>, Weight) { + let next_slot = Self::current_epoch_start().saturating_add(T::EpochDuration::get()); + let upper_bound = next_slot.checked_sub(*CurrentSlot::::get()).map(|slots_remaining| { + // This is a best effort guess. Drifts in the slot/block ratio will cause errors here. + let blocks_remaining: BlockNumberFor = slots_remaining.saturated_into(); + now.saturating_add(blocks_remaining) + }); + + // TODO-SASS-P2: Read: Current Slot, Epoch Index, Genesis Slot + (upper_bound, T::DbWeight::get().reads(3)) + } +} diff --git a/frame/sassafras/src/tests.rs b/frame/sassafras/src/tests.rs new file mode 100644 index 0000000000000..4b2b22d64d3ec --- /dev/null +++ b/frame/sassafras/src/tests.rs @@ -0,0 +1,663 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests for Sassafras pallet. + +use crate::*; +use mock::*; + +use sp_consensus_sassafras::Slot; +use sp_runtime::traits::Get; + +fn h2b(hex: &str) -> [u8; N] { + array_bytes::hex2array_unchecked(hex) +} + +fn b2h(bytes: [u8; N]) -> String { + array_bytes::bytes2hex("", &bytes) +} + +#[test] +fn genesis_values_assumptions_check() { + new_test_ext(4).execute_with(|| { + assert_eq!(Sassafras::authorities().len(), 4); + assert_eq!(EpochConfig::::get(), TEST_EPOCH_CONFIGURATION); + }); +} + +// Tests if the sorted tickets are assigned to each slot outside-in. +#[test] +fn slot_ticket_id_outside_in_fetch() { + let genesis_slot = Slot::from(100); + let max_tickets: u32 = ::MaxTickets::get(); + assert_eq!(max_tickets, 6); + + // Current epoch tickets + let curr_tickets: Vec = (0..max_tickets).map(|i| i as TicketId).collect(); + + let next_tickets: Vec = + (0..max_tickets - 1).map(|i| (i + max_tickets) as TicketId).collect(); + + new_test_ext(4).execute_with(|| { + curr_tickets + .iter() + .enumerate() + .for_each(|(i, id)| TicketsIds::::insert((0, i as u32), id)); + + next_tickets + .iter() + .enumerate() + .for_each(|(i, id)| TicketsIds::::insert((1, i as u32), id)); + + TicketsMeta::::set(TicketsMetadata { + tickets_count: [curr_tickets.len() as u32, next_tickets.len() as u32], + segments_count: 0, + }); + + // Before initializing `GenesisSlot` value the pallet always return the first slot + // This is a kind of special hardcoded case that should never happen in practice + // (i.e. the first thing the pallet does is to initialize the genesis slot). + + assert_eq!(Sassafras::slot_ticket_id(0.into()), Some(curr_tickets[1])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 0), Some(curr_tickets[1])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 1), Some(curr_tickets[1])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 100), Some(curr_tickets[1])); + + // Initialize genesis slot.. + GenesisSlot::::set(genesis_slot); + + // Try fetch a ticket for a slot before current epoch. + assert_eq!(Sassafras::slot_ticket_id(0.into()), None); + + // Current epoch tickets. 
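+ // With 6 current-epoch tickets and a 10-slot epoch, the outside-in assignment checked
+ // below is [t1, t3, t5, -, -, -, -, t4, t2, t0], i.e. the four middle slots are empty.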
+ assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 0), Some(curr_tickets[1])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 1), Some(curr_tickets[3])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 2), Some(curr_tickets[5])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 3), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 4), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 5), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 6), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 7), Some(curr_tickets[4])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 8), Some(curr_tickets[2])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 9), Some(curr_tickets[0])); + + // Next epoch tickets (note that only 5 tickets are available) + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 10), Some(next_tickets[1])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 11), Some(next_tickets[3])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 12), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 13), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 14), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 15), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 16), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 17), Some(next_tickets[4])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 18), Some(next_tickets[2])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 19), Some(next_tickets[0])); + + // Try fetch tickets for slots beyend next epoch. + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 20), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 42), None); + }); +} + +#[test] +fn on_first_block_after_genesis() { + let (pairs, mut ext) = new_test_ext_with_pairs(4, false); + + ext.execute_with(|| { + let start_slot = Slot::from(100); + let start_block = 1; + + let digest = initialize_block(start_block, start_slot, Default::default(), &pairs[0]); + + // Post-initialization status + + assert!(Initialized::::get().is_some()); + assert_eq!(Sassafras::genesis_slot(), start_slot); + assert_eq!(Sassafras::current_slot(), start_slot); + assert_eq!(Sassafras::epoch_index(), 0); + assert_eq!(Sassafras::current_epoch_start(), start_slot); + assert_eq!(Sassafras::current_slot_index(), 0); + assert_eq!(Sassafras::randomness(), [0; 32]); + assert_eq!(NextRandomness::::get(), [0; 32]); + assert_eq!(RandomnessAccumulator::::get(), [0; 32]); + + let header = finalize_block(start_block); + + // Post-finalization status + + assert!(Initialized::::get().is_none()); + assert_eq!(Sassafras::genesis_slot(), start_slot); + assert_eq!(Sassafras::current_slot(), start_slot); + assert_eq!(Sassafras::epoch_index(), 0); + assert_eq!(Sassafras::current_epoch_start(), start_slot); + assert_eq!(Sassafras::current_slot_index(), 0); + assert_eq!(Sassafras::randomness(), [0; 32]); + assert_eq!(NextRandomness::::get(), [0; 32]); + println!("{}", b2h(RandomnessAccumulator::::get())); + assert_eq!( + RandomnessAccumulator::::get(), + h2b("416f7e78a0390e14677782ea22102ba749eb9de7d02df46b39d1e3d6e6759c62"), + ); + + // Header data check + + assert_eq!(header.digest.logs.len(), 2); + assert_eq!(header.digest.logs[0], digest.logs[0]); + + // Genesis epoch start deposits consensus + let consensus_log = sp_consensus_sassafras::digests::ConsensusLog::NextEpochData( + sp_consensus_sassafras::digests::NextEpochDescriptor { + authorities: NextAuthorities::::get().to_vec(), + 
randomness: NextRandomness::::get(), + config: None, + }, + ); + let consensus_digest = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, consensus_log.encode()); + assert_eq!(header.digest.logs[1], consensus_digest) + }) +} + +#[test] +fn on_normal_block() { + let (pairs, mut ext) = new_test_ext_with_pairs(4, false); + let start_slot = Slot::from(100); + let start_block = 1; + let end_block = start_block + 1; + + ext.execute_with(|| { + initialize_block(start_block, start_slot, Default::default(), &pairs[0]); + + // We don't want to trigger an epoch change in this test. + let epoch_duration: u64 = ::EpochDuration::get(); + assert!(epoch_duration > end_block); + + // Progress to block 2 + let digest = progress_to_block(end_block, &pairs[0]).unwrap(); + + // Post-initialization status + + assert!(Initialized::::get().is_some()); + assert_eq!(Sassafras::genesis_slot(), start_slot); + assert_eq!(Sassafras::current_slot(), start_slot + 1); + assert_eq!(Sassafras::epoch_index(), 0); + assert_eq!(Sassafras::current_epoch_start(), start_slot); + assert_eq!(Sassafras::current_slot_index(), 1); + assert_eq!(Sassafras::randomness(), [0; 32]); + assert_eq!(NextRandomness::::get(), [0; 32]); + println!("{}", b2h(RandomnessAccumulator::::get())); + assert_eq!( + RandomnessAccumulator::::get(), + h2b("416f7e78a0390e14677782ea22102ba749eb9de7d02df46b39d1e3d6e6759c62"), + ); + + let header = finalize_block(end_block); + + // Post-finalization status + + assert!(Initialized::::get().is_none()); + assert_eq!(Sassafras::genesis_slot(), start_slot); + assert_eq!(Sassafras::current_slot(), start_slot + 1); + assert_eq!(Sassafras::epoch_index(), 0); + assert_eq!(Sassafras::current_epoch_start(), start_slot); + assert_eq!(Sassafras::current_slot_index(), 1); + assert_eq!(Sassafras::randomness(), [0; 32]); + assert_eq!(NextRandomness::::get(), [0; 32]); + println!("{}", b2h(RandomnessAccumulator::::get())); + assert_eq!( + RandomnessAccumulator::::get(), + h2b("eab1c5692bf3255ae46b2e732d061700fcd51ab57f029ad39983ceae5214a713"), + ); + + // Header data check + + assert_eq!(header.digest.logs.len(), 1); + assert_eq!(header.digest.logs[0], digest.logs[0]); + }); +} + +#[test] +fn produce_epoch_change_digest_no_config() { + let (pairs, mut ext) = new_test_ext_with_pairs(4, false); + + ext.execute_with(|| { + let start_slot = Slot::from(100); + let start_block = 1; + + initialize_block(start_block, start_slot, Default::default(), &pairs[0]); + + // We want to trigger an epoch change in this test. 
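+ // With the mock's 10-slot epochs, block `start_block + epoch_duration` claims the first
+ // slot of epoch #1, so the change is enacted while that block is initialized.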
+ let epoch_duration: u64 = ::EpochDuration::get(); + let end_block = start_block + epoch_duration; + + let digest = progress_to_block(end_block, &pairs[0]).unwrap(); + + // Post-initialization status + + assert!(Initialized::::get().is_some()); + assert_eq!(Sassafras::genesis_slot(), start_slot); + assert_eq!(Sassafras::current_slot(), start_slot + epoch_duration); + assert_eq!(Sassafras::epoch_index(), 1); + assert_eq!(Sassafras::current_epoch_start(), start_slot + epoch_duration); + assert_eq!(Sassafras::current_slot_index(), 0); + assert_eq!(Sassafras::randomness(), [0; 32]); + println!("{}", b2h(NextRandomness::::get())); + assert_eq!( + NextRandomness::::get(), + h2b("cb52dcf3b0caca956453d42004ac1b8005a26be669c2aaf534548e0b4c872a52"), + ); + println!("{}", b2h(RandomnessAccumulator::::get())); + assert_eq!( + RandomnessAccumulator::::get(), + h2b("ce3e3aeae02c85a8e0c8ee0ff0b120484df4551491ac2296e40147634ca4c58c"), + ); + + let header = finalize_block(end_block); + + // Post-finalization status + + assert!(Initialized::::get().is_none()); + assert_eq!(Sassafras::genesis_slot(), start_slot); + assert_eq!(Sassafras::current_slot(), start_slot + epoch_duration); + assert_eq!(Sassafras::epoch_index(), 1); + assert_eq!(Sassafras::current_epoch_start(), start_slot + epoch_duration); + assert_eq!(Sassafras::current_slot_index(), 0); + assert_eq!(Sassafras::randomness(), [0; 32]); + println!("{}", b2h(NextRandomness::::get())); + assert_eq!( + NextRandomness::::get(), + h2b("cb52dcf3b0caca956453d42004ac1b8005a26be669c2aaf534548e0b4c872a52"), + ); + println!("{}", b2h(RandomnessAccumulator::::get())); + assert_eq!( + RandomnessAccumulator::::get(), + h2b("1288d911ca5deb9c514149d4fdb64ebf94e63989e09e03bc69218319456d4ec9"), + ); + + // Header data check + + assert_eq!(header.digest.logs.len(), 2); + assert_eq!(header.digest.logs[0], digest.logs[0]); + // Deposits consensus log on epoch change + let consensus_log = sp_consensus_sassafras::digests::ConsensusLog::NextEpochData( + sp_consensus_sassafras::digests::NextEpochDescriptor { + authorities: NextAuthorities::::get().to_vec(), + randomness: NextRandomness::::get(), + config: None, + }, + ); + let consensus_digest = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, consensus_log.encode()); + assert_eq!(header.digest.logs[1], consensus_digest) + }) +} + +#[test] +fn produce_epoch_change_digest_with_config() { + let (pairs, mut ext) = new_test_ext_with_pairs(4, false); + + ext.execute_with(|| { + let start_slot = Slot::from(100); + let start_block = 1; + + initialize_block(start_block, start_slot, Default::default(), &pairs[0]); + + let config = EpochConfiguration { redundancy_factor: 1, attempts_number: 123 }; + Sassafras::plan_config_change(RuntimeOrigin::root(), config.clone()).unwrap(); + + // We want to trigger an epoch change in this test. + let epoch_duration: u64 = ::EpochDuration::get(); + let end_block = start_block + epoch_duration; + + let digest = progress_to_block(end_block, &pairs[0]).unwrap(); + + let header = finalize_block(end_block); + + // Header data check. + // Skip pallet status checks that were already performed by other tests. 
+ + assert_eq!(header.digest.logs.len(), 2); + assert_eq!(header.digest.logs[0], digest.logs[0]); + // Deposits consensus log on epoch change + let consensus_log = sp_consensus_sassafras::digests::ConsensusLog::NextEpochData( + sp_consensus_sassafras::digests::NextEpochDescriptor { + authorities: NextAuthorities::::get().to_vec(), + randomness: NextRandomness::::get(), + config: Some(config), // We are mostly interested in this + }, + ); + let consensus_digest = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, consensus_log.encode()); + assert_eq!(header.digest.logs[1], consensus_digest) + }) +} + +#[test] +fn segments_incremental_sort_works() { + let (pairs, mut ext) = new_test_ext_with_pairs(1, false); + let pair = &pairs[0]; + let segments_count = 14; + let start_slot = Slot::from(100); + let start_block = 1; + + ext.execute_with(|| { + let max_tickets: u32 = ::MaxTickets::get(); + let tickets_count = segments_count * max_tickets; + + initialize_block(start_block, start_slot, Default::default(), &pairs[0]); + + // Manually populate the segments to skip the threshold check + let mut tickets = make_ticket_bodies(tickets_count, pair); + persist_next_epoch_tickets_as_segments(&tickets, segments_count as usize); + + let epoch_duration: u64 = ::EpochDuration::get(); + + // Proceed to half of the epoch (sortition should not have been started yet) + let half_epoch_block = start_block + epoch_duration / 2; + progress_to_block(half_epoch_block, pair); + + // Check that next epoch tickets sortition is not started yet + let meta = TicketsMeta::::get(); + assert_eq!(meta.segments_count, segments_count); + assert_eq!(meta.tickets_count, [0, 0]); + + // Follow incremental sortition block by block + + progress_to_block(half_epoch_block + 1, pair); + let meta = TicketsMeta::::get(); + assert_eq!(meta.segments_count, 12); + assert_eq!(meta.tickets_count, [0, 0]); + + progress_to_block(half_epoch_block + 2, pair); + let meta = TicketsMeta::::get(); + assert_eq!(meta.segments_count, 9); + assert_eq!(meta.tickets_count, [0, 0]); + + progress_to_block(half_epoch_block + 3, pair); + let meta = TicketsMeta::::get(); + assert_eq!(meta.segments_count, 6); + assert_eq!(meta.tickets_count, [0, 0]); + + progress_to_block(half_epoch_block + 4, pair); + let meta = TicketsMeta::::get(); + assert_eq!(meta.segments_count, 3); + assert_eq!(meta.tickets_count, [0, 0]); + + let header = finalize_block(half_epoch_block + 4); + + // Sort should be finished now. + // Check that next epoch tickets count have the correct value. + // Bigger ticket ids were discarded during sortition. + let meta = TicketsMeta::::get(); + assert_eq!(meta.segments_count, 0); + assert_eq!(meta.tickets_count, [0, max_tickets]); + assert_eq!(header.digest.logs.len(), 1); + // No tickets for the current epoch + assert_eq!(TicketsIds::::get((0, 0)), None); + + // Check persistence of good tickets + tickets.sort_by_key(|t| t.0); + (0..max_tickets as usize).into_iter().for_each(|i| { + let id = TicketsIds::::get((1, i as u32)).unwrap(); + let body = TicketsData::::get(id).unwrap(); + assert_eq!((id, body), tickets[i]); + }); + // Check removal of bad tickets + (max_tickets as usize..tickets.len()).into_iter().for_each(|i| { + assert!(TicketsIds::::get((1, i as u32)).is_none()); + assert!(TicketsData::::get(tickets[i].0).is_none()); + }); + + // The next block will be the first produced on the new epoch, + // At this point the tickets are found already sorted and ready to be used. 
+ let slot = Sassafras::current_slot() + 1; + let number = System::block_number() + 1; + initialize_block(number, slot, header.hash(), pair); + let header = finalize_block(number); + // Epoch changes digest is also produced + assert_eq!(header.digest.logs.len(), 2); + }); +} + +#[test] +fn tickets_fetch_works_after_epoch_change() { + let (pairs, mut ext) = new_test_ext_with_pairs(4, false); + let pair = &pairs[0]; + let start_slot = Slot::from(100); + let start_block = 1; + + ext.execute_with(|| { + let max_tickets: u32 = ::MaxTickets::get(); + + initialize_block(start_block, start_slot, Default::default(), pair); + + // We don't want to trigger an epoch change in this test. + let epoch_duration: u64 = ::EpochDuration::get(); + assert!(epoch_duration > 2); + progress_to_block(2, &pairs[0]).unwrap(); + + // Persist tickets as three different segments. + let tickets = make_ticket_bodies(3 * max_tickets, pair); + persist_next_epoch_tickets_as_segments(&tickets, 3); + + // Progress up to the last epoch slot (do not enact epoch change) + progress_to_block(epoch_duration, &pairs[0]).unwrap(); + + // At this point next tickets should have been sorted and ready to be used + assert_eq!( + TicketsMeta::::get(), + TicketsMetadata { segments_count: 0, tickets_count: [0, 6] }, + ); + + // Compute and sort the tickets ids (aka tickets scores) + let mut expected_ids: Vec<_> = tickets.into_iter().map(|(id, _)| id).collect(); + expected_ids.sort(); + expected_ids.truncate(max_tickets as usize); + + // Check if we can fetch next epoch tickets ids (outside-in). + let slot = Sassafras::current_slot(); + assert_eq!(Sassafras::slot_ticket_id(slot + 1).unwrap(), expected_ids[1]); + assert_eq!(Sassafras::slot_ticket_id(slot + 2).unwrap(), expected_ids[3]); + assert_eq!(Sassafras::slot_ticket_id(slot + 3).unwrap(), expected_ids[5]); + assert!(Sassafras::slot_ticket_id(slot + 4).is_none()); + assert!(Sassafras::slot_ticket_id(slot + 7).is_none()); + assert_eq!(Sassafras::slot_ticket_id(slot + 8).unwrap(), expected_ids[4]); + assert_eq!(Sassafras::slot_ticket_id(slot + 9).unwrap(), expected_ids[2]); + assert_eq!(Sassafras::slot_ticket_id(slot + 10).unwrap(), expected_ids[0]); + assert!(Sassafras::slot_ticket_id(slot + 11).is_none()); + + // Enact epoch change by progressing one more block + + progress_to_block(epoch_duration + 1, &pairs[0]).unwrap(); + + let meta = TicketsMeta::::get(); + assert_eq!(meta.segments_count, 0); + assert_eq!(meta.tickets_count, [0, 6]); + + // Check if we can fetch thisepoch tickets ids (outside-in). 
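The outside-in assignment exercised by the slot-ticket checks just above and below this point can be sketched independently of the pallet storage. The assertions imply a 10-slot epoch holding 6 sorted tickets; the hypothetical helper below (not the pallet's actual code) reproduces the expected indices.

/// Outside-in mapping from a relative slot index (assumed `< epoch_len`) to an
/// index in the sorted tickets list: the best ticket goes to the last slot, the
/// second best to the first slot, the third best to the second-to-last, and so on.
fn outside_in_ticket_index(slot_idx: u64, epoch_len: u64, tickets_count: usize) -> Option<usize> {
    let from_start = 2 * slot_idx + 1;
    let from_end = 2 * (epoch_len - 1 - slot_idx);
    let idx = from_start.min(from_end) as usize;
    (idx < tickets_count).then_some(idx)
}

// With a 10-slot epoch and 6 tickets, as in the assertions around this point:
// slots 0..=2 map to tickets 1, 3, 5; slots 3..=6 have no ticket;
// slots 7..=9 map to tickets 4, 2, 0.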
+ let slot = Sassafras::current_slot(); + assert_eq!(Sassafras::slot_ticket_id(slot).unwrap(), expected_ids[1]); + assert_eq!(Sassafras::slot_ticket_id(slot + 1).unwrap(), expected_ids[3]); + assert_eq!(Sassafras::slot_ticket_id(slot + 2).unwrap(), expected_ids[5]); + assert!(Sassafras::slot_ticket_id(slot + 3).is_none()); + assert!(Sassafras::slot_ticket_id(slot + 6).is_none()); + assert_eq!(Sassafras::slot_ticket_id(slot + 7).unwrap(), expected_ids[4]); + assert_eq!(Sassafras::slot_ticket_id(slot + 8).unwrap(), expected_ids[2]); + assert_eq!(Sassafras::slot_ticket_id(slot + 9).unwrap(), expected_ids[0]); + assert!(Sassafras::slot_ticket_id(slot + 10).is_none()); + }); +} + +#[test] +fn block_allowed_to_skip_epochs() { + let (pairs, mut ext) = new_test_ext_with_pairs(4, false); + let pair = &pairs[0]; + let start_slot = Slot::from(100); + let start_block = 1; + + ext.execute_with(|| { + let epoch_duration: u64 = ::EpochDuration::get(); + + initialize_block(start_block, start_slot, Default::default(), pair); + + let tickets = make_ticket_bodies(3, pair); + persist_next_epoch_tickets(&tickets); + + let next_random = NextRandomness::::get(); + + // We want to skip 2 epochs in this test. + let offset = 3 * epoch_duration; + go_to_block(start_block + offset, start_slot + offset, &pairs[0]); + + // Post-initialization status + + assert!(Initialized::::get().is_some()); + assert_eq!(Sassafras::genesis_slot(), start_slot); + assert_eq!(Sassafras::current_slot(), start_slot + offset); + assert_eq!(Sassafras::epoch_index(), 3); + assert_eq!(Sassafras::current_epoch_start(), start_slot + offset); + assert_eq!(Sassafras::current_slot_index(), 0); + + // Tickets data has been discarded + let meta = TicketsMeta::::get(); + assert_eq!(meta, TicketsMetadata::default()); + + tickets.iter().for_each(|(id, _)| { + let data = TicketsData::::get(id); + assert!(data.is_none()); + }); + // We used the last known next epoch randomness as a fallback + assert_eq!(next_random, Sassafras::randomness()); + }); +} + +#[test] +fn obsolete_tickets_are_removed_on_epoch_change() { + let (pairs, mut ext) = new_test_ext_with_pairs(4, false); + let pair = &pairs[0]; + let start_slot = Slot::from(100); + let start_block = 1; + + ext.execute_with(|| { + let epoch_duration: u64 = ::EpochDuration::get(); + + initialize_block(start_block, start_slot, Default::default(), pair); + + let tickets = make_ticket_bodies(10, pair); + let mut epoch1_tickets = tickets[..4].to_vec(); + let mut epoch2_tickets = tickets[4..].to_vec(); + + // Persist some tickets for next epoch (N) + persist_next_epoch_tickets(&epoch1_tickets); + assert_eq!(TicketsMeta::::get().tickets_count, [0, 4]); + // Check next epoch tickets presence + epoch1_tickets.sort_by_key(|t| t.0); + (0..epoch1_tickets.len()).into_iter().for_each(|i| { + let id = TicketsIds::::get((1, i as u32)).unwrap(); + let body = TicketsData::::get(id).unwrap(); + assert_eq!((id, body), epoch1_tickets[i]); + }); + + // Advance one epoch to enact the tickets + go_to_block(start_block + epoch_duration, start_slot + epoch_duration, pair); + assert_eq!(TicketsMeta::::get().tickets_count, [0, 4]); + + // Persist some tickets for next epoch (N+1) + persist_next_epoch_tickets(&epoch2_tickets); + assert_eq!(TicketsMeta::::get().tickets_count, [6, 4]); + epoch2_tickets.sort_by_key(|t| t.0); + // Check for this epoch and next epoch tickets presence + (0..epoch1_tickets.len()).into_iter().for_each(|i| { + let id = TicketsIds::::get((1, i as u32)).unwrap(); + let body = TicketsData::::get(id).unwrap(); + 
assert_eq!((id, body), epoch1_tickets[i]); + }); + (0..epoch2_tickets.len()).into_iter().for_each(|i| { + let id = TicketsIds::::get((0, i as u32)).unwrap(); + let body = TicketsData::::get(id).unwrap(); + assert_eq!((id, body), epoch2_tickets[i]); + }); + + // Advance to epoch 2 and check for cleanup + + go_to_block(start_block + 2 * epoch_duration, start_slot + 2 * epoch_duration, pair); + assert_eq!(TicketsMeta::::get().tickets_count, [6, 0]); + + (0..epoch1_tickets.len()).into_iter().for_each(|i| { + let id = TicketsIds::::get((1, i as u32)).unwrap(); + assert!(TicketsData::::get(id).is_none()); + }); + (0..epoch2_tickets.len()).into_iter().for_each(|i| { + let id = TicketsIds::::get((0, i as u32)).unwrap(); + let body = TicketsData::::get(id).unwrap(); + assert_eq!((id, body), epoch2_tickets[i]); + }); + }) +} + +// TODO davxy: create a read_tickets method which reads pre-constructed good tickets +// from a file. Creating this stuff "on-the-fly" is just too much expensive +// +// A valid ring-context is required for this test since we are passing though the +// `submit_ticket` call which tests for ticket validity. +#[test] +fn submit_tickets_with_ring_proof_check_works() { + let (pairs, mut ext) = new_test_ext_with_pairs(10, true); + let pair = &pairs[0]; + let segments_count = 3; + + ext.execute_with(|| { + let start_slot = Slot::from(100); + let start_block = 1; + let max_tickets: u32 = ::MaxTickets::get(); + let attempts_number = segments_count * max_tickets; + + // Tweak the epoch config to discard some of the tickets + let mut config = EpochConfig::::get(); + config.redundancy_factor = 7; + config.attempts_number = attempts_number; + EpochConfig::::set(config); + + initialize_block(start_block, start_slot, Default::default(), &pairs[0]); + + // Check state before tickets submission + assert_eq!( + TicketsMeta::::get(), + TicketsMetadata { segments_count: 0, tickets_count: [0, 0] }, + ); + + // Populate the segments via the `submit_tickets` + let tickets = make_tickets(attempts_number, pair); + let segment_len = tickets.len() / segments_count as usize; + for i in 0..segments_count as usize { + println!("Submit tickets"); + let segment = + tickets[i * segment_len..(i + 1) * segment_len].to_vec().try_into().unwrap(); + Sassafras::submit_tickets(RuntimeOrigin::none(), segment).unwrap(); + } + + // Check state after submission + assert_eq!( + TicketsMeta::::get(), + TicketsMetadata { segments_count, tickets_count: [0, 0] }, + ); + + finalize_block(start_block); + + // Check against the expected results given the known inputs + assert_eq!(NextTicketsSegments::::get(0).len(), 6); + assert_eq!(NextTicketsSegments::::get(1).len(), 1); + assert_eq!(NextTicketsSegments::::get(2).len(), 2); + }) +} diff --git a/primitives/consensus/babe/src/lib.rs b/primitives/consensus/babe/src/lib.rs index c083bfd9a313e..646d59a2e4c99 100644 --- a/primitives/consensus/babe/src/lib.rs +++ b/primitives/consensus/babe/src/lib.rs @@ -324,6 +324,7 @@ where /// sure that all usages of `OpaqueKeyOwnershipProof` refer to the same type. #[derive(Decode, Encode, PartialEq, TypeInfo)] pub struct OpaqueKeyOwnershipProof(Vec); + impl OpaqueKeyOwnershipProof { /// Create a new `OpaqueKeyOwnershipProof` using the given encoded /// representation. 
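Before moving to the new primitives crate, a worked sketch of the acceptance threshold that the `submit_tickets` path above relies on. The formula is the one documented for `ticket_id_threshold` in `primitives/consensus/sassafras/src/ticket.rs` below; the concrete numbers are the test-runtime genesis values appearing later in this patch.

/// Same arithmetic as `ticket_id_threshold`:
/// threshold = TicketId::MAX * (redundancy * slots) / (attempts * validators).
fn threshold(redundancy: u32, slots: u32, attempts: u32, validators: u32) -> u128 {
    let den = attempts as u64 * validators as u64;
    let num = redundancy as u64 * slots as u64;
    u128::MAX.checked_div(den.into()).unwrap_or_default().saturating_mul(num.into())
}

fn main() {
    // Test-runtime genesis: redundancy_factor = 1, attempts_number = 32,
    // 3 authorities, 6-slot epochs.
    let t = threshold(1, 6, 32, 3);
    assert_eq!(t, (u128::MAX / 96).saturating_mul(6));
    // A random 128-bit ticket id passes with probability ~ 6/96, so out of the
    // 3 * 32 = 96 candidate tickets roughly 6 are expected to be accepted,
    // i.e. about `redundancy_factor * epoch_length` tickets per epoch.
}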
diff --git a/primitives/consensus/sassafras/Cargo.toml b/primitives/consensus/sassafras/Cargo.toml new file mode 100644 index 0000000000000..14de323c73617 --- /dev/null +++ b/primitives/consensus/sassafras/Cargo.toml @@ -0,0 +1,50 @@ +[package] +name = "sp-consensus-sassafras" +version = "0.3.4-dev" +authors = ["Parity Technologies "] +description = "Primitives for Sassafras consensus" +edition = "2021" +license = "Apache-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +documentation = "https://docs.rs/sp-consensus-sassafras" +readme = "README.md" +publish = false + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +scale-codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +serde = { version = "1.0.163", default-features = false, features = ["derive"], optional = true } +sp-api = { version = "4.0.0-dev", default-features = false, path = "../../api" } +sp-application-crypto = { version = "23.0.0", default-features = false, path = "../../application-crypto", features = ["bandersnatch-experimental"] } +sp-consensus-slots = { version = "0.10.0-dev", default-features = false, path = "../slots" } +sp-core = { version = "21.0.0", default-features = false, path = "../../core", features = ["bandersnatch-experimental"] } +sp-runtime = { version = "24.0.0", default-features = false, path = "../../runtime" } +sp-std = { version = "8.0.0", default-features = false, path = "../../std" } + +[features] +default = ["std"] +std = [ + "scale-codec/std", + "scale-info/std", + "serde/std", + "sp-api/std", + "sp-application-crypto/std", + "sp-consensus-slots/std", + "sp-core/std", + "sp-runtime/std", + "sp-std/std", +] + +# Serde support without relying on std features. +serde = [ + "dep:serde", + "scale-info/serde", + "sp-application-crypto/serde", + "sp-consensus-slots/serde", + "sp-core/serde", + "sp-runtime/serde", +] diff --git a/primitives/consensus/sassafras/README.md b/primitives/consensus/sassafras/README.md new file mode 100644 index 0000000000000..f632ce5ba534d --- /dev/null +++ b/primitives/consensus/sassafras/README.md @@ -0,0 +1,12 @@ +Primitives for SASSAFRAS. + +# ⚠️ WARNING ⚠️ + +The crate interfaces and structures are highly experimental and may be subject +to significant changes. + +Depends on upstream experimental feature: `bandersnatch-experimental`. + +These structs were mostly extracted from the main SASSAFRAS protocol PR: https://github.com/paritytech/substrate/pull/11879. + +Tracking issue: https://github.com/paritytech/substrate/issues/11515. diff --git a/primitives/consensus/sassafras/src/digests.rs b/primitives/consensus/sassafras/src/digests.rs new file mode 100644 index 0000000000000..95a305099de55 --- /dev/null +++ b/primitives/consensus/sassafras/src/digests.rs @@ -0,0 +1,98 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//! Sassafras digests structures and helpers. + +use crate::{ + ticket::TicketClaim, vrf::VrfSignature, AuthorityId, AuthorityIndex, AuthoritySignature, + EpochConfiguration, Randomness, Slot, SASSAFRAS_ENGINE_ID, +}; + +use scale_codec::{Decode, Encode, MaxEncodedLen}; +use scale_info::TypeInfo; + +use sp_runtime::{DigestItem, RuntimeDebug}; +use sp_std::vec::Vec; + +/// Epoch slot claim digest entry. +/// +/// This is mandatory for each block. +#[derive(Clone, RuntimeDebug, Encode, Decode, MaxEncodedLen, TypeInfo)] +pub struct SlotClaim { + /// Authority index that claimed the slot. + pub authority_idx: AuthorityIndex, + /// Corresponding slot number. + pub slot: Slot, + /// Slot claim VRF signature. + pub vrf_signature: VrfSignature, + /// Ticket auxiliary information for claim check. + pub ticket_claim: Option, +} + +/// Information about the next epoch. +/// +/// This is mandatory in the first block of each epoch. +#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] +pub struct NextEpochDescriptor { + /// Authorities list. + pub authorities: Vec, + /// Epoch randomness. + pub randomness: Randomness, + /// Epoch configurable parameters. + /// + /// If not present previous epoch parameters are used. + pub config: Option, +} + +/// Runtime digest entries. +/// +/// Entries which may be generated by on-chain code. +#[derive(Decode, Encode, Clone, PartialEq, Eq)] +pub enum ConsensusLog { + /// Provides information about the next epoch parameters. + #[codec(index = 1)] + NextEpochData(NextEpochDescriptor), + /// Disable the authority with given index. + #[codec(index = 2)] + OnDisabled(AuthorityIndex), +} + +impl TryFrom<&DigestItem> for SlotClaim { + type Error = (); + fn try_from(item: &DigestItem) -> Result { + item.pre_runtime_try_to(&SASSAFRAS_ENGINE_ID).ok_or(()) + } +} + +impl From<&SlotClaim> for DigestItem { + fn from(claim: &SlotClaim) -> Self { + DigestItem::PreRuntime(SASSAFRAS_ENGINE_ID, claim.encode()) + } +} + +impl TryFrom<&DigestItem> for AuthoritySignature { + type Error = (); + fn try_from(item: &DigestItem) -> Result { + item.seal_try_to(&SASSAFRAS_ENGINE_ID).ok_or(()) + } +} + +impl From<&AuthoritySignature> for DigestItem { + fn from(signature: &AuthoritySignature) -> Self { + DigestItem::Seal(SASSAFRAS_ENGINE_ID, signature.encode()) + } +} diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs new file mode 100644 index 0000000000000..651e97850b756 --- /dev/null +++ b/primitives/consensus/sassafras/src/lib.rs @@ -0,0 +1,175 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Primitives for Sassafras consensus. 
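A hypothetical usage sketch for the digest conversions defined above in `digests.rs`: extracting the pre-runtime slot claim and the seal from a header digest. Only the conversions shown in this patch are assumed; the helper name is made up.

use sp_consensus_sassafras::{digests::SlotClaim, AuthoritySignature};
use sp_runtime::Digest;

/// Pick the Sassafras pre-runtime slot claim and the seal out of a header digest.
fn claim_and_seal(digest: &Digest) -> (Option<SlotClaim>, Option<AuthoritySignature>) {
    let claim = digest.logs.iter().find_map(|item| SlotClaim::try_from(item).ok());
    let seal = digest.logs.iter().find_map(|item| AuthoritySignature::try_from(item).ok());
    (claim, seal)
}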
+ +#![deny(warnings)] +#![forbid(unsafe_code, missing_docs, unused_variables, unused_imports)] +#![cfg_attr(not(feature = "std"), no_std)] + +use scale_codec::{Decode, Encode, MaxEncodedLen}; +use scale_info::TypeInfo; +use sp_core::crypto::KeyTypeId; +use sp_runtime::{ConsensusEngineId, RuntimeDebug}; +use sp_std::vec::Vec; + +pub use sp_consensus_slots::{Slot, SlotDuration}; + +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +pub mod digests; +pub mod ticket; +pub mod vrf; + +pub use ticket::{ + ticket_id_threshold, EphemeralPublic, EphemeralSignature, TicketBody, TicketClaim, + TicketEnvelope, TicketId, +}; + +mod app { + use sp_application_crypto::{app_crypto, bandersnatch, key_types::SASSAFRAS}; + app_crypto!(bandersnatch, SASSAFRAS); +} + +/// Key type identifier. +pub const KEY_TYPE: KeyTypeId = sp_application_crypto::key_types::SASSAFRAS; + +/// Consensus engine identifier. +pub const SASSAFRAS_ENGINE_ID: ConsensusEngineId = *b"SASS"; + +/// VRF output length for per-slot randomness. +pub const RANDOMNESS_LENGTH: usize = 32; + +/// Index of an authority. +pub type AuthorityIndex = u32; + +/// Sassafras authority keypair. Necessarily equivalent to the schnorrkel public key used in +/// the main Sassafras module. If that ever changes, then this must, too. +#[cfg(feature = "std")] +pub type AuthorityPair = app::Pair; + +/// Sassafras authority signature. +pub type AuthoritySignature = app::Signature; + +/// Sassafras authority identifier. Necessarily equivalent to the schnorrkel public key used in +/// the main Sassafras module. If that ever changes, then this must, too. +pub type AuthorityId = app::Public; + +/// Weight of a Sassafras block. +/// Primary blocks have a weight of 1 whereas secondary blocks have a weight of 0. +pub type SassafrasBlockWeight = u32; + +/// An equivocation proof for multiple block authorships on the same slot (i.e. double vote). +pub type EquivocationProof = sp_consensus_slots::EquivocationProof; + +/// Randomness required by some protocol's operations. +pub type Randomness = [u8; RANDOMNESS_LENGTH]; + +/// Configuration data that can be modified on epoch change. +#[derive( + Copy, Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, MaxEncodedLen, TypeInfo, Default, +)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct EpochConfiguration { + /// Tickets threshold redundancy factor. + pub redundancy_factor: u32, + /// Tickets attempts for each validator. + pub attempts_number: u32, +} + +/// Sassafras epoch information +#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, TypeInfo)] +pub struct Epoch { + /// The epoch index. + pub epoch_idx: u64, + /// The starting slot of the epoch. + pub start_slot: Slot, + /// Slot duration in milliseconds. + pub slot_duration: SlotDuration, + /// Duration of epoch in slots. + pub epoch_duration: u64, + /// Authorities for the epoch. + pub authorities: Vec, + /// Randomness for the epoch. + pub randomness: Randomness, + /// Epoch configuration. + pub config: EpochConfiguration, +} + +/// An opaque type used to represent the key ownership proof at the runtime API boundary. +/// +/// The inner value is an encoded representation of the actual key ownership proof which will be +/// parameterized when defining the runtime. At the runtime API boundary this type is unknown and +/// as such we keep this opaque representation, implementors of the runtime API will have to make +/// sure that all usages of `OpaqueKeyOwnershipProof` refer to the same type. 
+#[derive(Decode, Encode, PartialEq, TypeInfo)] +pub struct OpaqueKeyOwnershipProof(Vec); + +// Runtime API. +sp_api::decl_runtime_apis! { + /// API necessary for block authorship with Sassafras. + pub trait SassafrasApi { + /// Get ring context to be used for ticket construction and verification. + fn ring_context() -> Option; + + /// Submit next epoch validator tickets via an unsigned extrinsic. + /// This method returns `false` when creation of the extrinsics fails. + fn submit_tickets_unsigned_extrinsic(tickets: Vec) -> bool; + + /// Get ticket id associated to the given slot. + fn slot_ticket_id(slot: Slot) -> Option; + + /// Get ticket id and data associated to the given slot. + fn slot_ticket(slot: Slot) -> Option<(TicketId, TicketBody)>; + + /// Current epoch information. + fn current_epoch() -> Epoch; + + /// Next epoch information. + fn next_epoch() -> Epoch; + + /// Generates a proof of key ownership for the given authority in the current epoch. + /// + /// An example usage of this module is coupled with the session historical module to prove + /// that a given authority key is tied to a given staking identity during a specific + /// session. Proofs of key ownership are necessary for submitting equivocation reports. + /// + /// NOTE: even though the API takes a `slot` as parameter the current implementations + /// ignores this parameter and instead relies on this method being called at the correct + /// block height, i.e. any point at which the epoch for the given slot is live on-chain. + /// Future implementations will instead use indexed data through an offchain worker, not + /// requiring older states to be available. + fn generate_key_ownership_proof( + slot: Slot, + authority_id: AuthorityId, + ) -> Option; + + /// Submits an unsigned extrinsic to report an equivocation. + /// + /// The caller must provide the equivocation proof and a key ownership proof (should be + /// obtained using `generate_key_ownership_proof`). The extrinsic will be unsigned and + /// should only be accepted for local authorship (not to be broadcast to the network). This + /// method returns `None` when creation of the extrinsic fails, e.g. if equivocation + /// reporting is disabled for the given runtime (i.e. this method is hardcoded to return + /// `None`). Only useful in an offchain context. + fn submit_report_equivocation_unsigned_extrinsic( + equivocation_proof: EquivocationProof, + key_owner_proof: OpaqueKeyOwnershipProof, + ) -> bool; + } +} diff --git a/primitives/consensus/sassafras/src/ticket.rs b/primitives/consensus/sassafras/src/ticket.rs new file mode 100644 index 0000000000000..42d9d64434dd8 --- /dev/null +++ b/primitives/consensus/sassafras/src/ticket.rs @@ -0,0 +1,93 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Primitives related to tickets. 
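A hedged client-side sketch for the `SassafrasApi` runtime API declared above. It assumes a client type implementing `sp_api::ProvideRuntimeApi` and the hash-based calling convention of current `sp-api`; the helper name is made up.

use sp_api::ProvideRuntimeApi;
use sp_consensus_sassafras::{SassafrasApi, Slot, TicketId};
use sp_runtime::traits::Block as BlockT;

/// Ask the runtime (at block `at`) which ticket, if any, is assigned to `slot`.
fn slot_ticket_id_at<B, C>(client: &C, at: B::Hash, slot: Slot) -> Option<TicketId>
where
    B: BlockT,
    C: ProvideRuntimeApi<B>,
    C::Api: SassafrasApi<B>,
{
    client.runtime_api().slot_ticket_id(at, slot).ok().flatten()
}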
+ +use crate::vrf::RingVrfSignature; +use scale_codec::{Decode, Encode, MaxEncodedLen}; +use scale_info::TypeInfo; + +pub use sp_core::ed25519::{Public as EphemeralPublic, Signature as EphemeralSignature}; + +/// Ticket identifier. +/// +/// Its value is the output of a VRF whose inputs cannot be controlled by the +/// ticket's creator (refer to [`crate::vrf::ticket_id_input`] parameters). +/// Because of this, it is also used as the ticket score to compare against +/// the epoch ticket's threshold to decide if the ticket is worth being considered +/// for slot assignment (refer to [`ticket_id_threshold`]). +pub type TicketId = u128; + +/// Ticket data persisted on-chain. +#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] +pub struct TicketBody { + /// Attempt index. + pub attempt_idx: u32, + /// Ephemeral public key which gets erased when the ticket is claimed. + pub erased_public: EphemeralPublic, + /// Ephemeral public key which gets exposed when the ticket is claimed. + pub revealed_public: EphemeralPublic, +} + +/// Ticket ring vrf signature. +pub type TicketSignature = RingVrfSignature; + +/// Ticket envelope used on during submission. +#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] +pub struct TicketEnvelope { + /// Ticket body. + pub body: TicketBody, + /// Ring signature. + pub signature: TicketSignature, +} + +/// Ticket claim information filled by the block author. +#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] +pub struct TicketClaim { + /// Signature verified via `TicketBody::erased_public`. + pub erased_signature: EphemeralSignature, +} + +/// Computes ticket-id maximum allowed value for a given epoch. +/// +/// Only ticket identifiers below this threshold should be considered for slot +/// assignment. +/// +/// The value is computed as +/// +/// TicketId::MAX*(redundancy*slots)/(attempts*validators) +/// +/// Where: +/// - `redundancy`: redundancy factor; +/// - `slots`: number of slots in epoch; +/// - `attempts`: max number of tickets attempts per validator; +/// - `validators`: number of validators in epoch. +/// +/// If `attempts * validators = 0` then we return 0. +pub fn ticket_id_threshold( + redundancy: u32, + slots: u32, + attempts: u32, + validators: u32, +) -> TicketId { + let den = attempts as u64 * validators as u64; + let num = redundancy as u64 * slots as u64; + TicketId::max_value() + .checked_div(den.into()) + .unwrap_or_default() + .saturating_mul(num.into()) +} diff --git a/primitives/consensus/sassafras/src/vrf.rs b/primitives/consensus/sassafras/src/vrf.rs new file mode 100644 index 0000000000000..52f9d45e5ab57 --- /dev/null +++ b/primitives/consensus/sassafras/src/vrf.rs @@ -0,0 +1,104 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Utilities related to VRF input, output and signatures. 
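The VRF inputs in this module are built by concatenating each data item with a one-byte length suffix. A standalone sketch of that encoding, illustrative only and mirroring the `vrf_input_from_data` helper defined just below:

/// Concatenate items, appending each item's length as a single trailing byte.
fn concat_with_len_suffix(items: &[&[u8]]) -> Vec<u8> {
    let mut out = Vec::new();
    for item in items {
        out.extend_from_slice(item);
        out.push(u8::try_from(item.len()).expect("item fits in one byte"));
    }
    out
}

// e.g. `concat_with_len_suffix(&[b"abc".as_slice(), b"de".as_slice()])`
// yields b"abc\x03de\x02".to_vec().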
+ +use crate::{Randomness, TicketBody, TicketId}; +use scale_codec::Encode; +use sp_consensus_slots::Slot; +use sp_std::vec::Vec; + +pub use sp_core::bandersnatch::{ + ring_vrf::{RingContext, RingProver, RingVerifier, RingVrfSignature}, + vrf::{VrfInput, VrfOutput, VrfSignData, VrfSignature}, +}; + +fn vrf_input_from_data( + domain: &[u8], + data: impl IntoIterator>, +) -> VrfInput { + let raw = data.into_iter().fold(Vec::new(), |mut v, e| { + let bytes = e.as_ref(); + v.extend_from_slice(bytes); + let len = u8::try_from(bytes.len()).expect("private function with well known inputs; qed"); + v.extend_from_slice(&len.to_le_bytes()); + v + }); + VrfInput::new(domain, raw) +} + +/// VRF input to claim slot ownership during block production. +pub fn slot_claim_input(randomness: &Randomness, slot: Slot, epoch: u64) -> VrfInput { + vrf_input_from_data( + b"sassafras-claim-v1.0", + [randomness.as_slice(), &slot.to_le_bytes(), &epoch.to_le_bytes()], + ) +} + +/// Signing-data to claim slot ownership during block production. +pub fn slot_claim_sign_data(randomness: &Randomness, slot: Slot, epoch: u64) -> VrfSignData { + let vrf_input = slot_claim_input(randomness, slot, epoch); + VrfSignData::new_unchecked( + b"sassafras-slot-claim-transcript-v1.0", + Option::<&[u8]>::None, + Some(vrf_input), + ) +} + +/// VRF input to generate the ticket id. +pub fn ticket_id_input(randomness: &Randomness, attempt: u32, epoch: u64) -> VrfInput { + vrf_input_from_data( + b"sassafras-ticket-v1.0", + [randomness.as_slice(), &attempt.to_le_bytes(), &epoch.to_le_bytes()], + ) +} + +/// VRF input to generate the revealed key. +pub fn revealed_key_input(randomness: &Randomness, attempt: u32, epoch: u64) -> VrfInput { + vrf_input_from_data( + b"sassafras-revealed-v1.0", + [randomness.as_slice(), &attempt.to_le_bytes(), &epoch.to_le_bytes()], + ) +} + +/// Data to be signed via ring-vrf. +pub fn ticket_body_sign_data(ticket_body: &TicketBody, ticket_id_input: VrfInput) -> VrfSignData { + VrfSignData::new_unchecked( + b"sassafras-ticket-body-transcript-v1.0", + Some(ticket_body.encode().as_slice()), + Some(ticket_id_input), + ) +} + +/// Make ticket-id from the given VRF input and output. +/// +/// Input should have been obtained via [`ticket_id_input`]. +/// Output should have been obtained from the input directly using the vrf secret key +/// or from the vrf signature outputs. +pub fn make_ticket_id(vrf_input: &VrfInput, vrf_output: &VrfOutput) -> TicketId { + let bytes = vrf_output.make_bytes::<16>(b"ticket-id", vrf_input); + u128::from_le_bytes(bytes) +} + +/// Make revealed key seed from a given VRF input and ouput. +/// +/// Input should have been obtained via [`revealed_key_input`]. +/// Output should have been obtained from the input directly using the vrf secret key +/// or from the vrf signature outputs. 
+pub fn make_revealed_key_seed(vrf_input: &VrfInput, vrf_output: &VrfOutput) -> [u8; 32] { + vrf_output.make_bytes::<32>(b"revealed-seed", vrf_input) +} diff --git a/primitives/core/src/bandersnatch.rs b/primitives/core/src/bandersnatch.rs index c3ba7f41058e9..3a58bc48fc889 100644 --- a/primitives/core/src/bandersnatch.rs +++ b/primitives/core/src/bandersnatch.rs @@ -31,7 +31,7 @@ use crate::crypto::{DeriveError, DeriveJunction, Pair as TraitPair, SecretString use bandersnatch_vrfs::CanonicalSerialize; #[cfg(feature = "full_crypto")] use bandersnatch_vrfs::SecretKey; -use codec::{Decode, Encode, MaxEncodedLen}; +use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; use scale_info::TypeInfo; use sp_runtime_interface::pass_by::PassByInner; @@ -212,7 +212,7 @@ impl sp_std::fmt::Debug for Signature { /// The raw secret seed, which can be used to reconstruct the secret [`Pair`]. #[cfg(feature = "full_crypto")] -type Seed = [u8; SEED_SERIALIZED_LEN]; +pub type Seed = [u8; SEED_SERIALIZED_LEN]; /// Bandersnatch secret key. #[cfg(feature = "full_crypto")] @@ -294,7 +294,7 @@ impl TraitPair for Pair { fn verify>(signature: &Signature, data: M, public: &Public) -> bool { let data = vrf::VrfSignData::new_unchecked(SIGNING_CTX, &[data.as_ref()], None); let signature = - vrf::VrfSignature { signature: *signature, vrf_outputs: vrf::VrfIosVec::default() }; + vrf::VrfSignature { signature: *signature, outputs: vrf::VrfIosVec::default() }; public.vrf_verify(&data, &signature) } @@ -463,7 +463,7 @@ pub mod vrf { #[derive(Clone, Debug, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] pub struct VrfSignature { /// VRF (pre)outputs. - pub vrf_outputs: VrfIosVec, + pub outputs: VrfIosVec, /// VRF signature. pub signature: Signature, } @@ -506,12 +506,12 @@ pub mod vrf { impl VrfPublic for Public { fn vrf_verify(&self, data: &Self::VrfSignData, signature: &Self::VrfSignature) -> bool { const _: () = assert!(MAX_VRF_IOS == 3, "`MAX_VRF_IOS` expected to be 3"); - let preouts_len = signature.vrf_outputs.len(); - if preouts_len != data.vrf_inputs.len() { + let outputs_len = signature.outputs.len(); + if outputs_len != data.vrf_inputs.len() { return false } // Workaround to overcome backend signature generic over the number of IOs. - match preouts_len { + match outputs_len { 0 => self.vrf_verify_gen::<0>(data, signature), 1 => self.vrf_verify_gen::<1>(data, signature), 2 => self.vrf_verify_gen::<2>(data, signature), @@ -541,7 +541,7 @@ pub mod vrf { let outputs: Vec<_> = signature.preoutputs.into_iter().map(VrfOutput).collect(); let outputs = VrfIosVec::truncate_from(outputs); - VrfSignature { signature: Signature(sign_bytes), vrf_outputs: outputs } + VrfSignature { signature: Signature(sign_bytes), outputs } } /// Generate an arbitrary number of bytes from the given `context` and VRF `input`. 
@@ -567,7 +567,7 @@ pub mod vrf { }; let Ok(preouts) = signature - .vrf_outputs + .outputs .iter() .map(|o| o.0.clone()) .collect::>() @@ -675,6 +675,8 @@ pub mod ring_vrf { } } + impl EncodeLike for RingContext {} + impl MaxEncodedLen for RingContext { fn max_encoded_len() -> usize { <[u8; RING_CONTEXT_SERIALIZED_LEN]>::max_encoded_len() @@ -910,11 +912,11 @@ mod tests { let signature = pair.vrf_sign(&data); let o10 = pair.make_bytes::<32>(b"ctx1", &i1); - let o11 = signature.vrf_outputs[0].make_bytes::<32>(b"ctx1", &i1); + let o11 = signature.outputs[0].make_bytes::<32>(b"ctx1", &i1); assert_eq!(o10, o11); let o20 = pair.make_bytes::<48>(b"ctx2", &i2); - let o21 = signature.vrf_outputs[1].make_bytes::<48>(b"ctx2", &i2); + let o21 = signature.outputs[1].make_bytes::<48>(b"ctx2", &i2); assert_eq!(o20, o21); } @@ -993,6 +995,35 @@ mod tests { assert!(!signature.verify(&data, &verifier)); } + #[test] + fn ring_vrf_make_bytes_matches() { + let ring_ctx = RingContext::new_testing(); + + let mut pks: Vec<_> = (0..16).map(|i| Pair::from_seed(&[i as u8; 32]).public()).collect(); + assert!(pks.len() <= ring_ctx.max_keyset_size()); + + let pair = Pair::from_seed(DEV_SEED); + + // Just pick one index to patch with the actual public key + let prover_idx = 3; + pks[prover_idx] = pair.public(); + + let i1 = VrfInput::new(b"dom1", b"foo"); + let i2 = VrfInput::new(b"dom2", b"bar"); + let data = VrfSignData::new_unchecked(b"mydata", &[b"tdata"], [i1.clone(), i2.clone()]); + + let prover = ring_ctx.prover(&pks, prover_idx).unwrap(); + let signature = pair.ring_vrf_sign(&data, &prover); + + let o10 = pair.make_bytes::<32>(b"ctx1", &i1); + let o11 = signature.outputs[0].make_bytes::<32>(b"ctx1", &i1); + assert_eq!(o10, o11); + + let o20 = pair.make_bytes::<48>(b"ctx2", &i2); + let o21 = signature.outputs[1].make_bytes::<48>(b"ctx2", &i2); + assert_eq!(o20, o21); + } + #[test] fn encode_decode_ring_vrf_signature() { let ring_ctx = RingContext::new_testing(); diff --git a/primitives/core/src/crypto.rs b/primitives/core/src/crypto.rs index 6afe4b752a690..8c7d98f00cd89 100644 --- a/primitives/core/src/crypto.rs +++ b/primitives/core/src/crypto.rs @@ -1136,6 +1136,8 @@ pub mod key_types { /// Key type for Babe module, built-in. Identified as `babe`. pub const BABE: KeyTypeId = KeyTypeId(*b"babe"); + /// Key type for Sassafras module, built-in. Identified as `sass`. + pub const SASSAFRAS: KeyTypeId = KeyTypeId(*b"sass"); /// Key type for Grandpa module, built-in. Identified as `gran`. pub const GRANDPA: KeyTypeId = KeyTypeId(*b"gran"); /// Key type for controlling an account in a Substrate runtime, built-in. Identified as `acco`. 
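A small, std-only sketch tying the new `sass` key type to the Sassafras application crypto added earlier in this patch. The dev-seed derivation uses the standard `Pair` trait; the helper is hypothetical.

use sp_consensus_sassafras::{AuthorityId, AuthorityPair, KEY_TYPE};
use sp_core::crypto::{KeyTypeId, Pair};

/// Derive a dev authority id; keys of this flavour live in the keystore under
/// the `sass` key type introduced above.
fn dev_authority(name: &str) -> (KeyTypeId, AuthorityId) {
    let pair = AuthorityPair::from_string(&format!("//{name}"), None)
        .expect("static dev seed is valid");
    (KEY_TYPE, pair.public())
}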
diff --git a/primitives/crypto/ec-utils/Cargo.toml b/primitives/crypto/ec-utils/Cargo.toml index 90cd38722e0e1..8acbbe3180266 100644 --- a/primitives/crypto/ec-utils/Cargo.toml +++ b/primitives/crypto/ec-utils/Cargo.toml @@ -12,7 +12,6 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -ark-serialize = { version = "0.4.2", default-features = false } ark-ff = { version = "0.4.2", default-features = false } ark-ec = { version = "0.4.2", default-features = false } ark-std = { version = "0.4.0", default-features = false } @@ -35,6 +34,7 @@ sp-ark-bls12-381 = { version = "0.4.0-beta", default-features = false } sp-ark-bw6-761 = { version = "0.4.0-beta", default-features = false } sp-ark-ed-on-bls12-377 = { version = "0.4.0-beta", default-features = false } sp-ark-ed-on-bls12-381-bandersnatch = { version = "0.4.0-beta", default-features = false } +ark-serialize = { version = "0.4.2", default-features = false } [features] default = [ "std" ] diff --git a/primitives/inherents/src/client_side.rs b/primitives/inherents/src/client_side.rs index 27479de136f2d..2e23221261336 100644 --- a/primitives/inherents/src/client_side.rs +++ b/primitives/inherents/src/client_side.rs @@ -99,9 +99,11 @@ pub trait InherentDataProvider: Send + Sync { /// If the given error could not be decoded, `None` should be returned. async fn try_handle_error( &self, - identifier: &InherentIdentifier, - error: &[u8], - ) -> Option>; + _identifier: &InherentIdentifier, + _error: &[u8], + ) -> Option> { + None + } } #[impl_trait_for_tuples::impl_for_tuples(30)] diff --git a/primitives/keyring/src/sr25519.rs b/primitives/keyring/src/sr25519.rs index c738cfdc59d9e..c990b7b796b5d 100644 --- a/primitives/keyring/src/sr25519.rs +++ b/primitives/keyring/src/sr25519.rs @@ -140,14 +140,14 @@ impl std::str::FromStr for Keyring { fn from_str(s: &str) -> Result::Err> { match s { - "alice" => Ok(Keyring::Alice), - "bob" => Ok(Keyring::Bob), - "charlie" => Ok(Keyring::Charlie), - "dave" => Ok(Keyring::Dave), - "eve" => Ok(Keyring::Eve), - "ferdie" => Ok(Keyring::Ferdie), - "one" => Ok(Keyring::One), - "two" => Ok(Keyring::Two), + "Alice" => Ok(Keyring::Alice), + "Bob" => Ok(Keyring::Bob), + "Charlie" => Ok(Keyring::Charlie), + "Dave" => Ok(Keyring::Dave), + "Eve" => Ok(Keyring::Eve), + "Ferdie" => Ok(Keyring::Ferdie), + "One" => Ok(Keyring::One), + "Two" => Ok(Keyring::Two), _ => Err(ParseKeyringError), } } diff --git a/primitives/keystore/src/lib.rs b/primitives/keystore/src/lib.rs index 82062fe7b40a7..b388362ecb898 100644 --- a/primitives/keystore/src/lib.rs +++ b/primitives/keystore/src/lib.rs @@ -17,6 +17,7 @@ //! 
Keystore traits +#[cfg(feature = "std")] pub mod testing; #[cfg(feature = "bandersnatch-experimental")] diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index fe517843aff73..89761758d6527 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -17,11 +17,12 @@ sp-application-crypto = { version = "23.0.0", default-features = false, path = " sp-consensus-aura = { version = "0.10.0-dev", default-features = false, path = "../../primitives/consensus/aura", features = ["serde"] } sp-consensus-babe = { version = "0.10.0-dev", default-features = false, path = "../../primitives/consensus/babe", features = ["serde"] } sp-genesis-builder = { version = "0.1.0-dev", default-features = false, path = "../../primitives/genesis-builder" } +sp-consensus-sassafras = { version = "0.3.4-dev", default-features = false, path = "../../primitives/consensus/sassafras" } sp-block-builder = { version = "4.0.0-dev", default-features = false, path = "../../primitives/block-builder" } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../primitives/inherents" } -sp-keyring = { version = "24.0.0", optional = true, path = "../../primitives/keyring" } +sp-keyring = { version = "24.0.0", optional = true, path = "../../primitives/keyring", features = ["bandersnatch-experimental"] } sp-offchain = { version = "4.0.0-dev", default-features = false, path = "../../primitives/offchain" } sp-core = { version = "21.0.0", default-features = false, path = "../../primitives/core" } sp-std = { version = "8.0.0", default-features = false, path = "../../primitives/std" } @@ -32,6 +33,7 @@ sp-session = { version = "4.0.0-dev", default-features = false, path = "../../pr sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api" } sp-runtime = { version = "24.0.0", default-features = false, path = "../../primitives/runtime", features = ["serde"] } pallet-babe = { version = "4.0.0-dev", default-features = false, path = "../../frame/babe" } +pallet-sassafras = { version = "0.3.4-dev", default-features = false, path = "../../frame/sassafras" } pallet-balances = { version = "4.0.0-dev", default-features = false, path = "../../frame/balances" } frame-executive = { version = "4.0.0-dev", default-features = false, path = "../../frame/executive" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../../frame/system" } @@ -77,6 +79,7 @@ std = [ "log/std", "pallet-babe/std", "pallet-balances/std", + "pallet-sassafras/std", "pallet-timestamp/std", "sc-executor/std", "sc-service", @@ -87,6 +90,7 @@ std = [ "sp-consensus-aura/std", "sp-consensus-babe/std", "sp-consensus-grandpa/std", + "sp-consensus-sassafras/std", "sp-core/std", "sp-externalities/std", "sp-genesis-builder/std", diff --git a/test-utils/runtime/src/genesismap.rs b/test-utils/runtime/src/genesismap.rs index 8a4d6dbe4a71a..48611dccacdb3 100644 --- a/test-utils/runtime/src/genesismap.rs +++ b/test-utils/runtime/src/genesismap.rs @@ -23,11 +23,11 @@ use super::{ use codec::Encode; use sc_service::construct_genesis_block; use sp_core::{ - sr25519, + bandersnatch, sr25519, storage::{well_known_keys, StateVersion, Storage}, Pair, }; -use sp_keyring::{AccountKeyring, Sr25519Keyring}; +use sp_keyring::AccountKeyring; use sp_runtime::{ traits::{Block as BlockT, Hash as HashT, 
Header as HeaderT}, BuildStorage, @@ -54,9 +54,9 @@ impl Default for GenesisStorageBuilder { fn default() -> Self { Self::new( vec![ - Sr25519Keyring::Alice.into(), - Sr25519Keyring::Bob.into(), - Sr25519Keyring::Charlie.into(), + AccountKeyring::Alice.into(), + AccountKeyring::Bob.into(), + AccountKeyring::Charlie.into(), ], (0..16_usize) .into_iter() @@ -109,11 +109,23 @@ impl GenesisStorageBuilder { /// A `RuntimeGenesisConfig` from internal configuration pub fn genesis_config(&self) -> RuntimeGenesisConfig { - let authorities_sr25519: Vec<_> = self + let authorities_sr25519: Vec = self .authorities - .clone() - .into_iter() - .map(|id| sr25519::Public::from(id)) + .iter() + .map(|id| { + use std::str::FromStr; + let seed: &'static str = AccountKeyring::from_public(id).unwrap().into(); + sp_keyring::Sr25519Keyring::from_str(&seed).unwrap().into() + }) + .collect(); + let authorities_bandersnatch: Vec = self + .authorities + .iter() + .map(|id| { + use std::str::FromStr; + let seed: &'static str = AccountKeyring::from_public(id).unwrap().into(); + sp_keyring::BandersnatchKeyring::from_str(&seed).unwrap().into() + }) .collect(); RuntimeGenesisConfig { @@ -130,6 +142,14 @@ impl GenesisStorageBuilder { epoch_config: Some(crate::TEST_RUNTIME_BABE_EPOCH_CONFIGURATION), ..Default::default() }, + sassafras: pallet_sassafras::GenesisConfig { + authorities: authorities_bandersnatch.into_iter().map(|x| x.into()).collect(), + epoch_config: sp_consensus_sassafras::EpochConfiguration { + redundancy_factor: 1, + attempts_number: 32, + }, + ..Default::default() + }, substrate_test: substrate_test_pallet::GenesisConfig { authorities: authorities_sr25519.clone(), ..Default::default() diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index b116c8556815f..058fc4ac493d2 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -72,7 +72,10 @@ pub use sp_consensus_babe::{AllowedSlots, BabeEpochConfiguration, Slot}; pub use pallet_balances::Call as BalancesCall; +// Ensure Babe, Sassafras and Aura use the same crypto to simplify things a bit. pub type AuraId = sp_consensus_aura::sr25519::AuthorityId; +pub type SassafrasId = sp_consensus_sassafras::AuthorityId; + #[cfg(feature = "std")] pub use extrinsic::{ExtrinsicBuilder, Transfer}; @@ -301,6 +304,7 @@ construct_runtime!( { System: frame_system, Babe: pallet_babe, + Sassafras: pallet_sassafras, SubstrateTest: substrate_test_pallet::pallet, Balances: pallet_balances, } @@ -411,6 +415,7 @@ impl pallet_timestamp::Config for Runtime { } parameter_types! { + pub const SlotDuration: u64 = 1000; pub const EpochDuration: u64 = 6; } @@ -426,6 +431,23 @@ impl pallet_babe::Config for Runtime { type MaxNominators = ConstU32<100>; } +impl frame_system::offchain::SendTransactionTypes for Runtime +where + RuntimeCall: From, +{ + type Extrinsic = Extrinsic; + type OverarchingCall = RuntimeCall; +} + +impl pallet_sassafras::Config for Runtime { + type SlotDuration = SlotDuration; + type EpochDuration = EpochDuration; + //type EpochChangeTrigger = pallet_sassafras::ExternalTrigger; + type EpochChangeTrigger = pallet_sassafras::SameAuthoritiesForever; + type MaxAuthorities = ConstU32<10>; + type MaxTickets = ConstU32<10>; +} + /// Adds one to the given input and returns the final result. #[inline(never)] fn benchmark_add_one(i: u64) -> u64 { @@ -671,6 +693,52 @@ impl_runtime_apis! 
{ } } + impl sp_consensus_sassafras::SassafrasApi for Runtime { + fn ring_context() -> Option { + Sassafras::ring_context() + } + + fn submit_tickets_unsigned_extrinsic( + tickets: Vec + ) -> bool { + Sassafras::submit_tickets_unsigned_extrinsic(tickets) + } + + fn current_epoch() -> sp_consensus_sassafras::Epoch { + Sassafras::current_epoch() + } + + fn next_epoch() -> sp_consensus_sassafras::Epoch { + Sassafras::next_epoch() + } + + fn slot_ticket_id(slot: sp_consensus_sassafras::Slot) -> Option { + Sassafras::slot_ticket_id(slot) + } + + fn slot_ticket( + slot: sp_consensus_sassafras::Slot + ) -> Option<(sp_consensus_sassafras::TicketId, sp_consensus_sassafras::TicketBody)> { + Sassafras::slot_ticket(slot) + } + + fn generate_key_ownership_proof( + _slot: sp_consensus_sassafras::Slot, + _authority_id: sp_consensus_sassafras::AuthorityId, + ) -> Option { + // TODO-SASS-P3 + None + } + + fn submit_report_equivocation_unsigned_extrinsic( + _equivocation_proof: sp_consensus_sassafras::EquivocationProof<::Header>, + _key_owner_proof: sp_consensus_sassafras::OpaqueKeyOwnershipProof, + ) -> bool { + // TODO-SASS-P3 + false + } + } + impl sp_offchain::OffchainWorkerApi for Runtime { fn offchain_worker(header: &::Header) { let ext = Extrinsic::new_unsigned( @@ -868,6 +936,10 @@ pub mod storage_key_generator { vec![b"Babe", b"EpochConfig"], vec![b"Babe", b"NextAuthorities"], vec![b"Babe", b"SegmentIndex"], + vec![b"Sassafras", b":__STORAGE_VERSION__:"], + vec![b"Sassafras", b"EpochConfig"], + vec![b"Sassafras", b"Authorities"], + vec![b"Sassafras", b"NextAuthorities"], vec![b"Balances", b":__STORAGE_VERSION__:"], vec![b"Balances", b"TotalIssuance"], vec![b"SubstrateTest", b":__STORAGE_VERSION__:"], @@ -926,29 +998,28 @@ pub mod storage_key_generator { let mut res = vec![ //SubstrateTest|:__STORAGE_VERSION__: "00771836bebdd29870ff246d305c578c4e7b9012096b41c4eb3aaf947f6ea429", - //SubstrateTest|Authorities + // SubstrateTest|Authorities "00771836bebdd29870ff246d305c578c5e0621c4869aa60c02be9adcc98a0d1d", - //Babe|:__STORAGE_VERSION__: + // Babe|:__STORAGE_VERSION__: "1cb6f36e027abb2091cfb5110ab5087f4e7b9012096b41c4eb3aaf947f6ea429", - //Babe|Authorities + // Babe|Authorities "1cb6f36e027abb2091cfb5110ab5087f5e0621c4869aa60c02be9adcc98a0d1d", - //Babe|SegmentIndex + // Babe|SegmentIndex "1cb6f36e027abb2091cfb5110ab5087f66e8f035c8adbe7f1547b43c51e6f8a4", - //Babe|NextAuthorities + // Babe|NextAuthorities "1cb6f36e027abb2091cfb5110ab5087faacf00b9b41fda7a9268821c2a2b3e4c", - //Babe|EpochConfig + // Babe|EpochConfig "1cb6f36e027abb2091cfb5110ab5087fdc6b171b77304263c292cc3ea5ed31ef", - //System|:__STORAGE_VERSION__: + // System|:__STORAGE_VERSION__: "26aa394eea5630e07c48ae0c9558cef74e7b9012096b41c4eb3aaf947f6ea429", - //System|UpgradedToU32RefCount + // System|UpgradedToU32RefCount "26aa394eea5630e07c48ae0c9558cef75684a022a34dd8bfa2baaf44f172b710", - //System|ParentHash + // System|ParentHash "26aa394eea5630e07c48ae0c9558cef78a42f33323cb5ced3b44dd825fda9fcc", - //System::BlockHash|0 + // System::BlockHash|0 "26aa394eea5630e07c48ae0c9558cef7a44704b568d21667356a5a050c118746bb1bdbcacd6ac9340000000000000000", - //System|UpgradedToTripleRefCount + // System|UpgradedToTripleRefCount "26aa394eea5630e07c48ae0c9558cef7a7fd6c28836b9a28522dc924110cf439", - // System|Account|blake2_128Concat("//11") "26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da901cae4e3edfbb32c91ed3f01ab964f4eeeab50338d8e5176d3141802d7b010a55dadcd5f23cf8aaafa724627e967e90e", // System|Account|blake2_128Concat("//4") @@ -993,6 
+1064,14 @@ pub mod storage_key_generator { "3a636f6465", // :extrinsic_index "3a65787472696e7369635f696e646578", + // Sassafras|__STORAGE_VERSION__: + "be5e1f844c68e483aa815e45bbd9d3184e7b9012096b41c4eb3aaf947f6ea429", + // Sassafras|Authorities + "be5e1f844c68e483aa815e45bbd9d3185e0621c4869aa60c02be9adcc98a0d1d", + // Sassafras|NextAuthorities + "be5e1f844c68e483aa815e45bbd9d318aacf00b9b41fda7a9268821c2a2b3e4c", + // Sassafras|EpochConfig + "be5e1f844c68e483aa815e45bbd9d318dc6b171b77304263c292cc3ea5ed31ef", // Balances|:__STORAGE_VERSION__: "c2261276cc9d1f8598ea4b6a74b15c2f4e7b9012096b41c4eb3aaf947f6ea429", // Balances|TotalIssuance @@ -1034,6 +1113,32 @@ mod tests { prelude::*, runtime::TestAPI, DefaultTestClientBuilderExt, TestClientBuilder, }; + fn babe_pre_digest() -> DigestItem { + use sp_consensus_babe::digests::{ + CompatibleDigestItem, PreDigest, SecondaryPlainPreDigest, + }; + DigestItem::babe_pre_digest(PreDigest::SecondaryPlain(SecondaryPlainPreDigest { + authority_index: 0, + slot: 0.into(), + })) + } + + fn sassafras_pre_digest() -> DigestItem { + use sp_consensus_sassafras::{ + digests::{CompatibleDigestItem, PreDigest}, + slot_claim_sign_data, AuthorityPair, + }; + use sp_core::crypto::{Pair, VrfSecret}; + let data = slot_claim_sign_data(&Default::default(), 0.into(), 0); + let vrf_signature = AuthorityPair::from_seed(&[0u8; 32]).as_ref().vrf_sign(&data); + DigestItem::sassafras_pre_digest(PreDigest { + authority_idx: 0, + slot: 0.into(), + vrf_signature, + ticket_claim: None, + }) + } + #[test] fn heap_pages_is_respected() { // This tests that the on-chain `HEAP_PAGES` parameter is respected. @@ -1053,7 +1158,8 @@ mod tests { // Create a block that sets the `:heap_pages` to 32 pages of memory which corresponds to // ~2048k of heap memory. 
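The hex constants listed in the expected-keys set above follow the standard FRAME layout for plain storage values: `twox_128(pallet_prefix) ++ twox_128(item_name)`. A small sketch that reproduces them, assuming only `sp-core` and the `hex` crate:

use sp_core::hashing::twox_128;

/// Storage key of a plain (non-map) FRAME storage value.
fn plain_storage_key(pallet: &[u8], item: &[u8]) -> Vec<u8> {
    let mut key = twox_128(pallet).to_vec();
    key.extend_from_slice(&twox_128(item));
    key
}

// `hex::encode(plain_storage_key(b"Sassafras", b"Authorities"))` should match
// the "Sassafras|Authorities" entry in the expected keys above.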
let (new_at_hash, block) = { - let mut builder = client.new_block(Default::default()).unwrap(); + let digest = Digest { logs: vec![babe_pre_digest(), sassafras_pre_digest()] }; + let mut builder = client.new_block(digest).unwrap(); builder.push_storage_change(HEAP_PAGES.to_vec(), Some(32u64.encode())).unwrap(); let block = builder.build().unwrap().block; let hash = block.header.hash(); @@ -1235,7 +1341,7 @@ mod tests { #[test] fn build_minimal_genesis_config_works() { sp_tracing::try_init_simple(); - let default_minimal_json = r#"{"system":{"code":"0x"},"babe":{"authorities":[],"epochConfig":{"c": [ 3, 10 ],"allowed_slots":"PrimaryAndSecondaryPlainSlots"}},"substrateTest":{"authorities":[]},"balances":{"balances":[]}}"#; + let default_minimal_json = r#"{"system":{"code":"0x"},"babe":{"authorities":[],"epochConfig":{"c": [ 3, 10 ],"allowed_slots":"PrimaryAndSecondaryPlainSlots"}},"sassafras":{"authorities":[],"epochConfig":{"redundancy_factor": 1,"attempts_number": 32}},"substrateTest":{"authorities":[]},"balances":{"balances":[]}}"#; let mut t = BasicExternalities::new_empty(); executor_call(&mut t, "GenesisBuilder_build_config", &default_minimal_json.encode()) @@ -1278,6 +1384,15 @@ mod tests { "1cb6f36e027abb2091cfb5110ab5087f4e7b9012096b41c4eb3aaf947f6ea429", //SubstrateTest|:__STORAGE_VERSION__: "00771836bebdd29870ff246d305c578c4e7b9012096b41c4eb3aaf947f6ea429", + + // Sassafras|__STORAGE_VERSION__: + "be5e1f844c68e483aa815e45bbd9d3184e7b9012096b41c4eb3aaf947f6ea429", + // Sassafras|Authorities + "be5e1f844c68e483aa815e45bbd9d3185e0621c4869aa60c02be9adcc98a0d1d", + // Sassafras|NextAuthorities + "be5e1f844c68e483aa815e45bbd9d318aacf00b9b41fda7a9268821c2a2b3e4c", + // Sassafras|EpochConfig + "be5e1f844c68e483aa815e45bbd9d318dc6b171b77304263c292cc3ea5ed31ef", ].into_iter().map(String::from).collect::>(); expected.sort(); @@ -1292,7 +1407,7 @@ mod tests { let r = Vec::::decode(&mut &r[..]).unwrap(); let json = String::from_utf8(r.into()).expect("returned value is json. 
qed."); - let expected = r#"{"system":{"code":"0x"},"babe":{"authorities":[],"epochConfig":null},"substrateTest":{"authorities":[]},"balances":{"balances":[]}}"#; + let expected = r#"{"system":{"code":"0x"},"babe":{"authorities":[],"epochConfig":null},"sassafras":{"authorities":[],"epochConfig":{"redundancy_factor":0,"attempts_number":0}},"substrateTest":{"authorities":[]},"balances":{"balances":[]}}"#; assert_eq!(expected.to_string(), json); } diff --git a/test-utils/runtime/src/test_json/default_genesis_config.json b/test-utils/runtime/src/test_json/default_genesis_config.json index b0218d417daa5..1d322237c33fe 100644 --- a/test-utils/runtime/src/test_json/default_genesis_config.json +++ b/test-utils/runtime/src/test_json/default_genesis_config.json @@ -25,6 +25,17 @@ "allowed_slots": "PrimaryAndSecondaryPlainSlots" } }, + "sassafras": { + "authorities": [ + "KmTJSgAeSqH6VFSbfLuAGPtNfkXD5NQr2mqgomtyckpvfbRpn", + "KYW9snBs4hEMC2MFbXTWHjHVRt2Mov91h7mEsAEkQfMy9PD61", + "KXMr3GG4GkpFoQoDXUPEcPbSWePNzuPNtEPycdJ3yveZnm56G" + ], + "epochConfig": { + "redundancy_factor": 1, + "attempts_number": 32 + } + }, "substrateTest": { "authorities": [ "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", diff --git a/test-utils/runtime/src/test_json/default_genesis_config_incomplete.json b/test-utils/runtime/src/test_json/default_genesis_config_incomplete.json index e25730ee11cf0..4965136fd1ec0 100644 --- a/test-utils/runtime/src/test_json/default_genesis_config_incomplete.json +++ b/test-utils/runtime/src/test_json/default_genesis_config_incomplete.json @@ -18,6 +18,17 @@ "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y" ] }, + "sassafras": { + "authorities": [ + "KmTJSgAeSqH6VFSbfLuAGPtNfkXD5NQr2mqgomtyckpvfbRpn", + "KYW9snBs4hEMC2MFbXTWHjHVRt2Mov91h7mEsAEkQfMy9PD61", + "KXMr3GG4GkpFoQoDXUPEcPbSWePNzuPNtEPycdJ3yveZnm56G" + ], + "epochConfig": { + "redundancy_factor": 1, + "attempts_number": 32 + } + }, "balances": { "balances": [ [ diff --git a/test-utils/runtime/src/test_json/default_genesis_config_invalid.json b/test-utils/runtime/src/test_json/default_genesis_config_invalid.json index 00550efaeec9f..ae0c9f986c29e 100644 --- a/test-utils/runtime/src/test_json/default_genesis_config_invalid.json +++ b/test-utils/runtime/src/test_json/default_genesis_config_invalid.json @@ -25,6 +25,17 @@ "allowed_slots": "PrimaryAndSecondaryPlainSlots" } }, + "sassafras": { + "authorities": [ + "KmTJSgAeSqH6VFSbfLuAGPtNfkXD5NQr2mqgomtyckpvfbRpn", + "KYW9snBs4hEMC2MFbXTWHjHVRt2Mov91h7mEsAEkQfMy9PD61", + "KXMr3GG4GkpFoQoDXUPEcPbSWePNzuPNtEPycdJ3yveZnm56G" + ], + "epochConfig": { + "redundancy_factor": 1, + "attempts_number": 32 + } + }, "substrateTest": { "authorities": [ "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY",