diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml index 669ece643..df147e046 100644 --- a/.github/workflows/checks.yml +++ b/.github/workflows/checks.yml @@ -132,6 +132,8 @@ jobs: github_token: ${{ secrets.GITHUB_TOKEN }} - name: Check TOML uses: dprint/check@v2.2 + - name: Build guests + run: make build-risc0 - name: Run lint run: | if ! make lint ; then @@ -163,6 +165,8 @@ jobs: uses: ./.github/actions/install-risc0 with: github_token: ${{ secrets.GITHUB_TOKEN }} + - name: Build guests + run: make build-risc0 - name: Run cargo-udeps env: RUSTFLAGS: -A warnings diff --git a/Cargo.lock b/Cargo.lock index a2ba5676f..9734bbd4d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1727,6 +1727,7 @@ dependencies = [ "ethereum-rpc", "hex", "jsonrpsee", + "lazy_static", "log", "log-panics", "proptest", @@ -1738,6 +1739,7 @@ dependencies = [ "reth-transaction-pool", "revm", "risc0", + "risc0-binfmt", "rs_merkle", "rustc_version_runtime", "secp256k1", @@ -1834,7 +1836,7 @@ dependencies = [ [[package]] name = "citrea-e2e" version = "0.1.0" -source = "git+https://github.com/chainwayxyz/citrea-e2e?rev=c3f27fc#c3f27fcc1e3cf828f151a9d564f9b667d4b1fe18" +source = "git+https://github.com/chainwayxyz/citrea-e2e?rev=423db61#423db6182000d4b94b121eed05256de1b4bd0a7c" dependencies = [ "anyhow", "async-trait", @@ -1948,6 +1950,7 @@ dependencies = [ "bincode", "borsh", "citrea-common", + "citrea-primitives", "hex", "jsonrpsee", "sequencer-client", @@ -7454,6 +7457,7 @@ dependencies = [ "anyhow", "async-trait", "citrea-common", + "derive_more", "jsonrpsee", "sov-db", "sov-ledger-rpc", diff --git a/Cargo.toml b/Cargo.toml index 02b55f20d..1094d657c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -111,6 +111,7 @@ tokio-util = { version = "0.7.12", features = ["rt"] } num_cpus = "1.0" # Risc0 dependencies +risc0-binfmt = { version = "1.1.3" } risc0-zkvm = { version = "1.1.3", default-features = false } risc0-zkvm-platform = { version = "1.1.3" } risc0-zkp = { version = "1.1.3" } @@ -155,7 +156,7 @@ tower-http = { version = "0.5.0", features = ["full"] } tower = { version = "0.4.13", features = ["full"] } hyper = { version = "1.4.0" } -citrea-e2e = { git = "https://github.com/chainwayxyz/citrea-e2e", rev = "c3f27fc" } +citrea-e2e = { git = "https://github.com/chainwayxyz/citrea-e2e", rev = "423db61" } [patch.crates-io] bitcoincore-rpc = { version = "0.18.0", git = "https://github.com/chainwayxyz/rust-bitcoincore-rpc.git", rev = "ca3cfa2" } diff --git a/Makefile b/Makefile index 31279ae6c..7517a7e23 100644 --- a/Makefile +++ b/Makefile @@ -9,11 +9,19 @@ PARALLEL_PROOF_LIMIT := 1 help: ## Display this help message @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) +.PHONY: build-risc0 +build-risc0: + $(MAKE) -j 2 -C guests/risc0 all + +.PHONY: build-sp1 +build-sp1: + $(MAKE) -C guests/sp1 all + .PHONY: build build: ## Build the project @cargo build -build-release: ## Build the project in release mode +build-release: build-risc0 build-sp1 ## Build the project in release mode @cargo build --release clean: ## Cleans compiled @@ -36,7 +44,7 @@ clean-all: clean clean-node clean-txs test-legacy: ## Runs test suite with output from tests printed @cargo test -- --nocapture -Zunstable-options --report-time -test: build $(EF_TESTS_DIR) ## Runs test suite using next test +test: build-risc0 build-sp1 build $(EF_TESTS_DIR) ## Runs test suite using next test RISC0_DEV_MODE=1 cargo nextest run --workspace --all-features --no-fail-fast $(filter-out 
$@,$(MAKECMDGOALS)) install-dev-tools: ## Installs all necessary cargo helpers diff --git a/bin/citrea/Cargo.toml b/bin/citrea/Cargo.toml index e26705b40..f48e1df0f 100644 --- a/bin/citrea/Cargo.toml +++ b/bin/citrea/Cargo.toml @@ -50,10 +50,12 @@ borsh = { workspace = true, features = ["bytes"] } clap = { workspace = true } hex = { workspace = true, optional = true } jsonrpsee = { workspace = true, features = ["http-client", "server"] } +lazy_static = { workspace = true } log-panics = { workspace = true } reth-primitives = { workspace = true } reth-rpc-types = { workspace = true } reth-transaction-pool = { workspace = true } +risc0-binfmt = { workspace = true } secp256k1 = { workspace = true } serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true } @@ -98,8 +100,7 @@ sp1-helper = { version = "3.0.0", default-features = false } [features] default = [] # Deviate from convention by making the "native" feature active by default. This aligns with how this package is meant to be used (as a binary first, library second). - -bench = ["hex"] # "sov-risc0-adapter/bench", "risc0/bench"] +bench = ["hex"] [[bin]] name = "citrea" diff --git a/bin/citrea/src/guests.rs b/bin/citrea/src/guests.rs new file mode 100644 index 000000000..d17a205b7 --- /dev/null +++ b/bin/citrea/src/guests.rs @@ -0,0 +1,69 @@ +use std::collections::HashMap; + +use citrea_risc0_adapter::Digest; +use lazy_static::lazy_static; +use risc0_binfmt::compute_image_id; +use sov_rollup_interface::spec::SpecId; + +macro_rules! guest { + ($a:expr) => {{ + let code = include_bytes!($a).to_vec(); + let id = compute_image_id(&code).unwrap(); + + (id, code) + }}; +} + +lazy_static! { + /// The following 2 are used as latest guest builds for tests that use mock DA. + pub(crate) static ref BATCH_PROOF_LATEST_MOCK_GUESTS: HashMap<SpecId, (Digest, Vec<u8>)> = { + let mut m = HashMap::new(); + + m.insert(SpecId::Genesis, (Digest::new(citrea_risc0::BATCH_PROOF_MOCK_ID), citrea_risc0::BATCH_PROOF_MOCK_ELF.to_vec())); + m + }; + pub(crate) static ref LIGHT_CLIENT_LATEST_MOCK_GUESTS: HashMap<SpecId, (Digest, Vec<u8>)> = { + let mut m = HashMap::new(); + + m.insert(SpecId::Genesis, (Digest::new(citrea_risc0::LIGHT_CLIENT_PROOF_MOCK_ID), citrea_risc0::LIGHT_CLIENT_PROOF_MOCK_ELF.to_vec())); + m + }; + /// The following 2 are used as latest guest builds for tests that use Bitcoin DA.
+ pub(crate) static ref BATCH_PROOF_LATEST_BITCOIN_GUESTS: HashMap<SpecId, (Digest, Vec<u8>)> = { + let mut m = HashMap::new(); + + m.insert(SpecId::Genesis, (Digest::new(citrea_risc0::BATCH_PROOF_BITCOIN_ID), citrea_risc0::BATCH_PROOF_BITCOIN_ELF.to_vec())); + m + }; + pub(crate) static ref LIGHT_CLIENT_LATEST_BITCOIN_GUESTS: HashMap<SpecId, (Digest, Vec<u8>)> = { + let mut m = HashMap::new(); + + m.insert(SpecId::Genesis, (Digest::new(citrea_risc0::LIGHT_CLIENT_PROOF_BITCOIN_ID), citrea_risc0::LIGHT_CLIENT_PROOF_BITCOIN_ELF.to_vec())); + m + }; + /// Production guests + pub(crate) static ref BATCH_PROOF_MAINNET_GUESTS: HashMap<SpecId, (Digest, Vec<u8>)> = { + let mut m = HashMap::new(); + + m.insert(SpecId::Genesis, guest!("../../../resources/guests/risc0/mainnet/batch-0.elf")); + m + }; + pub(crate) static ref BATCH_PROOF_TESTNET_GUESTS: HashMap<SpecId, (Digest, Vec<u8>)> = { + let mut m = HashMap::new(); + + m.insert(SpecId::Genesis, guest!("../../../resources/guests/risc0/testnet/batch-0.elf")); + m + }; + pub(crate) static ref LIGHT_CLIENT_MAINNET_GUESTS: HashMap<SpecId, (Digest, Vec<u8>)> = { + let mut m = HashMap::new(); + + m.insert(SpecId::Genesis, guest!("../../../resources/guests/risc0/mainnet/light-0.elf")); + m + }; + pub(crate) static ref LIGHT_CLIENT_TESTNET_GUESTS: HashMap<SpecId, (Digest, Vec<u8>)> = { + let mut m = HashMap::new(); + + m.insert(SpecId::Genesis, guest!("../../../resources/guests/risc0/testnet/light-0.elf")); + m + }; +} diff --git a/bin/citrea/src/lib.rs b/bin/citrea/src/lib.rs index 95ed90893..1d018cf69 100644 --- a/bin/citrea/src/lib.rs +++ b/bin/citrea/src/lib.rs @@ -4,15 +4,38 @@ use std::env; use std::str::FromStr; +use serde::Serialize; +use sov_modules_rollup_blueprint::Network; use tracing::Level; use tracing_subscriber::layer::SubscriberExt; use tracing_subscriber::util::SubscriberInitExt; use tracing_subscriber::{fmt, EnvFilter}; mod eth; +mod guests; mod rollup; pub use rollup::*; +/// The network currently running. +#[derive(clap::ValueEnum, Copy, Clone, Default, Debug, Serialize)] +#[serde(rename_all = "kebab-case")] +pub enum NetworkArg { + /// Mainnet + #[default] + Mainnet, + /// Testnet + Testnet, +} + +impl From<NetworkArg> for Network { + fn from(value: NetworkArg) -> Self { + match value { + NetworkArg::Mainnet => Network::Mainnet, + NetworkArg::Testnet => Network::Testnet, + } + } +} + /// Default initialization of logging pub fn initialize_logging(level: Level) { let env_filter = EnvFilter::from_str(&env::var("RUST_LOG").unwrap_or_else(|_| { diff --git a/bin/citrea/src/main.rs b/bin/citrea/src/main.rs index 153836899..01867b8f3 100644 --- a/bin/citrea/src/main.rs +++ b/bin/citrea/src/main.rs @@ -2,7 +2,9 @@ use core::fmt::Debug as DebugTrait; use anyhow::Context as _; use bitcoin_da::service::BitcoinServiceConfig; -use citrea::{initialize_logging, BitcoinRollup, CitreaRollupBlueprint, MockDemoRollup}; +use citrea::{ + initialize_logging, BitcoinRollup, CitreaRollupBlueprint, MockDemoRollup, NetworkArg, +}; use citrea_common::{ from_toml_path, BatchProverConfig, FromEnv, FullNodeConfig, LightClientProverConfig, SequencerConfig, }; use citrea_stf::genesis_config::GenesisPaths; use clap::Parser; use sov_mock_da::MockDaConfig; use sov_modules_api::Spec; -use sov_modules_rollup_blueprint::RollupBlueprint; +use sov_modules_rollup_blueprint::{Network, RollupBlueprint}; use sov_state::storage::NativeStorage; -use tracing::{error, instrument}; +use tracing::{error, info, instrument}; #[cfg(test)] mod test_rpc; @@ -23,6 +25,20 @@ mod test_rpc; #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { + /// The mode in which the node runs.
+ /// This determines which guest code to use. + /// Default is Mainnet. + #[clap(short, long, default_value_t, value_enum)] + network: NetworkArg, + + /// Override network to run testnet directly. + #[arg(long)] + testnet: bool, + + /// Run the development chain + #[arg(long, conflicts_with_all = ["testnet"])] + dev: bool, + /// Path to the genesis configuration. /// Defines the genesis of module states like evm. #[arg(long)] @@ -131,9 +147,19 @@ async fn main() -> Result<(), anyhow::Error> { )); } + let mut network = args.network.into(); + if args.testnet { + network = Network::Testnet; + } else if args.dev { + network = Network::Nightly; + } + + info!("Starting node on {network}"); + match args.da_layer { SupportedDaLayer::Mock => { start_rollup::( + network, &GenesisPaths::from_dir(&args.genesis_paths), args.rollup_config_path, batch_prover_config, @@ -144,6 +170,7 @@ async fn main() -> Result<(), anyhow::Error> { } SupportedDaLayer::Bitcoin => { start_rollup::( + network, &GenesisPaths::from_dir(&args.genesis_paths), args.rollup_config_path, batch_prover_config, @@ -159,6 +186,7 @@ async fn main() -> Result<(), anyhow::Error> { #[instrument(level = "trace", skip_all, err)] async fn start_rollup( + network: Network, rt_genesis_paths: &<::NativeRuntime as sov_modules_stf_blueprint::Runtime< ::NativeContext, ::DaSpec, @@ -179,7 +207,7 @@ where None => FullNodeConfig::from_env() .context("Failed to read rollup configuration from the environment")?, }; - let rollup_blueprint = S::new(); + let rollup_blueprint = S::new(network); if let Some(sequencer_config) = sequencer_config { let sequencer_rollup = rollup_blueprint diff --git a/bin/citrea/src/rollup/bitcoin.rs b/bin/citrea/src/rollup/bitcoin.rs index 6a3a4c0ef..4b4b70e07 100644 --- a/bin/citrea/src/rollup/bitcoin.rs +++ b/bin/citrea/src/rollup/bitcoin.rs @@ -8,10 +8,9 @@ use bitcoin_da::spec::{BitcoinSpec, RollupParams}; use bitcoin_da::verifier::BitcoinVerifier; use citrea_common::rpc::register_healthcheck_rpc; use citrea_common::tasks::manager::TaskManager; -use citrea_common::{BatchProverConfig, FullNodeConfig, LightClientProverConfig}; +use citrea_common::FullNodeConfig; use citrea_primitives::{TO_BATCH_PROOF_PREFIX, TO_LIGHT_CLIENT_PREFIX}; use citrea_risc0_adapter::host::Risc0BonsaiHost; -use citrea_risc0_adapter::Digest; // use citrea_sp1::host::SP1Host; use citrea_stf::genesis_config::StorageConfig; use citrea_stf::runtime::Runtime; @@ -19,24 +18,28 @@ use citrea_stf::verifier::StateTransitionVerifier; use prover_services::{ParallelProverService, ProofGenMode}; use sov_db::ledger_db::LedgerDB; use sov_modules_api::default_context::{DefaultContext, ZkDefaultContext}; -use sov_modules_api::{Address, Spec}; +use sov_modules_api::{Address, Spec, SpecId, Zkvm}; use sov_modules_rollup_blueprint::RollupBlueprint; use sov_modules_stf_blueprint::StfBlueprint; use sov_prover_storage_manager::ProverStorageManager; use sov_rollup_interface::da::DaVerifier; use sov_rollup_interface::services::da::SenderWithNotifier; -use sov_rollup_interface::spec::SpecId; -use sov_rollup_interface::zk::Zkvm; use sov_state::ZkStorage; use sov_stf_runner::ProverGuestRunConfig; use tokio::sync::broadcast; use tokio::sync::mpsc::unbounded_channel; use tracing::instrument; -use crate::CitreaRollupBlueprint; +use crate::guests::{ + BATCH_PROOF_LATEST_BITCOIN_GUESTS, BATCH_PROOF_MAINNET_GUESTS, BATCH_PROOF_TESTNET_GUESTS, + LIGHT_CLIENT_LATEST_BITCOIN_GUESTS, LIGHT_CLIENT_MAINNET_GUESTS, LIGHT_CLIENT_TESTNET_GUESTS, +}; +use crate::{CitreaRollupBlueprint, Network}; 
/// Rollup with BitcoinDa -pub struct BitcoinRollup {} +pub struct BitcoinRollup { + network: Network, +} impl CitreaRollupBlueprint for BitcoinRollup {} @@ -45,7 +48,8 @@ impl RollupBlueprint for BitcoinRollup { type DaService = BitcoinService; type DaSpec = BitcoinSpec; type DaConfig = BitcoinServiceConfig; - type Vm = Risc0BonsaiHost<'static>; + type DaVerifier = BitcoinVerifier; + type Vm = Risc0BonsaiHost; type ZkContext = ZkDefaultContext; type NativeContext = DefaultContext; @@ -60,8 +64,8 @@ impl RollupBlueprint for BitcoinRollup { StfBlueprint, >; - fn new() -> Self { - Self {} + fn new(network: Network) -> Self { + Self { network } } #[instrument(level = "trace", skip_all, err)] @@ -100,25 +104,6 @@ impl RollupBlueprint for BitcoinRollup { Ok(rpc_methods) } - #[instrument(level = "trace", skip(self), ret)] - fn get_batch_prover_code_commitments_by_spec( - &self, - ) -> HashMap::CodeCommitment> { - let mut map = HashMap::new(); - map.insert( - SpecId::Genesis, - Digest::new(citrea_risc0::BATCH_PROOF_BITCOIN_ID), - ); - // let (_, vk) = citrea_sp1::host::CLIENT.setup(include_bytes!("../guests/sp1/batch-prover-bitcoin/elf/zkvm-elf")); - // map.insert(SpecId::Genesis, vk); - map - } - - #[instrument(level = "trace", skip(self), ret)] - fn get_light_client_prover_code_commitment(&self) -> ::CodeCommitment { - Digest::new(citrea_risc0::LIGHT_CLIENT_PROOF_BITCOIN_ID) - } - #[instrument(level = "trace", skip_all, err)] fn create_storage_manager( &self, @@ -175,15 +160,94 @@ impl RollupBlueprint for BitcoinRollup { Ok(service) } + fn create_da_verifier(&self) -> Self::DaVerifier { + BitcoinVerifier::new(RollupParams { + to_light_client_prefix: TO_LIGHT_CLIENT_PREFIX.to_vec(), + to_batch_proof_prefix: TO_BATCH_PROOF_PREFIX.to_vec(), + }) + } + + fn get_batch_proof_elfs(&self) -> HashMap> { + match self.network { + Network::Mainnet => BATCH_PROOF_MAINNET_GUESTS + .iter() + .map(|(k, (_, code))| (*k, code.clone())) + .collect(), + Network::Testnet => BATCH_PROOF_TESTNET_GUESTS + .iter() + .map(|(k, (_, code))| (*k, code.clone())) + .collect(), + Network::Nightly => BATCH_PROOF_LATEST_BITCOIN_GUESTS + .iter() + .map(|(k, (_, code))| (*k, code.clone())) + .collect(), + } + } + + fn get_light_client_elfs(&self) -> HashMap> { + match self.network { + Network::Mainnet => LIGHT_CLIENT_MAINNET_GUESTS + .iter() + .map(|(k, (_, code))| (*k, code.clone())) + .collect(), + Network::Testnet => LIGHT_CLIENT_TESTNET_GUESTS + .iter() + .map(|(k, (_, code))| (*k, code.clone())) + .collect(), + Network::Nightly => LIGHT_CLIENT_LATEST_BITCOIN_GUESTS + .iter() + .map(|(k, (_, code))| (*k, code.clone())) + .collect(), + } + } + + fn get_batch_proof_code_commitments( + &self, + ) -> HashMap::CodeCommitment> { + match self.network { + Network::Mainnet => BATCH_PROOF_MAINNET_GUESTS + .iter() + .map(|(k, (id, _))| (*k, *id)) + .collect(), + Network::Testnet => BATCH_PROOF_TESTNET_GUESTS + .iter() + .map(|(k, (id, _))| (*k, *id)) + .collect(), + Network::Nightly => BATCH_PROOF_LATEST_BITCOIN_GUESTS + .iter() + .map(|(k, (id, _))| (*k, *id)) + .collect(), + } + } + + fn get_light_client_proof_code_commitment( + &self, + ) -> HashMap::CodeCommitment> { + match self.network { + Network::Mainnet => LIGHT_CLIENT_MAINNET_GUESTS + .iter() + .map(|(k, (id, _))| (*k, *id)) + .collect(), + Network::Testnet => LIGHT_CLIENT_TESTNET_GUESTS + .iter() + .map(|(k, (id, _))| (*k, *id)) + .collect(), + Network::Nightly => LIGHT_CLIENT_LATEST_BITCOIN_GUESTS + .iter() + .map(|(k, (id, _))| (*k, *id)) + .collect(), + } + } + 
#[instrument(level = "trace", skip_all)] - async fn create_batch_prover_service( + async fn create_prover_service( &self, - prover_config: BatchProverConfig, - _rollup_config: &FullNodeConfig, + proving_mode: ProverGuestRunConfig, da_service: &Arc, + da_verifier: Self::DaVerifier, ledger_db: LedgerDB, ) -> Self::ProverService { - let vm = Risc0BonsaiHost::new(citrea_risc0::BATCH_PROOF_BITCOIN_ELF, ledger_db.clone()); + let vm = Risc0BonsaiHost::new(ledger_db.clone()); // let vm = SP1Host::new( // include_bytes!("../guests/sp1/batch-prover-bitcoin/elf/zkvm-elf"), // ledger_db.clone(), @@ -192,12 +256,7 @@ impl RollupBlueprint for BitcoinRollup { let zk_stf = StfBlueprint::new(); let zk_storage = ZkStorage::new(); - let da_verifier = BitcoinVerifier::new(RollupParams { - to_light_client_prefix: TO_LIGHT_CLIENT_PREFIX.to_vec(), - to_batch_proof_prefix: TO_BATCH_PROOF_PREFIX.to_vec(), - }); - - let proof_mode = match prover_config.proving_mode { + let proof_mode = match proving_mode { ProverGuestRunConfig::Skip => ProofGenMode::Skip, ProverGuestRunConfig::Simulate => { let stf_verifier = StateTransitionVerifier::new(zk_stf, da_verifier); @@ -216,38 +275,4 @@ impl RollupBlueprint for BitcoinRollup { ) .expect("Should be able to instantiate prover service") } - - #[instrument(level = "trace", skip_all)] - async fn create_light_client_prover_service( - &self, - prover_config: LightClientProverConfig, - _rollup_config: &FullNodeConfig, - da_service: &Arc, - ledger_db: LedgerDB, - ) -> Self::ProverService { - let vm = Risc0BonsaiHost::new( - citrea_risc0::LIGHT_CLIENT_PROOF_BITCOIN_ELF, - ledger_db.clone(), - ); - let zk_stf = StfBlueprint::new(); - let zk_storage = ZkStorage::new(); - - let da_verifier = BitcoinVerifier::new(RollupParams { - to_light_client_prefix: TO_LIGHT_CLIENT_PREFIX.to_vec(), - to_batch_proof_prefix: TO_BATCH_PROOF_PREFIX.to_vec(), - }); - - let proof_mode = match prover_config.proving_mode { - ProverGuestRunConfig::Skip => ProofGenMode::Skip, - ProverGuestRunConfig::Simulate => { - let stf_verifier = StateTransitionVerifier::new(zk_stf, da_verifier); - ProofGenMode::Simulate(stf_verifier) - } - ProverGuestRunConfig::Execute => ProofGenMode::Execute, - ProverGuestRunConfig::Prove => ProofGenMode::Prove, - }; - - ParallelProverService::new(da_service.clone(), vm, proof_mode, zk_storage, 1, ledger_db) - .expect("Should be able to instantiate prover service") - } } diff --git a/bin/citrea/src/rollup/mock.rs b/bin/citrea/src/rollup/mock.rs index b87adae9f..72f128679 100644 --- a/bin/citrea/src/rollup/mock.rs +++ b/bin/citrea/src/rollup/mock.rs @@ -4,31 +4,31 @@ use std::sync::Arc; use async_trait::async_trait; use citrea_common::rpc::register_healthcheck_rpc; use citrea_common::tasks::manager::TaskManager; -use citrea_common::{BatchProverConfig, FullNodeConfig, LightClientProverConfig}; +use citrea_common::FullNodeConfig; // use citrea_sp1::host::SP1Host; use citrea_risc0_adapter::host::Risc0BonsaiHost; -use citrea_risc0_adapter::Digest; use citrea_stf::genesis_config::StorageConfig; use citrea_stf::runtime::Runtime; use citrea_stf::verifier::StateTransitionVerifier; use prover_services::{ParallelProverService, ProofGenMode}; use sov_db::ledger_db::LedgerDB; -use sov_mock_da::{MockDaConfig, MockDaService, MockDaSpec}; +use sov_mock_da::{MockDaConfig, MockDaService, MockDaSpec, MockDaVerifier}; use sov_modules_api::default_context::{DefaultContext, ZkDefaultContext}; -use sov_modules_api::{Address, Spec}; +use sov_modules_api::{Address, Spec, SpecId, Zkvm}; use 
sov_modules_rollup_blueprint::RollupBlueprint; use sov_modules_stf_blueprint::StfBlueprint; use sov_prover_storage_manager::ProverStorageManager; -use sov_rollup_interface::spec::SpecId; -use sov_rollup_interface::zk::Zkvm; use sov_state::ZkStorage; use sov_stf_runner::ProverGuestRunConfig; use tokio::sync::broadcast; -use crate::CitreaRollupBlueprint; +use crate::guests::{BATCH_PROOF_LATEST_MOCK_GUESTS, LIGHT_CLIENT_LATEST_MOCK_GUESTS}; +use crate::{CitreaRollupBlueprint, Network}; /// Rollup with MockDa -pub struct MockDemoRollup {} +pub struct MockDemoRollup { + network: Network, +} impl CitreaRollupBlueprint for MockDemoRollup {} @@ -37,24 +37,21 @@ impl RollupBlueprint for MockDemoRollup { type DaService = MockDaService; type DaSpec = MockDaSpec; type DaConfig = MockDaConfig; - type Vm = Risc0BonsaiHost<'static>; - + type DaVerifier = MockDaVerifier; + type Vm = Risc0BonsaiHost; type ZkContext = ZkDefaultContext; type NativeContext = DefaultContext; - type StorageManager = ProverStorageManager; - type ZkRuntime = Runtime; type NativeRuntime = Runtime; - type ProverService = ParallelProverService< Self::DaService, Self::Vm, StfBlueprint, >; - fn new() -> Self { - Self {} + fn new(network: Network) -> Self { + Self { network } } fn create_rpc_methods( @@ -88,21 +85,6 @@ impl RollupBlueprint for MockDemoRollup { Ok(rpc_methods) } - fn get_batch_prover_code_commitments_by_spec( - &self, - ) -> HashMap::CodeCommitment> { - let mut map = HashMap::new(); - map.insert( - SpecId::Genesis, - Digest::new(citrea_risc0::BATCH_PROOF_MOCK_ID), - ); - map - } - - fn get_light_client_prover_code_commitment(&self) -> ::CodeCommitment { - Digest::new(citrea_risc0::LIGHT_CLIENT_PROOF_MOCK_ID) - } - async fn create_da_service( &self, rollup_config: &FullNodeConfig, @@ -115,46 +97,95 @@ impl RollupBlueprint for MockDemoRollup { ))) } - async fn create_batch_prover_service( - &self, - prover_config: BatchProverConfig, - _rollup_config: &FullNodeConfig, - da_service: &Arc, - ledger_db: LedgerDB, - ) -> Self::ProverService { - let vm = Risc0BonsaiHost::new(citrea_risc0::BATCH_PROOF_MOCK_ELF, ledger_db.clone()); + fn create_da_verifier(&self) -> Self::DaVerifier { + Default::default() + } - let zk_stf = StfBlueprint::new(); - let zk_storage = ZkStorage::new(); - let da_verifier = Default::default(); + fn get_batch_proof_elfs(&self) -> HashMap> { + match self.network { + Network::Mainnet => BATCH_PROOF_LATEST_MOCK_GUESTS + .iter() + .map(|(k, (_, code))| (*k, code.clone())) + .collect(), + Network::Testnet => BATCH_PROOF_LATEST_MOCK_GUESTS + .iter() + .map(|(k, (_, code))| (*k, code.clone())) + .collect(), + Network::Nightly => BATCH_PROOF_LATEST_MOCK_GUESTS + .iter() + .map(|(k, (_, code))| (*k, code.clone())) + .collect(), + } + } - let proof_mode = match prover_config.proving_mode { - ProverGuestRunConfig::Skip => ProofGenMode::Skip, - ProverGuestRunConfig::Simulate => { - let stf_verifier = StateTransitionVerifier::new(zk_stf, da_verifier); - ProofGenMode::Simulate(stf_verifier) - } - ProverGuestRunConfig::Execute => ProofGenMode::Execute, - ProverGuestRunConfig::Prove => ProofGenMode::Prove, - }; + fn get_light_client_elfs(&self) -> HashMap> { + match self.network { + Network::Mainnet => LIGHT_CLIENT_LATEST_MOCK_GUESTS + .iter() + .map(|(k, (_, code))| (*k, code.clone())) + .collect(), + Network::Testnet => LIGHT_CLIENT_LATEST_MOCK_GUESTS + .iter() + .map(|(k, (_, code))| (*k, code.clone())) + .collect(), + Network::Nightly => LIGHT_CLIENT_LATEST_MOCK_GUESTS + .iter() + .map(|(k, (_, code))| (*k, 
code.clone())) + .collect(), + } + } - ParallelProverService::new(da_service.clone(), vm, proof_mode, zk_storage, 1, ledger_db) - .expect("Should be able to instantiate prover service") + fn get_batch_proof_code_commitments( + &self, + ) -> HashMap::CodeCommitment> { + match self.network { + Network::Mainnet => BATCH_PROOF_LATEST_MOCK_GUESTS + .iter() + .map(|(k, (id, _))| (*k, *id)) + .collect(), + Network::Testnet => BATCH_PROOF_LATEST_MOCK_GUESTS + .iter() + .map(|(k, (id, _))| (*k, *id)) + .collect(), + Network::Nightly => BATCH_PROOF_LATEST_MOCK_GUESTS + .iter() + .map(|(k, (id, _))| (*k, *id)) + .collect(), + } } - async fn create_light_client_prover_service( + fn get_light_client_proof_code_commitment( &self, - prover_config: LightClientProverConfig, - _rollup_config: &FullNodeConfig, + ) -> HashMap::CodeCommitment> { + match self.network { + Network::Mainnet => LIGHT_CLIENT_LATEST_MOCK_GUESTS + .iter() + .map(|(k, (id, _))| (*k, *id)) + .collect(), + Network::Testnet => LIGHT_CLIENT_LATEST_MOCK_GUESTS + .iter() + .map(|(k, (id, _))| (*k, *id)) + .collect(), + Network::Nightly => LIGHT_CLIENT_LATEST_MOCK_GUESTS + .iter() + .map(|(k, (id, _))| (*k, *id)) + .collect(), + } + } + + async fn create_prover_service( + &self, + proving_mode: ProverGuestRunConfig, da_service: &Arc, + da_verifier: Self::DaVerifier, ledger_db: LedgerDB, ) -> Self::ProverService { - let vm = Risc0BonsaiHost::new(citrea_risc0::LIGHT_CLIENT_PROOF_MOCK_ELF, ledger_db.clone()); + let vm = Risc0BonsaiHost::new(ledger_db.clone()); + let zk_stf = StfBlueprint::new(); let zk_storage = ZkStorage::new(); - let da_verifier = Default::default(); - let proof_mode = match prover_config.proving_mode { + let proof_mode = match proving_mode { ProverGuestRunConfig::Skip => ProofGenMode::Skip, ProverGuestRunConfig::Simulate => { let stf_verifier = StateTransitionVerifier::new(zk_stf, da_verifier); diff --git a/bin/citrea/src/rollup/mod.rs b/bin/citrea/src/rollup/mod.rs index bdaef22f6..a1f1a69d6 100644 --- a/bin/citrea/src/rollup/mod.rs +++ b/bin/citrea/src/rollup/mod.rs @@ -224,7 +224,7 @@ pub trait CitreaRollupBlueprint: RollupBlueprint { } }; - let code_commitments_by_spec = self.get_batch_prover_code_commitments_by_spec(); + let code_commitments_by_spec = self.get_batch_proof_code_commitments(); let current_l2_height = ledger_db .get_head_soft_confirmation() @@ -275,6 +275,8 @@ pub trait CitreaRollupBlueprint: RollupBlueprint { .create_da_service(&rollup_config, true, &mut task_manager) .await?; + let da_verifier = self.create_da_verifier(); + // Migrate before constructing ledger_db instance so that no lock is present. 
let migrator = LedgerDBMigrator::new( rollup_config.storage.path.as_path(), @@ -289,10 +291,10 @@ pub trait CitreaRollupBlueprint: RollupBlueprint { let ledger_db = self.create_ledger_db(&rocksdb_config); let prover_service = self - .create_batch_prover_service( - prover_config.clone(), - &rollup_config, + .create_prover_service( + prover_config.proving_mode, &da_service, + da_verifier, ledger_db.clone(), ) .await; @@ -348,7 +350,8 @@ pub trait CitreaRollupBlueprint: RollupBlueprint { } }; - let code_commitments_by_spec = self.get_batch_prover_code_commitments_by_spec(); + let code_commitments_by_spec = self.get_batch_proof_code_commitments(); + let elfs_by_spec = self.get_batch_proof_elfs(); let current_l2_height = ledger_db .get_head_soft_confirmation() @@ -371,6 +374,7 @@ pub trait CitreaRollupBlueprint: RollupBlueprint { Arc::new(prover_service), prover_config, code_commitments_by_spec, + elfs_by_spec, fork_manager, soft_confirmation_tx, task_manager, @@ -403,6 +407,7 @@ pub trait CitreaRollupBlueprint: RollupBlueprint { let da_service = self .create_da_service(&rollup_config, true, &mut task_manager) .await?; + let da_verifier = self.create_da_verifier(); let rocksdb_config = RocksdbConfig::new( rollup_config.storage.path.as_path(), @@ -411,10 +416,10 @@ pub trait CitreaRollupBlueprint: RollupBlueprint { let ledger_db = self.create_ledger_db(&rocksdb_config); let prover_service = self - .create_light_client_prover_service( - prover_config.clone(), - &rollup_config, + .create_prover_service( + prover_config.proving_mode, &da_service, + da_verifier, ledger_db.clone(), ) .await; @@ -436,9 +441,9 @@ pub trait CitreaRollupBlueprint: RollupBlueprint { None, )?; - let batch_prover_code_commitments_by_spec = - self.get_batch_prover_code_commitments_by_spec(); - let light_client_prover_code_commitment = self.get_light_client_prover_code_commitment(); + let batch_prover_code_commitments_by_spec = self.get_batch_proof_code_commitments(); + let light_client_prover_code_commitment = self.get_light_client_proof_code_commitment(); + let light_client_prover_elfs = self.get_light_client_elfs(); let current_l2_height = ledger_db .get_head_soft_confirmation() @@ -459,6 +464,7 @@ pub trait CitreaRollupBlueprint: RollupBlueprint { prover_config, batch_prover_code_commitments_by_spec, light_client_prover_code_commitment, + light_client_prover_elfs, task_manager, )?; diff --git a/bin/citrea/tests/test_helpers/mod.rs b/bin/citrea/tests/test_helpers/mod.rs index 97c1ae266..77816f30b 100644 --- a/bin/citrea/tests/test_helpers/mod.rs +++ b/bin/citrea/tests/test_helpers/mod.rs @@ -14,6 +14,7 @@ use citrea_stf::genesis_config::GenesisPaths; use sov_mock_da::{MockAddress, MockBlock, MockDaConfig, MockDaService}; use sov_modules_api::default_signature::private_key::DefaultPrivateKey; use sov_modules_api::PrivateKey; +use sov_modules_rollup_blueprint::{Network, RollupBlueprint as _}; use sov_rollup_interface::da::{BlobReaderTrait, DaData, SequencerCommitment}; use sov_rollup_interface::services::da::{DaService, SlotData}; use sov_rollup_interface::zk::Proof; @@ -48,7 +49,7 @@ pub async fn start_rollup( // Fake receipts are receipts without the proof, they only include the journal, which makes them suitable for testing and development std::env::set_var("RISC0_DEV_MODE", "1"); - let mock_demo_rollup = MockDemoRollup {}; + let mock_demo_rollup = MockDemoRollup::new(Network::Testnet); if sequencer_config.is_some() && rollup_prover_config.is_some() { panic!("Both sequencer and batch prover config cannot be set at the 
same time"); diff --git a/crates/batch-prover/src/da_block_handler.rs b/crates/batch-prover/src/da_block_handler.rs index dcbd42ef6..9c6b93659 100644 --- a/crates/batch-prover/src/da_block_handler.rs +++ b/crates/batch-prover/src/da_block_handler.rs @@ -11,12 +11,14 @@ use citrea_common::da::get_da_block_at_height; use citrea_common::utils::merge_state_diffs; use citrea_common::BatchProverConfig; use citrea_primitives::compression::compress_blob; +use citrea_primitives::forks::FORKS; use citrea_primitives::MAX_TXBODY_SIZE; use rand::Rng; use serde::de::DeserializeOwned; use serde::Serialize; use sov_db::ledger_db::BatchProverLedgerOps; use sov_db::schema::types::{BatchNumber, SlotNumber}; +use sov_modules_api::fork::fork_from_block_number; use sov_modules_api::{DaSpec, StateDiff, Zkvm}; use sov_rollup_interface::da::{BlockHeaderTrait, SequencerCommitment}; use sov_rollup_interface::services::da::{DaService, SlotData}; @@ -61,6 +63,7 @@ where sequencer_pub_key: Vec, sequencer_da_pub_key: Vec, code_commitments_by_spec: HashMap, + elfs_by_spec: HashMap>, l1_block_cache: Arc>>, skip_submission_until_l1: u64, pending_l1_blocks: VecDeque<::FilteredBlock>, @@ -94,6 +97,7 @@ where sequencer_pub_key: Vec, sequencer_da_pub_key: Vec, code_commitments_by_spec: HashMap, + elfs_by_spec: HashMap>, skip_submission_until_l1: u64, l1_block_cache: Arc>>, ) -> Self { @@ -105,6 +109,7 @@ where sequencer_pub_key, sequencer_da_pub_key, code_commitments_by_spec, + elfs_by_spec, skip_submission_until_l1, l1_block_cache, pending_l1_blocks: VecDeque::new(), @@ -244,6 +249,7 @@ where self.prover_service.clone(), self.ledger_db.clone(), self.code_commitments_by_spec.clone(), + self.elfs_by_spec.clone(), l1_block.clone(), sequencer_commitments, inputs, @@ -428,6 +434,13 @@ pub(crate) fn break_sequencer_commitments_into_groups( ) -> anyhow::Result>> { let mut result_range = vec![]; + // This assumes that sequencer commitments are sorted. + let first_block_number = sequencer_commitments + .first() + .ok_or(anyhow!("No Sequencer commitments found"))? + .l2_start_block_number; + let mut current_spec = fork_from_block_number(FORKS, first_block_number).spec_id; + let mut range = 0usize..=0usize; let mut cumulative_state_diff = StateDiff::new(); for (index, sequencer_commitment) in sequencer_commitments.iter().enumerate() { @@ -456,14 +469,15 @@ pub(crate) fn break_sequencer_commitments_into_groups( // Threshold is checked by comparing compressed state diff size as the data will be compressed before it is written on DA let state_diff_threshold_reached = compressed_state_diff.len() > MAX_TXBODY_SIZE; - if state_diff_threshold_reached { - // We've exceeded the limit with the current commitments - // so we have to stop at the previous one. 
- result_range.push(range); + let commitment_spec = + fork_from_block_number(FORKS, sequencer_commitment.l2_end_block_number).spec_id; + if commitment_spec != current_spec || state_diff_threshold_reached { + result_range.push(range); // Reset the cumulative state diff to be equal to the current commitment state diff cumulative_state_diff = sequencer_commitment_state_diff; range = index..=index; + current_spec = commitment_spec } else { range = *range.start()..=index; } diff --git a/crates/batch-prover/src/proving.rs b/crates/batch-prover/src/proving.rs index d42a4a6f9..438e3af20 100644 --- a/crates/batch-prover/src/proving.rs +++ b/crates/batch-prover/src/proving.rs @@ -187,6 +187,7 @@ pub(crate) async fn prove_l1( prover_service: Arc, ledger: DB, code_commitments_by_spec: HashMap, + elfs_by_spec: HashMap>, l1_block: Da::FilteredBlock, sequencer_commitments: Vec, inputs: Vec>, @@ -221,8 +222,18 @@ where } } + let last_l2_height = sequencer_commitments + .last() + .expect("Should have at least 1 commitment") + .l2_end_block_number; + let current_spec = fork_from_block_number(FORKS, last_l2_height).spec_id; + let elf = elfs_by_spec + .get(¤t_spec) + .expect("Every fork should have an elf attached") + .clone(); + // Prove all proofs in parallel - let proofs = prover_service.prove().await?; + let proofs = prover_service.prove(elf).await?; let txs_and_proofs = prover_service.submit_proofs(proofs).await?; @@ -298,13 +309,15 @@ where >(&proof) .expect("Proof should be deserializable"); - info!("Verifying proof!"); - let last_active_spec_id = fork_from_block_number(FORKS, circuit_output.last_l2_height).spec_id; + let code_commitment = code_commitments_by_spec .get(&last_active_spec_id) .expect("Proof public input must contain valid spec id"); + + info!("Verifying proof with image ID: {:?}", code_commitment); + Vm::verify(proof.as_slice(), code_commitment) .map_err(|err| anyhow!("Failed to verify proof: {:?}. 
Skipping it...", err))?; diff --git a/crates/batch-prover/src/rpc.rs b/crates/batch-prover/src/rpc.rs index 4bacb4cc9..f949bef6c 100644 --- a/crates/batch-prover/src/rpc.rs +++ b/crates/batch-prover/src/rpc.rs @@ -52,6 +52,7 @@ where pub sequencer_pub_key: Vec, pub l1_block_cache: Arc>>, pub code_commitments_by_spec: HashMap, + pub elfs_by_spec: HashMap>, pub(crate) phantom_c: PhantomData C>, pub(crate) phantom_vm: PhantomData Vm>, pub(crate) phantom_sr: PhantomData StateRoot>, @@ -238,6 +239,7 @@ where self.context.prover_service.clone(), self.context.ledger.clone(), self.context.code_commitments_by_spec.clone(), + self.context.elfs_by_spec.clone(), l1_block, sequencer_commitments, inputs, diff --git a/crates/batch-prover/src/runner.rs b/crates/batch-prover/src/runner.rs index 8c5200080..dacc2f276 100644 --- a/crates/batch-prover/src/runner.rs +++ b/crates/batch-prover/src/runner.rs @@ -64,6 +64,7 @@ where phantom: std::marker::PhantomData, prover_config: BatchProverConfig, code_commitments_by_spec: HashMap, + elfs_by_spec: HashMap>, l1_block_cache: Arc>>, sync_blocks_count: u64, fork_manager: ForkManager, @@ -103,6 +104,7 @@ where prover_service: Arc, prover_config: BatchProverConfig, code_commitments_by_spec: HashMap, + elfs_by_spec: HashMap>, fork_manager: ForkManager, soft_confirmation_tx: broadcast::Sender, task_manager: TaskManager<()>, @@ -150,6 +152,7 @@ where phantom: std::marker::PhantomData, prover_config, code_commitments_by_spec, + elfs_by_spec, l1_block_cache: Arc::new(Mutex::new(L1BlockCache::new())), sync_blocks_count: runner_config.sync_blocks_count, fork_manager, @@ -171,6 +174,7 @@ where l1_block_cache: self.l1_block_cache.clone(), prover_service: self.prover_service.clone(), code_commitments_by_spec: self.code_commitments_by_spec.clone(), + elfs_by_spec: self.elfs_by_spec.clone(), phantom_c: std::marker::PhantomData, phantom_vm: std::marker::PhantomData, phantom_sr: std::marker::PhantomData, @@ -279,6 +283,7 @@ where let sequencer_pub_key = self.sequencer_pub_key.clone(); let sequencer_da_pub_key = self.sequencer_da_pub_key.clone(); let code_commitments_by_spec = self.code_commitments_by_spec.clone(); + let elfs_by_spec = self.elfs_by_spec.clone(); let l1_block_cache = self.l1_block_cache.clone(); self.task_manager.spawn(|cancellation_token| async move { @@ -298,6 +303,7 @@ where sequencer_pub_key, sequencer_da_pub_key, code_commitments_by_spec, + elfs_by_spec, skip_submission_until_l1, l1_block_cache.clone(), ); diff --git a/crates/batch-prover/tests/prover_tests.rs b/crates/batch-prover/tests/prover_tests.rs index bc0f036eb..6b0fd78bb 100644 --- a/crates/batch-prover/tests/prover_tests.rs +++ b/crates/batch-prover/tests/prover_tests.rs @@ -32,7 +32,7 @@ async fn test_successful_prover_execution() { .await; vm.make_proof(); - let proofs = prover_service.prove().await.unwrap(); + let proofs = prover_service.prove([0; 1].to_vec()).await.unwrap(); prover_service.submit_proofs(proofs).await.unwrap(); } @@ -66,7 +66,7 @@ async fn test_parallel_proving_and_submit() { .await; vm.make_proof(); - let proofs = prover_service.prove().await.unwrap(); + let proofs = prover_service.prove([0; 1].to_vec()).await.unwrap(); let txs_and_proofs = prover_service.submit_proofs(proofs).await.unwrap(); assert_eq!(txs_and_proofs.len(), 2); diff --git a/crates/fullnode/src/da_block_handler.rs b/crates/fullnode/src/da_block_handler.rs index 8c433697c..a4c225f9c 100644 --- a/crates/fullnode/src/da_block_handler.rs +++ b/crates/fullnode/src/da_block_handler.rs @@ -296,7 +296,7 @@ where 
"Processing zk proof at height: {}", l1_block.header().height() ); - tracing::debug!("ZK proof: {:?}", proof); + tracing::trace!("ZK proof: {:?}", proof); // TODO: select output version based on spec let batch_proof_output = Vm::extract_output::< diff --git a/crates/light-client-prover/Cargo.toml b/crates/light-client-prover/Cargo.toml index c0df76b43..f8c07dc71 100644 --- a/crates/light-client-prover/Cargo.toml +++ b/crates/light-client-prover/Cargo.toml @@ -11,6 +11,7 @@ repository.workspace = true [dependencies] # Citrea Deps citrea-common = { path = "../common", optional = true } +citrea-primitives = { path = "../primitives", optional = true } sequencer-client = { path = "../sequencer-client", optional = true } # Sov SDK deps @@ -40,6 +41,7 @@ tempfile = { workspace = true } [features] default = [] native = [ + "dep:citrea-primitives", "dep:citrea-common", "dep:sequencer-client", "dep:sov-db", diff --git a/crates/light-client-prover/src/da_block_handler.rs b/crates/light-client-prover/src/da_block_handler.rs index 572e49e61..ec6b50a1a 100644 --- a/crates/light-client-prover/src/da_block_handler.rs +++ b/crates/light-client-prover/src/da_block_handler.rs @@ -1,14 +1,17 @@ use std::collections::{HashMap, VecDeque}; use std::sync::Arc; +use anyhow::anyhow; use borsh::BorshDeserialize; use citrea_common::cache::L1BlockCache; use citrea_common::da::get_da_block_at_height; use citrea_common::LightClientProverConfig; +use citrea_primitives::forks::FORKS; use sequencer_client::SequencerClient; use sov_db::ledger_db::{LightClientProverLedgerOps, SharedLedgerOps}; use sov_db::schema::types::{SlotNumber, StoredLightClientProofOutput}; -use sov_modules_api::{BlobReaderTrait, DaSpec, Zkvm}; +use sov_modules_api::fork::fork_from_block_number; +use sov_modules_api::{BatchProofCircuitOutputV2, BlobReaderTrait, DaSpec, Zkvm}; use sov_rollup_interface::da::{BlockHeaderTrait, DaDataLightClient, DaNamespace}; use sov_rollup_interface::services::da::{DaService, SlotData}; use sov_rollup_interface::spec::SpecId; @@ -34,8 +37,9 @@ where ledger_db: DB, da_service: Arc, batch_prover_da_pub_key: Vec, - batch_proof_code_commitments_by_spec: HashMap, - light_client_proof_code_commitment: Vm::CodeCommitment, + batch_proof_code_commitments: HashMap, + light_client_proof_code_commitments: HashMap, + light_client_proof_elfs: HashMap>, l1_block_cache: Arc>>, queued_l1_blocks: VecDeque<::FilteredBlock>, sequencer_client: Arc, @@ -55,8 +59,9 @@ where ledger_db: DB, da_service: Arc, batch_prover_da_pub_key: Vec, - batch_proof_code_commitments_by_spec: HashMap, - light_client_proof_code_commitment: Vm::CodeCommitment, + batch_proof_code_commitments: HashMap, + light_client_proof_code_commitments: HashMap, + light_client_proof_elfs: HashMap>, sequencer_client: Arc, ) -> Self { Self { @@ -65,8 +70,9 @@ where ledger_db, da_service, batch_prover_da_pub_key, - batch_proof_code_commitments_by_spec, - light_client_proof_code_commitment, + batch_proof_code_commitments, + light_client_proof_code_commitments, + light_client_proof_elfs, l1_block_cache: Arc::new(Mutex::new(L1BlockCache::new())), queued_l1_blocks: VecDeque::new(), sequencer_client, @@ -150,16 +156,20 @@ where batch_proofs.len() ); - // Do any kind of ordering etc. 
on batch proofs here - // If you do so, don't forget to do the same inside zk - let batch_proof_method_id = self - .batch_proof_code_commitments_by_spec - .get(&SpecId::Genesis) - .expect("Batch proof code commitment not found"); - let mut assumptions = vec![]; for batch_proof in batch_proofs { if let DaDataLightClient::Complete(proof) = batch_proof { + let batch_proof_output = Vm::extract_output::< + ::Spec, + BatchProofCircuitOutputV2<::Spec, [u8; 32]>, + >(&proof) + .map_err(|_| anyhow!("Proof should be deserializable"))?; + let last_l2_height = batch_proof_output.last_l2_height; + let current_spec = fork_from_block_number(FORKS, last_l2_height).spec_id; + let batch_proof_method_id = self + .batch_proof_code_commitments + .get(¤t_spec) + .expect("Batch proof code commitment not found"); if let Err(e) = Vm::verify(proof.as_slice(), batch_proof_method_id) { tracing::error!("Failed to verify batch proof: {:?}", e); continue; @@ -170,7 +180,7 @@ where let previous_l1_height = l1_height - 1; let mut light_client_proof_journal = None; let mut l2_genesis_state_root = None; - match self + let l2_last_height = match self .ledger_db .get_light_client_proof_data_by_l1_height(previous_l1_height)? { @@ -179,14 +189,15 @@ where let output = data.light_client_proof_output; assumptions.push(proof); light_client_proof_journal = Some(borsh::to_vec(&output)?); + Some(output.last_l2_height) } None => { - let initial_l1_height = self + let soft_confirmation = self .sequencer_client .get_soft_confirmation::(1) .await? - .unwrap() - .da_slot_height; + .unwrap(); + let initial_l1_height = soft_confirmation.da_slot_height; // If the prev block is the block before the first processed l1 block // then we don't have a previous light client proof, so just give an info if previous_l1_height == initial_l1_height { @@ -209,8 +220,27 @@ where previous_l1_height ); } + Some(soft_confirmation.l2_height) } - } + }; + + let l2_last_height = l2_last_height.ok_or(anyhow!( + "Could not determine the last L2 height for batch proof" + ))?; + let current_fork = fork_from_block_number(FORKS, l2_last_height); + let batch_proof_method_id = self + .batch_proof_code_commitments + .get(¤t_fork.spec_id) + .expect("Fork should have a guest code attached"); + let light_client_proof_code_commitment = self + .light_client_proof_code_commitments + .get(¤t_fork.spec_id) + .expect("Fork should have a guest code attached"); + let light_client_elf = self + .light_client_proof_elfs + .get(¤t_fork.spec_id) + .expect("Fork should have a guest code attached") + .clone(); let circuit_input = LightClientCircuitInput { da_data, @@ -219,12 +249,14 @@ where da_block_header: l1_block.header().clone(), batch_prover_da_pub_key: self.batch_prover_da_pub_key.clone(), batch_proof_method_id: batch_proof_method_id.clone().into(), - light_client_proof_method_id: self.light_client_proof_code_commitment.clone().into(), + light_client_proof_method_id: light_client_proof_code_commitment.clone().into(), previous_light_client_proof_journal: light_client_proof_journal, l2_genesis_state_root, }; - let proof = self.prove(circuit_input, assumptions).await?; + let proof = self + .prove(light_client_elf, circuit_input, assumptions) + .await?; let circuit_output = Vm::extract_output::>(&proof) @@ -285,12 +317,12 @@ where } } }); - batch_proofs } async fn prove( &self, + light_client_elf: Vec, circuit_input: LightClientCircuitInput<::Spec>, assumptions: Vec>, ) -> Result { @@ -300,7 +332,7 @@ where .add_proof_data((borsh::to_vec(&circuit_input)?, assumptions)) .await; - let 
proofs = self.prover_service.prove().await?; + let proofs = self.prover_service.prove(light_client_elf).await?; assert_eq!(proofs.len(), 1); diff --git a/crates/light-client-prover/src/runner.rs b/crates/light-client-prover/src/runner.rs index 67a5b6d6c..4b8b63124 100644 --- a/crates/light-client-prover/src/runner.rs +++ b/crates/light-client-prover/src/runner.rs @@ -74,7 +74,8 @@ where prover_config: LightClientProverConfig, task_manager: TaskManager<()>, batch_proof_commitments_by_spec: HashMap, - light_client_proof_commitment: Vm::CodeCommitment, + light_client_proof_commitment: HashMap, + light_client_proof_elfs: HashMap>, } impl CitreaLightClientProver @@ -94,7 +95,8 @@ where prover_service: Arc, prover_config: LightClientProverConfig, batch_proof_commitments_by_spec: HashMap, - light_client_proof_commitment: Vm::CodeCommitment, + light_client_proof_commitment: HashMap, + light_client_proof_elfs: HashMap>, task_manager: TaskManager<()>, ) -> Result { let sequencer_client_url = runner_config.sequencer_client_url.clone(); @@ -110,6 +112,7 @@ where task_manager, batch_proof_commitments_by_spec, light_client_proof_commitment, + light_client_proof_elfs, }) } @@ -192,6 +195,7 @@ where let batch_prover_da_pub_key = self.public_keys.prover_da_pub_key.clone(); let batch_proof_commitments_by_spec = self.batch_proof_commitments_by_spec.clone(); let light_client_proof_commitment = self.light_client_proof_commitment.clone(); + let light_client_proof_elfs = self.light_client_proof_elfs.clone(); let sequencer_client = self.sequencer_client.clone(); self.task_manager.spawn(|cancellation_token| async move { @@ -203,6 +207,7 @@ where batch_prover_da_pub_key, batch_proof_commitments_by_spec, light_client_proof_commitment, + light_client_proof_elfs, Arc::new(sequencer_client), ); l1_block_handler diff --git a/crates/prover-services/src/parallel/mod.rs b/crates/prover-services/src/parallel/mod.rs index 904b06dbb..c675d09ce 100644 --- a/crates/prover-services/src/parallel/mod.rs +++ b/crates/prover-services/src/parallel/mod.rs @@ -113,7 +113,7 @@ where ) } - async fn prove_all(&self, proof_queue: Vec) -> Vec { + async fn prove_all(&self, elf: Vec, proof_queue: Vec) -> Vec { let num_threads = self.thread_pool.current_num_threads(); // Future buffer to keep track of ongoing provings @@ -128,7 +128,7 @@ where ongoing_proofs = remaining_proofs; } - let proof_fut = self.prove_one(proof_data); + let proof_fut = self.prove_one(elf.clone(), proof_data); ongoing_proofs.push(Box::pin(async move { let proof = proof_fut.await; (idx, proof) @@ -144,7 +144,7 @@ where proofs } - async fn prove_one(&self, (input, assumptions): ProofData) -> Proof { + async fn prove_one(&self, elf: Vec, (input, assumptions): ProofData) -> Proof { let mut vm = self.vm.clone(); let zk_storage = self.zk_storage.clone(); let proof_mode = self.proof_mode.clone(); @@ -157,7 +157,7 @@ where let (tx, rx) = oneshot::channel(); self.thread_pool.spawn(move || { let proof = - make_proof(vm, zk_storage, proof_mode).expect("Proof creation must not fail"); + make_proof(vm, elf, zk_storage, proof_mode).expect("Proof creation must not fail"); let _ = tx.send(proof); }); @@ -188,7 +188,7 @@ where proof_queue.push(proof_data); } - async fn prove(&self) -> anyhow::Result> { + async fn prove(&self, elf: Vec) -> anyhow::Result> { let mut proof_queue = self.proof_queue.lock().await; if let ProofGenMode::Skip = *self.proof_mode.lock().await { tracing::debug!("Skipped proving {} proofs", proof_queue.len()); @@ -205,7 +205,7 @@ where let proof_queue = 
std::mem::take(&mut *proof_queue); // Prove all - Ok(self.prove_all(proof_queue).await) + Ok(self.prove_all(elf, proof_queue).await) } async fn submit_proofs( @@ -232,6 +232,7 @@ where fn make_proof( mut vm: Vm, + elf: Vec, zk_storage: Stf::PreState, proof_mode: Arc>>, ) -> Result @@ -248,7 +249,7 @@ where .run_sequencer_commitments_in_da_slot(vm.simulate_with_hints(), zk_storage) .map(|_| Vec::default()) .map_err(|e| anyhow::anyhow!("Guest execution must succeed but failed with {:?}", e)), - ProofGenMode::Execute => vm.run(false), - ProofGenMode::Prove => vm.run(true), + ProofGenMode::Execute => vm.run(elf, false), + ProofGenMode::Prove => vm.run(elf, true), } } diff --git a/crates/risc0/src/host.rs b/crates/risc0/src/host.rs index 6e8a05ebd..cabe1fd85 100644 --- a/crates/risc0/src/host.rs +++ b/crates/risc0/src/host.rs @@ -35,23 +35,15 @@ pub struct RecoveredBonsaiSession { /// A [`Risc0BonsaiHost`] stores a binary to execute in the Risc0 VM and prove in the Risc0 Bonsai API. #[derive(Clone)] -pub struct Risc0BonsaiHost<'a> { - elf: &'a [u8], +pub struct Risc0BonsaiHost { env: Vec, assumptions: Vec, - image_id: Digest, _ledger_db: LedgerDB, } -impl<'a> Risc0BonsaiHost<'a> { +impl Risc0BonsaiHost { /// Create a new Risc0Host to prove the given binary. - pub fn new(elf: &'a [u8], ledger_db: LedgerDB) -> Self { - // Compute the image_id, then upload the ELF with the image_id as its key. - // handle error - let image_id = compute_image_id(elf).unwrap(); - - tracing::trace!("Calculated image id: {:?}", image_id.as_words()); - + pub fn new(ledger_db: LedgerDB) -> Self { match std::env::var("RISC0_PROVER") { Ok(prover) => match prover.as_str() { "bonsai" => { @@ -85,16 +77,14 @@ impl<'a> Risc0BonsaiHost<'a> { } Self { - elf, env: Default::default(), assumptions: vec![], - image_id, _ledger_db: ledger_db, } } } -impl<'a> ZkvmHost for Risc0BonsaiHost<'a> { +impl ZkvmHost for Risc0BonsaiHost { type Guest = Risc0Guest; fn add_hint(&mut self, item: Vec) { @@ -116,7 +106,7 @@ impl<'a> ZkvmHost for Risc0BonsaiHost<'a> { /// Only with_proof = true is supported. /// Proofs are created on the Bonsai API. 
- fn run(&mut self, with_proof: bool) -> Result { + fn run(&mut self, elf: Vec, with_proof: bool) -> Result { if !with_proof { if std::env::var("RISC0_PROVER") == Ok("bonsai".to_string()) { panic!("Bonsai prover requires with_proof to be true"); @@ -145,11 +135,14 @@ impl<'a> ZkvmHost for Risc0BonsaiHost<'a> { tracing::info!("Starting risc0 proving"); let ProveInfo { receipt, stats } = - prover.prove_with_opts(env, self.elf, &ProverOpts::groth16())?; + prover.prove_with_opts(env, &elf, &ProverOpts::groth16())?; tracing::info!("Execution Stats: {:?}", stats); - receipt.verify(self.image_id)?; + let image_id = compute_image_id(&elf)?; + + receipt.verify(image_id)?; + tracing::trace!("Calculated image id: {:?}", image_id.as_words()); tracing::info!("Verified the receipt"); @@ -204,7 +197,7 @@ impl<'a> ZkvmHost for Risc0BonsaiHost<'a> { } } -impl<'host> Zkvm for Risc0BonsaiHost<'host> { +impl Zkvm for Risc0BonsaiHost { type CodeCommitment = Digest; type Error = anyhow::Error; diff --git a/crates/sovereign-sdk/adapters/mock-zkvm/src/lib.rs b/crates/sovereign-sdk/adapters/mock-zkvm/src/lib.rs index 734e00854..1e0b64186 100644 --- a/crates/sovereign-sdk/adapters/mock-zkvm/src/lib.rs +++ b/crates/sovereign-sdk/adapters/mock-zkvm/src/lib.rs @@ -195,7 +195,11 @@ impl sov_rollup_interface::zk::ZkvmHost for MockZkvm { } } - fn run(&mut self, _with_proof: bool) -> Result { + fn run( + &mut self, + _elf: Vec, + _with_proof: bool, + ) -> Result { self.worker_thread_notifier.wait(); Ok(self.committed_data.pop_front().unwrap_or_default()) } diff --git a/crates/sovereign-sdk/full-node/sov-stf-runner/src/prover_service/mod.rs b/crates/sovereign-sdk/full-node/sov-stf-runner/src/prover_service/mod.rs index 64341324a..224418904 100644 --- a/crates/sovereign-sdk/full-node/sov-stf-runner/src/prover_service/mod.rs +++ b/crates/sovereign-sdk/full-node/sov-stf-runner/src/prover_service/mod.rs @@ -89,7 +89,7 @@ pub trait ProverService { async fn add_proof_data(&self, proof_data: ProofData); /// Prove added input and assumptions. - async fn prove(&self) -> anyhow::Result>; + async fn prove(&self, elf: Vec) -> anyhow::Result>; /// Submit proofs to DA. 
async fn submit_proofs( diff --git a/crates/sovereign-sdk/module-system/sov-modules-rollup-blueprint/Cargo.toml b/crates/sovereign-sdk/module-system/sov-modules-rollup-blueprint/Cargo.toml index 35a4029ac..54744223c 100644 --- a/crates/sovereign-sdk/module-system/sov-modules-rollup-blueprint/Cargo.toml +++ b/crates/sovereign-sdk/module-system/sov-modules-rollup-blueprint/Cargo.toml @@ -22,6 +22,7 @@ sov-stf-runner = { path = "../../full-node/sov-stf-runner", features = ["native" anyhow = { workspace = true } async-trait = { workspace = true } +derive_more = { workspace = true, features = ["display"] } jsonrpsee = { workspace = true, features = ["http-client", "server"] } tokio = { workspace = true } tracing = { workspace = true } diff --git a/crates/sovereign-sdk/module-system/sov-modules-rollup-blueprint/src/lib.rs b/crates/sovereign-sdk/module-system/sov-modules-rollup-blueprint/src/lib.rs index d03b72d0a..67896e73e 100644 --- a/crates/sovereign-sdk/module-system/sov-modules-rollup-blueprint/src/lib.rs +++ b/crates/sovereign-sdk/module-system/sov-modules-rollup-blueprint/src/lib.rs @@ -1,26 +1,41 @@ #![deny(missing_docs)] #![doc = include_str!("../README.md")] -mod runtime_rpc; - use std::collections::HashMap; use std::sync::Arc; use async_trait::async_trait; use citrea_common::tasks::manager::TaskManager; -use citrea_common::{BatchProverConfig, FullNodeConfig, LightClientProverConfig}; -pub use runtime_rpc::*; +use citrea_common::FullNodeConfig; +use derive_more::Display; use sov_db::ledger_db::LedgerDB; use sov_db::rocks_db_config::RocksdbConfig; use sov_modules_api::{Context, DaSpec, Spec}; use sov_modules_stf_blueprint::{GenesisParams, Runtime as RuntimeTrait}; +use sov_rollup_interface::da::DaVerifier; use sov_rollup_interface::services::da::DaService; use sov_rollup_interface::spec::SpecId; use sov_rollup_interface::storage::HierarchicalStorageManager; use sov_rollup_interface::zk::{Zkvm, ZkvmHost}; -use sov_stf_runner::ProverService; +use sov_stf_runner::{ProverGuestRunConfig, ProverService}; use tokio::sync::broadcast; +mod runtime_rpc; + +pub use runtime_rpc::*; + +/// The network currently running. +#[derive(Copy, Clone, Default, Debug, Display)] +pub enum Network { + /// Mainnet + #[default] + Mainnet, + /// Testnet + Testnet, + /// nightly + Nightly, +} + /// This trait defines how to crate all the necessary dependencies required by a rollup. #[async_trait] pub trait RollupBlueprint: Sized + Send + Sync { @@ -33,6 +48,9 @@ pub trait RollupBlueprint: Sized + Send + Sync { /// Data Availability config. type DaConfig: Send + Sync; + /// Data Availability verifier. + type DaVerifier: DaVerifier + Send + Sync; + /// Host of a zkVM program. type Vm: ZkvmHost + Zkvm + Send + Sync + 'static; @@ -58,15 +76,23 @@ pub trait RollupBlueprint: Sized + Send + Sync { type ProverService: ProverService + Send + Sync + 'static; /// Creates a new instance of the blueprint. - fn new() -> Self; + fn new(network: Network) -> Self; + + /// Get batch proof guest code elfs by fork. + fn get_batch_proof_elfs(&self) -> HashMap>; + + /// Get light client guest code elfs by fork. + fn get_light_client_elfs(&self) -> HashMap>; /// Get batch prover code commitments by fork. - fn get_batch_prover_code_commitments_by_spec( + fn get_batch_proof_code_commitments( &self, ) -> HashMap::CodeCommitment>; /// Get light client prover code commitment. 
- fn get_light_client_prover_code_commitment(&self) -> ::CodeCommitment; + fn get_light_client_proof_code_commitment( + &self, + ) -> HashMap::CodeCommitment>; /// Creates RPC methods for the rollup. fn create_rpc_methods( @@ -110,21 +136,15 @@ pub trait RollupBlueprint: Sized + Send + Sync { task_manager: &mut TaskManager<()>, ) -> Result, anyhow::Error>; - /// Creates instance of [`ProverService`]. - async fn create_batch_prover_service( - &self, - prover_config: BatchProverConfig, - rollup_config: &FullNodeConfig, - da_service: &Arc, - ledger_db: LedgerDB, - ) -> Self::ProverService; + /// Creates instance of [`BitcoinDaVerifier`] + fn create_da_verifier(&self) -> Self::DaVerifier; /// Creates instance of [`ProverService`]. - async fn create_light_client_prover_service( + async fn create_prover_service( &self, - prover_config: LightClientProverConfig, - rollup_config: &FullNodeConfig, + proving_mode: ProverGuestRunConfig, da_service: &Arc, + da_verifier: Self::DaVerifier, ledger_db: LedgerDB, ) -> Self::ProverService; diff --git a/crates/sovereign-sdk/rollup-interface/src/state_machine/zk/mod.rs b/crates/sovereign-sdk/rollup-interface/src/state_machine/zk/mod.rs index b6b71fc2f..39bd0de46 100644 --- a/crates/sovereign-sdk/rollup-interface/src/state_machine/zk/mod.rs +++ b/crates/sovereign-sdk/rollup-interface/src/state_machine/zk/mod.rs @@ -43,7 +43,7 @@ pub trait ZkvmHost: Zkvm + Clone { /// This runs the guest binary compiled for the zkVM target, optionally /// creating a SNARK of correct execution. Running the true guest binary comes /// with some mild performance overhead and is not as easy to debug as [`simulate_with_hints`](ZkvmHost::simulate_with_hints). - fn run(&mut self, with_proof: bool) -> Result; + fn run(&mut self, elf: Vec, with_proof: bool) -> Result; /// Extracts public input and receipt from the proof. 
fn extract_output(proof: &Proof) -> Result; diff --git a/guests/risc0/Makefile b/guests/risc0/Makefile new file mode 100644 index 000000000..8546a99c5 --- /dev/null +++ b/guests/risc0/Makefile @@ -0,0 +1,12 @@ +.PHONY: all +all: batch-proof-bitcoin light-client-bitcoin + +.PHONY: batch-proof-bitcoin +batch-proof-bitcoin: + cd ../../ && \ + cargo risczero build --manifest-path guests/risc0/batch-proof-bitcoin/Cargo.toml + +.PHONY: light-client-bitcoin +light-client-bitcoin: + cd ../../ && \ + cargo risczero build --manifest-path guests/risc0/light-client-proof-bitcoin/Cargo.toml diff --git a/guests/sp1/Makefile b/guests/sp1/Makefile new file mode 100644 index 000000000..b121ed572 --- /dev/null +++ b/guests/sp1/Makefile @@ -0,0 +1,3 @@ +.PHONY: all +all: + echo "Skipping SP1 build" diff --git a/bin/citrea/build.rs b/guests/sp1/build.rs similarity index 100% rename from bin/citrea/build.rs rename to guests/sp1/build.rs diff --git a/resources/guests/risc0/mainnet/.gitkeep b/resources/guests/risc0/mainnet/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/resources/guests/risc0/mainnet/batch-0.elf b/resources/guests/risc0/mainnet/batch-0.elf new file mode 100755 index 000000000..998baeac3 Binary files /dev/null and b/resources/guests/risc0/mainnet/batch-0.elf differ diff --git a/resources/guests/risc0/mainnet/light-0.elf b/resources/guests/risc0/mainnet/light-0.elf new file mode 100755 index 000000000..998baeac3 Binary files /dev/null and b/resources/guests/risc0/mainnet/light-0.elf differ diff --git a/resources/guests/risc0/testnet/.gitkeep b/resources/guests/risc0/testnet/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/resources/guests/risc0/testnet/batch-0.elf b/resources/guests/risc0/testnet/batch-0.elf new file mode 100755 index 000000000..19724e549 Binary files /dev/null and b/resources/guests/risc0/testnet/batch-0.elf differ diff --git a/resources/guests/risc0/testnet/light-0.elf b/resources/guests/risc0/testnet/light-0.elf new file mode 100755 index 000000000..19724e549 Binary files /dev/null and b/resources/guests/risc0/testnet/light-0.elf differ diff --git a/resources/guests/sp1/mainnet/.gitkeep b/resources/guests/sp1/mainnet/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/resources/guests/sp1/testnet/.gitkeep b/resources/guests/sp1/testnet/.gitkeep new file mode 100644 index 000000000..e69de29bb
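Note on the per-fork guest selection this patch introduces: the blueprint now exposes `get_batch_proof_elfs()` / `get_light_client_elfs()` and the matching code-commitment maps keyed by `SpecId`, and the provers pick the guest from the last L2 height covered by the proof via `fork_from_block_number(FORKS, last_l2_height).spec_id`. The sketch below illustrates that lookup pattern in isolation; it is a simplified, self-contained example, not code from this PR. `Spec`, `Fork1`, `fork_for_height`, the activation heights, and the dummy ELF bytes are stand-ins for `sov_rollup_interface::spec::SpecId`, `citrea_primitives::forks::FORKS`, `fork_from_block_number`, and the real embedded guests.

```rust
use std::collections::HashMap;

/// Stand-in for `SpecId`: each fork activates at a known L2 height.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
enum Spec {
    Genesis,
    Fork1, // hypothetical later fork, for illustration only
}

/// Stand-in fork table: (activation height, spec), sorted by activation height.
const FORKS: &[(u64, Spec)] = &[(0, Spec::Genesis), (1_000_000, Spec::Fork1)];

/// Mirrors the idea of `fork_from_block_number`: the active spec is the last
/// fork whose activation height is <= the given block number.
fn fork_for_height(height: u64) -> Spec {
    FORKS
        .iter()
        .rev()
        .find(|(activation, _)| *activation <= height)
        .map(|(_, spec)| *spec)
        .expect("fork table must cover height 0")
}

fn main() {
    // Guest ELFs keyed by spec, as returned by `get_batch_proof_elfs()`;
    // dummy bytes stand in for the embedded guest binaries.
    let elfs_by_spec: HashMap<Spec, Vec<u8>> = HashMap::from([
        (Spec::Genesis, vec![0u8; 4]),
        (Spec::Fork1, vec![1u8; 4]),
    ]);

    // The prover selects the ELF from the last L2 height covered by the
    // sequencer commitments, matching the lookup added in `prove_l1` and the
    // light client block handler.
    let last_l2_height = 1_200_000u64;
    let spec = fork_for_height(last_l2_height);
    let elf = elfs_by_spec
        .get(&spec)
        .expect("every fork should have an elf attached");

    println!("height {last_l2_height} -> {spec:?}, elf of {} bytes", elf.len());
}
```

In the patch itself, the image ID for each embedded production ELF is derived at startup by `risc0_binfmt::compute_image_id` inside the `guest!` macro, while the latest mock/Bitcoin guests reuse the IDs exported by `citrea_risc0`, so the ELF maps and code-commitment maps stay consistent with each other.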