diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5146421..a2fe24c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -16,18 +16,22 @@ jobs: - name: Install toolchain uses: actions-rs/toolchain@v1 with: - toolchain: stable + toolchain: 1.77.2 profile: minimal - + - uses: Swatinem/rust-cache@v1 with: cache-on-failure: true - name: Install solc - run: (hash svm 2>/dev/null || cargo install svm-rs) && svm install 0.8.21 && solc --version + run: (hash svm 2>/dev/null || cargo install svm-rs) && svm install 0.8.20 && svm use 0.8.20 && solc --version - - name: Run test + - name: Run test (mv lookups) run: cargo test --workspace --all-features --all-targets -- --nocapture + - name: Run test (non-mv lookups) + run: cargo test --workspace --all-targets -- --nocapture + - name: Run separate example + run: cargo run --package halo2_solidity_verifier --example separately --all-features -- --nocapture lint: name: Lint diff --git a/Cargo.toml b/Cargo.toml index affc3f5..d80bed2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,10 +4,10 @@ version = "0.1.0" edition = "2021" [dependencies] -halo2_proofs = { git = "https://github.com/privacy-scaling-explorations/halo2", tag = "v2023_04_20" } +halo2_proofs = { git = "https://github.com/zkonduit/halo2?branch=ac/cache-lookup-commitments#8b13a0d2a7a34d8daab010dadb2c47dfa47d37d0", package = "halo2_proofs", branch = "ac/cache-lookup-commitments" } askama = { version = "0.12.0", features = ["config"], default-features = false } hex = "0.4.3" -ruint = "1" +ruint = "1.8.0" sha3 = "0.10" itertools = "0.11.0" @@ -15,17 +15,22 @@ itertools = "0.11.0" blake2b_simd = "1" # For feature = "evm" -revm = { version = "3.3.0", default-features = false, optional = true } +revm = { version = "14.0.1", default-features = false, optional = true } [dev-dependencies] rand = "0.8.5" -revm = { version = "3.3.0", default-features = false } -halo2_maingate = { git = 
"https://github.com/privacy-scaling-explorations/halo2wrong", tag = "v2023_04_20", package = "maingate" } +revm = { version = "14.0.1", default-features = false } +halo2_maingate = { git = "https://github.com/zkonduit/halo2wrong", branch = "ac/chunked-mv-lookup", package = "maingate" } + [features] default = [] evm = ["dep:revm"] +mv-lookup = ["halo2_proofs/mv-lookup", "halo2_maingate/mv-lookup"] [[example]] name = "separately" required-features = ["evm"] + +[patch.'https://github.com/zkonduit/halo2'] +halo2_proofs = { git = "https://github.com/zkonduit/halo2?branch=ac/cache-lookup-commitments#8b13a0d2a7a34d8daab010dadb2c47dfa47d37d0", package = "halo2_proofs", branch = "ac/cache-lookup-commitments" } diff --git a/README.md b/README.md index 7cae107..d7c70e5 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@ For audited solidity verifier generator and proof aggregation toolkits, please r ## Usage -### Generate verifier and verifying key separately as 2 solidity contracts +### Generate fully reusable verifier and verifying artifact separately as 2 solidity contracts ```rust let generator = SolidityGenerator::new(¶ms, &vk, Bdfg21, num_instances); @@ -42,7 +42,7 @@ Note that function selector is already included. The [`Keccak256Transcript`](./src/transcript.rs#L19) behaves exactly same as the `EvmTranscript` in `snark-verifier`. -## Design Rationale +## Design Rationale for Conjoined Verifier The current solidity verifier generator within `snark-verifier` faces a couple of issues: @@ -51,6 +51,21 @@ The current solidity verifier generator within `snark-verifier` faces a couple o This repository is a ground-up rebuild, addressing these concerns while maintaining a focus on code size and readability. Remarkably, the gas cost is comparable, if not slightly lower, than the one generated by `snark-verifier`. 
+## Design Rationale for Reusable Verifier + +The previous `render_separately` solidity verifier, although granted some degree of reusability, was still dependent on a given circuit's configuration despite being independent of the verifying key. We wanted to reengineer the separate verifier to be completely independent of the circuit configuration, allowing for a single verifier to be used across multiple circuits. + +In the process we created two new types of contracts -- `Halo2VerifierReusable` and `Halo2VerifierArtifact` -- that replaced the previous `Halo2Verifier` and `Halo2VerifierKey` contracts generated by the `render_separately` compilation respectively. + +The `Halo2VerifierArtifact` extends the original `Halo2VerifierKey` by encoding all of the circuit configuration data that was hardcoded in the original separate `Halo2Verifier` into memory. The `Halo2VerifierReusable` then loads this configuration data dynamically from the `Halo2VerifierArtifact` at runtime, decodes it and executes the verification computation in a functionally identical manner to the conjoined version. + +For large circuits, this reduces deployment costs by 77 percent, enabling the deployment of circuits that were previously infeasible due to the contract size limit, requiring an aggregation to get below the limit. + ## Acknowledgement The template is heavily inspired by Aztec's [`BaseUltraVerifier.sol`](https://github.com/AztecProtocol/barretenberg/blob/4c456a2b196282160fd69bead6a1cea85289af37/sol/src/ultra/BaseUltraVerifier.sol). + + +## Lookup Modularity + +Note that we have extended the verifier to include the ability to verify mvlookup / logup lookups. This is hidden behind the `mv-lookup` feature flag. 
\ No newline at end of file diff --git a/examples/separately.rs b/examples/separately.rs index b2e58c5..0e5607e 100644 --- a/examples/separately.rs +++ b/examples/separately.rs @@ -1,6 +1,7 @@ use application::StandardPlonk; use prelude::*; +use halo2_proofs::poly::commitment::Params; use halo2_solidity_verifier::{ compile_solidity, encode_calldata, BatchOpenScheme::Bdfg21, Evm, Keccak256Transcript, SolidityGenerator, @@ -16,14 +17,14 @@ fn main() { let vk = keygen_vk(¶ms[&K_RANGE.start], &StandardPlonk::default()).unwrap(); let generator = SolidityGenerator::new(¶ms[&K_RANGE.start], &vk, Bdfg21, 0); let (verifier_solidity, _) = generator.render_separately().unwrap(); - save_solidity("Halo2Verifier.sol", &verifier_solidity); + save_solidity("Halo2VerifierReusable.sol", &verifier_solidity); let verifier_creation_code = compile_solidity(&verifier_solidity); let verifier_creation_code_size = verifier_creation_code.len(); println!("Verifier creation code size: {verifier_creation_code_size}"); let mut evm = Evm::default(); - let verifier_address = evm.create(verifier_creation_code); + let (verifier_address, _) = evm.create(verifier_creation_code); let deployed_verifier_solidity = verifier_solidity; @@ -35,12 +36,12 @@ fn main() { let pk = keygen_pk(¶ms[&k], vk, &circuit).unwrap(); let generator = SolidityGenerator::new(¶ms[&k], pk.get_vk(), Bdfg21, num_instances); let (verifier_solidity, vk_solidity) = generator.render_separately().unwrap(); - save_solidity(format!("Halo2VerifyingKey-{k}.sol"), &vk_solidity); + save_solidity(format!("Halo2VerifyingArtifact-{k}.sol"), &vk_solidity); assert_eq!(deployed_verifier_solidity, verifier_solidity); let vk_creation_code = compile_solidity(&vk_solidity); - let vk_address = evm.create(vk_creation_code); + let (vk_address, _) = evm.create(vk_creation_code); let calldata = { let instances = circuit.instances(); @@ -75,7 +76,7 @@ fn create_proof_checked( pk: &ProvingKey, circuit: impl Circuit, instances: &[Fr], - mut rng: impl 
RngCore, + mut rng: impl RngCore + Send + Sync, ) -> Vec { use halo2_proofs::{ poly::kzg::{ @@ -107,6 +108,7 @@ fn create_proof_checked( SingleStrategy::new(params), &[&[instances]], &mut transcript, + params.n(), ) }; assert!(result.is_ok()); diff --git a/rust-toolchain b/rust-toolchain new file mode 100644 index 0000000..6ae3510 --- /dev/null +++ b/rust-toolchain @@ -0,0 +1,2 @@ +[toolchain] +channel = "nightly-2024-07-18" diff --git a/src/codegen.rs b/src/codegen.rs index d950c27..ece173e 100644 --- a/src/codegen.rs +++ b/src/codegen.rs @@ -1,11 +1,16 @@ use crate::codegen::{ - evaluator::Evaluator, + evaluator::{EvaluatorDynamic, EvaluatorStatic}, pcs::{ - bdfg21_computations, queries, rotation_sets, + bdfg21_computations_dynamic, bdfg21_computations_static, queries, rotation_sets, BatchOpenScheme::{Bdfg21, Gwc19}, }, - template::{Halo2Verifier, Halo2VerifyingKey}, - util::{fr_to_u256, g1_to_u256s, g2_to_u256s, ConstraintSystemMeta, Data, Ptr}, + template::{ + Halo2Verifier, Halo2VerifierReusable, Halo2VerifyingArtifact, Halo2VerifyingKey, + VerifyingCache, + }, + util::{ + expression_consts, fr_to_u256, g1_to_u256s, g2_to_u256s, ConstraintSystemMeta, Data, Ptr, + }, }; use halo2_proofs::{ halo2curves::{bn256, ff::Field}, @@ -14,7 +19,10 @@ use halo2_proofs::{ }; use itertools::{chain, Itertools}; use ruint::aliases::U256; -use std::fmt::{self, Debug}; +use std::{ + collections::HashMap, + fmt::{self, Debug}, +}; mod evaluator; mod pcs; @@ -23,6 +31,20 @@ pub(crate) mod util; pub use pcs::BatchOpenScheme; +// Maximum capacity of 10 words allocated for the num_advices_user_challenges encoded data. 
+const NUM_ADVICES_USER_CHALLENGES_LABELS: [&str; 10] = [ + "num_advices_user_challenges_0", + "num_advices_user_challenges_1", + "num_advices_user_challenges_2", + "num_advices_user_challenges_3", + "num_advices_user_challenges_4", + "num_advices_user_challenges_5", + "num_advices_user_challenges_6", + "num_advices_user_challenges_7", + "num_advices_user_challenges_8", + "num_advices_user_challenges_9", +]; + /// Solidity verifier generator for [`halo2`] proof with KZG polynomial commitment scheme on BN254. #[derive(Debug)] pub struct SolidityGenerator<'a> { @@ -134,7 +156,7 @@ impl<'a> SolidityGenerator<'a> { impl<'a> SolidityGenerator<'a> { /// Render `Halo2Verifier.sol` with verifying key embedded into writer. pub fn render_into(&self, verifier_writer: &mut impl fmt::Write) -> Result<(), fmt::Error> { - self.generate_verifier(false).render(verifier_writer) + self.generate_verifier().render(verifier_writer) } /// Render `Halo2Verifier.sol` with verifying key embedded and return it as `String`. @@ -144,18 +166,18 @@ impl<'a> SolidityGenerator<'a> { Ok(verifier_output) } - /// Render `Halo2Verifier.sol` and `Halo2VerifyingKey.sol` into writers. + /// Render `Halo2VerifierReusable.sol` and `Halo2VerifyingArtifact.sol` into writers. pub fn render_separately_into( &self, verifier_writer: &mut impl fmt::Write, vk_writer: &mut impl fmt::Write, ) -> Result<(), fmt::Error> { - self.generate_verifier(true).render(verifier_writer)?; - self.generate_vk().render(vk_writer)?; + self.generate_separate_verifier().render(verifier_writer)?; + self.generate_verifying_artifact().render(vk_writer)?; Ok(()) } - /// Render `Halo2Verifier.sol` and `Halo2VerifyingKey.sol` and return them as `String`. + /// Render `Halo2VerifierReusable.sol` and `Halo2VerifyingArtifact.sol` and return them as `String`. 
pub fn render_separately(&self) -> Result<(String, String), fmt::Error> { let mut verifier_output = String::new(); let mut vk_output = String::new(); @@ -163,88 +185,443 @@ impl<'a> SolidityGenerator<'a> { Ok((verifier_output, vk_output)) } - fn generate_vk(&self) -> Halo2VerifyingKey { - let constants = { - let domain = self.vk.get_domain(); - let vk_digest = fr_to_u256(vk_transcript_repr(self.vk)); - let num_instances = U256::from(self.num_instances); - let k = U256::from(domain.k()); - let n_inv = fr_to_u256(bn256::Fr::from(1 << domain.k()).invert().unwrap()); - let omega = fr_to_u256(domain.get_omega()); - let omega_inv = fr_to_u256(domain.get_omega_inv()); - let omega_inv_to_l = { - let l = self.meta.rotation_last.unsigned_abs() as u64; - fr_to_u256(domain.get_omega_inv().pow_vartime([l])) - }; - let has_accumulator = U256::from(self.acc_encoding.is_some() as usize); - let acc_offset = self - .acc_encoding - .map(|acc_encoding| U256::from(acc_encoding.offset)) - .unwrap_or_default(); - let num_acc_limbs = self - .acc_encoding - .map(|acc_encoding| U256::from(acc_encoding.num_limbs)) - .unwrap_or_default(); - let num_acc_limb_bits = self - .acc_encoding - .map(|acc_encoding| U256::from(acc_encoding.num_limb_bits)) - .unwrap_or_default(); - let g1 = self.params.get_g()[0]; - let g1 = g1_to_u256s(g1); - let g2 = g2_to_u256s(self.params.g2()); - let neg_s_g2 = g2_to_u256s(-self.params.s_g2()); - vec![ - ("vk_digest", vk_digest), - ("num_instances", num_instances), - ("k", k), - ("n_inv", n_inv), - ("omega", omega), - ("omega_inv", omega_inv), - ("omega_inv_to_l", omega_inv_to_l), - ("has_accumulator", has_accumulator), - ("acc_offset", acc_offset), - ("num_acc_limbs", num_acc_limbs), - ("num_acc_limb_bits", num_acc_limb_bits), - ("g1_x", g1[0]), - ("g1_y", g1[1]), - ("g2_x_1", g2[0]), - ("g2_x_2", g2[1]), - ("g2_y_1", g2[2]), - ("g2_y_2", g2[3]), - ("neg_s_g2_x_1", neg_s_g2[0]), - ("neg_s_g2_x_2", neg_s_g2[1]), - ("neg_s_g2_y_1", neg_s_g2[2]), - ("neg_s_g2_y_2", 
neg_s_g2[3]), - ] + fn dummy_vka_constants(&self) -> Vec<(&'static str, U256)> { + // Number of words the num_advices_user_challenges will take up. + let num_advices_len = self.meta.num_advices().len(); + let slots = 10; + let num_advices_user_challenges_capacity = + num_advices_len / slots + if num_advices_len % slots == 0 { 0 } else { 1 }; + + // assert that the predefined_labels length are less than the num_advices_user_challenges_capacity + assert!( + NUM_ADVICES_USER_CHALLENGES_LABELS.len() >= num_advices_user_challenges_capacity, + "predefined_labels length of 10 must be less than or equal to num_advices_user_challenges_capacity" + ); + + let mut constants = vec![ + ("vk_digest", U256::from(0)), + ("vk_mptr", U256::from(0)), + ("vk_len", U256::from(0)), + ("instance_cptr", U256::from(0)), + ("num_instances", U256::from(0)), + ("num_evals", U256::from(0)), + ("challenges_offset", U256::from(0)), + ("fsm_challenges", U256::from(0)), // The fsm position that we can move the num_advices_user_challenges to. 
+ ("k", U256::from(0)), + ("n_inv", U256::from(0)), + ("omega", U256::from(0)), + ("omega_inv", U256::from(0)), + ("omega_inv_to_l", U256::from(0)), + ("has_accumulator", U256::from(0)), + ("acc_offset", U256::from(0)), + ("num_acc_limbs", U256::from(0)), + ("num_acc_limb_bits", U256::from(0)), + ("g1_x", U256::from(0)), + ("g1_y", U256::from(0)), + ("g2_x_1", U256::from(0)), + ("g2_x_2", U256::from(0)), + ("g2_y_1", U256::from(0)), + ("g2_y_2", U256::from(0)), + ("neg_s_g2_x_1", U256::from(0)), + ("neg_s_g2_x_2", U256::from(0)), + ("neg_s_g2_y_1", U256::from(0)), + ("neg_s_g2_y_2", U256::from(0)), + ("last_quotient_x_cptr", U256::from(0)), + ("first_quotient_x_cptr", U256::from(0)), + ("gate_computations_len_offset", U256::from(0)), + ("permutation_computations_len_offset", U256::from(0)), + ("lookup_computations_len_offset", U256::from(0)), + ("pcs_computations_len_offset", U256::from(0)), + ("num_neg_lagranges", U256::from(0)), + ]; + // Find the index of "fsm_challenges" and insert after it. + let fsm_challenges_index = constants + .iter() + .position(|(label, _)| *label == "fsm_challenges") + .unwrap(); + + // Create a vector of tuples with the num_advices_user_challenges elements. + let advices_entries: Vec<(&str, U256)> = (0..num_advices_user_challenges_capacity) + .map(|i| (NUM_ADVICES_USER_CHALLENGES_LABELS[i], U256::from(0))) + .collect(); + + // Insert the num_advices_user_challenges after the "fsm_challenges". 
+ constants.splice( + fsm_challenges_index + 1..fsm_challenges_index + 1, + advices_entries, + ); + + constants + } + + fn dummy_vk_constants() -> Vec<(&'static str, U256)> { + vec![ + ("vk_digest", U256::from(0)), + ("num_instances", U256::from(0)), + ("k", U256::from(0)), + ("n_inv", U256::from(0)), + ("omega", U256::from(0)), + ("omega_inv", U256::from(0)), + ("omega_inv_to_l", U256::from(0)), + ("has_accumulator", U256::from(0)), + ("acc_offset", U256::from(0)), + ("num_acc_limbs", U256::from(0)), + ("num_acc_limb_bits", U256::from(0)), + ("g1_x", U256::from(0)), + ("g1_y", U256::from(0)), + ("g2_x_1", U256::from(0)), + ("g2_x_2", U256::from(0)), + ("g2_y_1", U256::from(0)), + ("g2_y_2", U256::from(0)), + ("neg_s_g2_x_1", U256::from(0)), + ("neg_s_g2_x_2", U256::from(0)), + ("neg_s_g2_y_1", U256::from(0)), + ("neg_s_g2_y_2", U256::from(0)), + ] + } + + fn generate_vk(&self, reusable: bool) -> Halo2VerifyingKey { + // Get the dummy constants using the new function + let mut constants = if reusable { + self.dummy_vka_constants() + } else { + Self::dummy_vk_constants() + }; + + // Fill in the actual values where applicable + let domain = self.vk.get_domain(); + let vk_digest = fr_to_u256(vk_transcript_repr(self.vk)); + let num_instances = U256::from(self.num_instances); + let k = U256::from(domain.k()); + let n_inv = fr_to_u256(bn256::Fr::from(1 << domain.k()).invert().unwrap()); + let omega = fr_to_u256(domain.get_omega()); + let omega_inv = fr_to_u256(domain.get_omega_inv()); + let omega_inv_to_l = { + let l = self.meta.rotation_last.unsigned_abs() as u64; + fr_to_u256(domain.get_omega_inv().pow_vartime([l])) }; - let fixed_comms = chain![self.vk.fixed_commitments()] + let has_accumulator = U256::from(self.acc_encoding.is_some() as usize); + let acc_offset = self + .acc_encoding + .map(|acc_encoding| U256::from(acc_encoding.offset)) + .unwrap_or_default(); + let num_acc_limbs = self + .acc_encoding + .map(|acc_encoding| U256::from(acc_encoding.num_limbs)) + 
.unwrap_or_default(); + let num_acc_limb_bits = self + .acc_encoding + .map(|acc_encoding| U256::from(acc_encoding.num_limb_bits)) + .unwrap_or_default(); + let g1 = self.params.get_g()[0]; + let g1 = g1_to_u256s(g1); + let g2 = g2_to_u256s(self.params.g2()); + let neg_s_g2 = g2_to_u256s(-self.params.s_g2()); + + constants = constants + .into_iter() + .map(|(name, dummy_val)| { + let value = match name { + "vk_digest" => vk_digest, + "num_instances" => num_instances, + "k" => k, + "n_inv" => n_inv, + "omega" => omega, + "omega_inv" => omega_inv, + "omega_inv_to_l" => omega_inv_to_l, + "has_accumulator" => has_accumulator, + "acc_offset" => acc_offset, + "num_acc_limbs" => num_acc_limbs, + "num_acc_limb_bits" => num_acc_limb_bits, + "g1_x" => g1[0], + "g1_y" => g1[1], + "g2_x_1" => g2[0], + "g2_x_2" => g2[1], + "g2_y_1" => g2[2], + "g2_y_2" => g2[3], + "neg_s_g2_x_1" => neg_s_g2[0], + "neg_s_g2_x_2" => neg_s_g2[1], + "neg_s_g2_y_1" => neg_s_g2[2], + "neg_s_g2_y_2" => neg_s_g2[3], + "challenges_offset" => U256::from(self.meta.challenge_indices.len() * 32), + "num_evals" => U256::from(self.meta.num_evals), + "num_neg_lagranges" => { + U256::from(self.meta.rotation_last.unsigned_abs() as usize) + } + _ => dummy_val, + }; + (name, value) + }) + .collect(); + + let fixed_comms: Vec<(U256, U256)> = chain![self.vk.fixed_commitments()] .flat_map(g1_to_u256s) .tuples() .collect(); - let permutation_comms = chain![self.vk.permutation().commitments()] + let permutation_comms: Vec<(U256, U256)> = chain![self.vk.permutation().commitments()] .flat_map(g1_to_u256s) .tuples() .collect(); + Halo2VerifyingKey { - constants, - fixed_comms, - permutation_comms, + constants: constants.clone(), + fixed_comms: fixed_comms.clone(), + permutation_comms: permutation_comms.clone(), } } - fn generate_verifier(&self, separate: bool) -> Halo2Verifier { - let proof_cptr = Ptr::calldata(if separate { 0x84 } else { 0x64 }); + fn generate_verifying_artifact(&self) -> Halo2VerifyingArtifact { + let 
mut dummy_vk = self.generate_vk(true); + + fn set_constant_value(constants: &mut [(&str, U256)], name: &str, value: U256) { + if let Some((_, val)) = constants.iter_mut().find(|(n, _)| *n == name) { + *val = value; + } + } + + let const_expressions = expression_consts(self.vk.cs()) + .into_iter() + .map(fr_to_u256) + .collect::>(); + + let vk_mptr_mock = self.estimate_static_working_memory_size( + &VerifyingCache::Key(&dummy_vk), + Ptr::calldata(0x84), + ); + + let dummy_data = Data::new( + &self.meta, + &VerifyingCache::Key(&dummy_vk), + Ptr::memory(vk_mptr_mock), + Ptr::calldata(0x84), + ); + + let mut vk_lookup_const_table_dummy: HashMap, Ptr> = HashMap::new(); + + let offset = vk_mptr_mock + + (dummy_vk.constants.len() * 0x20) + + (dummy_vk.fixed_comms.len() + dummy_vk.permutation_comms.len()) * 0x40; + + // keys to the map are the values of vk.const_expressions and values are the memory location of the vk.const_expressions. + const_expressions.iter().enumerate().for_each(|(idx, _)| { + let mptr = offset + (0x20 * idx); + let mptr = Ptr::memory(mptr); + vk_lookup_const_table_dummy.insert(const_expressions[idx], mptr); + }); + + let evaluator_dummy = EvaluatorDynamic::new( + self.vk.cs(), + &self.meta, + &dummy_data, + vk_lookup_const_table_dummy, + ); + + // Fill in the quotient eval computations with dummy values. (maintains the correct shape) + let gate_computations_dummy = evaluator_dummy.gate_computations(); + let permutation_computations_dummy = evaluator_dummy.permutation_computations(); + let lookup_computations_dummy = evaluator_dummy.lookup_computations(0); + // Same for the pcs computations + let pcs_computations_dummy = match self.scheme { + Bdfg21 => bdfg21_computations_dynamic(&self.meta, &dummy_data), + Gwc19 => unimplemented!(), + }; + + let num_advices = self.meta.num_advices(); + let num_user_challenges = self.meta.num_challenges(); + // truncate the last elements of num_user_challenges to match the length of num_advices. 
+ let num_user_challenges = num_user_challenges + .iter() + .take(num_advices.len()) + .copied() + .collect::>(); + + let mut fsm_challenges = 0x20 * (1 + self.num_instances); // initialize fsm_challenges with the space that loading the instances into memory will take up. + let mut max_advices_value = 0; // Initialize variable to track the maximum value of *num_advices * 0x40 + + let num_advices_user_challenges: Vec = { + let mut packed_words: Vec = vec![U256::from(0)]; + let mut bit_counter = 8; + let mut last_idx = 0; + for (num_advices, num_user_challenges) in + num_advices.iter().zip(num_user_challenges.iter()) + { + let offset = 24; + let next_bit_counter = bit_counter + offset; + if next_bit_counter > 256 { + last_idx += 1; + packed_words.push(U256::from(0)); + bit_counter = 0; + } + + let advices_value = *num_advices * 0x40; - let vk = self.generate_vk(); - let vk_len = vk.len(); - let vk_mptr = Ptr::memory(self.estimate_static_working_memory_size(&vk, proof_cptr)); - let data = Data::new(&self.meta, &vk, vk_mptr, proof_cptr); + // Ensure that the packed num_advices and num_user_challenges data doesn't overflow. 
+ assert!( + advices_value < 0x10000, + "num_advices * 0x40 must be less than 0x10000" + ); + assert!( + *num_user_challenges < 0x100, + "num_user_challenges must be less than 0x100" + ); - let evaluator = Evaluator::new(self.vk.cs(), &self.meta, &data); - let quotient_eval_numer_computations = chain![ + // Track the maximum value of *num_advices * 0x40 + if advices_value > max_advices_value { + max_advices_value = advices_value; + } + + packed_words[last_idx] |= U256::from(advices_value) << bit_counter; + bit_counter += 16; + packed_words[last_idx] |= U256::from(*num_user_challenges) << bit_counter; + bit_counter += 8; + } + let packed_words_len = packed_words.len(); + // Encode the length of the exprs vec in the first word + packed_words[0] |= U256::from(packed_words_len); + packed_words + }; + + fsm_challenges += max_advices_value; + + // Iterate through the `num_advices_user_challenges` and update corresponding values in `constants` + for (i, value) in num_advices_user_challenges.iter().enumerate() { + if i >= NUM_ADVICES_USER_CHALLENGES_LABELS.len() { + panic!("word capacity for num_advices_user_challenges encoded vka data must be less than or equal to 10") + } + set_constant_value( + &mut dummy_vk.constants, + NUM_ADVICES_USER_CHALLENGES_LABELS[i], + *value, + ); + } + + // Update constants + let first_quotient_x_cptr = dummy_data.quotient_comm_cptr; + let last_quotient_x_cptr = first_quotient_x_cptr + 2 * (self.meta.num_quotients - 1); + let instance_cptr = U256::from(self.meta.proof_len(self.scheme) + 0xa4); + let gate_computations_len_offset = dummy_vk.len(true) + (const_expressions.len() * 0x20); + let permutations_computations_len_offset = + gate_computations_len_offset + (0x20 * gate_computations_dummy.len()); + let lookup_computations_len_offset = + permutations_computations_len_offset + (0x20 * permutation_computations_dummy.len()); + let pcs_computations_len_offset = + lookup_computations_len_offset + (0x20 * lookup_computations_dummy.len()); + + 
set_constant_value( + &mut dummy_vk.constants, + "fsm_challenges", + U256::from(fsm_challenges), + ); + + set_constant_value(&mut dummy_vk.constants, "instance_cptr", instance_cptr); + set_constant_value( + &mut dummy_vk.constants, + "first_quotient_x_cptr", + U256::from(first_quotient_x_cptr.value().as_usize()), + ); + set_constant_value( + &mut dummy_vk.constants, + "last_quotient_x_cptr", + U256::from(last_quotient_x_cptr.value().as_usize()), + ); + set_constant_value( + &mut dummy_vk.constants, + "gate_computations_len_offset", + U256::from(gate_computations_len_offset), + ); + set_constant_value( + &mut dummy_vk.constants, + "permutation_computations_len_offset", + U256::from(permutations_computations_len_offset), + ); + set_constant_value( + &mut dummy_vk.constants, + "lookup_computations_len_offset", + U256::from(lookup_computations_len_offset), + ); + set_constant_value( + &mut dummy_vk.constants, + "pcs_computations_len_offset", + U256::from(pcs_computations_len_offset), + ); + + // Recreate the vk with the correct shape + let mut vk = Halo2VerifyingArtifact { + constants: dummy_vk.constants, + fixed_comms: dummy_vk.fixed_comms, + permutation_comms: dummy_vk.permutation_comms, + const_expressions, + gate_computations: gate_computations_dummy, + permutation_computations: permutation_computations_dummy, + lookup_computations: lookup_computations_dummy, + pcs_computations: pcs_computations_dummy, + }; + + // Now generate the real vk_mptr with a vk that has the correct length + let vk_mptr = self.estimate_static_working_memory_size( + &VerifyingCache::Artifact(&vk), + Ptr::calldata(0x84), + ); + + // replace the mock vk_mptr with the real vk_mptr + set_constant_value(&mut vk.constants, "vk_mptr", U256::from(vk_mptr)); + // replace the mock vk_len with the real vk_len + let vk_len = vk.len(true); + set_constant_value(&mut vk.constants, "vk_len", U256::from(vk_len)); + + // Generate the real data. 
+ let data = Data::new( + &self.meta, + &VerifyingCache::Artifact(&vk), + Ptr::memory(vk_mptr), + Ptr::calldata(0x84), + ); + + // Regenerate the gate computations with the correct offsets. + let mut vk_lookup_const_table: HashMap, Ptr> = HashMap::new(); + + // create a hashmap of vk.const_expressions values to its vk memory location. + let offset = vk_mptr + + (vk.constants.len() * 0x20) + + (vk.fixed_comms.len() + vk.permutation_comms.len()) * 0x40; + + // keys to the map are the values of vk.const_expressions and values are the memory location of the vk.const_expressions. + vk.const_expressions + .iter() + .enumerate() + .for_each(|(idx, _)| { + let mptr = offset + (0x20 * idx); + let mptr = Ptr::memory(mptr); + vk_lookup_const_table.insert(vk.const_expressions[idx], mptr); + }); + + // Now we initalize the real evaluator_vk which will contain the correct offsets in the vk_lookup_const_table. + let evaluator = + EvaluatorDynamic::new(self.vk.cs(), &self.meta, &data, vk_lookup_const_table); + + // NOTE: We don't need to replace the gate_computations_total_length since we are only potentially modifying the offsets for each constant mload operation. + vk.gate_computations = evaluator.gate_computations(); + // We need to replace the lookup_computations so that the constant mptrs in the encoded input expessions have the correct offsets. 
+ vk.lookup_computations = + evaluator.lookup_computations(vk_mptr + lookup_computations_len_offset); + vk + } + + fn generate_verifier(&self) -> Halo2Verifier { + let proof_cptr = Ptr::calldata(0x64); + + let proof_len_cptr = Ptr::calldata(0x6014F51944); + + let vk = self.generate_vk(false); + let vk_m = self.estimate_static_working_memory_size(&VerifyingCache::Key(&vk), proof_cptr); + let vk_mptr = Ptr::memory(vk_m); + let data = Data::new(&self.meta, &VerifyingCache::Key(&vk), vk_mptr, proof_cptr); + + let evaluator = EvaluatorStatic::new(self.vk.cs(), &self.meta, &data); + let quotient_eval_numer_computations: Vec> = chain![ evaluator.gate_computations(), evaluator.permutation_computations(), - evaluator.lookup_computations() + evaluator.lookup_computations(), ] .enumerate() .map(|(idx, (mut lines, var))| { @@ -261,14 +638,13 @@ impl<'a> SolidityGenerator<'a> { .collect(); let pcs_computations = match self.scheme { - Bdfg21 => bdfg21_computations(&self.meta, &data), + Bdfg21 => bdfg21_computations_static(&self.meta, &data), Gwc19 => unimplemented!(), }; Halo2Verifier { scheme: self.scheme, - embedded_vk: (!separate).then_some(vk), - vk_len, + embedded_vk: vk, vk_mptr, num_neg_lagranges: self.meta.rotation_last.unsigned_abs() as usize, num_advices: self.meta.num_advices(), @@ -276,6 +652,7 @@ impl<'a> SolidityGenerator<'a> { num_evals: self.meta.num_evals, num_quotients: self.meta.num_quotients, proof_cptr, + proof_len_cptr, quotient_comm_cptr: data.quotient_comm_cptr, proof_len: self.meta.proof_len(self.scheme), challenge_mptr: data.challenge_mptr, @@ -285,15 +662,25 @@ impl<'a> SolidityGenerator<'a> { } } - fn estimate_static_working_memory_size( - &self, - vk: &Halo2VerifyingKey, - proof_cptr: Ptr, - ) -> usize { + fn generate_separate_verifier(&self) -> Halo2VerifierReusable { + let vk_const_offsets: HashMap<&'static str, U256> = self + .dummy_vka_constants() + .iter() + .enumerate() + .map(|(idx, &(key, _))| (key, U256::from(idx * 32))) + .collect(); + + 
Halo2VerifierReusable { + scheme: self.scheme, + vk_const_offsets, + } + } + + fn estimate_static_working_memory_size(&self, vk: &VerifyingCache, proof_cptr: Ptr) -> usize { + let mock_vk_mptr = Ptr::memory(0x100000); + let mock = Data::new(&self.meta, vk, mock_vk_mptr, proof_cptr); let pcs_computation = match self.scheme { Bdfg21 => { - let mock_vk_mptr = Ptr::memory(0x100000); - let mock = Data::new(&self.meta, vk, mock_vk_mptr, proof_cptr); let (superset, sets) = rotation_sets(&queries(&self.meta, &mock)); let num_coeffs = sets.iter().map(|set| set.rots().len()).sum::(); 2 * (1 + num_coeffs) + 6 + 2 * superset.len() + 1 + 3 * sets.len() @@ -301,21 +688,47 @@ impl<'a> SolidityGenerator<'a> { Gwc19 => unimplemented!(), }; - itertools::max([ + let fsm_usage = itertools::max([ // Keccak256 input (can overwrite vk) itertools::max(chain![ self.meta.num_advices().into_iter().map(|n| n * 2 + 1), [self.meta.num_evals + 1], ]) .unwrap() - .saturating_sub(vk.len() / 0x20), + .saturating_sub(vk.len(true) / 0x20), // PCS computation pcs_computation, // Pairing 12, ]) .unwrap() - * 0x20 + * 0x20; + // match statement for vk + match vk { + VerifyingCache::Artifact(_) => { + let mut vk_lookup_const_table_dummy: HashMap, Ptr> = + HashMap::new(); + let const_expressions = expression_consts(self.vk.cs()) + .into_iter() + .map(fr_to_u256) + .collect::>(); + const_expressions.iter().enumerate().for_each(|(idx, _)| { + let mptr = 0x20 * idx; + let mptr = Ptr::memory(mptr); + vk_lookup_const_table_dummy.insert(const_expressions[idx], mptr); + }); + let evaluator = EvaluatorDynamic::new( + self.vk.cs(), + &self.meta, + &mock, + vk_lookup_const_table_dummy, + ); + + let expression_eval_computations = evaluator.quotient_eval_fsm_usage(); + itertools::max([fsm_usage, expression_eval_computations]).unwrap() + } + _ => fsm_usage, + } } } diff --git a/src/codegen/evaluator.rs b/src/codegen/evaluator.rs index c65a654..1c3582c 100644 --- a/src/codegen/evaluator.rs +++ 
b/src/codegen/evaluator.rs @@ -10,10 +10,13 @@ use halo2_proofs::{ }; use itertools::{chain, izip, Itertools}; use ruint::aliases::U256; +use ruint::Uint; use std::{cell::RefCell, cmp::Ordering, collections::HashMap, iter}; +use super::util::{Ptr, Word}; + #[derive(Debug)] -pub(crate) struct Evaluator<'a, F: PrimeField> { +pub(crate) struct EvaluatorStatic<'a, F: PrimeField> { cs: &'a ConstraintSystem, meta: &'a ConstraintSystemMeta, data: &'a Data, @@ -21,9 +24,9 @@ pub(crate) struct Evaluator<'a, F: PrimeField> { var_cache: RefCell>, } -impl<'a, F> Evaluator<'a, F> +impl<'a, F> EvaluatorStatic<'a, F> where - F: PrimeField, + F: PrimeField>, { pub(crate) fn new( cs: &'a ConstraintSystem, @@ -55,20 +58,20 @@ where data.permutation_z_evals.first().map(|(z, _, _)| { vec![ format!("let l_0 := mload(L_0_MPTR)"), - format!("let eval := addmod(l_0, sub(r, mulmod(l_0, {z}, r)), r)"), + format!("let eval := addmod(l_0, sub(R, mulmod(l_0, {z}, R)), R)"), ] }), data.permutation_z_evals.last().map(|(z, _, _)| { - let item = "addmod(mulmod(perm_z_last, perm_z_last, r), sub(r, perm_z_last), r)"; + let item = "addmod(mulmod(perm_z_last, perm_z_last, R), sub(R, perm_z_last), R)"; vec![ format!("let perm_z_last := {z}"), - format!("let eval := mulmod(mload(L_LAST_MPTR), {item}, r)"), + format!("let eval := mulmod(mload(L_LAST_MPTR), {item}, R)"), ] }), data.permutation_z_evals.iter().tuple_windows().map( |((_, _, z_i_last), (z_j, _, _))| { - let item = format!("addmod({z_j}, sub(r, {z_i_last}), r)"); - vec![format!("let eval := mulmod(mload(L_0_MPTR), {item}, r)")] + let item = format!("addmod({z_j}, sub(R, {z_i_last}), R)"); + vec![format!("let eval := mulmod(mload(L_0_MPTR), {item}, R)")] } ), izip!( @@ -80,36 +83,36 @@ where let last_column_idx = columns.len() - 1; chain![ [ - format!("let gamma := mload(GAMMA_MPTR)"), - format!("let beta := mload(BETA_MPTR)"), + format!("let gamma := mload({})", "GAMMA_MPTR"), + format!("let beta := mload({})", "BETA_MPTR"), format!("let lhs 
:= {}", evals.1), format!("let rhs := {}", evals.0), ], columns.iter().flat_map(|column| { let perm_eval = &data.permutation_evals[column]; let eval = self.eval(*column.column_type(), column.index(), 0); - let item = format!("mulmod(beta, {perm_eval}, r)"); + let item = format!("mulmod(beta, {perm_eval}, R)"); [format!( - "lhs := mulmod(lhs, addmod(addmod({eval}, {item}, r), gamma, r), r)" + "lhs := mulmod(lhs, addmod(addmod({eval}, {item}, R), gamma, R), R)" )] }), (chunk_idx == 0) - .then(|| "mstore(0x00, mulmod(beta, mload(X_MPTR), r))".to_string()), + .then(|| format!("mstore(0x00, mulmod(beta, mload({}), R))", "X_MPTR")), columns.iter().enumerate().flat_map(|(idx, column)| { let eval = self.eval(*column.column_type(), column.index(), 0); - let item = format!("addmod(addmod({eval}, mload(0x00), r), gamma, r)"); + let item = format!("addmod(addmod({eval}, mload(0x00), R), gamma, R)"); chain![ - [format!("rhs := mulmod(rhs, {item}, r)")], + [format!("rhs := mulmod(rhs, {item}, R)")], (!(chunk_idx == last_chunk_idx && idx == last_column_idx)) - .then(|| "mstore(0x00, mulmod(mload(0x00), delta, r))".to_string()), + .then(|| "mstore(0x00, mulmod(mload(0x00), DELTA, R))".to_string()), ] }), { - let item = format!("addmod(mload(L_LAST_MPTR), mload(L_BLIND_MPTR), r)"); - let item = format!("sub(r, mulmod(left_sub_right, {item}, r))"); + let item = format!("addmod(mload(L_LAST_MPTR), mload(L_BLIND_MPTR), R)"); + let item = format!("sub(R, mulmod(left_sub_right, {item}, R))"); [ - format!("let left_sub_right := addmod(lhs, sub(r, rhs), r)"), - format!("let eval := addmod(left_sub_right, {item}, r)"), + format!("let left_sub_right := addmod(lhs, sub(R, rhs), R)"), + format!("let eval := addmod(left_sub_right, {item}, R)"), ] } ] @@ -120,6 +123,130 @@ where .collect() } + #[cfg(feature = "mv-lookup")] + pub fn lookup_computations(&self) -> Vec<(Vec, String)> { + let evaluate = |expressions: &Vec<_>| { + // println!("expressions: {:?}", expressions); + let (lines, inputs) = 
expressions + .iter() + .map(|expression| self.evaluate(expression)) + .fold((Vec::new(), Vec::new()), |mut acc, result| { + acc.0.extend(result.0); + acc.1.push(result.1); + acc + }); + self.reset(); + (lines, inputs) + }; + + let inputs_tables = self + .cs + .lookups() + .iter() + .map(|lookup| { + let inputs_iter = lookup.input_expressions().iter(); + let inputs = inputs_iter.clone().map(evaluate).collect_vec(); + let table = evaluate(lookup.table_expressions()); + (inputs, table) + }) + .collect_vec(); + + let vec = izip!(inputs_tables, &self.data.lookup_evals) + .flat_map(|(inputs_tables, evals)| { + let (inputs, (table_lines, tables)) = inputs_tables.clone(); + let num_inputs = inputs.len(); + let (table_0, rest_tables) = tables.split_first().unwrap(); + let (phi, phi_next, m) = evals; + [ + vec![ + format!("let l_0 := mload(L_0_MPTR)"), + format!("let eval := mulmod(l_0, {phi}, R)"), + ], + vec![ + format!("let l_last := mload(L_LAST_MPTR)"), + format!("let eval := mulmod(l_last, {phi}, R)"), + ], + chain![ + [ + format!("let theta := mload({})", "THETA_MPTR").as_str(), + format!("let beta := mload({})", "BETA_MPTR").as_str(), + "let table" + ] + .map(str::to_string), + code_block::<1, false>(chain![ + table_lines, + [format!("table := {table_0}")], + rest_tables.iter().map(|table| format!( + "table := addmod(mulmod(table, theta, R), {table}, R)" + )), + [format!("table := addmod(table, beta, R)")], + ]), + izip!(0.., inputs.into_iter()).flat_map(|(idx, (input_lines, inputs))| { + let (input_0, rest_inputs) = inputs.split_first().unwrap(); + let ident = format!("input_{idx}"); + chain![ + [format!("let {ident}")], + code_block::<1, false>(chain![ + input_lines, + [format!("{ident} := {input_0}")], + rest_inputs.iter().map(|input| format!( + "{ident} := addmod(mulmod({ident}, theta, R), {input}, R)" + )), + [format!("{ident} := addmod({ident}, beta, R)")], + ]), + ] + }), + [format!("let lhs"), format!("let rhs")], + (0..num_inputs).flat_map(|i| { + 
assert_ne!(num_inputs, 0); + if num_inputs == 1 { + vec![format!("rhs := table")] + } else { + let idents = (0..num_inputs) + .filter(|j| *j != i) + .map(|idx| format!("input_{idx}")) + .collect_vec(); + let (ident_0, rest_idents) = idents.split_first().unwrap(); + code_block::<1, false>(chain![ + [format!("let tmp := {ident_0}")], + chain![rest_idents] + .map(|ident| format!("tmp := mulmod(tmp, {ident}, R)")), + [format!("rhs := addmod(rhs, tmp, R)"),], + (i == num_inputs - 1) + .then(|| format!("rhs := mulmod(rhs, table, R)")), + ]) + } + }), + code_block::<1, false>(chain![ + [format!("let tmp := input_0")], + (1..num_inputs) + .map(|idx| format!("tmp := mulmod(tmp, input_{idx}, R)")), + [ + format!("rhs := addmod(rhs, sub(R, mulmod({m}, tmp, R)), R)"), + { + let item = format!("addmod({phi_next}, sub(R, {phi}), R)"); + format!("lhs := mulmod(mulmod(table, tmp, R), {item}, R)") + }, + ], + ]), + { + let l_inactive = + format!("addmod(mload(L_BLIND_MPTR), mload(L_LAST_MPTR), R)"); + let l_active = format!("addmod(1, sub(R, {l_inactive}), R)"); + [format!( + "let eval := mulmod({l_active}, addmod(lhs, sub(R, rhs), R), R)" + )] + }, + ] + .collect_vec(), + ] + }) + .zip(iter::repeat("eval".to_string())) + .collect_vec(); + vec + } + + #[cfg(not(feature = "mv-lookup"))] pub fn lookup_computations(&self) -> Vec<(Vec, String)> { let input_tables = self .cs @@ -142,7 +269,7 @@ where (input_lines, inputs, table_lines, tables) }) .collect_vec(); - izip!(input_tables, &self.data.lookup_evals) + let vec = izip!(input_tables, &self.data.lookup_evals) .flat_map(|(input_table, evals)| { let (input_lines, inputs, table_lines, tables) = input_table; let (input_0, rest_inputs) = inputs.split_first().unwrap(); @@ -216,7 +343,8 @@ where ] }) .zip(iter::repeat("eval".to_string())) - .collect_vec() + .collect_vec(); + vec } fn eval(&self, column_type: impl Into, column_index: usize, rotation: i32) -> String { @@ -265,25 +393,25 @@ where ) }, &|(mut acc, var)| { - let (lines, var) = 
self.init_var(format!("sub(r, {var})"), None); + let (lines, var) = self.init_var(format!("sub(R, {var})"), None); acc.extend(lines); (acc, var) }, &|(mut lhs_acc, lhs_var), (rhs_acc, rhs_var)| { - let (lines, var) = self.init_var(format!("addmod({lhs_var}, {rhs_var}, r)"), None); + let (lines, var) = self.init_var(format!("addmod({lhs_var}, {rhs_var}, R)"), None); lhs_acc.extend(rhs_acc); lhs_acc.extend(lines); (lhs_acc, var) }, &|(mut lhs_acc, lhs_var), (rhs_acc, rhs_var)| { - let (lines, var) = self.init_var(format!("mulmod({lhs_var}, {rhs_var}, r)"), None); + let (lines, var) = self.init_var(format!("mulmod({lhs_var}, {rhs_var}, R)"), None); lhs_acc.extend(rhs_acc); lhs_acc.extend(lines); (lhs_acc, var) }, &|(mut acc, var), scalar| { let scalar = u256_string(scalar); - let (lines, var) = self.init_var(format!("mulmod({var}, {scalar}, r)"), None); + let (lines, var) = self.init_var(format!("mulmod({var}, {scalar}, R)"), None); acc.extend(lines); (acc, var) }, @@ -310,6 +438,852 @@ where } } +// Define an enum which catagorizes the operand memory location: +// calldata_mptr +// constant_mptr +// instance_mptr +// chllenge_mptr +// static_memory_ptr +#[derive(Clone, PartialEq, Eq)] +pub enum OperandMem { + Calldata, + Constant, + Instance, + Challenge, + StaticMemory, +} + +// Holds the encoded data stored in the VK artifact +// needed to perform the gate computations of +// the quotient evaluation portion of the reusable verifier. +#[derive(Clone, PartialEq, Eq, Default)] +pub struct GateDataEncoded { + pub(crate) length: usize, + pub(crate) packed_expression_words: Vec, +} + +impl GateDataEncoded { + pub fn len(&self) -> usize { + if self == &Self::default() { + 0 + } else { + 1 + self.packed_expression_words.len() + } + } +} + +// Holds the encoded data stored in the VK artifact +// needed to perform the permutation computations of +// the quotient evaluation portion of the reusable verifier. 
+#[derive(Clone, PartialEq, Eq)] +pub struct PermutationDataEncoded { + pub(crate) permutation_meta_data: U256, + pub(crate) permutation_data: Vec, +} + +impl Default for PermutationDataEncoded { + fn default() -> Self { + PermutationDataEncoded { + permutation_meta_data: U256::from(0), + permutation_data: Vec::new(), + } + } +} + +impl PermutationDataEncoded { + pub fn len(&self) -> usize { + if self == &Self::default() { + 0 + } else { + 1 + self.permutation_data.len() + } + } +} + +#[derive(Clone, PartialEq, Eq)] +pub struct InputsEncoded { + pub(crate) expression: Vec, + pub(crate) acc: usize, +} + +// Holds the encoded data stored in the VK artifact +// needed to perform the lookup computations for one lookup table. +// In the case where non mv-lookups are used the inputs.len() == 1 +#[derive(Clone, PartialEq, Eq)] +pub struct LookupEncoded { + pub(crate) evals: U256, + pub(crate) table_lines: Vec, + pub(crate) acc: usize, + pub(crate) inputs: Vec, +} + +// For each element of the lookups vector we have a word for: +// 1) the evals (new_table: bool, (cptr, cptr, cptr)) +// 2) table_lines Vec +// 3) table_inputs Vec packed into a single (throws an error if table_inputs.len() > 16) +// these values represent the set of table_lines results that are used to compute the accumulator evaluation of the +// lookup table. +// 4) outer_inputs_len (inputs.0.len()) +// For each element of the inputs vector in LookupEncoded we have a word for: +// 1) inputs (inputs[i].expressions) +// 2) input_vars Vec packed into a single (throws an error if > 16) +// these values represent the set of input expression results that are used to compute the accumulator evaluation of the +// lookup inputs. +// Then we have a word for each step in the expression evaluation. This is the +// sum of the lengths of the inputs. 
+impl LookupEncoded { + pub fn len(&self) -> usize { + let base_len = 1; + base_len + + self + .inputs + .iter() + .map(|inputs| inputs.expression.len()) + .sum::() + + self.table_lines.len() + } +} + +// Holds the encoded data stored in the VK artifact +// needed to perform the lookup computations of all the lookup tables +// needed in the quotient evaluation portion of the reusable verifier. +#[derive(Clone, PartialEq, Eq)] +pub struct LookupsDataEncoded { + pub(crate) meta_data: U256, + pub(crate) lookups: Vec, +} + +impl Default for LookupsDataEncoded { + fn default() -> Self { + LookupsDataEncoded { + meta_data: U256::from(0), + lookups: Vec::new(), + } + } +} + +impl LookupsDataEncoded { + pub fn len(&self) -> usize { + if self == &Self::default() { + 1 + } else { + 1 + self.lookups.iter().map(LookupEncoded::len).sum::() + } + } +} +#[derive(Debug)] +pub(crate) struct EvaluatorDynamic<'a, F: PrimeField> { + cs: &'a ConstraintSystem, + meta: &'a ConstraintSystemMeta, + data: &'a Data, + static_mem_ptr: RefCell, + encoded_var_cache: RefCell>, + const_cache: RefCell, Ptr>>, +} + +impl<'a, F> EvaluatorDynamic<'a, F> +where + F: PrimeField>, +{ + pub(crate) fn new( + cs: &'a ConstraintSystem, + meta: &'a ConstraintSystemMeta, + data: &'a Data, + const_cache: HashMap, Ptr>, + ) -> Self { + Self { + cs, + meta, + data, + static_mem_ptr: RefCell::new(0x00), + encoded_var_cache: Default::default(), + const_cache: RefCell::new(const_cache), + } + } + + pub fn gate_computations(&self) -> GateDataEncoded { + let packed_expression_words: Vec> = self + .cs + .gates() + .iter() + .flat_map(Gate::polynomials) + .map(|expression| self.evaluate_and_reset(expression, true)) + .collect(); + let length = packed_expression_words.len(); + let packed_expression_words_flattened: Vec = + packed_expression_words.into_iter().flatten().collect(); + + GateDataEncoded { + length, + packed_expression_words: packed_expression_words_flattened, + } + } + + #[allow(dead_code)] + fn 
gate_computation_fsm_usage(&self) -> usize { + let packed_expression_words: Vec> = self + .cs + .gates() + .iter() + .flat_map(Gate::polynomials) + .map(|expression| self.evaluate_and_reset(expression, false)) + .collect(); + let gate_computation_longest = chain![packed_expression_words] + .max_by_key(|x| x.len()) + .unwrap() + .clone() + .len(); + gate_computation_longest * 0x20 + } + + pub fn permutation_computations(&self) -> PermutationDataEncoded { + let Self { meta, data, .. } = self; + let permutation_z_evals_last_idx = data.permutation_z_evals.len() - 1; + let permutation_z_evals: Vec = data + .permutation_z_evals + .iter() + .map(|z| self.encode_triplet_evaluation_word(z, 0)) + .collect(); + let column_evals: Vec> = meta + .permutation_columns + .chunks(meta.permutation_chunk_len) + .map(|columns| { + columns + .iter() + .map(|column| { + let perm_eval = + U256::from(data.permutation_evals[column].ptr().value().as_usize()); + let eval = self.eval_encoded(*column.column_type(), column.index(), 0); + eval | (perm_eval << 24) + }) + .collect() + }) + .collect(); + // First lsg byte encodes the last index of the permutation_z_evals + // The next 2 bytes encode the num words each set of permutation data will take up (except the last one, we use the next 2 bytes for that) scaled by 0x20 + // 48 is the bit offset of the permutation_z_evals and 40 is the bit offset of each column eval. 
+ let num_words = 1 + ((48 + (meta.permutation_chunk_len) * 40) / 256); + let perm_meta_data: U256 = { + let mut packed_word = U256::from(permutation_z_evals_last_idx); + packed_word |= U256::from(num_words * 0x20) << 8; + let last_num_words = 1 + ((48 + (column_evals.last().unwrap().len()) * 40) / 256); + packed_word |= U256::from(last_num_words * 0x20) << 24; + packed_word + }; + let perm_data: Vec = izip!(0.., column_evals) + .flat_map(|(chunk_idx, column_evals)| { + let mut packed_words = vec![permutation_z_evals[chunk_idx]]; + let mut last_idx = 0; + let mut bit_counter = 48; + for eval in column_evals.iter() { + let next_bit_counter = bit_counter + 40; + if next_bit_counter > 256 { + last_idx += 1; + packed_words.push(U256::from(0)); + bit_counter = 0; + } + packed_words[last_idx] |= *eval << bit_counter; + bit_counter += 40; + } + packed_words + }) + .collect_vec(); + PermutationDataEncoded { + permutation_meta_data: perm_meta_data, + permutation_data: perm_data, + } + } + + #[cfg(not(feature = "mv-lookup"))] + pub fn quotient_eval_fsm_usage(&self) -> usize { + let gate_computation_fsm_usage = self.gate_computation_fsm_usage(); + + // 0x40 offset b/c that is where the fsm pointer starts in the permutations computation code block + let permutation_computation_fsm_usage = (self.data.permutation_z_evals.len() * 0x20) + 0x40; + + let input_expressions_fsm_usage = 0xc0; // offset to store theta offset ptrs used in the non mv lookup computations. 
+ + itertools::max([ + gate_computation_fsm_usage, + permutation_computation_fsm_usage, + input_expressions_fsm_usage, + ]) + .unwrap() + } + + #[cfg(feature = "mv-lookup")] + pub fn quotient_eval_fsm_usage(&self) -> usize { + let gate_computation_fsm_usage = self.gate_computation_fsm_usage(); + + // 0x40 offset b/c that is where the fsm pointer starts in the permutations computation code block + let permutation_computation_fsm_usage = (self.data.permutation_z_evals.len() * 0x20) + 0x40; + + let evaluate_fsm_usage = |idx: usize, expressions: &Vec<_>| { + let offset = 0xa0; // offset to store theta offset ptrs used + // in the lookup computations. + let fsm = (0x20 * idx) + offset; + self.set_static_mem_ptr(fsm); + let max_fsm_usage = expressions + .iter() + .map(|expression| self.evaluate_encode(expression)) + .fold(fsm, |mut acc, result| { + acc += result.0.len() * 0x20; + acc + }); + self.reset(); + max_fsm_usage + }; + + let input_expressions_fsm_usage = self + .cs + .lookups() + .iter() + .map(|lookup| { + let inputs_iter = lookup.input_expressions().iter().enumerate(); + let fsm_usages = inputs_iter + .clone() + .map(|(idx, expressions)| evaluate_fsm_usage(idx, expressions)) + .collect_vec(); + *fsm_usages.iter().max().unwrap() + }) + .collect_vec(); + let input_expressions_fsm_usage = *input_expressions_fsm_usage.iter().max().unwrap_or(&0x0); + + itertools::max([ + gate_computation_fsm_usage, + permutation_computation_fsm_usage, + input_expressions_fsm_usage, + ]) + .unwrap() + } + + #[cfg(not(feature = "mv-lookup"))] + pub fn lookup_computations(&self, offset: usize) -> LookupsDataEncoded { + let input_tables = self + .cs + .lookups() + .iter() + .map(|lookup| { + let [packed_table_expr, packed_input_expr] = + [lookup.table_expressions(), lookup.input_expressions()].map(|expressions| { + let fsm = 0xc0; // offset to store theta offset ptrs used in the non mv lookup computations. 
+ self.set_static_mem_ptr(fsm); + let (lines, inputs) = expressions + .iter() + .map(|expression| self.evaluate_encode(expression)) + .fold((Vec::new(), Vec::new()), |mut acc, result| { + acc.0.extend(result.0); + acc.1.push(result.1); + acc + }); + self.reset(); + self.encode_pack_expr_operations(lines, 8, Some(inputs)) + }); + (packed_table_expr, packed_input_expr) + }) + .collect_vec(); + + let is_contiguous = |positions: &[usize]| -> bool { + if positions.is_empty() { + return true; + } + for window in positions.windows(2) { + if window[1] != window[0] + 1 { + return false; + } + } + true + }; + + let mut accumulator = 0; + + let mut previous_table_lines: Option>> = None; + // Hshmap used to check for contigious table_expressions in the lookups + let mut table_expression_positions: HashMap>, Vec> = HashMap::new(); + + let lookups: Vec = izip!(input_tables, &self.data.lookup_evals) + .enumerate() + .map(|(lookup_index, (input_table, evals))| { + let (table_lines, inputs) = input_table; + let evals = self.encode_quintuple_evaluation_word(evals, 8); + let mut inner_accumulator = 0; + let inputs: Vec = inputs + .iter() + .map(|input_lines| { + let res = InputsEncoded { + expression: vec![input_lines.clone()], + acc: inner_accumulator, + }; + inner_accumulator += 1; + res + }) + .collect_vec(); + table_expression_positions + .entry(table_lines.clone()) + .or_default() + .push(lookup_index); + + let mut lookup_encoded = LookupEncoded { + evals, + table_lines: table_lines.clone(), + inputs: inputs.clone(), + acc: accumulator, + }; + // The first byte in the eval word will store whether we use the previous table lines or not. + // If we use the previous table lines then the byte will be 0x0 otherwise it will be 0x1. 
+ if let Some(prev_lines) = &previous_table_lines { + if *prev_lines != table_lines { + lookup_encoded.evals |= U256::from(0x1); + } else { + lookup_encoded.table_lines = Vec::new(); + } + } else { + lookup_encoded.evals |= U256::from(0x1); + } + accumulator += lookup_encoded.len(); + + previous_table_lines = Some(table_lines); + lookup_encoded + }) + .collect_vec(); + // Check if any table_expressions (Vec>) have non-contiguous positions + for (table_exprs, positions) in table_expression_positions.iter() { + if !is_contiguous(positions) { + eprintln!( + "Warning: The table expressions `{:?}` are not contiguous across lookups. \ + Consider reordering your lookups so that all occurrences of this table expression \ + are adjacent to each other. This will result in more gas efficient verifications", + table_exprs + ); + } + } + + if lookups.is_empty() { + return LookupsDataEncoded::default(); + } + + let mut data = LookupsDataEncoded { + lookups, + meta_data: U256::from(0), + }; + // Insert the num_words and end_ptr to the beginning of the meta data word. + let end_ptr = (data.len() * 32) + offset; + data.meta_data = U256::from(end_ptr); + // Encode 0x1 into the next byte to indicate that is non-mv-lookup data. 
+ data.meta_data |= U256::from(0x1) << 16; + data + } + + #[cfg(feature = "mv-lookup")] + pub fn lookup_computations(&self, offset: usize) -> LookupsDataEncoded { + let evaluate_table = |expressions: &Vec<_>| { + let offset = 0xa0; // offset to store theta offset ptrs used in the reusable verifier (need to do this to avoid stack too deep errors) + self.set_static_mem_ptr(offset); // println!("expressions: {:?}", expressions); + let (lines, inputs) = expressions + .iter() + .map(|expression| self.evaluate_encode(expression)) + .fold((Vec::new(), Vec::new()), |mut acc, result| { + acc.0.extend(result.0); + acc.1.push(result.1); + acc + }); + self.reset(); + self.encode_pack_expr_operations(lines, 8, Some(inputs)) + }; + + let evaluate_inputs = |idx: usize, expressions: &Vec<_>| { + // println!("expressions: {:?}", expressions); + let offset = 0xa0; // offset to store theta offset ptrs used + // in the lookup computations. + let fsm = (0x20 * idx) + offset; + self.set_static_mem_ptr(fsm); + let (lines, inputs) = expressions + .iter() + .map(|expression| self.evaluate_encode(expression)) + .fold((Vec::new(), Vec::new()), |mut acc, result| { + acc.0.extend(result.0); + acc.1.push(result.1); + acc + }); + self.reset(); + // bit offset to store the number of inputs + let bit_offset = if idx == 0 { 24 } else { 8 }; + self.encode_pack_expr_operations(lines, bit_offset, Some(inputs)) + }; + + let is_contiguous = |positions: &[usize]| -> bool { + if positions.is_empty() { + return true; + } + for window in positions.windows(2) { + if window[1] != window[0] + 1 { + return false; + } + } + true + }; + + let inputs_tables = self + .cs + .lookups() + .iter() + .map(|lookup| { + let inputs_iter = lookup.input_expressions().iter().enumerate(); + // outer inputs of the MV lookup vector scaled by 0x20. 
+ let outer_inputs_len = lookup.input_expressions().len() * 0x20; + let inputs = inputs_iter + .clone() + .map(|(idx, expressions)| { + let mut lines = evaluate_inputs(idx, expressions); + if idx == 0 { + lines[0] |= U256::from(outer_inputs_len); + } + lines + }) + .collect_vec(); + let table = evaluate_table(lookup.table_expressions()); + (inputs, table) + }) + .collect_vec(); + + let mut accumulator = 0; + + let mut previous_table_lines: Option>> = None; + // Hshmap used to check for contigious table_expressions in the lookups + let mut table_expression_positions: HashMap>, Vec> = HashMap::new(); + + let lookups: Vec = izip!(inputs_tables, &self.data.lookup_evals) + .enumerate() + .map(|(lookup_index, (inputs_tables, evals))| { + let (inputs, table_lines) = inputs_tables.clone(); + let evals = self.encode_triplet_evaluation_word(evals, 8); + let mut inner_accumulator = 0; + let inputs: Vec = inputs + .iter() + .map(|input_lines| { + let res = InputsEncoded { + expression: input_lines.clone(), + acc: inner_accumulator, + }; + inner_accumulator += input_lines.len(); + res + }) + .collect_vec(); + + table_expression_positions + .entry(table_lines.clone()) + .or_default() + .push(lookup_index); + + let mut lookup_encoded = LookupEncoded { + evals, + table_lines: table_lines.clone(), + inputs: inputs.clone(), + acc: accumulator, + }; + // The first byte in the eval word will store whether we use the previous table lines or not. + // If we use the previous table lines then the byte will be 0x0 otherwise it will be 0x1. 
+ if let Some(prev_lines) = &previous_table_lines { + if *prev_lines != table_lines { + lookup_encoded.evals |= U256::from(0x1); + } else { + lookup_encoded.table_lines = Vec::new(); + } + } else { + lookup_encoded.evals |= U256::from(0x1); + } + accumulator += lookup_encoded.len(); + + previous_table_lines = Some(table_lines); + lookup_encoded + }) + .collect_vec(); + + // Check if any table_expressions (Vec>) have non-contiguous positions + for (table_exprs, positions) in table_expression_positions.iter() { + if !is_contiguous(positions) { + eprintln!( + "Warning: The table expressions `{:?}` are not contiguous across lookups. \ + Consider reordering your lookups so that all occurrences of this table expression \ + are adjacent to each other. This will result in more gas efficient verifications", + table_exprs + ); + } + } + + if lookups.is_empty() { + return LookupsDataEncoded::default(); + } + + let mut data = LookupsDataEncoded { + lookups, + meta_data: U256::from(0), + }; + // Insert the num_words and end_ptr to the beginning of the meta data word. + let end_ptr = (data.len() * 32) + offset; + data.meta_data = U256::from(end_ptr); + data + } + + fn eval_encoded( + &self, + column_type: impl Into, + column_index: usize, + rotation: i32, + ) -> U256 { + match column_type.into() { + Any::Advice(_) => self.encode_single_operand( + 0_u8, + U256::from( + self.data.advice_evals[&(column_index, rotation)] + .ptr() + .value() + .as_usize(), + ), + ), + Any::Fixed => self.encode_single_operand( + 0_u8, + U256::from( + self.data.fixed_evals[&(column_index, rotation)] + .ptr() + .value() + .as_usize(), + ), + ), + Any::Instance => self.encode_single_operand(1_u8, U256::from(0)), // On the EVM side the 0x0 op here we will inidicate that we need to perform the l_0 mload operation. 
+ } + } + + // TODO: optimiize this by maintaing a cache of previous var stored in static memory and if + // any of the steps require the same var then just return the pointer to the var instead of encoding it again + + fn reset(&self) { + *self.static_mem_ptr.borrow_mut() = 0x0; + *self.encoded_var_cache.borrow_mut() = Default::default(); + } + + fn encode_operation(&self, op: u8, lhs_ptr: U256, rhs_ptr: U256) -> U256 { + U256::from(op) | (lhs_ptr << 8) | (rhs_ptr << 24) + } + + fn encode_single_operand(&self, op: u8, ptr: U256) -> U256 { + U256::from(op) | (ptr << 8) + } + + fn encode_triplet_evaluation_word( + &self, + value: &(Word, Word, Word), + bit_offset: usize, + ) -> U256 { + let (z_i, z_j, z_i_last) = value; + U256::from(z_i.ptr().value().as_usize()) << bit_offset + | U256::from(z_j.ptr().value().as_usize()) << (bit_offset + 16) + | U256::from(z_i_last.ptr().value().as_usize()) << (bit_offset + 32) + } + + #[allow(dead_code)] + fn encode_quintuple_evaluation_word( + &self, + value: &(Word, Word, Word, Word, Word), + bit_offset: usize, + ) -> U256 { + let (z_i, z_j, z_k, z_l, z_m) = value; + U256::from(z_i.ptr().value().as_usize()) << bit_offset + | U256::from(z_j.ptr().value().as_usize()) << (bit_offset + 16) + | U256::from(z_k.ptr().value().as_usize()) << (bit_offset + 32) + | U256::from(z_l.ptr().value().as_usize()) << (bit_offset + 48) + | U256::from(z_m.ptr().value().as_usize()) << (bit_offset + 64) + } + + fn encode_pack_expr_operations( + &self, + exprs: Vec, + mut bit_counter: i32, + lookup_inputs: Option>, + ) -> Vec { + let mut packed_words: Vec = vec![U256::from(0)]; + let mut last_idx = 0; + let initial_offset = bit_counter; + + for expr in exprs.iter() { + let first_byte = expr.as_limbs()[0] & 0xFF; + let offset = if first_byte == 0 || first_byte == 1 { + 24 // single operand operation bit offset + } else { + 40 // multi operand operation bit offset + }; + + let next_bit_counter = bit_counter + offset; + if next_bit_counter > 256 { + last_idx 
+= 1; + packed_words.push(U256::from(0)); + bit_counter = 0; + } + packed_words[last_idx] |= *expr << bit_counter; + bit_counter += offset; + } + if let Some(inputs) = lookup_inputs { + // 1 byte for op flag (0x04) followed by 1 byte for the number of words allocated. + let offset = 16; + let next_bit_counter = bit_counter + offset; + if next_bit_counter > 256 { + last_idx += 1; + packed_words.push(U256::from(0)); + bit_counter = 0; + } + let start_idx = last_idx; + packed_words[last_idx] |= U256::from(4_u8) << bit_counter; + bit_counter += 8; + let length_bit_offset = bit_counter; + bit_counter += 8; + // iterate over the inputs + for input in inputs.iter() { + let offset = 16; + let next_bit_counter = bit_counter + offset; + if next_bit_counter > 256 { + last_idx += 1; + packed_words.push(U256::from(0)); + bit_counter = 0; + } + packed_words[last_idx] |= *input << bit_counter; + bit_counter += 16; + } + let allocated_words = last_idx - start_idx + 1; + // Encode the number of words allocated for the lookup input vars (after the 0x04 flag) + packed_words[start_idx] |= U256::from(allocated_words) << length_bit_offset; + } + + let packed_words_len = packed_words.len(); + + // Encode the length of the exprs vec in the first word + let offset = if initial_offset == 24 { 16 } else { 0 }; + packed_words[0] |= U256::from(packed_words_len) << offset; + + packed_words + } + + fn evaluate_and_reset(&self, expression: &Expression, pack: bool) -> Vec { + *self.static_mem_ptr.borrow_mut() = 0x0; + let result = self.evaluate_encode(expression); + self.reset(); + let res = result.0; + if pack { + self.encode_pack_expr_operations(res, 8, None) + } else { + res + } + } + + fn set_static_mem_ptr(&self, value: usize) { + *self.static_mem_ptr.borrow_mut() = value; + } + + fn evaluate_encode(&self, expression: &Expression) -> (Vec, U256) { + evaluate( + expression, + &|constant| self.init_encoded_var(constant, OperandMem::Constant), + &|query| { + self.init_encoded_var( + 
self.eval_encoded(Fixed, query.column_index(), query.rotation().0), + OperandMem::Calldata, + ) + }, + &|query| { + self.init_encoded_var( + self.eval_encoded(Advice::default(), query.column_index(), query.rotation().0), + OperandMem::Calldata, + ) + }, + &|_| { + self.init_encoded_var( + // instance eval ptr located 17 words after the theta mptr + U256::from((self.data.theta_mptr + 17).value().as_usize()), + OperandMem::Instance, + ) + }, + &|challenge| { + self.init_encoded_var( + U256::from( + self.data.challenges[challenge.index()] + .ptr() + .value() + .as_usize(), + ), + OperandMem::Challenge, + ) + }, + &|(mut acc, var)| { + let (lines, var) = self.init_encoded_var( + self.encode_single_operand(1_u8, var), + OperandMem::StaticMemory, + ); + acc.extend(lines); + (acc, var) + }, + &|(mut lhs_acc, lhs_var), (rhs_acc, rhs_var)| { + let (lines, var) = self.init_encoded_var( + self.encode_operation(2_u8, lhs_var, rhs_var), + OperandMem::StaticMemory, + ); + lhs_acc.extend(rhs_acc); + lhs_acc.extend(lines); + (lhs_acc, var) + }, + &|(mut lhs_acc, lhs_var), (rhs_acc, rhs_var)| { + let (lines, var) = self.init_encoded_var( + self.encode_operation(3_u8, lhs_var, rhs_var), + OperandMem::StaticMemory, + ); + lhs_acc.extend(rhs_acc); + lhs_acc.extend(lines); + (lhs_acc, var) + }, + &|(mut acc, var), scalar| { + // fetch the scalar pointer from the const cache + let scalar_ptr = self.const_cache.borrow()[&scalar]; + let (lines, var) = self.init_encoded_var( + self.encode_operation(3_u8, var, U256::from(scalar_ptr.value().as_usize())), + OperandMem::StaticMemory, + ); + acc.extend(lines); + (acc, var) + }, + ) + } + + // Return the encoded word and the static memory pointer + fn init_encoded_var(&self, value: U256, var: OperandMem) -> (Vec, U256) { + match var { + OperandMem::Calldata | OperandMem::StaticMemory => { + if self.encoded_var_cache.borrow().contains_key(&value) { + (vec![], self.encoded_var_cache.borrow()[&value]) + } else { + let var = self.next_encoded_var(); 
+ self.encoded_var_cache.borrow_mut().insert(value, var); + (vec![value], var) + } + } + OperandMem::Constant => ( + vec![], + U256::from(self.const_cache.borrow().get(&value).map_or_else( + || { + println!("Key not found: {}", value); + 0 // Default value, you can change this if needed + }, + |entry| entry.value().as_usize(), + )), + ), + OperandMem::Instance | OperandMem::Challenge => (vec![], value), + } + } + + fn next_encoded_var(&self) -> U256 { + let count = *self.static_mem_ptr.borrow(); + *self.static_mem_ptr.borrow_mut() += 0x20; + U256::from(count) + } +} + fn u256_string(value: U256) -> String { if value.bit_len() < 64 { format!("0x{:x}", value.as_limbs()[0]) @@ -348,9 +1322,9 @@ fn evaluate( scaled: &impl Fn(T, U256) -> T, ) -> T where - F: PrimeField, + F: PrimeField>, { - let evaluate = |expr| { + let evaluate = |expr: &Expression| { evaluate( expr, constant, fixed, advice, instance, challenge, negated, sum, product, scaled, ) diff --git a/src/codegen/pcs.rs b/src/codegen/pcs.rs index 2b7536a..72d3c87 100644 --- a/src/codegen/pcs.rs +++ b/src/codegen/pcs.rs @@ -2,6 +2,7 @@ use crate::codegen::util::{for_loop, ConstraintSystemMeta, Data, EcPoint, Location, Ptr, Word}; use itertools::{chain, izip, Itertools}; +use ruint::aliases::U256; use std::collections::{BTreeMap, BTreeSet}; /// KZG batch open schemes in `halo2`. 
@@ -32,6 +33,52 @@ impl Query { } } +#[cfg(feature = "mv-lookup")] +pub(crate) fn queries(meta: &ConstraintSystemMeta, data: &Data) -> Vec { + chain![ + meta.advice_queries.iter().map(|query| { + let comm = data.advice_comms[query.0]; + let eval = data.advice_evals[query]; + Query::new(comm, query.1, eval) + }), + izip!(&data.permutation_z_comms, &data.permutation_z_evals).flat_map(|(&comm, evals)| { + [Query::new(comm, 0, evals.0), Query::new(comm, 1, evals.1)] + }), + izip!(&data.permutation_z_comms, &data.permutation_z_evals) + .rev() + .skip(1) + .map(|(&comm, evals)| Query::new(comm, meta.rotation_last, evals.2)), + izip!( + &data.lookup_m_comms, + &data.lookup_phi_comms, + &data.lookup_evals + ) + .flat_map(|(&m_comm, &phi_comm, evals)| { + [ + Query::new(phi_comm, 0, evals.0), + Query::new(phi_comm, 1, evals.1), + Query::new(m_comm, 0, evals.2), + ] + }), + meta.fixed_queries.iter().map(|query| { + let comm = data.fixed_comms[query.0]; + let eval = data.fixed_evals[query]; + Query::new(comm, query.1, eval) + }), + meta.permutation_columns.iter().map(|column| { + let comm = data.permutation_comms[column]; + let eval = data.permutation_evals[column]; + Query::new(comm, 0, eval) + }), + [ + Query::new(data.computed_quotient_comm, 0, data.computed_quotient_eval), + Query::new(data.random_comm, 0, data.random_eval), + ] + ] + .collect() +} + +#[cfg(not(feature = "mv-lookup"))] pub(crate) fn queries(meta: &ConstraintSystemMeta, data: &Data) -> Vec { chain![ meta.advice_queries.iter().map(|query| { @@ -124,7 +171,7 @@ pub(crate) fn rotation_sets(queries: &[Query]) -> (BTreeSet, Vec = comm_queries .into_iter() .fold(Vec::::new(), |mut sets, (comm, queries)| { @@ -157,7 +204,10 @@ pub(crate) fn rotation_sets(queries: &[Query]) -> (BTreeSet, Vec Vec> { +pub(crate) fn bdfg21_computations_static( + meta: &ConstraintSystemMeta, + data: &Data, +) -> Vec> { let queries = queries(meta, data); let (superset, sets) = rotation_sets(&queries); let min_rot = 
*superset.first().unwrap(); @@ -200,10 +250,10 @@ pub(crate) fn bdfg21_computations(meta: &ConstraintSystemMeta, data: &Data) -> V let point_computations = chain![ [ - "let x := mload(X_MPTR)", - "let omega := mload(OMEGA_MPTR)", - "let omega_inv := mload(OMEGA_INV_MPTR)", - "let x_pow_of_omega := mulmod(x, omega, r)" + format!("let x := mload({})", "X_MPTR").as_str(), + format!("let omega := mload({})", "OMEGA_MPTR").as_str(), + format!("let omega_inv := mload({})", "OMEGA_INV_MPTR").as_str(), + "let x_pow_of_omega := mulmod(x, omega, R)" ] .map(str::to_string), (1..=max_rot).flat_map(|rot| { @@ -212,12 +262,12 @@ pub(crate) fn bdfg21_computations(meta: &ConstraintSystemMeta, data: &Data) -> V .get(&rot) .map(|point| format!("mstore({}, x_pow_of_omega)", point.ptr())), (rot != max_rot) - .then(|| { "x_pow_of_omega := mulmod(x_pow_of_omega, omega, r)".to_string() }) + .then(|| { "x_pow_of_omega := mulmod(x_pow_of_omega, omega, R)".to_string() }) ] }), [ format!("mstore({}, x)", points[&0].ptr()), - format!("x_pow_of_omega := mulmod(x, omega_inv, r)") + format!("x_pow_of_omega := mulmod(x, omega_inv, R)") ], (min_rot..0).rev().flat_map(|rot| { chain![ @@ -225,15 +275,16 @@ pub(crate) fn bdfg21_computations(meta: &ConstraintSystemMeta, data: &Data) -> V .get(&rot) .map(|point| format!("mstore({}, x_pow_of_omega)", point.ptr())), (rot != min_rot).then(|| { - "x_pow_of_omega := mulmod(x_pow_of_omega, omega_inv, r)".to_string() + "x_pow_of_omega := mulmod(x_pow_of_omega, omega_inv, R)".to_string() }) ] }) ] .collect_vec(); - + // print the point computations + // println!("{:?}", point_computations); let vanishing_computations = chain![ - ["let mu := mload(MU_MPTR)".to_string()], + [format!("let mu := mload(MU_MPTR)").to_string()], { let mptr = mu_minus_points.first_key_value().unwrap().1.ptr(); let mptr_end = mptr + mu_minus_points.len(); @@ -249,7 +300,7 @@ pub(crate) fn bdfg21_computations(meta: &ConstraintSystemMeta, data: &Data) -> V "point_mptr := add(point_mptr, 
0x20)", ] .map(str::to_string), - ["mstore(mptr, addmod(mu, sub(r, mload(point_mptr)), r))".to_string()], + ["mstore(mptr, addmod(mu, sub(R, mload(point_mptr)), R))".to_string()], ) }, ["let s".to_string()], @@ -259,7 +310,7 @@ pub(crate) fn bdfg21_computations(meta: &ConstraintSystemMeta, data: &Data) -> V mu_minus_points[sets[0].rots().first().unwrap()] )], chain![sets[0].rots().iter().skip(1)] - .map(|rot| { format!("s := mulmod(s, {}, r)", mu_minus_points[rot]) }), + .map(|rot| { format!("s := mulmod(s, {}, R)", mu_minus_points[rot]) }), [format!("mstore({}, s)", vanishing_0.ptr())], ], ["let diff".to_string()], @@ -270,7 +321,7 @@ pub(crate) fn bdfg21_computations(meta: &ConstraintSystemMeta, data: &Data) -> V .map(|rot| format!("diff := {}", mu_minus_points[rot])) .unwrap_or_else(|| "diff := 1".to_string())], chain![set.diffs().iter().skip(1)] - .map(|rot| { format!("diff := mulmod(diff, {}, r)", mu_minus_points[rot]) }), + .map(|rot| { format!("diff := mulmod(diff, {}, R)", mu_minus_points[rot]) }), [format!("mstore({}, diff)", diff.ptr())], (set_idx == 0).then(|| format!("mstore({}, diff)", diff_0.ptr())), ] @@ -304,15 +355,15 @@ pub(crate) fn bdfg21_computations(meta: &ConstraintSystemMeta, data: &Data) -> V [coeff_points .first() .map(|(point_i, point_j)| { - format!("coeff := addmod({point_i}, sub(r, {point_j}), r)") + format!("coeff := addmod({point_i}, sub(R, {point_j}), R)") }) .unwrap_or_else(|| { "coeff := 1".to_string() })], coeff_points.iter().skip(1).map(|(point_i, point_j)| { - let item = format!("addmod({point_i}, sub(r, {point_j}), r)"); - format!("coeff := mulmod(coeff, {item}, r)") + let item = format!("addmod({point_i}, sub(R, {point_j}), R)"); + format!("coeff := mulmod(coeff, {item}, R)") }), [ - format!("coeff := mulmod(coeff, {}, r)", mu_minus_points[rot_i]), + format!("coeff := mulmod(coeff, {}, R)", mu_minus_points[rot_i]), format!("mstore({}, coeff)", coeff.ptr()) ], ] @@ -324,7 +375,7 @@ pub(crate) fn bdfg21_computations(meta: 
&ConstraintSystemMeta, data: &Data) -> V let normalized_coeff_computations = chain![ [ - format!("success := batch_invert(success, 0, {first_batch_invert_end}, r)"), + format!("success := batch_invert(success, 0, {first_batch_invert_end})"), format!("let diff_0_inv := {diff_0}"), format!("mstore({}, diff_0_inv)", diffs[0].ptr()), ], @@ -335,7 +386,7 @@ pub(crate) fn bdfg21_computations(meta: &ConstraintSystemMeta, data: &Data) -> V ], "lt(mptr, mptr_end)", ["mptr := add(mptr, 0x20)".to_string()], - ["mstore(mptr, mulmod(mload(mptr), diff_0_inv, r))".to_string()], + ["mstore(mptr, mulmod(mload(mptr), diff_0_inv, R))".to_string()], ), ] .collect_vec(); @@ -345,7 +396,11 @@ pub(crate) fn bdfg21_computations(meta: &ConstraintSystemMeta, data: &Data) -> V let is_single_rot_set = set.rots().len() == 1; chain![ is_single_rot_set.then(|| format!("let coeff := {}", coeffs[0])), - ["let zeta := mload(ZETA_MPTR)", "let r_eval := 0"].map(str::to_string), + [ + format!("let zeta := mload({})", "ZETA_MPTR").as_str(), + "let r_eval := 0" + ] + .map(str::to_string), if is_single_rot_set { let eval_groups = set.evals().iter().rev().fold( Vec::>::new(), @@ -372,17 +427,17 @@ pub(crate) fn bdfg21_computations(meta: &ConstraintSystemMeta, data: &Data) -> V chain![evals.iter().enumerate()] .flat_map(|(eval_idx, eval)| { let is_first_eval = group_idx == 0 && eval_idx == 0; - let item = format!("mulmod(coeff, {eval}, r)"); + let item = format!("mulmod(coeff, {eval}, R)"); chain![ (!is_first_eval).then(|| format!( - "r_eval := mulmod(r_eval, zeta, r)" + "r_eval := mulmod(r_eval, zeta, R)" )), - [format!("r_eval := addmod(r_eval, {item}, r)")], + [format!("r_eval := addmod(r_eval, {item}, R)")], ] }) .collect_vec() } else { - let item = "mulmod(coeff, calldataload(mptr), r)"; + let item = "mulmod(coeff, calldataload(mptr), R)"; for_loop( [ format!("let mptr := {}", evals[0].ptr()), @@ -391,7 +446,7 @@ pub(crate) fn bdfg21_computations(meta: &ConstraintSystemMeta, data: &Data) -> V 
"lt(mptr_end, mptr)".to_string(), ["mptr := sub(mptr, 0x20)".to_string()], [format!( - "r_eval := addmod(mulmod(r_eval, zeta, r), {item}, r)" + "r_eval := addmod(mulmod(r_eval, zeta, R), {item}, R)" )], ) } @@ -402,15 +457,15 @@ pub(crate) fn bdfg21_computations(meta: &ConstraintSystemMeta, data: &Data) -> V .flat_map(|(idx, evals)| { chain![ izip!(evals, coeffs).map(|(eval, coeff)| { - let item = format!("mulmod({coeff}, {eval}, r)"); - format!("r_eval := addmod(r_eval, {item}, r)") + let item = format!("mulmod({coeff}, {eval}, R)"); + format!("r_eval := addmod(r_eval, {item}, R)") }), - (idx != 0).then(|| format!("r_eval := mulmod(r_eval, zeta, r)")), + (idx != 0).then(|| format!("r_eval := mulmod(r_eval, zeta, R)")), ] }) .collect_vec() }, - (set_idx != 0).then(|| format!("r_eval := mulmod(r_eval, {set_coeff}, r)")), + (set_idx != 0).then(|| format!("r_eval := mulmod(r_eval, {set_coeff}, R)")), [format!("mstore({}, r_eval)", r_eval.ptr())], ] .collect_vec() @@ -423,12 +478,11 @@ pub(crate) fn bdfg21_computations(meta: &ConstraintSystemMeta, data: &Data) -> V [format!("let sum := {coeff_0}")], rest_coeffs .iter() - .map(|coeff_mptr| format!("sum := addmod(sum, {coeff_mptr}, r)")), + .map(|coeff_mptr| format!("sum := addmod(sum, {coeff_mptr}, R)")), [format!("mstore({}, sum)", sum.ptr())], ] .collect_vec() }); - let r_eval_computations = chain![ for_loop( [ @@ -441,9 +495,9 @@ pub(crate) fn bdfg21_computations(meta: &ConstraintSystemMeta, data: &Data) -> V ["mstore(mptr, mload(sum_mptr))".to_string()], ), [ - format!("success := batch_invert(success, 0, {second_batch_invert_end}, r)"), + format!("success := batch_invert(success, 0, {second_batch_invert_end})"), format!( - "let r_eval := mulmod(mload({}), {}, r)", + "let r_eval := mulmod(mload({}), {}, R)", second_batch_invert_end - 1, r_evals.last().unwrap() ) @@ -461,17 +515,16 @@ pub(crate) fn bdfg21_computations(meta: &ConstraintSystemMeta, data: &Data) -> V ] .map(str::to_string), [ - "r_eval := mulmod(r_eval, 
mload(NU_MPTR), r)", - "r_eval := addmod(r_eval, mulmod(mload(sum_inv_mptr), mload(r_eval_mptr), r), r)" + format!("r_eval := mulmod(r_eval, mload(NU_MPTR), R)").as_str(), + "r_eval := addmod(r_eval, mulmod(mload(sum_inv_mptr), mload(r_eval_mptr), R), R)" ] .map(str::to_string), ), - ["mstore(R_EVAL_MPTR, r_eval)".to_string()], + [format!("mstore(R_EVAL_MPTR, r_eval)")], ] .collect_vec(); - let pairing_input_computations = chain![ - ["let nu := mload(NU_MPTR)".to_string()], + [format!("let nu := mload(NU_MPTR)").to_string()], izip!(0.., &sets, &diffs).flat_map(|(set_idx, set, set_coeff)| { let is_first_set = set_idx == 0; let is_last_set = set_idx == sets.len() - 1; @@ -500,7 +553,6 @@ pub(crate) fn bdfg21_computations(meta: &ConstraintSystemMeta, data: &Data) -> V } }, ); - chain![ set.comms() .last() @@ -545,13 +597,13 @@ pub(crate) fn bdfg21_computations(meta: &ConstraintSystemMeta, data: &Data) -> V }), (!is_first_set) .then(|| { - let scalar = format!("mulmod(nu, {set_coeff}, r)"); + let scalar = format!("mulmod(nu, {set_coeff}, R)"); chain![ [ format!("success := ec_mul_tmp(success, {scalar})"), format!("success := ec_add_acc(success, mload(0x80), mload(0xa0))"), ], - (!is_last_set).then(|| format!("nu := mulmod(nu, mload(NU_MPTR), r)")) + (!is_last_set).then(|| format!("nu := mulmod(nu, mload(NU_MPTR), R)")) ] }) .into_iter() @@ -560,13 +612,13 @@ pub(crate) fn bdfg21_computations(meta: &ConstraintSystemMeta, data: &Data) -> V .collect_vec() }), [ - format!("mstore(0x80, mload(G1_X_MPTR))"), - format!("mstore(0xa0, mload(G1_Y_MPTR))"), - format!("success := ec_mul_tmp(success, sub(r, mload(R_EVAL_MPTR)))"), + format!("mstore(0x80, mload({}))", "G1_X_MPTR"), + format!("mstore(0xa0, mload({}))", "G1_Y_MPTR"), + format!("success := ec_mul_tmp(success, sub(R, mload(R_EVAL_MPTR)))"), format!("success := ec_add_acc(success, mload(0x80), mload(0xa0))"), format!("mstore(0x80, {})", w.x()), format!("mstore(0xa0, {})", w.y()), - format!("success := ec_mul_tmp(success, 
sub(r, {vanishing_0}))"), + format!("success := ec_mul_tmp(success, sub(R, {vanishing_0}))"), format!("success := ec_add_acc(success, mload(0x80), mload(0xa0))"), format!("mstore(0x80, {})", w_prime.x()), format!("mstore(0xa0, {})", w_prime.y()), @@ -590,3 +642,663 @@ pub(crate) fn bdfg21_computations(meta: &ConstraintSystemMeta, data: &Data) -> V ] .collect_vec() } + +// Holds the encoded data stored in the separate VK +// needed to perform the pcs computations portion of the reusable verifier. +#[derive(Clone, PartialEq, Eq, Default)] +pub struct PcsDataEncoded { + pub(crate) point_computations: Vec, + pub(crate) vanishing_computations: Vec, + pub(crate) coeff_computations: Vec, + pub(crate) normalized_coeff_computations: U256, + pub(crate) r_evals_computations: Vec, + pub(crate) coeff_sums_computation: Vec, + pub(crate) r_eval_computations: U256, + pub(crate) pairing_input_computations: Vec, +} + +// implement length of PcsDataEncoded +impl PcsDataEncoded { + pub fn len(&self) -> usize { + self.point_computations.len() + + self.vanishing_computations.len() + + self.coeff_computations.len() + + 1 // normalized_coeff_computations + + self.r_evals_computations.len() + + self.coeff_sums_computation.len() + + 1 // r_eval_computations + + self.pairing_input_computations.len() + } +} + +pub(crate) fn bdfg21_computations_dynamic( + meta: &ConstraintSystemMeta, + data: &Data, +) -> PcsDataEncoded { + let queries = queries(meta, data); + let (superset, sets) = rotation_sets(&queries); + let min_rot = *superset.first().unwrap(); + let max_rot = *superset.last().unwrap(); + let num_coeffs = sets.iter().map(|set| set.rots().len()).sum::(); + + let w = EcPoint::from(data.w_cptr); + let w_prime = EcPoint::from(data.w_cptr + 2); + + let diff_0 = Word::from(Ptr::memory(0x00)); + let coeffs = sets + .iter() + .scan(diff_0.ptr() + 1, |state, set| { + let ptrs = Word::range(*state).take(set.rots().len()).collect_vec(); + *state = *state + set.rots().len(); + Some(ptrs) + }) + 
.collect_vec(); + + let first_batch_invert_end = diff_0.ptr() + 1 + num_coeffs; + let second_batch_invert_end = diff_0.ptr() + sets.len(); + let free_mptr = diff_0.ptr() + 2 * (1 + num_coeffs) + 6; + + let point_mptr = free_mptr; + let mu_minus_point_mptr = point_mptr + superset.len(); + let vanishing_0_mptr = mu_minus_point_mptr + superset.len(); + let diff_mptr = vanishing_0_mptr + 1; + let r_eval_mptr = diff_mptr + sets.len(); + let sum_mptr = r_eval_mptr + sets.len(); + + let points = izip!(&superset, Word::range(point_mptr)).collect::>(); + let mu_minus_points = + izip!(&superset, Word::range(mu_minus_point_mptr)).collect::>(); + let vanishing_0 = Word::from(vanishing_0_mptr); + let diffs = Word::range(diff_mptr).take(sets.len()).collect_vec(); + let r_evals = Word::range(r_eval_mptr).take(sets.len()).collect_vec(); + let sums = Word::range(sum_mptr).take(sets.len()).collect_vec(); + + let point_computations: Vec = { + let pack_words = |points: Vec, interm_point: Option| { + let mut packed_words: Vec = vec![U256::from(0)]; + let mut bit_counter = 8; + let points_len = points.len(); + // assert that points_len is less than 256 bits so that it fits into 1 byte. 
+ assert!(points_len < 256); + if let Some(interm_point) = interm_point { + packed_words[0] |= interm_point; + packed_words[0] |= U256::from(points_len) << 16; + bit_counter = 24; + } else { + packed_words[0] |= U256::from(points_len); + } + let mut last_idx = 0; + + for point in points.iter() { + let offset = 16; + + let next_bit_counter = bit_counter + offset; + if next_bit_counter > 256 { + last_idx += 1; + packed_words.push(U256::from(0)); + bit_counter = 0; + } + packed_words[last_idx] |= *point << bit_counter; + bit_counter += 16; + } + + packed_words + }; + let max_rot_computations = (1..=max_rot) + .map(|rot| { + points + .get(&rot) + .map(|point| U256::from(point.ptr().value().as_usize())) + .unwrap_or(U256::from(0)) + }) + .collect_vec(); + let min_rot_computations = (min_rot..0) + .rev() + .map(|rot| { + points + .get(&rot) + .map(|point| U256::from(point.ptr().value().as_usize())) + .unwrap_or(U256::from(0)) + }) + .collect_vec(); + chain!( + pack_words(max_rot_computations, None).into_iter(), + pack_words( + min_rot_computations, + Some(U256::from(points[&0].ptr().value().as_usize())), + ) + .into_iter() + ) + .collect_vec() + }; + + let vanishing_computations: Vec = { + let pack_mptrs_and_s_ptrs: Vec = { + let mptr = mu_minus_points.first_key_value().unwrap().1.ptr(); + let mptr_word = U256::from(mptr.value().as_usize()); + let mptr_end = mptr + mu_minus_points.len(); + let mptr_end_word = U256::from(mptr_end.value().as_usize()); + let mut last_idx = 0; + let mut packed_words = vec![U256::from(0)]; + // start packing the mptrs + packed_words[0] |= mptr_word; + packed_words[0] |= mptr_end_word << 16; + packed_words[0] |= U256::from(free_mptr.value().as_usize()) << 32; + // bit offset length to where the number of words allocated to the s_ptrs will be stored. 
+ let words_alloc_offset = 48; + let mut bit_counter = words_alloc_offset + 8; + // start packing the s_ptrs + sets[0].rots().iter().for_each(|rot| { + // panic if offset is exceeds 256 + let next_bit_counter = bit_counter + 16; + if next_bit_counter > 256 { + last_idx += 1; + packed_words.push(U256::from(0)); + bit_counter = 0; + } + packed_words[last_idx] |= + U256::from(mu_minus_points[rot].ptr().value().as_usize()) << bit_counter; + bit_counter += 16; + }); + // store the num words allocated for s_ptrs + packed_words[0] |= U256::from(last_idx + 1) << words_alloc_offset; + packed_words + }; + let pack_vanishing_0_and_sets_len: ruint::Uint<256, 4> = { + let vanishing_0_word = U256::from(vanishing_0.ptr().value().as_usize()); + let sets_len = U256::from(sets.len()); + let mut packed_word = U256::from(0); + packed_word |= vanishing_0_word; + packed_word |= sets_len << 16; + packed_word + }; + let pack_set_diffs_words: Vec> = { + sets.iter() + .map(|set| { + let mut packed_word = U256::from(0); + let mut offset = 0; + set.diffs().iter().for_each(|rot| { + packed_word |= + U256::from(mu_minus_points[rot].ptr().value().as_usize()) << offset; + offset += 16; + }); + if set.diffs.is_empty() { + // 0x20 is where 1 is stored in memory in this block + packed_word |= U256::from(0x20) << offset; + offset += 16; + } + assert!( + offset <= 256, + "The offset for packing the set diff word exceeds 256 bits", + ); + packed_word + }) + .collect_vec() + }; + chain!( + pack_mptrs_and_s_ptrs.into_iter(), + [pack_vanishing_0_and_sets_len], + pack_set_diffs_words.into_iter() + ) + .collect_vec() + }; + let coeff_computations: Vec = { + // 1) The first LSG byte of the first word will contain the number of words that are used to store the + // the set.rots().len(). 
The rest of the bytes in the word will contain the set.rots().len() + let coeff_len_words: Vec = { + let mut packed_words: Vec = vec![U256::from(0)]; + let mut bit_counter = 8; + let mut last_idx = 0; + for set in sets.iter() { + let coeff_len = set.rots().len(); + assert!(coeff_len <= 5, "The number of rotations in a set exceeds 5 for the coeff_computations. Can't pack all the coef_data in a single word"); + let offset = 8; + let next_bit_counter = bit_counter + offset; + if next_bit_counter > 256 { + last_idx += 1; + packed_words.push(U256::from(0)); + bit_counter = 0; + } + packed_words[last_idx] |= U256::from(coeff_len) << bit_counter; + bit_counter += 8; + } + let packed_words_len = packed_words.len(); + // Encode the length of the exprs vec in the first word + packed_words[0] |= U256::from(packed_words_len); + packed_words + }; + // The next set of words will contain the points for the set.rots() followed by the mu_minus_points + // and the coeff.ptr(). Throw if set.rots().len > 5 b/c anything greater than 5 we can't pack all the ptr data into a single word. 
+ let coeff_data_words: Vec = izip!(&sets, &coeffs) + .map(|(set, coeffs)| { + let mut packed_word = U256::from(0); + let mut offset = 0; + if set.rots().len() > 1 { + set.rots().iter().for_each(|rot| { + packed_word |= U256::from(points[rot].ptr().value().as_usize()) << offset; + offset += 16; + }); + } + set.rots().iter().for_each(|rot| { + packed_word |= + U256::from(mu_minus_points[rot].ptr().value().as_usize()) << offset; + offset += 16; + }); + coeffs.iter().for_each(|coeff| { + packed_word |= U256::from(coeff.ptr().value().as_usize()) << offset; + offset += 16; + }); + assert!( + offset <= 256, + "The offset for packing the coeff computation word exceeds 256 bits", + ); + packed_word + }) + .collect_vec(); + chain!(coeff_len_words.into_iter(), coeff_data_words.into_iter()).collect_vec() + }; + + let normalized_coeff_computations: U256 = { + let mut packed_word = U256::from(0); + let mut offset = 0; + packed_word |= U256::from(first_batch_invert_end.value().as_usize()) << offset; + offset += 16; + packed_word |= U256::from(diffs[0].ptr().value().as_usize()) << offset; + offset += 16; + packed_word |= U256::from(sets.len() * 32) << offset; + packed_word + }; + + // 1. The LSG byte of the first word will contain the total number of words that contain the set_coeff and the r_eval ptr followed + // by the packed Vec. + // 2. The next set of words will contain the evaluation pointers. The first LSG byte will contain the number of words + // that contain the packed evaluation pointers. The rest of the bytes will contain the evaluation pointers or evaluation pointers + coeff pointer. + // depending on if it is a single rotation set or not. + // 3. Here is how the encoding of the single rotation set will look like: + // 3a. After the 1 byte that contains the number of words with the packed ptr data for the given set, we encode the coeffs[0] ptr, + // the first eval ptr of the first group. + // 3b. 
Next we encode the number of eval ptrs in the eval_group, encoding the eval ptrs that follow until we reach the end of the eval_group, repeating the process for the next eval_group. + // 4. Here is how the encoding of the not single rotation set will look like: + // 4a. After the 1 byte that contains the number of words with the packed ptr data for the given set, the coeffs ptrs, + // and the eval ptrs. + // and the coeff.ptr(). Throw if set.rots().len > 5 b/c anything greater than 5 we can't pack all the ptr data into a single word. + + let r_evals_computations: Vec = { + let pack_value = |packed_word: &mut U256, value: usize, bit_counter: &mut usize| { + *packed_word |= U256::from(value) << *bit_counter; + *bit_counter += 16; + }; + let encode_coeff_length = |coeff_len: usize, single_rot_set: &mut usize| -> usize { + if coeff_len == 1 { + *single_rot_set += 1; + assert!( + *single_rot_set <= 1, + "Only one single rotation set in the r_evals_computations" + ); + coeff_len * 32 + } else { + assert!(coeff_len != 0, "The number of rotations in a set is 0"); + coeff_len * 32 + } + }; + + let r_evals_meta_data: Vec = { + let mut packed_words = vec![U256::from(0)]; + let mut bit_counter = 8; + + pack_value( + &mut packed_words[0], + diffs[1].ptr().value().as_usize(), + &mut bit_counter, + ); + pack_value( + &mut packed_words[0], + r_eval_mptr.value().as_usize(), + &mut bit_counter, + ); + + let mut last_idx = 0; + let mut single_rot_set = 0; + + for set in sets.iter() { + let coeff_len = set.rots().len(); + // if coeff_len is greater than 1 then we scale it by 16. 
+ let encoded_length = encode_coeff_length(coeff_len, &mut single_rot_set); + + assert!( + encoded_length < 256, + "The encoded length for r_evals exceeds 256 bits" + ); + + let next_bit_counter = bit_counter + 8; + if next_bit_counter > 256 { + last_idx += 1; + packed_words.push(U256::from(0)); + bit_counter = 0; + } + packed_words[last_idx] |= U256::from(encoded_length) << bit_counter; + bit_counter += 8; + } + + let packed_words_len = packed_words.len(); + packed_words[0] |= U256::from(packed_words_len); + packed_words + }; + + let calculate_evals_len_offset = |evals: &[&Word]| -> (usize, usize) { + if evals.len() < 3 { + (evals.len(), 8 + (evals.len() * 16)) + } else { + (0, 8 + 32) + } + }; + + let pack_evals = |packed_words: &mut Vec, + evals: &[&Word], + evals_len: usize, + bit_counter: &mut usize, + last_idx: usize| { + if evals_len == 0 { + let mptr = evals[0].ptr(); + let mptr_end = evals[0].ptr() - evals.len(); + packed_words[last_idx] |= U256::from(mptr.value().as_usize()) << *bit_counter; + *bit_counter += 16; + packed_words[last_idx] |= U256::from(mptr_end.value().as_usize()) << *bit_counter; + *bit_counter += 16; + } else { + for eval in evals.iter() { + let eval_ptr = eval.ptr(); + packed_words[last_idx] |= + U256::from(eval_ptr.value().as_usize()) << *bit_counter; + *bit_counter += 16; + } + } + }; + + let process_single_rotation_set = + |set: &RotationSet, + packed_words: &mut Vec, + bit_counter: &mut usize, + last_idx: &mut usize| { + let eval_groups = set.evals().iter().rev().fold( + Vec::>::new(), + |mut eval_groups, evals| { + let eval = &evals[0]; + if let Some(last_group) = eval_groups.last_mut() { + let last_eval = **last_group.last().unwrap(); + if last_eval.ptr().value().is_integer() + && last_eval.ptr() - 1 == eval.ptr() + { + last_group.push(eval) + } else { + eval_groups.push(vec![eval]) + } + eval_groups + } else { + vec![vec![eval]] + } + }, + ); + + let first_eval_ptr = eval_groups[0][0].ptr(); + assert!( + 
eval_groups[1][0].ptr().loc() == Location::Memory, + "The second eval group for a single rotation set should be memory but it is not" + ); + pack_value( + &mut packed_words[0], + first_eval_ptr.value().as_usize(), + bit_counter, + ); + + for evals in eval_groups.iter().skip(2) { + let (evals_len, offset) = calculate_evals_len_offset(evals); + let next_bit_counter = *bit_counter + offset; + + if next_bit_counter > 256 { + *last_idx += 1; + packed_words.push(U256::from(0)); + *bit_counter = 0; + } + + packed_words[*last_idx] |= U256::from(evals_len) << *bit_counter; + *bit_counter += 8; + pack_evals(packed_words, evals, evals_len, bit_counter, *last_idx); + } + }; + + let process_multiple_rotation_set = + |set: &RotationSet, + packed_words: &mut Vec, + bit_counter: &mut usize, + last_idx: &mut usize| { + for evals in set.evals().iter().rev() { + let offset = coeffs.len() * 16; + let next_bit_counter = *bit_counter + offset; + + if next_bit_counter > 256 { + *last_idx += 1; + packed_words.push(U256::from(0)); + *bit_counter = 0; + } + for eval in evals.iter() { + let eval_ptr = eval.ptr(); + packed_words[*last_idx] |= + U256::from(eval_ptr.value().as_usize()) << *bit_counter; + *bit_counter += 16; + } + } + }; + + let r_evals_data: Vec = sets + .iter() + .flat_map(|set| { + let mut packed_words = vec![U256::from(0)]; + let mut last_idx = 0; + let mut bit_counter = 8; + + if set.rots().len() == 1 { + process_single_rotation_set( + set, + &mut packed_words, + &mut bit_counter, + &mut last_idx, + ); + } else { + process_multiple_rotation_set( + set, + &mut packed_words, + &mut bit_counter, + &mut last_idx, + ); + } + + let packed_words_len = packed_words.len(); + packed_words[0] |= U256::from(packed_words_len); + packed_words + }) + .collect(); + chain!(r_evals_meta_data.into_iter(), r_evals_data.into_iter()).collect_vec() + }; + + let coeff_sums_computation: Vec = { + let mut packed_words = vec![U256::from(0)]; + let mut bit_counter = 8; + let mut last_idx = 0; + for 
(coeffs, sum) in izip!(&coeffs, &sums) { + let offset = 24; + let next_bit_counter = bit_counter + offset; + let len = coeffs.len() * 32; + assert!(len < 256, "The length of the coeffs exceeds 256 bits"); + if next_bit_counter > 256 { + last_idx += 1; + packed_words.push(U256::from(0)); + bit_counter = 0; + } + packed_words[last_idx] |= U256::from(len) << bit_counter; + bit_counter += 8; + packed_words[last_idx] |= U256::from(sum.ptr().value().as_usize()) << bit_counter; + bit_counter += 16; + } + let packed_words_len = packed_words.len(); + packed_words[0] |= U256::from(packed_words_len); + packed_words + }; + + let r_eval_computations: U256 = { + let mut packed_word = U256::from(0); + let mut offset = 0; + packed_word |= U256::from(second_batch_invert_end.value().as_usize()) << offset; + offset += 16; + packed_word |= U256::from(sums[0].ptr().value().as_usize()) << offset; + offset += 16; + packed_word |= U256::from(r_evals.last().unwrap().ptr().value().as_usize()) << offset; + packed_word + }; + + let pairing_input_computations: Vec = { + let mut word_lengths = Vec::new(); + let data: Vec = sets + .iter() + .flat_map(|set| { + let comm_groups = set.comms().iter().rev().skip(1).fold( + Vec::<(Location, Vec<&EcPoint>)>::new(), + |mut comm_groups, comm| { + if let Some(last_group) = comm_groups.last_mut() { + let last_comm = **last_group.1.last().unwrap(); + if last_group.0 == comm.loc() + && last_comm.x().ptr().value().is_integer() + && last_comm.x().ptr() - 2 == comm.x().ptr() + { + last_group.1.push(comm) + } else { + comm_groups.push((comm.loc(), vec![comm])) + } + comm_groups + } else { + vec![(comm.loc(), vec![comm])] + } + }, + ); + let mut packed_words = vec![U256::from(0)]; + let mut bit_counter = 0; + let mut last_idx = 0; + let comm = set.comms().last().unwrap(); + packed_words[last_idx] |= + U256::from(comm.x().ptr().value().as_usize()) << bit_counter; + bit_counter += 16; + packed_words[last_idx] |= + U256::from(comm.y().ptr().value().as_usize()) << 
bit_counter; + bit_counter += 16; + for (loc, comms) in comm_groups.iter() { + let is_quotient_point = !comms[0].x().ptr().value().is_integer() + && !comms[0].y().ptr().value().is_integer(); + let offset = if comms.len() == 2 { 80 } else if is_quotient_point { 16 } else { 48 } ; + let next_bit_counter = bit_counter + offset; + if next_bit_counter > 256 { + last_idx += 1; + packed_words.push(U256::from(0)); + bit_counter = 0; + } + let loc_encoded = if is_quotient_point + { + assert!( + comms.len() == 1, + "The number of comms in the group containing the quotient points must be 1", + ); + // we encode 0x02 if the comm is a quotient point + 0x02 + } else { + // check the location of the comm. If it is memory then we encode 0x00, 0x01 otherwise + if *loc == Location::Memory { 0x00 } else { 0x01 } + }; + packed_words[last_idx] |= U256::from(loc_encoded) << bit_counter; + bit_counter += 8; + let len_encoded = if comms.len() < 3 { comms.len() } else { 0 }; + packed_words[last_idx] |= U256::from(len_encoded) << bit_counter; + bit_counter += 8; + if loc_encoded == 0x02 { + // we hardcode the location of the quotient points in reusable verifier so we skip encoding them + // in the vk + continue; + } + if comms.len() < 3 { + comms.iter().for_each(|comm| { + packed_words[last_idx] |= + U256::from(comm.x().ptr().value().as_usize()) << bit_counter; + bit_counter += 16; + packed_words[last_idx] |= + U256::from(comm.y().ptr().value().as_usize()) << bit_counter; + bit_counter += 16; + }); + } else { + let mptr = comms.first().unwrap().x().ptr(); + let mptr_end = mptr - 2 * comms.len(); + packed_words[last_idx] |= + U256::from(mptr.value().as_usize()) << bit_counter; + bit_counter += 16; + packed_words[last_idx] |= + U256::from(mptr_end.value().as_usize()) << bit_counter; + bit_counter += 16; + } + } + assert!( + packed_words.len() * 32 < 256, + "The bit counter for the pairing input computations exceeds 256 bits" + ); + // update the word lengths + 
word_lengths.push(packed_words.len() * 32); + packed_words + }) + .collect(); + + let meta_data: Vec = { + let mut packed_words = vec![U256::from(0)]; + let mut bit_counter = 8; + let mut last_idx = 0; + packed_words[last_idx] |= U256::from(diffs[1].ptr().value().as_usize()) << bit_counter; + bit_counter += 16; + // pack the ec points cptrs + packed_words[last_idx] |= U256::from(w.x().ptr().value().as_usize()) << bit_counter; + bit_counter += 16; + packed_words[last_idx] |= U256::from(w.y().ptr().value().as_usize()) << bit_counter; + bit_counter += 16; + packed_words[last_idx] |= + U256::from(vanishing_0.ptr().value().as_usize()) << bit_counter; + bit_counter += 16; + packed_words[last_idx] |= + U256::from(w_prime.x().ptr().value().as_usize()) << bit_counter; + bit_counter += 16; + packed_words[last_idx] |= + U256::from(w_prime.y().ptr().value().as_usize()) << bit_counter; + bit_counter += 16; + + // iterate through the word lengths and pack them into the first word + for len in word_lengths.iter() { + let next_bit_counter = bit_counter + 8; + if next_bit_counter > 256 { + last_idx += 1; + packed_words.push(U256::from(0)); + bit_counter = 0; + } + packed_words[last_idx] |= U256::from(*len) << bit_counter; + bit_counter += 8; + } + let packed_words_len = packed_words.len(); + packed_words[0] |= U256::from(packed_words_len); + packed_words + }; + chain!(meta_data.into_iter(), data.into_iter()).collect_vec() + }; + + PcsDataEncoded { + point_computations, + vanishing_computations, + coeff_computations, + normalized_coeff_computations, + r_evals_computations, + coeff_sums_computation, + r_eval_computations, + pairing_input_computations, + } +} diff --git a/src/codegen/template.rs b/src/codegen/template.rs index 55753f9..8a536fd 100644 --- a/src/codegen/template.rs +++ b/src/codegen/template.rs @@ -4,8 +4,13 @@ use crate::codegen::{ }; use askama::{Error, Template}; use ruint::aliases::U256; +use std::collections::HashMap; use std::fmt; +use super::{ + 
evaluator::{GateDataEncoded, LookupsDataEncoded, PermutationDataEncoded}, + pcs::PcsDataEncoded, +}; #[derive(Template)] #[template(path = "Halo2VerifyingKey.sol")] pub(crate) struct Halo2VerifyingKey { @@ -15,23 +20,88 @@ pub(crate) struct Halo2VerifyingKey { } impl Halo2VerifyingKey { - pub(crate) fn len(&self) -> usize { - (self.constants.len() * 0x20) - + (self.fixed_comms.len() + self.permutation_comms.len()) * 0x40 + pub(crate) fn len(&self, scaled: bool) -> usize { + let len = + self.constants.len() + (self.fixed_comms.len() + self.permutation_comms.len()) * 2; + if scaled { + len * 0x20 + } else { + len + } + } +} + +#[derive(Template)] +#[template(path = "Halo2VerifyingArtifact.sol")] +pub(crate) struct Halo2VerifyingArtifact { + pub(crate) constants: Vec<(&'static str, U256)>, + pub(crate) fixed_comms: Vec<(U256, U256)>, + pub(crate) permutation_comms: Vec<(U256, U256)>, + pub(crate) const_expressions: Vec, + pub(crate) gate_computations: GateDataEncoded, + pub(crate) permutation_computations: PermutationDataEncoded, + pub(crate) lookup_computations: LookupsDataEncoded, + pub(crate) pcs_computations: PcsDataEncoded, +} + +impl Halo2VerifyingArtifact { + pub(crate) fn len(&self, scaled: bool) -> usize { + let len = self.constants.len() + + (self.fixed_comms.len() + self.permutation_comms.len()) * 2 + + self.const_expressions.len() + + self.gate_computations.len() + + self.permutation_computations.len() + + self.lookup_computations.len() + + self.pcs_computations.len(); + if scaled { + len * 0x20 + } else { + len + } + } +} + +// Enum for handling both VerifyingKey and VerifyingArtifact +pub(crate) enum VerifyingCache<'a> { + Key(&'a Halo2VerifyingKey), + Artifact(&'a Halo2VerifyingArtifact), +} + +impl VerifyingCache<'_> { + pub(crate) fn len(&self, scaled: bool) -> usize { + match self { + VerifyingCache::Key(key) => key.len(scaled), + VerifyingCache::Artifact(artifact) => artifact.len(scaled), + } + } + + pub(crate) fn constants(&self) -> &Vec<(&'static 
str, U256)> { + match self { + VerifyingCache::Key(key) => &key.constants, + VerifyingCache::Artifact(artifact) => &artifact.constants, + } + } + + pub(crate) fn fixed_comms(&self) -> &Vec<(U256, U256)> { + match self { + VerifyingCache::Key(key) => &key.fixed_comms, + VerifyingCache::Artifact(artifact) => &artifact.fixed_comms, + } } } +// Halo2Verifier struct and implementation #[derive(Template)] #[template(path = "Halo2Verifier.sol")] pub(crate) struct Halo2Verifier { pub(crate) scheme: BatchOpenScheme, - pub(crate) embedded_vk: Option, - pub(crate) vk_len: usize, + pub(crate) embedded_vk: Halo2VerifyingKey, pub(crate) proof_len: usize, pub(crate) vk_mptr: Ptr, pub(crate) challenge_mptr: Ptr, pub(crate) theta_mptr: Ptr, pub(crate) proof_cptr: Ptr, + pub(crate) proof_len_cptr: Ptr, pub(crate) quotient_comm_cptr: Ptr, pub(crate) num_neg_lagranges: usize, pub(crate) num_advices: Vec, @@ -42,8 +112,15 @@ pub(crate) struct Halo2Verifier { pub(crate) pcs_computations: Vec>, } -impl Halo2VerifyingKey { - pub(crate) fn render(&self, writer: &mut impl fmt::Write) -> Result<(), fmt::Error> { +#[derive(Template)] +#[template(path = "Halo2VerifierReusable.sol")] +pub(crate) struct Halo2VerifierReusable { + pub(crate) scheme: BatchOpenScheme, + pub(crate) vk_const_offsets: HashMap<&'static str, U256>, +} + +impl Halo2VerifyingArtifact { + pub(crate) fn render(&self, writer: &mut (impl fmt::Write + ?Sized)) -> Result<(), fmt::Error> { self.render_into(writer).map_err(|err| match err { Error::Fmt(err) => err, _ => unreachable!(), @@ -60,6 +137,15 @@ impl Halo2Verifier { } } +impl Halo2VerifierReusable { + pub(crate) fn render(&self, writer: &mut impl fmt::Write) -> Result<(), fmt::Error> { + self.render_into(writer).map_err(|err| match err { + Error::Fmt(err) => err, + _ => unreachable!(), + }) + } +} + mod filters { use std::fmt::LowerHex; diff --git a/src/codegen/util.rs b/src/codegen/util.rs index 9d3aea9..e327101 100644 --- a/src/codegen/util.rs +++ b/src/codegen/util.rs 
@@ -1,10 +1,7 @@ -use crate::codegen::{ - template::Halo2VerifyingKey, - BatchOpenScheme::{self, Bdfg21, Gwc19}, -}; +use crate::codegen::BatchOpenScheme::{self, Bdfg21, Gwc19}; use halo2_proofs::{ halo2curves::{bn256, ff::PrimeField, CurveAffine}, - plonk::{Any, Column, ConstraintSystem}, + plonk::{Any, Column, ConstraintSystem, Expression, Gate}, }; use itertools::{chain, izip, Itertools}; use ruint::{aliases::U256, UintTryFrom}; @@ -15,7 +12,30 @@ use std::{ ops::{Add, Sub}, }; +use super::template::VerifyingCache; + +#[derive(Debug)] +#[cfg(feature = "mv-lookup")] +pub(crate) struct ConstraintSystemMeta { + pub(crate) num_fixeds: usize, + pub(crate) permutation_columns: Vec>, + pub(crate) permutation_chunk_len: usize, + pub(crate) num_lookup_ms: usize, + pub(crate) num_permutation_zs: usize, + pub(crate) num_lookup_phis: usize, + pub(crate) num_quotients: usize, + pub(crate) advice_queries: Vec<(usize, i32)>, + pub(crate) fixed_queries: Vec<(usize, i32)>, + pub(crate) num_evals: usize, + pub(crate) num_user_advices: Vec, + pub(crate) num_user_challenges: Vec, + pub(crate) advice_indices: Vec, + pub(crate) challenge_indices: Vec, + pub(crate) rotation_last: i32, +} + #[derive(Debug)] +#[cfg(not(feature = "mv-lookup"))] pub(crate) struct ConstraintSystemMeta { pub(crate) num_fixeds: usize, pub(crate) permutation_columns: Vec>, @@ -34,6 +54,142 @@ pub(crate) struct ConstraintSystemMeta { pub(crate) rotation_last: i32, } +#[cfg(feature = "mv-lookup")] +impl ConstraintSystemMeta { + pub(crate) fn new(cs: &ConstraintSystem) -> Self { + let num_fixeds = cs.num_fixed_columns(); + let permutation_columns = cs.permutation().get_columns(); + let permutation_chunk_len = cs.degree() - 2; + let num_lookup_ms = cs.lookups().len(); + let num_permutation_zs = cs + .permutation() + .get_columns() + .chunks(cs.degree() - 2) + .count(); + let num_lookup_phis = cs.lookups().len(); + let num_quotients = cs.degree() - 1; + let advice_queries = cs + .advice_queries() + .iter() + 
.map(|(column, rotation)| (column.index(), rotation.0)) + .collect_vec(); + let fixed_queries = cs + .fixed_queries() + .iter() + .map(|(column, rotation)| (column.index(), rotation.0)) + .collect_vec(); + let num_evals = advice_queries.len() + + fixed_queries.len() + + 1 + + cs.permutation().get_columns().len() + + (3 * num_permutation_zs - 1) + + 3 * cs.lookups().len(); + let num_phase = *cs.advice_column_phase().iter().max().unwrap_or(&0) as usize + 1; + // Indices of advice and challenge are not same as their position in calldata/memory, + // because we support multiple phases, we need to remap them and find their actual indices. + let remapping = |phase: Vec| { + let nums = phase.iter().fold(vec![0; num_phase], |mut nums, phase| { + nums[*phase as usize] += 1; + nums + }); + let offsets = nums + .iter() + .take(num_phase - 1) + .fold(vec![0], |mut offsets, n| { + offsets.push(offsets.last().unwrap() + n); + offsets + }); + let index = phase + .iter() + .scan(offsets, |state, phase| { + let index = state[*phase as usize]; + state[*phase as usize] += 1; + Some(index) + }) + .collect::>(); + (nums, index) + }; + let (num_user_advices, advice_indices) = remapping(cs.advice_column_phase()); + let (num_user_challenges, challenge_indices) = remapping(cs.challenge_phase()); + let rotation_last = -(cs.blinding_factors() as i32 + 1); + Self { + num_fixeds, + permutation_columns, + permutation_chunk_len, + num_lookup_ms, + num_permutation_zs, + num_lookup_phis, + num_quotients, + advice_queries, + fixed_queries, + num_evals, + num_user_advices, + num_user_challenges, + advice_indices, + challenge_indices, + rotation_last, + } + } + + pub(crate) fn num_advices(&self) -> Vec { + chain![ + self.num_user_advices.iter().cloned(), + (self.num_lookup_ms != 0).then_some(self.num_lookup_ms), // lookup permuted + [ + self.num_permutation_zs + self.num_lookup_phis + 1, // permutation and lookup grand products, random + self.num_quotients, // quotients + ], + ] + .collect() + } + + 
pub(crate) fn num_challenges(&self) -> Vec { + let mut num_challenges = self.num_user_challenges.clone(); + // If there is no lookup used, merge also beta and gamma into the last user phase, to avoid + // squeezing challenge from nothing. + // Otherwise, merge theta into last user phase since they are originally adjacent. + if self.num_lookup_ms == 0 { + *num_challenges.last_mut().unwrap() += 3; // theta, beta, gamma + num_challenges.extend([ + 1, // y + 1, // x + ]); + } else { + *num_challenges.last_mut().unwrap() += 1; // theta + num_challenges.extend([ + 2, // beta, gamma + 1, // y + 1, // x + ]); + } + num_challenges + } + + pub(crate) fn num_permutations(&self) -> usize { + self.permutation_columns.len() + } + + pub(crate) fn num_lookups(&self) -> usize { + self.num_lookup_phis + } + + pub(crate) fn proof_len(&self, scheme: BatchOpenScheme) -> usize { + self.num_advices().iter().sum::() * 0x40 + + self.num_evals * 0x20 + + self.batch_open_proof_len(scheme) + } + + pub(crate) fn batch_open_proof_len(&self, scheme: BatchOpenScheme) -> usize { + match scheme { + Bdfg21 => 2 * 0x40, + Gwc19 => { + unimplemented!() + } + } + } +} + +#[cfg(not(feature = "mv-lookup"))] impl ConstraintSystemMeta { pub(crate) fn new(cs: &ConstraintSystem) -> Self { let num_fixeds = cs.num_fixed_columns(); @@ -168,6 +324,38 @@ impl ConstraintSystemMeta { } } +#[derive(Debug)] +#[cfg(feature = "mv-lookup")] +pub(crate) struct Data { + pub(crate) challenge_mptr: Ptr, + pub(crate) theta_mptr: Ptr, + + pub(crate) quotient_comm_cptr: Ptr, + pub(crate) w_cptr: Ptr, + + pub(crate) fixed_comms: Vec, + pub(crate) permutation_comms: HashMap, EcPoint>, + pub(crate) advice_comms: Vec, + pub(crate) lookup_m_comms: Vec, + pub(crate) permutation_z_comms: Vec, + pub(crate) lookup_phi_comms: Vec, + pub(crate) random_comm: EcPoint, + + pub(crate) challenges: Vec, + + pub(crate) instance_eval: Word, + pub(crate) advice_evals: HashMap<(usize, i32), Word>, + pub(crate) fixed_evals: HashMap<(usize, i32), 
Word>, + pub(crate) random_eval: Word, + pub(crate) permutation_evals: HashMap, Word>, + pub(crate) permutation_z_evals: Vec<(Word, Word, Word)>, + pub(crate) lookup_evals: Vec<(Word, Word, Word)>, + + pub(crate) computed_quotient_comm: EcPoint, + pub(crate) computed_quotient_eval: Word, +} + +#[cfg(not(feature = "mv-lookup"))] #[derive(Debug)] pub(crate) struct Data { pub(crate) challenge_mptr: Ptr, @@ -198,16 +386,138 @@ pub(crate) struct Data { pub(crate) computed_quotient_eval: Word, } +#[cfg(feature = "mv-lookup")] +impl Data { + pub(crate) fn new( + meta: &ConstraintSystemMeta, + vk: &VerifyingCache, + vk_mptr: Ptr, + proof_cptr: Ptr, + ) -> Self { + let fixed_comm_mptr: Ptr = vk_mptr + vk.constants().len(); + let permutation_comm_mptr = fixed_comm_mptr + 2 * vk.fixed_comms().len(); + let challenge_mptr = vk_mptr + vk.len(false); + let theta_mptr = challenge_mptr + meta.challenge_indices.len(); + + let advice_comm_start = proof_cptr; + let lookup_m_comm_start = advice_comm_start + 2 * meta.advice_indices.len(); + let permutation_z_comm_start = lookup_m_comm_start + 2 * meta.num_lookup_ms; + let lookup_phi_comm_start = permutation_z_comm_start + 2 * meta.num_permutation_zs; + let random_comm_start = lookup_phi_comm_start + 2 * meta.num_lookup_phis; + let quotient_comm_start = random_comm_start + 2; + + let eval_cptr = quotient_comm_start + 2 * meta.num_quotients; + let advice_eval_cptr = eval_cptr; + let fixed_eval_cptr = advice_eval_cptr + meta.advice_queries.len(); + let random_eval_cptr = fixed_eval_cptr + meta.fixed_queries.len(); + let permutation_eval_cptr = random_eval_cptr + 1; + let permutation_z_eval_cptr = permutation_eval_cptr + meta.num_permutations(); + let lookup_eval_cptr = permutation_z_eval_cptr + 3 * meta.num_permutation_zs - 1; + let w_cptr = lookup_eval_cptr + 3 * meta.num_lookups(); + let l_0 = "INSTANCE_EVAL_MPTR"; + let quotient_eval = "QUOTIENT_EVAL_MPTR"; + let quotient_x = "QUOTIENT_X_MPTR"; + let quotient_y = "QUOTIENT_Y_MPTR"; + + 
let fixed_comms = EcPoint::range(fixed_comm_mptr) + .take(meta.num_fixeds) + .collect(); + let permutation_comms = izip!( + meta.permutation_columns.iter().cloned(), + EcPoint::range(permutation_comm_mptr) + ) + .collect(); + let advice_comms = meta + .advice_indices + .iter() + .map(|idx| advice_comm_start + 2 * idx) + .map_into() + .collect(); + let lookup_m_comms = EcPoint::range(lookup_m_comm_start) + .take(meta.num_lookup_ms) + .collect(); + let permutation_z_comms = EcPoint::range(permutation_z_comm_start) + .take(meta.num_permutation_zs) + .collect(); + let lookup_phi_comms = EcPoint::range(lookup_phi_comm_start) + .take(meta.num_lookup_phis) + .collect(); + let random_comm = random_comm_start.into(); + let computed_quotient_comm = EcPoint::new(Ptr::memory(quotient_x), Ptr::memory(quotient_y)); + + let challenges = meta + .challenge_indices + .iter() + .map(|idx| challenge_mptr + *idx) + .map_into() + .collect_vec(); + let instance_eval = Ptr::memory(l_0).into(); + let advice_evals = izip!( + meta.advice_queries.iter().cloned(), + Word::range(advice_eval_cptr) + ) + .collect(); + let fixed_evals = izip!( + meta.fixed_queries.iter().cloned(), + Word::range(fixed_eval_cptr) + ) + .collect(); + let random_eval = random_eval_cptr.into(); + let permutation_evals = izip!( + meta.permutation_columns.iter().cloned(), + Word::range(permutation_eval_cptr) + ) + .collect(); + let permutation_z_evals = Word::range(permutation_z_eval_cptr) + .take(3 * meta.num_permutation_zs) + .tuples() + .collect_vec(); + let lookup_evals = Word::range(lookup_eval_cptr) + .take(3 * meta.num_lookup_phis) + .tuples() + .collect_vec(); + let computed_quotient_eval = Ptr::memory(quotient_eval).into(); + + Self { + challenge_mptr, + theta_mptr, + quotient_comm_cptr: quotient_comm_start, + w_cptr, + + fixed_comms, + permutation_comms, + advice_comms, + lookup_m_comms, + permutation_z_comms, + lookup_phi_comms, + random_comm, + computed_quotient_comm, + + challenges, + + instance_eval, + 
advice_evals, + fixed_evals, + permutation_evals, + permutation_z_evals, + lookup_evals, + random_eval, + computed_quotient_eval, + } + } +} + +#[cfg(not(feature = "mv-lookup"))] impl Data { pub(crate) fn new( meta: &ConstraintSystemMeta, - vk: &Halo2VerifyingKey, + vk: &VerifyingCache, vk_mptr: Ptr, proof_cptr: Ptr, ) -> Self { - let fixed_comm_mptr = vk_mptr + vk.constants.len(); - let permutation_comm_mptr = fixed_comm_mptr + 2 * vk.fixed_comms.len(); - let challenge_mptr = permutation_comm_mptr + 2 * vk.permutation_comms.len(); + let fixed_comm_mptr: Ptr = vk_mptr + vk.constants().len(); + let permutation_comm_mptr = fixed_comm_mptr + 2 * vk.fixed_comms().len(); + let challenge_mptr = vk_mptr + vk.len(false); let theta_mptr = challenge_mptr + meta.challenge_indices.len(); let advice_comm_start = proof_cptr; @@ -279,7 +589,7 @@ impl Data { Word::range(permutation_eval_cptr) ) .collect(); - let permutation_z_evals = Word::range(permutation_z_eval_cptr) + let permutation_z_evals: Vec<(Word, Word, Word)> = Word::range(permutation_z_eval_cptr) .take(3 * meta.num_permutation_zs) .tuples() .collect_vec(); @@ -601,9 +911,117 @@ pub(crate) fn fr_to_u256(fe: impl Borrow) -> U256 { pub(crate) fn fe_to_u256(fe: impl Borrow) -> U256 where - F: PrimeField, + F: PrimeField>, { - U256::from_le_bytes(fe.borrow().to_repr()) + #[allow(clippy::clone_on_copy)] + U256::from_le_bytes(fe.borrow().to_repr().inner().clone()) +} + +#[cfg(feature = "mv-lookup")] +pub(crate) fn expression_consts(cs: &ConstraintSystem) -> Vec +where + F: PrimeField>, +{ + fn collect_constants(expression: &Expression, constants: &mut Vec) { + match expression { + Expression::Constant(constant) => { + constants.push(*constant); + } + Expression::Negated(inner) => { + collect_constants(inner, constants); + } + Expression::Sum(lhs, rhs) => { + collect_constants(lhs, constants); + collect_constants(rhs, constants); + } + Expression::Product(lhs, rhs) => { + collect_constants(lhs, constants); + 
collect_constants(rhs, constants); + } + Expression::Scaled(inner, scalar) => { + collect_constants(inner, constants); + // we consider scalar values constants + constants.push(*scalar); + } + _ => {} + } + } + + let mut inputs_consts: Vec = Vec::new(); + cs.gates() + .iter() + .flat_map(Gate::polynomials) + .for_each(|expression| collect_constants(expression, &mut inputs_consts)); + let evaluate_lookup_consts = |expressions: &Vec<_>, constants: &mut Vec| { + expressions.iter().for_each(|expression| { + collect_constants(expression, constants); + }); + }; + cs.lookups().iter().for_each(|lookup| { + let expressions: &Vec>> = lookup.input_expressions(); + expressions.iter().for_each(|arg| { + evaluate_lookup_consts(arg, &mut inputs_consts); + }); + }); + // Remove duplicates while preserving order + let mut unique_inputs_consts = Vec::new(); + for const_value in inputs_consts.clone() { + if !unique_inputs_consts.contains(&const_value) { + unique_inputs_consts.push(const_value); + } + } + unique_inputs_consts +} + +#[cfg(not(feature = "mv-lookup"))] +pub(crate) fn expression_consts(cs: &ConstraintSystem) -> Vec +where + F: PrimeField>, +{ + fn collect_constants(expression: &Expression, constants: &mut Vec) { + match expression { + Expression::Constant(constant) => { + constants.push(*constant); + } + Expression::Negated(inner) => { + collect_constants(inner, constants); + } + Expression::Sum(lhs, rhs) => { + collect_constants(lhs, constants); + collect_constants(rhs, constants); + } + Expression::Product(lhs, rhs) => { + collect_constants(lhs, constants); + collect_constants(rhs, constants); + } + Expression::Scaled(inner, scalar) => { + collect_constants(inner, constants); + // we consider scalar values constants + constants.push(*scalar); + } + _ => {} + } + } + + let mut inputs_consts: Vec = Vec::new(); + cs.gates() + .iter() + .flat_map(Gate::polynomials) + .for_each(|expression| collect_constants(expression, &mut inputs_consts)); + 
cs.lookups().iter().for_each(|lookup| { + let expressions: &Vec> = lookup.input_expressions(); + expressions.iter().for_each(|arg| { + collect_constants(arg, &mut inputs_consts); + }); + }); + // Remove duplicates while preserving order + let mut unique_inputs_consts = Vec::new(); + for const_value in inputs_consts.clone() { + if !unique_inputs_consts.contains(&const_value) { + unique_inputs_consts.push(const_value); + } + } + unique_inputs_consts } pub(crate) fn to_u256_be_bytes(value: T) -> [u8; 32] diff --git a/src/evm.rs b/src/evm.rs index 1156ee3..5d57097 100644 --- a/src/evm.rs +++ b/src/evm.rs @@ -50,8 +50,11 @@ pub fn encode_calldata( pub(crate) mod test { pub use revm; use revm::{ - primitives::{Address, CreateScheme, ExecutionResult, Output, TransactTo, TxEnv}, - InMemoryDB, EVM, + primitives::{ + Address, Bytes, CfgEnv, CfgEnvWithHandlerCfg, ExecutionResult, HaltReason, Output, + TransactTo, TxEnv, + }, + Evm as EVM, InMemoryDB, }; use std::{ fmt::{self, Debug, Formatter}, @@ -71,6 +74,8 @@ pub(crate) mod test { .stderr(Stdio::piped()) .arg("--bin") .arg("--optimize") + .arg("--model-checker-targets") + .arg("underflow,overflow") .arg("-") .spawn() { @@ -79,7 +84,7 @@ pub(crate) mod test { panic!("Command 'solc' not found"); } Err(err) => { - panic!("Failed to spwan process with command 'solc':\n{err}"); + panic!("Failed to spawn process with command 'solc':\n{err}"); } }; process @@ -106,38 +111,30 @@ pub(crate) mod test { } /// Evm runner. 
- pub struct Evm { - evm: EVM, + pub struct Evm<'a> { + evm: EVM<'a, (), InMemoryDB>, } - impl Debug for Evm { + impl Debug for Evm<'_> { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - let mut debug_struct = f.debug_struct("Evm"); - debug_struct - .field("env", &self.evm.env) - .field("db", &self.evm.db.as_ref().unwrap()) - .finish() + self.evm.fmt(f) } } - impl Default for Evm { + impl Default for Evm<'_> { fn default() -> Self { - Self { - evm: EVM { - env: Default::default(), - db: Some(Default::default()), - }, - } + let evm = EVM::builder().with_db(InMemoryDB::default()).build(); + Self { evm } } } - impl Evm { + impl Evm<'_> { /// Return code_size of given address. /// /// # Panics /// Panics if given address doesn't have bytecode. pub fn code_size(&mut self, address: Address) -> usize { - self.evm.db.as_ref().unwrap().accounts[&address] + self.evm.db().accounts[&address] .info .code .as_ref() @@ -145,20 +142,34 @@ pub(crate) mod test { .len() } + /// Return a version of the evm that allows for unlimited deployments sizes. + pub fn unlimited() -> Self { + let mut cfg_env: CfgEnv = Default::default(); + cfg_env.limit_contract_code_size = Some(usize::MAX); + let evm = EVM::builder() + .with_db(InMemoryDB::default()) + .with_cfg_env_with_handler_cfg(CfgEnvWithHandlerCfg { + cfg_env, + handler_cfg: Default::default(), + }) + .build(); + Self { evm } + } + /// Apply create transaction with given `bytecode` as creation bytecode. /// Return created `address`. /// /// # Panics /// Panics if execution reverts or halts unexpectedly. 
- pub fn create(&mut self, bytecode: Vec) -> Address { - let (_, output) = self.transact_success_or_panic(TxEnv { + pub fn create(&mut self, bytecode: Vec) -> (Address, u64) { + let (gas_used, output) = self.transact_success_or_panic(TxEnv { gas_limit: u64::MAX, - transact_to: TransactTo::Create(CreateScheme::Create), + transact_to: TransactTo::Create, data: bytecode.into(), ..Default::default() }); match output { - Output::Create(_, Some(address)) => address, + Output::Create(_, Some(address)) => (address, gas_used), _ => unreachable!(), } } @@ -181,10 +192,24 @@ pub(crate) mod test { } } + /// Apply call transaction to given `address` with `calldata` with the expectation of failure. + /// Returns `gas_used` and `return_data`. + /// + /// # Panics + /// Panics if execution succeeds. + pub fn call_fail(&mut self, address: Address, calldata: Vec) { + let (_, _) = self.transact_failure_or_panic(TxEnv { + gas_limit: u64::MAX, + transact_to: TransactTo::Call(address), + data: calldata.into(), + ..Default::default() + }); + } + fn transact_success_or_panic(&mut self, tx: TxEnv) -> (u64, Output) { - self.evm.env.tx = tx; + self.evm.context.evm.env.tx = tx; let result = self.evm.transact_commit().unwrap(); - self.evm.env.tx = Default::default(); + self.evm.context.evm.env.tx = Default::default(); match result { ExecutionResult::Success { gas_used, @@ -196,7 +221,7 @@ pub(crate) mod test { println!("--- logs from {} ---", logs[0].address); for (log_idx, log) in logs.iter().enumerate() { println!("log#{log_idx}"); - for (topic_idx, topic) in log.topics.iter().enumerate() { + for (topic_idx, topic) in log.topics().iter().enumerate() { println!(" topic{topic_idx}: {topic:?}"); } } @@ -212,5 +237,41 @@ pub(crate) mod test { ), } } + + fn transact_failure_or_panic(&mut self, tx: TxEnv) -> (u64, Result) { + self.evm.context.evm.env.tx = tx; + let result = self.evm.transact_commit().unwrap(); + self.evm.context.evm.env.tx = Default::default(); + match result { + 
ExecutionResult::Success { + gas_used, + output, + logs, + .. + } => { + if !logs.is_empty() { + println!("--- logs from {} ---", logs[0].address); + for (log_idx, log) in logs.iter().enumerate() { + println!("log#{log_idx}"); + for (topic_idx, topic) in log.topics().iter().enumerate() { + println!(" topic{topic_idx}: {topic:?}"); + } + } + println!("--- end ---"); + } + panic!("Transaction succeeds unexpectedly with gas_used {gas_used} and output {output:?}") + } + ExecutionResult::Revert { gas_used, output } => { + println!("Transaction reverts with gas_used {gas_used} and output {output:#x}"); + (gas_used, Ok(output)) + } + ExecutionResult::Halt { reason, gas_used } => { + println!( + "Transaction halts unexpectedly with gas_used {gas_used} and reason {reason:?}" + ); + (gas_used, Err(reason)) + } + } + } } } diff --git a/src/test.rs b/src/test.rs index fb31148..9648b02 100644 --- a/src/test.rs +++ b/src/test.rs @@ -5,7 +5,9 @@ use crate::{ FN_SIG_VERIFY_PROOF, FN_SIG_VERIFY_PROOF_WITH_VK_ADDRESS, }; use halo2_proofs::halo2curves::bn256::{Bn256, Fr}; +use rand::Rng; use rand::{rngs::StdRng, RngCore, SeedableRng}; +use revm::primitives::Address; use sha3::Digest; use std::{fs::File, io::Write}; @@ -56,16 +58,20 @@ fn run_render>() { let verifier_creation_code = compile_solidity(verifier_solidity); let verifier_creation_code_size = verifier_creation_code.len(); - let mut evm = Evm::default(); - let verifier_address = evm.create(verifier_creation_code); + let mut evm = Evm::unlimited(); + let (verifier_address, gas_cost) = evm.create(verifier_creation_code); let verifier_runtime_code_size = evm.code_size(verifier_address); println!("Verifier creation code size: {verifier_creation_code_size}"); println!("Verifier runtime code size: {verifier_runtime_code_size}"); + println!("Gas deployment cost verifier: {gas_cost}"); let (gas_cost, output) = evm.call(verifier_address, encode_calldata(None, &proof, &instances)); assert_eq!(output, [vec![0; 31], vec![1]].concat()); - 
println!("Gas cost: {gas_cost}"); + println!("Gas cost conjoined: {gas_cost}"); + + // Fuzzing tests + bit_flip_fuzzing_test::(verifier_address, proof, instances, &mut evm); } fn run_render_separately>() { @@ -80,7 +86,7 @@ fn run_render_separately>() { let verifier_creation_code_size = verifier_creation_code.len(); let mut evm = Evm::default(); - let verifier_address = evm.create(verifier_creation_code); + let (verifier_address, _gas_cost) = evm.create(verifier_creation_code); let verifier_runtime_code_size = evm.code_size(verifier_address); println!("Verifier creation code size: {verifier_creation_code_size}"); @@ -96,16 +102,48 @@ fn run_render_separately>() { let (verifier_solidity, vk_solidity) = generator.render_separately().unwrap(); assert_eq!(deployed_verifier_solidity, verifier_solidity); + // // print verifier_solidity + // println!("Verifier solidity: {verifier_solidity}"); + // print vk_solidity + // println!("VK solidity: {vk_solidity}"); + // VK creation code size let vk_creation_code = compile_solidity(&vk_solidity); - let vk_address = evm.create(vk_creation_code); + let vk_creation_code_size = vk_creation_code.len(); + println!("VK creation code size: {vk_creation_code_size}"); + let (vk_address, gas_cost) = evm.create(vk_creation_code); + let vk_runtime_code_size = evm.code_size(vk_address); + println!("VK runtime code size: {vk_runtime_code_size}"); + println!("Gas deployment cost VK: {gas_cost}"); let (gas_cost, output) = evm.call( verifier_address, encode_calldata(Some(vk_address.into()), &proof, &instances), ); assert_eq!(output, [vec![0; 31], vec![1]].concat()); - println!("Gas cost: {gas_cost}"); + println!("Gas cost separate: {gas_cost}"); + bit_flip_fuzzing_test::(verifier_address, proof, instances, &mut evm); + } +} + +fn bit_flip_fuzzing_test>( + verifier_address: Address, + proof: Vec, + instances: Vec, + evm: &mut Evm, +) { + let mut rng = rand::thread_rng(); + for i in 0..10 { + let mut modified_proof = proof.clone(); + let 
random_byte = rng.gen_range(0..modified_proof.len()); + let random_bit = rng.gen_range(0..8); + modified_proof[random_byte] ^= 1 << random_bit; + + evm.call_fail( + verifier_address, + encode_calldata(None, &modified_proof, &instances), + ); + println!("Modified proof {} failed verification as expected", i); } } @@ -165,7 +203,7 @@ mod halo2 { pub fn create_testdata_bdfg21>( k: u32, acc_encoding: Option, - mut rng: impl RngCore + Clone, + mut rng: impl RngCore + Clone + Send + Sync, ) -> ( ParamsKZG, VerifyingKey, @@ -201,6 +239,7 @@ mod halo2 { SingleStrategy::new(¶ms), &[&[&instances]], &mut transcript, + 1 << k, ) }; assert!(result.is_ok()); @@ -211,17 +250,19 @@ mod halo2 { fn random_accumulator_limbs( acc_encoding: AccumulatorEncoding, mut rng: impl RngCore, - ) -> Vec + ) -> Vec where M: MultiMillerLoop, - ::Base: PrimeField, - M::Scalar: PrimeField, + M::G1Affine: CurveAffine, + ::Base: + PrimeField>, + M::Fr: PrimeField>, { - let s = M::Scalar::random(&mut rng); + let s = M::Fr::random(&mut rng); let g1 = M::G1Affine::generator(); let g2 = M::G2Affine::generator(); let neg_s_g2 = (g2 * -s).to_affine(); - let lhs_scalar = M::Scalar::random(&mut rng); + let lhs_scalar = M::Fr::random(&mut rng); let rhs_scalar = lhs_scalar * s.invert().unwrap(); let [lhs, rhs] = [lhs_scalar, rhs_scalar].map(|scalar| (g1 * scalar).to_affine()); @@ -240,8 +281,8 @@ mod halo2 { fn ec_point_to_limbs(ec_point: impl Borrow, num_limb_bits: usize) -> Vec where C: CurveAffine, - C::Base: PrimeField, - C::Scalar: PrimeField, + C::Base: PrimeField>, + C::Scalar: PrimeField>, { let coords = ec_point.borrow().coordinates().unwrap(); [*coords.x(), *coords.y()] @@ -252,10 +293,11 @@ mod halo2 { fn fe_to_limbs(fe: impl Borrow, num_limb_bits: usize) -> Vec where - F1: PrimeField, - F2: PrimeField, + F1: PrimeField>, + F2: PrimeField>, { - let big = U256::from_le_bytes(fe.borrow().to_repr()); + #[allow(clippy::clone_on_copy)] + let big = 
U256::from_le_bytes(fe.borrow().to_repr().inner().clone()); let mask = &((U256::from(1) << num_limb_bits) - U256::from(1)); (0usize..) .step_by(num_limb_bits) @@ -266,10 +308,10 @@ mod halo2 { fn fe_from_u256(u256: impl Borrow) -> F where - F: PrimeField, + F: PrimeField>, { let bytes = u256.borrow().to_le_bytes::<32>(); - F::from_repr_vartime(bytes).unwrap() + F::from_repr_vartime(bytes.into()).unwrap() } pub mod huge { @@ -295,13 +337,17 @@ mod halo2 { use std::{array, fmt::Debug, iter, mem}; #[derive(Clone, Debug, Default)] - pub struct HugeCircuit(Vec); + pub struct HugeCircuit(Vec); - impl TestCircuit for HugeCircuit + impl TestCircuit for HugeCircuit where M: MultiMillerLoop, - ::Base: PrimeField, - M::Scalar: PrimeField, + M::G1Affine: CurveAffine, + ::ScalarExt: + PrimeField>, + ::Base: + PrimeField>, + M::Fr: PrimeField>, { fn min_k() -> u32 { 6 @@ -311,21 +357,21 @@ mod halo2 { let instances = if let Some(acc_encoding) = acc_encoding { random_accumulator_limbs::(acc_encoding, rng) } else { - iter::repeat_with(|| M::Scalar::random(&mut rng)) + iter::repeat_with(|| M::Fr::random(&mut rng)) .take(10) .collect() }; Self(instances) } - fn instances(&self) -> Vec { + fn instances(&self) -> Vec { self.0.clone() } } - impl Circuit for HugeCircuit + impl Circuit for HugeCircuit where - M::Scalar: PrimeField, + M::Fr: PrimeField, { type Config = ( [Selector; 10], @@ -335,14 +381,15 @@ mod halo2 { Column, ); type FloorPlanner = SimpleFloorPlanner; - #[cfg(feature = "halo2_circuit_params")] - type Params = (); fn without_witnesses(&self) -> Self { unimplemented!() } - fn configure(meta: &mut ConstraintSystem) -> Self::Config { + fn configure(meta: &mut ConstraintSystem) -> Self::Config { + #[cfg(feature = "mv-lookup")] + meta.set_minimum_degree(9); + let selectors = [(); 10].map(|_| meta.selector()); let complex_selectors = [(); 10].map(|_| meta.complex_selector()); let fixeds = [(); 10].map(|_| meta.fixed_column()); @@ -369,7 +416,7 @@ mod halo2 { 
meta.create_gate("", |meta| { let selectors = selectors.map(|selector| meta.query_selector(selector)); - let advices: [Expression; 10] = array::from_fn(|idx| { + let advices: [Expression; 10] = array::from_fn(|idx| { let rotation = Rotation((idx as i32 - advices.len() as i32) / 2); meta.query_advice(advices[idx], rotation) }); @@ -405,6 +452,23 @@ mod halo2 { }); } + #[cfg(feature = "mv-lookup")] + for _ in 0..10 { + meta.lookup_any("", |meta| { + let (q1, q2, q3) = complex_selectors.iter().tuple_windows().next().unwrap(); + let (f1, f2, f3) = fixeds.iter().tuple_windows().next().unwrap(); + let (a1, a2, a3) = advices.iter().tuple_windows().next().unwrap(); + izip!([q1, q2, q3], [f1, f2, f3], [a1, a2, a3]) + .map(|(q, f, a)| { + let q = meta.query_selector(*q); + let f = meta.query_fixed(*f, Rotation::cur()); + let a = meta.query_advice(*a, Rotation::cur()); + (q * a, f) + }) + .collect_vec() + }); + } + fixeds.map(|column| meta.enable_equality(column)); advices.map(|column| meta.enable_equality(column)); meta.enable_equality(instance); @@ -415,7 +479,7 @@ mod halo2 { fn synthesize( &self, (selectors, complex_selectors, fixeds, advices, instance): Self::Config, - mut layouter: impl Layouter, + mut layouter: impl Layouter, ) -> Result<(), plonk::Error> { let assigneds = layouter.assign_region( || "", @@ -430,7 +494,7 @@ mod halo2 { q.enable(&mut region, next_offset())?; } for (idx, column) in izip!(1.., fixeds) { - let value = Value::known(M::Scalar::from(idx)); + let value = Value::known(M::Fr::from(idx)); region.assign_fixed(|| "", column, next_offset(), || value)?; } izip!(advices, &self.0) @@ -507,14 +571,18 @@ mod halo2 { #[derive(Clone, Default)] pub struct MainGateWithRange { - instances: Vec, + instances: Vec, } - impl TestCircuit for MainGateWithRange + impl TestCircuit for MainGateWithRange where M: MultiMillerLoop, - ::Base: PrimeField, - M::Scalar: PrimeField, + M::G1Affine: CurveAffine, + ::ScalarExt: + PrimeField>, + ::Base: + PrimeField>, + M::Fr: 
PrimeField>, { fn min_k() -> u32 { 9 @@ -524,39 +592,37 @@ mod halo2 { let instances = if let Some(acc_encoding) = acc_encoding { random_accumulator_limbs::(acc_encoding, rng) } else { - iter::repeat_with(|| M::Scalar::random(&mut rng)) + iter::repeat_with(|| M::Fr::random(&mut rng)) .take(10) .collect() }; Self { instances } } - fn instances(&self) -> Vec { + fn instances(&self) -> Vec { self.instances.clone() } } - impl Circuit for MainGateWithRange + impl Circuit for MainGateWithRange where - M::Scalar: PrimeField, + M::Fr: PrimeField, { type Config = MainGateWithRangeConfig; type FloorPlanner = SimpleFloorPlanner; - #[cfg(feature = "halo2_circuit_params")] - type Params = (); fn without_witnesses(&self) -> Self { unimplemented!() } - fn configure(meta: &mut ConstraintSystem) -> Self::Config { + fn configure(meta: &mut ConstraintSystem) -> Self::Config { MainGateWithRangeConfig::configure(meta, vec![8], vec![4, 7]) } fn synthesize( &self, config: Self::Config, - mut layouter: impl Layouter, + mut layouter: impl Layouter, ) -> Result<(), Error> { let main_gate = config.main_gate(); let range_chip = config.range_chip(); @@ -576,25 +642,20 @@ mod halo2 { // Dummy gates to make all fixed column with values range_chip.decompose( &mut ctx, - Value::known(M::Scalar::from(u64::MAX)), + Value::known(M::Fr::from(u64::MAX)), 8, 64, )?; range_chip.decompose( &mut ctx, - Value::known(M::Scalar::from(u32::MAX as u64)), + Value::known(M::Fr::from(u32::MAX as u64)), 8, 39, )?; let a = &advices[0]; - let b = main_gate.sub_sub_with_constant( - &mut ctx, - a, - a, - a, - M::Scalar::from(2), - )?; - let cond = main_gate.assign_bit(&mut ctx, Value::known(M::Scalar::ONE))?; + let b = + main_gate.sub_sub_with_constant(&mut ctx, a, a, a, M::Fr::from(2))?; + let cond = main_gate.assign_bit(&mut ctx, Value::known(M::Fr::ONE))?; main_gate.select(&mut ctx, a, &b, &cond)?; Ok(advices) diff --git a/src/transcript.rs b/src/transcript.rs index f125329..346404c 100644 --- a/src/transcript.rs 
+++ b/src/transcript.rs @@ -37,12 +37,12 @@ impl Keccak256Transcript { pub struct ChallengeEvm(C::Scalar) where C: CurveAffine, - C::Scalar: PrimeField; + C::Scalar: PrimeField>; impl EncodedChallenge for ChallengeEvm where C: CurveAffine, - C::Scalar: PrimeField, + C::Scalar: PrimeField>, { type Input = [u8; 0x20]; @@ -58,7 +58,7 @@ where impl Transcript> for Keccak256Transcript where C: CurveAffine, - C::Scalar: PrimeField, + C::Scalar: PrimeField>, { fn squeeze_challenge(&mut self) -> ChallengeEvm { let buf_len = self.buf.len(); @@ -95,7 +95,7 @@ where impl TranscriptRead> for Keccak256Transcript where C: CurveAffine, - C::Scalar: PrimeField, + C::Scalar: PrimeField>, { fn read_point(&mut self) -> io::Result { let mut reprs = [::Repr::default(); 2]; @@ -121,7 +121,7 @@ where let mut data = [0; 0x20]; self.stream.read_exact(data.as_mut())?; data.reverse(); - let scalar = C::Scalar::from_repr_vartime(data) + let scalar = C::Scalar::from_repr_vartime(data.into()) .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Invalid scalar".to_string()))?; Transcript::>::common_scalar(self, scalar)?; Ok(scalar) @@ -131,7 +131,7 @@ where impl TranscriptReadBuffer> for Keccak256Transcript where C: CurveAffine, - C::Scalar: PrimeField, + C::Scalar: PrimeField>, { fn init(reader: R) -> Self { Keccak256Transcript::new(reader) @@ -141,7 +141,7 @@ where impl TranscriptWrite> for Keccak256Transcript where C: CurveAffine, - C::Scalar: PrimeField, + C::Scalar: PrimeField>, { fn write_point(&mut self, ec_point: C) -> io::Result<()> { self.common_point(ec_point)?; @@ -165,7 +165,7 @@ where impl TranscriptWriterBuffer> for Keccak256Transcript where C: CurveAffine, - C::Scalar: PrimeField, + C::Scalar: PrimeField>, { fn init(writer: W) -> Self { Keccak256Transcript::new(writer) @@ -178,15 +178,17 @@ where fn u256_to_fe(value: U256) -> F where - F: PrimeField, + F: PrimeField>, { let value = value % modulus::(); - F::from_repr(value.to_le_bytes::<0x20>()).unwrap() + 
F::from_repr(value.to_le_bytes::<0x20>().into()).unwrap() } fn modulus() -> U256 where - F: PrimeField, + F: PrimeField>, { - U256::from_le_bytes((-F::ONE).to_repr()) + U256::from(1) + #[allow(clippy::clone_on_copy)] + let upper = U256::from_le_bytes((-F::ONE).to_repr().inner().clone()); + upper + U256::from(1) } diff --git a/templates/Halo2Verifier.sol b/templates/Halo2Verifier.sol index 7abfb0b..0cc10da 100644 --- a/templates/Halo2Verifier.sol +++ b/templates/Halo2Verifier.sol @@ -3,7 +3,9 @@ pragma solidity ^0.8.0; contract Halo2Verifier { - uint256 internal constant PROOF_LEN_CPTR = {{ proof_cptr - 1 }}; + uint256 internal constant DELTA = 4131629893567559867359510883348571134090853742863529169391034518566172092834; + uint256 internal constant R = 21888242871839275222246405745257275088548364400416034343698204186575808495617; + uint256 internal constant PROOF_LEN_CPTR = {{ proof_len_cptr }}; uint256 internal constant PROOF_CPTR = {{ proof_cptr }}; uint256 internal constant NUM_INSTANCE_CPTR = {{ proof_cptr + (proof_len / 32) }}; uint256 internal constant INSTANCE_CPTR = {{ proof_cptr + (proof_len / 32) + 1 }}; @@ -70,11 +72,6 @@ contract Halo2Verifier { uint256 internal constant PAIRING_RHS_Y_MPTR = {{ theta_mptr + 25 }}; function verifyProof( - {%- match self.embedded_vk %} - {%- when None %} - address vk, - {%- else %} - {%- endmatch %} bytes calldata proof, uint256[] calldata instances ) public returns (bool) { @@ -122,7 +119,7 @@ contract Halo2Verifier { // Batch invert values in memory[mptr_start..mptr_end] in place. // Return updated (success). 
- function batch_invert(success, mptr_start, mptr_end, r) -> ret { + function batch_invert(success, mptr_start, mptr_end) -> ret { let gp_mptr := mptr_end let gp := mload(mptr_start) let mptr := add(mptr_start, 0x20) @@ -131,19 +128,19 @@ contract Halo2Verifier { lt(mptr, sub(mptr_end, 0x20)) {} { - gp := mulmod(gp, mload(mptr), r) + gp := mulmod(gp, mload(mptr), R) mstore(gp_mptr, gp) mptr := add(mptr, 0x20) gp_mptr := add(gp_mptr, 0x20) } - gp := mulmod(gp, mload(mptr), r) + gp := mulmod(gp, mload(mptr), R) mstore(gp_mptr, 0x20) mstore(add(gp_mptr, 0x20), 0x20) mstore(add(gp_mptr, 0x40), 0x20) mstore(add(gp_mptr, 0x60), gp) - mstore(add(gp_mptr, 0x80), sub(r, 2)) - mstore(add(gp_mptr, 0xa0), r) + mstore(add(gp_mptr, 0x80), sub(R, 2)) + mstore(add(gp_mptr, 0xa0), R) ret := and(success, staticcall(gas(), 0x05, gp_mptr, 0xc0, gp_mptr, 0x20)) let all_inv := mload(gp_mptr) @@ -155,14 +152,14 @@ contract Halo2Verifier { lt(second_mptr, mptr) {} { - let inv := mulmod(all_inv, mload(gp_mptr), r) - all_inv := mulmod(all_inv, mload(mptr), r) + let inv := mulmod(all_inv, mload(gp_mptr), R) + all_inv := mulmod(all_inv, mload(mptr), R) mstore(mptr, inv) mptr := sub(mptr, 0x20) gp_mptr := sub(gp_mptr, 0x20) } - let inv_first := mulmod(all_inv, mload(second_mptr), r) - let inv_second := mulmod(all_inv, mload(first_mptr), r) + let inv_first := mulmod(all_inv, mload(second_mptr), R) + let inv_second := mulmod(all_inv, mload(first_mptr), R) mstore(first_mptr, inv_first) mstore(second_mptr, inv_second) } @@ -217,25 +214,19 @@ contract Halo2Verifier { // Modulus let q := 21888242871839275222246405745257275088696311157297823662689037894645226208583 // BN254 base field - let r := 21888242871839275222246405745257275088548364400416034343698204186575808495617 // BN254 scalar field + let r := 21888242871839275222246405745257275088548364400416034343698204186575808495617 // BN254 scalar field // Initialize success as true let success := true { - {%- match self.embedded_vk %} - {%- when Some 
with (embedded_vk) %} // Load vk_digest and num_instances of vk into memory {%- for (name, chunk) in embedded_vk.constants[..2] %} mstore({{ vk_mptr + loop.index0 }}, {{ chunk|hex_padded(64) }}) // {{ name }} {%- endfor %} - {%- when None %} - // Copy vk_digest and num_instances of vk into memory - extcodecopy(vk, VK_MPTR, 0x00, 0x40) - {%- endmatch %} // Check valid length of proof - success := and(success, eq({{ proof_len|hex() }}, calldataload(PROOF_LEN_CPTR))) + success := and(success, eq({{ proof_len|hex() }}, calldataload(sub(PROOF_LEN_CPTR, 0x6014F51900)))) // Check valid length of instances let num_instances := mload(NUM_INSTANCES_MPTR) @@ -261,6 +252,7 @@ contract Halo2Verifier { let proof_cptr := PROOF_CPTR let challenge_mptr := CHALLENGE_MPTR + {%- for num_advices in num_advices %} // Phase {{ loop.index }} @@ -306,8 +298,6 @@ contract Halo2Verifier { // TODO {%- endmatch %} - {%~ match self.embedded_vk %} - {%- when Some with (embedded_vk) %} // Load full vk into memory {%- for (name, chunk) in embedded_vk.constants %} mstore({{ vk_mptr + loop.index0 }}, {{ chunk|hex_padded(64) }}) // {{ name }} @@ -322,10 +312,6 @@ contract Halo2Verifier { mstore({{ vk_mptr + offset + 2 * loop.index0 }}, {{ x|hex_padded(64) }}) // permutation_comms[{{ loop.index0 }}].x mstore({{ vk_mptr + offset + 2 * loop.index0 + 1 }}, {{ y|hex_padded(64) }}) // permutation_comms[{{ loop.index0 }}].y {%- endfor %} - {%- when None %} - // Copy full vk into memory - extcodecopy(vk, VK_MPTR, 0x00, {{ vk_len|hex() }}) - {%- endmatch %} // Read accumulator from instances if mload(HAS_ACCUMULATOR_MPTR) { @@ -403,7 +389,7 @@ contract Halo2Verifier { } let x_n_minus_1 := addmod(x_n, sub(r, 1), r) mstore(mptr_end, x_n_minus_1) - success := batch_invert(success, X_N_MPTR, add(mptr_end, 0x20), r) + success := batch_invert(success, X_N_MPTR, add(mptr_end, 0x20)) mptr := X_N_MPTR let l_i_common := mulmod(x_n_minus_1, mload(N_INV_MPTR), r) @@ -456,7 +442,6 @@ contract Halo2Verifier { // Compute 
quotient evavluation { let quotient_eval_numer - let delta := 4131629893567559867359510883348571134090853742863529169391034518566172092834 let y := mload(Y_MPTR) {%- for code_block in quotient_eval_numer_computations %} @@ -468,7 +453,6 @@ contract Halo2Verifier { {%- endfor %} pop(y) - pop(delta) let quotient_eval := mulmod(quotient_eval_numer, mload(X_N_MINUS_1_INV_MPTR), r) mstore(QUOTIENT_EVAL_MPTR, quotient_eval) diff --git a/templates/Halo2VerifierReusable.sol b/templates/Halo2VerifierReusable.sol new file mode 100644 index 0000000..b2fe468 --- /dev/null +++ b/templates/Halo2VerifierReusable.sol @@ -0,0 +1,1513 @@ +// SPDX-License-Identifier: MIT + +pragma solidity ^0.8.0; + +contract Halo2VerifierReusable { + uint256 internal constant PROOF_LEN_CPTR = 0x64; + uint256 internal constant PROOF_CPTR = 0x84; + uint256 internal constant Q = 21888242871839275222246405745257275088696311157297823662689037894645226208583; + uint256 internal constant R = 21888242871839275222246405745257275088548364400416034343698204186575808495617; // BN254 scalar field + uint256 internal constant DELTA = 4131629893567559867359510883348571134090853742863529169391034518566172092834; + uint256 internal constant PTR_BITMASK = 0xFFFF; + uint256 internal constant BYTE_FLAG_BITMASK = 0xFF; + + + + function verifyProof( + address vk, + bytes calldata proof, + uint256[] calldata instances + ) public returns (bool) { + assembly { + // Read EC point (x, y) at (proof_cptr, proof_cptr + 0x20), + // and check if the point is on affine plane, + // and store them in (hash_mptr, hash_mptr + 0x20). + // Return updated (success, proof_cptr, hash_mptr). 
+ function read_ec_point(success, proof_cptr, hash_mptr) -> ret0, ret1, ret2 { + let x := calldataload(proof_cptr) + let y := calldataload(add(proof_cptr, 0x20)) + ret0 := and(success, lt(x, Q)) + ret0 := and(ret0, lt(y, Q)) + ret0 := and(ret0, eq(mulmod(y, y, Q), addmod(mulmod(x, mulmod(x, x, Q), Q), 3, Q))) + mstore(hash_mptr, x) + mstore(add(hash_mptr, 0x20), y) + ret1 := add(proof_cptr, 0x40) + ret2 := add(hash_mptr, 0x40) + } + + // Squeeze challenge by keccak256(memory[0..hash_mptr]), + // and store hash mod r as challenge in challenge_mptr, + // and push back hash in 0x00 as the first input for next squeeze. + // Return updated (challenge_mptr, hash_mptr). + function squeeze_challenge(challenge_mptr, hash_mptr) -> ret0, ret1 { + let hash := keccak256(0x00, hash_mptr) + mstore(challenge_mptr, mod(hash, R)) + mstore(0x00, hash) + ret0 := add(challenge_mptr, 0x20) + ret1 := 0x20 + } + + // Squeeze challenge without absorbing new input from calldata, + // by putting an extra 0x01 in memory[0x240] and squeeze by keccak256(memory[0..21]), + // and store hash mod r as challenge in challenge_mptr, + // and push back hash in 0x220 as the first input for next squeeze. + // Return updated (challenge_mptr). + function squeeze_challenge_cont(challenge_mptr) -> ret { + mstore8(0x20, 0x01) + let hash := keccak256(0x00, 0x21) + mstore(challenge_mptr, mod(hash, R)) + mstore(0x00, hash) + ret := add(challenge_mptr, 0x20) + } + + // Batch invert values in memory[mptr_start..mptr_end] in place. + // Return updated (success). 
+ function batch_invert(success, mptr_start, mptr_end) -> ret { + let gp_mptr := mptr_end + let gp := mload(mptr_start) + let mptr := add(mptr_start, 0x20) + for + {} + lt(mptr, sub(mptr_end, 0x20)) + {} + { + gp := mulmod(gp, mload(mptr), R) + mstore(gp_mptr, gp) + mptr := add(mptr, 0x20) + gp_mptr := add(gp_mptr, 0x20) + } + gp := mulmod(gp, mload(mptr), R) + + mstore(gp_mptr, 0x20) + mstore(add(gp_mptr, 0x20), 0x20) + mstore(add(gp_mptr, 0x40), 0x20) + mstore(add(gp_mptr, 0x60), gp) + mstore(add(gp_mptr, 0x80), sub(R, 2)) + mstore(add(gp_mptr, 0xa0), R) + ret := and(success, staticcall(gas(), 0x05, gp_mptr, 0xc0, gp_mptr, 0x20)) + let all_inv := mload(gp_mptr) + + let first_mptr := mptr_start + let second_mptr := add(first_mptr, 0x20) + gp_mptr := sub(gp_mptr, 0x20) + for + {} + lt(second_mptr, mptr) + {} + { + let inv := mulmod(all_inv, mload(gp_mptr), R) + all_inv := mulmod(all_inv, mload(mptr), R) + mstore(mptr, inv) + mptr := sub(mptr, 0x20) + gp_mptr := sub(gp_mptr, 0x20) + } + let inv_first := mulmod(all_inv, mload(second_mptr), R) + let inv_second := mulmod(all_inv, mload(first_mptr), R) + mstore(first_mptr, inv_first) + mstore(second_mptr, inv_second) + } + + // Add (x, y) into point at (0x00, 0x20). + // Return updated (success). + function ec_add_acc(success, x, y) -> ret { + mstore(0x40, x) + mstore(0x60, y) + ret := and(success, staticcall(gas(), 0x06, 0x00, 0x80, 0x00, 0x40)) + } + + // Scale point at (0x00, 0x20) by scalar. + function ec_mul_acc(success, scalar) -> ret { + mstore(0x40, scalar) + ret := and(success, staticcall(gas(), 0x07, 0x00, 0x60, 0x00, 0x40)) + } + + // Add (x, y) into point at (0x80, 0xa0). + // Return updated (success). + function ec_add_tmp(success, x, y) -> ret { + mstore(0xc0, x) + mstore(0xe0, y) + ret := and(success, staticcall(gas(), 0x06, 0x80, 0x80, 0x80, 0x40)) + } + + // Scale point at (0x80, 0xa0) by scalar. + // Return updated (success). 
+ function ec_mul_tmp(success, scalar) -> ret { + mstore(0xc0, scalar) + ret := and(success, staticcall(gas(), 0x07, 0x80, 0x60, 0x80, 0x40)) + } + + // Perform pairing check. + // Return updated (success). + function ec_pairing(success, vk_mptr, lhs_x, lhs_y, rhs_x, rhs_y) -> ret { + mstore(0x00, lhs_x) + mstore(0x20, lhs_y) + mstore(0x40, mload(add(vk_mptr, {{ vk_const_offsets["g2_x_1"]|hex() }}))) + mstore(0x60, mload(add(vk_mptr, {{ vk_const_offsets["g2_x_2"]|hex() }}))) + mstore(0x80, mload(add(vk_mptr, {{ vk_const_offsets["g2_y_1"]|hex() }}))) + mstore(0xa0, mload(add(vk_mptr, {{ vk_const_offsets["g2_y_2"]|hex() }}))) + mstore(0xc0, rhs_x) + mstore(0xe0, rhs_y) + mstore(0x100, mload(add(vk_mptr, {{ vk_const_offsets["neg_s_g2_x_1"]|hex() }}))) + mstore(0x120, mload(add(vk_mptr, {{ vk_const_offsets["neg_s_g2_x_2"]|hex() }}))) + mstore(0x140, mload(add(vk_mptr, {{ vk_const_offsets["neg_s_g2_y_1"]|hex() }}))) + mstore(0x160, mload(add(vk_mptr, {{ vk_const_offsets["neg_s_g2_y_2"]|hex() }}))) + ret := and(success, staticcall(gas(), 0x08, 0x00, 0x180, 0x00, 0x20)) + ret := and(ret, mload(0x00)) + } + + function reposition_challenge_len_data() -> ret0, ret1, ret2 { + let fsm_challenges := mload({{ vk_const_offsets["fsm_challenges"]|hex() }}) + let challenge_len_ptr := {{ vk_const_offsets["num_advices_user_challenges_0"]|hex() }} + let challenge_len_data := mload(challenge_len_ptr) + let num_words := and(challenge_len_data, BYTE_FLAG_BITMASK) + ret0 := num_words + num_words := mul(0x20, num_words) + challenge_len_data := shr(8, challenge_len_data) + for { let i := 0x20 } lt(i, num_words) { i := add(i, 0x20) } { + mstore(add(fsm_challenges, i), mload(add(challenge_len_ptr, i))) + } + ret1 := fsm_challenges + ret2 := challenge_len_data + } + + // Returns start of computaions ptr and length of SoA layout memory + // encoding for quotient evaluation data (gate, permutation and lookup computations) + function soa_layout_metadata(offset, vk_mptr) -> ret0, ret1 { + let 
computations_len_ptr := add(vk_mptr, mload(add(vk_mptr, offset))) + ret0 := add(computations_len_ptr, 0x20) + ret1 := mload(computations_len_ptr) // Remember this length represented in bytes + } + + function col_evals(z, num_words, permutation_z_evals_ptr, theta_mptr) { + let gamma := mload(add(theta_mptr, 0x40)) + let beta := mload(add(theta_mptr, 0x20)) + let x := mload(add(theta_mptr, 0x80)) + let l_last := mload(add(theta_mptr, 0x1c0)) + let l_blind := mload(add(theta_mptr, 0x1e0)) + let i_eval := mload(add(theta_mptr, 0x220)) + // Extract the index 1 and index 0 z evaluations from the z word. + let lhs := calldataload(and(shr(16,z), PTR_BITMASK)) + let rhs := calldataload(and(z, PTR_BITMASK)) + z := shr(48, z) + // loop through the word_len_chunk + for { let j := 0 } lt(j, num_words) { j := add(j, 0x20) } { + for { } z { } { + let eval := i_eval + if eq(and(z, BYTE_FLAG_BITMASK), 0x00) { + eval := calldataload(and(shr(8, z), PTR_BITMASK)) + } + lhs := mulmod(lhs, addmod(addmod(eval, mulmod(beta, calldataload(and(shr(24, z), PTR_BITMASK)), R), R), gamma, R), R) + rhs := mulmod(rhs, addmod(addmod(eval, mload(0x00), R), gamma, R), R) + z := shr(40, z) + mstore(0x00, mulmod(mload(0x00), DELTA, R)) + } + z := mload(add(permutation_z_evals_ptr, add(j, 0x20))) + } + let left_sub_right := addmod(lhs, sub(R, rhs), R) + let fsm_ptr := mload(0x20) + mstore(fsm_ptr, addmod(left_sub_right, sub(R, mulmod(left_sub_right, addmod(l_last, l_blind, R), R)), R)) + mstore(0x20, add(fsm_ptr,0x20)) + } + + function z_evals(z, num_words_packed, perm_z_last_ptr, permutation_z_evals_ptr, theta_mptr, l_0, y, quotient_eval_numer) -> ret { + let num_words := and(num_words_packed, PTR_BITMASK) + // Initialize the free static memory pointer to store the column evals. + mstore(0x20, 0x40) + // Iterate through the tuple window length ( permutation_z_evals_len.len() - 1 ) offset by one word. 
+ for { } lt(permutation_z_evals_ptr, perm_z_last_ptr) { } { + let next_z_ptr := add(permutation_z_evals_ptr, num_words) + let z_j := mload(next_z_ptr) + quotient_eval_numer := addmod( + mulmod(quotient_eval_numer, y, R), + mulmod(l_0, addmod(calldataload(and(z_j, PTR_BITMASK)), sub(R, calldataload(and(shr(32,z), PTR_BITMASK))), R), R), + R + ) + col_evals(z, num_words, permutation_z_evals_ptr, theta_mptr) + permutation_z_evals_ptr := next_z_ptr + z := z_j + } + // Due to the fact that permutation_columns.len() in H2 might not be divisible by permutation_chunk_len, the last column length might be less than permutation_chunk_len + // We store this length in the last 16 bits of the num_words_packed word. + num_words := and(shr(16, num_words_packed), PTR_BITMASK) + col_evals(z, num_words, permutation_z_evals_ptr, theta_mptr) + // iterate through col_evals to update the quotient_eval_numer accumulator + let end_ptr := mload(0x20) + for { let j := 0x40 } lt(j, end_ptr) { j := add(j, 0x20) } { + quotient_eval_numer := addmod(mulmod(quotient_eval_numer, y, R), mload(j), R) + } + ret := quotient_eval_numer + } + + function lookup_input_accum(expressions_word, fsmp, i, code_ptr) -> ret0, ret1, ret2 { + expressions_word := shr(8, expressions_word) + // Number of words the mptr vars for the accumulator evaluations shifted up by one + let num_words_vars := mul(0x20, and(expressions_word, BYTE_FLAG_BITMASK)) + expressions_word := shr(8, expressions_word) + // initlaize the accumulator with the first value in the vars + let a := mload(and(expressions_word, PTR_BITMASK)) + expressions_word := shr(16, expressions_word) + let theta := mload(0x60) + for { let j } lt(j, num_words_vars) { j := add(j, 0x20) } { + for { } expressions_word { } { + a := addmod( + mulmod(a, theta, R), + mload(and(expressions_word, PTR_BITMASK)), + R + ) + expressions_word := shr(16, expressions_word) + } + ret0 := add(code_ptr, add(i, j)) + expressions_word := mload(ret0) + } + ret1 := expressions_word + 
ret2 := a + } + + function expression_evals_packed(fsmp, code_ptr, expressions_word) -> ret0, ret1, ret2 { + // Load in the least significant byte of the `expressions_word` word to get the total number of words we will need to load in. + let num_words_shift_up_one := add(mul(0x20, and(expressions_word, BYTE_FLAG_BITMASK)), 0x20) + // start of the expression encodings + expressions_word := shr(8, expressions_word) + let acc + for { let i := 0x20 } lt(i, num_words_shift_up_one) { i := add(i, 0x20) } { + for { } expressions_word { } { + let mstore_ptr := add(fsmp, acc) + // Load in the least significant byte of the `expression` word to get the operation type + // Then determine which operation to peform and then store the result in the next available memory slot. + switch and(expressions_word, BYTE_FLAG_BITMASK) + // 0x00 => Advice/Fixed expression + case 0x00 { + expressions_word := shr(8, expressions_word) + // Load the calldata ptr from the expression, which come from the 2nd and 3rd least significant bytes. 
+ mstore(mstore_ptr, calldataload(and(expressions_word, PTR_BITMASK))) + // Move to the next expression + expressions_word := shr(16, expressions_word) + } + // 0x01 => Negated expression + case 0x01 { + expressions_word := shr(8, expressions_word) + // Load the memory ptr from the expression, which come from the 2nd and 3rd least significant bytes + mstore(mstore_ptr, sub(R, mload(and(expressions_word, PTR_BITMASK)))) + // Move to the next expression + expressions_word := shr(16, expressions_word) + } + // 0x02 => Sum expression + case 0x02 { + expressions_word := shr(8, expressions_word) + // Load the lhs operand memory ptr from the expression, which comes from the 2nd and 3rd least significant bytes + // Load the rhs operand memory ptr from the expression, which comes from the 4th and 5th least significant bytes + mstore(mstore_ptr, addmod(mload(and(expressions_word, PTR_BITMASK)), mload(and(shr(16, expressions_word), PTR_BITMASK)),R)) + // Move to the next expression + expressions_word := shr(32, expressions_word) + } + // 0x03 => Product/scalar expression + case 0x03 { + expressions_word := shr(8, expressions_word) + // Load the lhs operand memory ptr from the expression, which comes from the 2nd and 3rd least significant bytes + // Load the rhs operand memory ptr from the expression, which comes from the 4th and 5th least significant bytes + mstore(mstore_ptr, mulmod(mload(and(expressions_word, PTR_BITMASK)),mload(and(shr(16, expressions_word), PTR_BITMASK)),R)) + // Move to the next expression + expressions_word := shr(32, expressions_word) + } + // 0x04 => (For lookup expressions) Start accumulator evaluations for the lookup (table or input) + // Will always occur at the end of the last word of the lookup expression. 
+ case 0x04 { + ret0, ret1, ret2 := lookup_input_accum(expressions_word, fsmp, i, code_ptr) + leave + } + acc := add(acc, 0x20) + } + ret0 := add(code_ptr, i) + expressions_word := mload(ret0) + } + ret1 := expressions_word + ret2 := sub(acc, 0x20) + } + + function lookup_expr_evals_packed(fsmp, code_ptr, expressions_word, mv) -> ret0, ret1, ret2 { + // expression evaluation. + ret0, ret1, ret2 := expression_evals_packed(fsmp, code_ptr, expressions_word) + if mv { + // add the beta accum addmod if mv lookup + ret2 := addmod(ret2, mload(0x80), R) + } + } + + function mv_lookup_evals(table, evals_ptr, quotient_eval_numer, y) -> ret0, ret1, ret2 { + // iterate through the input_tables_len + let evals := mload(evals_ptr) + // We store a boolean flag in the first LSG byte of the evals ptr to determine if we need to load in a new table or reuse the previous table. + let new_table := and(evals, BYTE_FLAG_BITMASK) + evals := shr(8, evals) + let phi := and(evals, PTR_BITMASK) + quotient_eval_numer := addmod( + mulmod(quotient_eval_numer, y, R), + mulmod(mload(0x20),calldataload(phi), R), + R + ) + quotient_eval_numer := addmod( + mulmod(quotient_eval_numer, y, R), + mulmod(mload(0x00), calldataload(phi), R), + R + ) + // load in the lookup_table_lines from the evals_ptr + evals_ptr := add(evals_ptr, 0x20) + // Due to the fact that lookups can share the previous table, we can cache it for reuse. 
+ let input_expression := mload(evals_ptr) + if new_table { + evals_ptr, input_expression, table := lookup_expr_evals_packed(0xa0, evals_ptr, mload(evals_ptr), 0x1) + } + // outer inputs len, stored in the first input expression word + let outer_inputs_len := and(input_expression, PTR_BITMASK) + input_expression := shr(16, input_expression) + // shift up the inputs iterator by the free static memory offset of 0xa0 + for { let j := 0xa0 } lt(j, add(outer_inputs_len, 0xa0)) { j := add(j, 0x20) } { + // call the expression_evals function to evaluate the input_lines + let ident + evals_ptr, input_expression, ident := lookup_expr_evals_packed(j, evals_ptr, input_expression, 0x1) + // store ident in free static memory + mstore(j, ident) + } + let lhs + let rhs + switch eq(outer_inputs_len, 0x20) + case 1 { + rhs := table + } default { + // iterate through the outer_inputs_len + let last_idx := sub(outer_inputs_len, 0x20) + for { let i := 0 } lt(i, outer_inputs_len) { i := add(i, 0x20) } { + let tmp := mload(0xa0) + let j := 0x20 + if eq(i, 0){ + tmp := mload(0xc0) + j := 0x40 + } + for { } lt(j, outer_inputs_len) { j := add(j, 0x20) } { + if eq(i, j) { + continue + } + tmp := mulmod(tmp, mload(add(j, 0xa0)), R) + + } + rhs := addmod(rhs, tmp, R) + if eq(i, last_idx) { + rhs := mulmod(rhs, table, R) + } + } + } + let tmp := mload(0xa0) + for { let j := 0x20 } lt(j, outer_inputs_len) { j := add(j, 0x20) } { + tmp := mulmod(tmp, mload(add(j, 0xa0)), R) + } + rhs := addmod( + rhs, + sub(R, mulmod(calldataload(and(shr(32, evals), PTR_BITMASK)), tmp, R)), + R + ) + lhs := mulmod( + mulmod(table, tmp, R), + addmod(calldataload(and(shr(16, evals), PTR_BITMASK)), sub(R, calldataload(phi)), R), + R + ) + quotient_eval_numer := addmod( + mulmod(quotient_eval_numer, y, R), + mulmod( + addmod( + 1, + sub(R, addmod(mload(0x40), mload(0x00), R)), + R + ), + addmod(lhs, sub(R, rhs), R), + R + ), + R + ) + ret0 := evals_ptr + ret1 := table + ret2 := quotient_eval_numer + } + + function 
lookup_evals(table, evals_ptr, quotient_eval_numer, y) -> ret0, ret1, ret2 { + // iterate through the input_tables_len + let evals := mload(evals_ptr) + // We store a boolean flag in the first LSG byte of the evals ptr to determine if we need to load in a new table or reuse the previous table. + let new_table := and(evals, BYTE_FLAG_BITMASK) + evals := shr(8, evals) + let z := and(evals, PTR_BITMASK) + evals := shr(16, evals) + quotient_eval_numer := addmod( + mulmod(quotient_eval_numer, y, R), + addmod( + mload(0x20), + mulmod( + mload(0x20), + sub(R, calldataload(z)), + R + ), + R + ), + R + ) + quotient_eval_numer := addmod( + mulmod(quotient_eval_numer, y, R), + mulmod( + mload(0x00), + addmod( + mulmod(calldataload(z), calldataload(z), R), + sub(R, calldataload(z)), + R + ), + R + ), + R + ) + // load in the lookup_table_lines from the evals_ptr + evals_ptr := add(evals_ptr, 0x20) + // Due to the fact that lookups can share the previous table, we can cache it for reuse. + let input_expression := mload(evals_ptr) + if new_table { + evals_ptr, input_expression, table := lookup_expr_evals_packed(0xc0, evals_ptr, mload(evals_ptr), 0x0) + } + // call the expression_evals function to evaluate the input_lines + let input + evals_ptr, input_expression, input := lookup_expr_evals_packed(0xc0, evals_ptr, input_expression, 0x0) + let p_input := and(shr(16, evals), PTR_BITMASK) + let p_table := and(shr(48, evals), PTR_BITMASK) + quotient_eval_numer := addmod( + mulmod(quotient_eval_numer, y, R), + mulmod( + addmod( + 1, + sub(R, addmod(mload(0x40), mload(0x0), R)), + R + ), + addmod( + mulmod( + calldataload(and(evals, PTR_BITMASK)), + mulmod( + addmod(calldataload(p_input), mload(0x80), R), + addmod(calldataload(p_table), mload(0xa0), R), + R + ), + R + ), + sub( + R, + mulmod( + calldataload(z), + mulmod(addmod(input, mload(0x80), R), addmod(table, mload(0xa0), R), R), + R + ) + ), + R + ), + R + ), + R + ) + quotient_eval_numer := addmod( + mulmod(quotient_eval_numer, 
y, R), + mulmod(mload(0x20), addmod(calldataload(p_input), sub(R, calldataload(p_table)), R), R), + R + ) + quotient_eval_numer := addmod( + mulmod(quotient_eval_numer, y, R), + mulmod( + addmod( + 1, + sub(R, addmod(mload(0x40), mload(0x0), R)), R), + mulmod( + addmod(calldataload(p_input), sub(R, calldataload(p_table)), R), + addmod(calldataload(p_input), sub(R, calldataload(and(shr(32, evals), PTR_BITMASK))), R), + R + ), + R + ), + R + ) + ret0 := evals_ptr + ret1 := table + ret2 := quotient_eval_numer + } + + function point_rots(pcs_computations, pcs_ptr, word_shift, x_pow_of_omega, omega) -> ret0, ret1 { + // Extract the 32 LSG bits (4 bytes) from the pcs_computations word to get the max rot + let values_max_rot := and(pcs_computations, BYTE_FLAG_BITMASK) + pcs_computations := shr(8, pcs_computations) + for { let i := 0 } lt(i, values_max_rot) { i := add(i, 1) } { + let value := and(pcs_computations, PTR_BITMASK) + if not(eq(value, 0)) { + mstore(value, x_pow_of_omega) + } + if eq(i, sub(values_max_rot, 1)) { + break + } + x_pow_of_omega := mulmod(x_pow_of_omega, omega, R) + word_shift := add(word_shift, 16) + pcs_computations := shr(16, pcs_computations) + if eq(word_shift, 256) { + word_shift := 0 + pcs_ptr := add(pcs_ptr, 0x20) + pcs_computations := mload(pcs_ptr) + } + } + ret0 := x_pow_of_omega + ret1 := pcs_ptr + } + + function coeff_computations(coeff_len_data, coeff_data) -> ret { + let coeff_len := and(coeff_len_data, BYTE_FLAG_BITMASK) + ret := shr(8, coeff_len_data) + switch coeff_len + case 0x01 { + // We only encode the points if the coeff length is greater than 1. + // Otherwise we just encode the mu_minus_point and coeff ptr. 
+ mstore(and(shr(16, coeff_data), PTR_BITMASK), mod(mload(and(coeff_data, PTR_BITMASK)), R)) + } + default { + let coeff + let offset_aggr := mul(coeff_len, 16) + for { let i := 0 } lt(i, coeff_len) { i := add(i, 1) } { + let first := 0x01 + let offset_base := mul(i, 16) + let point_i := mload(and(shr(offset_base, coeff_data), PTR_BITMASK)) + for { let j:= 0 } lt(j, coeff_len) { j := add(j, 1) } { + if eq(j, i) { + continue + } + if first { + coeff := addmod(point_i, sub(R, mload(and(shr(mul(j, 16), coeff_data), PTR_BITMASK))), R) + first := 0 + continue + } + coeff := mulmod(coeff, addmod(point_i, sub(R, mload(and(shr(mul(j, 16), coeff_data), PTR_BITMASK))), R), R) + } + offset_base := add(offset_base, offset_aggr) + coeff := mulmod(coeff, mload(and(shr(offset_base, coeff_data), PTR_BITMASK)), R) + offset_base := add(offset_base, offset_aggr) + mstore(and(shr(offset_base, coeff_data), PTR_BITMASK), coeff) + } + } + } + + function single_rot_set(r_evals_data, ptr, num_words, zeta, quotient_eval, coeff_ptr) -> ret0, ret1 { + let coeff := mload(coeff_ptr) + let r_eval + r_eval := addmod(r_eval, mulmod(coeff, calldataload(and(r_evals_data, PTR_BITMASK)), R), R) + r_evals_data := shr(16, r_evals_data) + r_eval := mulmod(r_eval, zeta, R) + r_eval := addmod(r_eval, mulmod(coeff, quotient_eval, R), R) + for { let i := 0 } lt(i, num_words) { i := add(i, 1) } { + for { } r_evals_data { } { + let eval_group_len := and(r_evals_data, BYTE_FLAG_BITMASK) + r_evals_data := shr(8, r_evals_data) + switch eq(eval_group_len, 0x0) + case 0x0 { + for { let j := 0 } lt(j, eval_group_len) { j := add(j, 1) } { + r_eval := addmod(mulmod(r_eval, zeta, R), mulmod(coeff, calldataload(and(r_evals_data, PTR_BITMASK)), R), R) + r_evals_data := shr(16, r_evals_data) + } + } default { + for + { + let mptr := and(r_evals_data, PTR_BITMASK) + r_evals_data := shr(16, r_evals_data) + let mptr_end := and(r_evals_data, PTR_BITMASK) + } + lt(mptr_end, mptr) + { mptr := sub(mptr, 0x20) } + { + r_eval := 
addmod(mulmod(r_eval, zeta, R), mulmod(coeff, calldataload(mptr), R), R) + } + r_evals_data := shr(16, r_evals_data) + } + } + ptr := add(ptr, 0x20) + r_evals_data := mload(ptr) + } + ret0 := r_eval + ret1 := ptr + } + + function multi_rot_set(r_evals_data, ptr, num_words, rot_len, zeta, coeff_ptr) -> ret0, ret1 { + let r_eval := 0 + for { let i := 0 } lt(i, num_words) { i := add(i, 1) } { + for { } r_evals_data { } { + for { let j := 0 } lt(j, rot_len) { j := add(j, 0x20) } { + r_eval := addmod(r_eval, mulmod(mload(add(coeff_ptr, j)), calldataload(and(r_evals_data, PTR_BITMASK)), R), R) + r_evals_data := shr(16, r_evals_data) + } + // Only on the last index do we NOT execute this if block. + if or(r_evals_data, lt(i, sub(num_words, 1))) { + r_eval := mulmod(r_eval, zeta, R) + } + } + ptr := add(ptr, 0x20) + r_evals_data := mload(ptr) + } + ret0 := r_eval + ret1 := ptr + } + + function r_evals_computation(rot_len, r_evals_data_ptr, zeta, quotient_eval, coeff_ptr) -> ret0, ret1 { + let r_evals_data := mload(r_evals_data_ptr) + // number of words to encode the data needed for this set in the r_evals computation. 
+ let num_words := and(r_evals_data, BYTE_FLAG_BITMASK) + r_evals_data := shr(8, r_evals_data) + switch rot_len + case 0x20 { + ret0, ret1 := single_rot_set(r_evals_data, r_evals_data_ptr, num_words, zeta, quotient_eval, coeff_ptr) + } default { + ret0, ret1 := multi_rot_set(r_evals_data, r_evals_data_ptr, num_words, rot_len, zeta, coeff_ptr) + } + } + + function pairing_input_computations_first(len, pcs_ptr, data, theta_mptr, success) -> ret { + mstore(0x00, calldataload(and(data, PTR_BITMASK))) + data := shr(16, data) + mstore(0x20, calldataload(and(data, PTR_BITMASK))) + data := shr(16, data) + for { let i := 0 } lt(i, len) { i := add(i, 0x20) } { + for { } data { } { + let ptr_loc := and(data, BYTE_FLAG_BITMASK) + data := shr(8, data) + let comm_len := and(data, BYTE_FLAG_BITMASK) + data := shr(8, data) + switch comm_len + case 0x0 { + switch ptr_loc + case 0x00 { + for + { + let mptr := and(data, PTR_BITMASK) + data := shr(16, data) + let mptr_end := and(data, PTR_BITMASK) + } + lt(mptr_end, mptr) + { mptr := sub(mptr, 0x40) } + { + success := ec_mul_acc(success, mload(add(theta_mptr, 0xA0))) + success := ec_add_acc(success, mload(mptr), mload(add(mptr, 0x20))) + } + } + case 0x01 { + for + { + let mptr := and(data, PTR_BITMASK) + data := shr(16, data) + let mptr_end := and(data, PTR_BITMASK) + } + lt(mptr_end, mptr) + { mptr := sub(mptr, 0x40) } + { + success := ec_mul_acc(success, mload(add(theta_mptr, 0xA0))) + success := ec_add_acc(success, calldataload(mptr), calldataload(add(mptr, 0x20))) + } + } + data := shr(16, data) + } default { + switch ptr_loc + case 0x00 { + success := ec_mul_acc(success, mload(add(theta_mptr, 0xA0))) + success := ec_add_acc(success, mload(and(data, PTR_BITMASK)), mload(and(shr(16,data), PTR_BITMASK))) + if eq(comm_len, 0x02) { + data := shr(32, data) + success := ec_mul_acc(success, mload(add(theta_mptr, 0xA0))) + success := ec_add_acc(success, mload(and(data, PTR_BITMASK)), mload(and(shr(16,data), PTR_BITMASK))) + } + data := 
shr(32, data) + } + case 0x01 { + success := ec_mul_acc(success, mload(add(theta_mptr, 0xA0))) + success := ec_add_acc(success, calldataload(and(data, PTR_BITMASK)), calldataload(and(shr(16,data), PTR_BITMASK))) + if eq(comm_len, 0x02) { + data := shr(32, data) + success := ec_mul_acc(success, mload(add(theta_mptr, 0xA0))) + success := ec_add_acc(success, calldataload(and(data, PTR_BITMASK)), calldataload(and(shr(16,data), PTR_BITMASK))) + } + data := shr(32, data) + } + // Quotient eval x and y points + case 0x02 { + success := ec_mul_acc(success, mload(add(theta_mptr, 0xA0))) + success := ec_add_acc(success, mload(add(theta_mptr, 0x260)), mload(add(theta_mptr, 0x280))) + } + } + } + pcs_ptr := add(pcs_ptr, 0x20) + data := mload(pcs_ptr) + } + ret := success + } + + function pairing_input_computations(len, pcs_ptr, data, theta_mptr, success) -> ret { + mstore(0x80, calldataload(and(data, PTR_BITMASK))) + data := shr(16, data) + mstore(0xa0, calldataload(and(data, PTR_BITMASK))) + data := shr(16, data) + for { let i := 0 } lt(i, len) { i := add(i, 0x20) } { + for { } data { } { + let ptr_loc := and(data, BYTE_FLAG_BITMASK) + data := shr(8, data) + let comm_len := and(data, BYTE_FLAG_BITMASK) + data := shr(8, data) + switch comm_len + case 0x0 { + switch ptr_loc + case 0x00 { + for + { + let mptr := and(data, PTR_BITMASK) + data := shr(16, data) + let mptr_end := and(data, PTR_BITMASK) + } + lt(mptr_end, mptr) + { mptr := sub(mptr, 0x40) } + { + success := ec_mul_tmp(success, mload(add(theta_mptr, 0xA0))) + success := ec_add_tmp(success, mload(mptr), mload(add(mptr, 0x20))) + } + } + case 0x01 { + for + { + let mptr := and(data, PTR_BITMASK) + data := shr(16, data) + let mptr_end := and(data, PTR_BITMASK) + } + lt(mptr_end, mptr) + { mptr := sub(mptr, 0x40) } + { + success := ec_mul_tmp(success, mload(add(theta_mptr, 0xA0))) + success := ec_add_tmp(success, calldataload(mptr), calldataload(add(mptr, 0x20))) + } + } + data := shr(16, data) + } default { + switch 
ptr_loc + case 0x00 { + success := ec_mul_tmp(success, mload(add(theta_mptr, 0xA0))) + success := ec_add_tmp(success, mload(and(data, PTR_BITMASK)), mload(and(shr(16,data), PTR_BITMASK))) + if eq(comm_len, 0x2) { + data := shr(32, data) + success := ec_mul_tmp(success, mload(add(theta_mptr, 0xA0))) + success := ec_add_tmp(success, mload(and(data, PTR_BITMASK)), mload(and(shr(16,data), PTR_BITMASK))) + } + data := shr(32, data) + } + case 0x01 { + success := ec_mul_tmp(success, mload(add(theta_mptr, 0xA0))) + success := ec_add_tmp(success, calldataload(and(data, PTR_BITMASK)), calldataload(and(shr(16,data), PTR_BITMASK))) + if eq(comm_len, 0x2) { + data := shr(32, data) + success := ec_mul_tmp(success, mload(add(theta_mptr, 0xA0))) + success := ec_add_tmp(success, calldataload(and(data, PTR_BITMASK)), calldataload(and(shr(16,data), PTR_BITMASK))) + } + data := shr(32, data) + } + // Quotient eval x and y points + case 0x02 { + success := ec_mul_tmp(success, mload(add(theta_mptr, 0xA0))) + success := ec_add_tmp(success, mload(add(theta_mptr, 0x260)), mload(add(theta_mptr, 0x280))) + } + } + } + pcs_ptr := add(pcs_ptr, 0x20) + data := mload(pcs_ptr) + } + ret := success + } + + // Initialize success as true + let success := true + // Initialize vk_mptr as 0x0 on the stack + let vk_mptr := 0x0 + // Initialize theta_mptr as 0x0 on the stack + let theta_mptr := 0x0 + { + // Load in the vk_digest, vk_mptr and vk_len and the rest of the constants needed for the + // challenge data generation process. 
+ extcodecopy(vk, 0x0, 0x00, 0x240) + // Set the vk_mptr + vk_mptr := mload(0x20) + let vk_len := mload(0x40) + + let instance_cptr := mload({{ vk_const_offsets["instance_cptr"]|hex() }}) + + // Check valid length of proof + success := and(success, eq(sub(instance_cptr, 0xa4), calldataload(PROOF_LEN_CPTR))) + + // Check valid length of instances + let num_instances := mload({{ vk_const_offsets["num_instances"]|hex() }}) + success := and(success, eq(num_instances, calldataload(sub(instance_cptr,0x20)))) + + + // Read instances and witness commitments and generate challenges + let hash_mptr := 0x20 + + let proof_cptr := PROOF_CPTR + let challenge_mptr := add(vk_mptr, vk_len) // challenge_mptr is at the end of vk in memory + // Set the theta_mptr (vk_mptr + vk_len + challenges_length) + theta_mptr := add(challenge_mptr, mload({{ vk_const_offsets["challenges_offset"]|hex() }})) + + // Move the challenge length data to the free static memory location for the challenge data generation process. 
+ let num_words, challenge_len_ptr, challenge_len_data := reposition_challenge_len_data() + + let num_evals := mul(0x20, mload({{ vk_const_offsets["num_evals"]|hex() }})) + + for + { let instance_cptr_end := add(instance_cptr, mul(0x20, num_instances)) } + lt(instance_cptr, instance_cptr_end) + {} + { + let instance := calldataload(instance_cptr) + success := and(success, lt(instance, R)) + mstore(hash_mptr, instance) + instance_cptr := add(instance_cptr, 0x20) + hash_mptr := add(hash_mptr, 0x20) + } + + for { let i := 0 } lt(i, num_words) { i := add(i, 1) } { + challenge_len_ptr := add(challenge_len_ptr, 0x20) + for { } challenge_len_data { } { + // add proof_cpt to num advices len + let proof_cptr_end := add(proof_cptr, and(challenge_len_data, PTR_BITMASK)) + challenge_len_data := shr(16, challenge_len_data) + // Phase loop + for { } lt(proof_cptr, proof_cptr_end) { } { + success, proof_cptr, hash_mptr := read_ec_point(success, proof_cptr, hash_mptr) + } + // Generate challenges + challenge_mptr, hash_mptr := squeeze_challenge(challenge_mptr, hash_mptr) + + // Continue squeezing challenges based on num_challenges + let num_challenges := and(challenge_len_data, BYTE_FLAG_BITMASK) + challenge_len_data := shr(8, challenge_len_data) + for { let c := 1 } lt(c, num_challenges) { c := add(c, 1) } { + challenge_mptr := squeeze_challenge_cont(challenge_mptr) + } + } + challenge_len_data := mload(challenge_len_ptr) + } + + // Read evaluations + for + { let proof_cptr_end := add(proof_cptr, num_evals) } // num_evals + lt(proof_cptr, proof_cptr_end) + {} + { + let eval := calldataload(proof_cptr) + success := and(success, lt(eval, R)) + mstore(hash_mptr, eval) + proof_cptr := add(proof_cptr, 0x20) + hash_mptr := add(hash_mptr, 0x20) + } + + // Read batch opening proof and generate challenges + {%- match scheme %} + {%- when Bdfg21 %} + challenge_mptr, hash_mptr := squeeze_challenge(challenge_mptr, hash_mptr) // zeta + challenge_mptr := squeeze_challenge_cont(challenge_mptr) 
// nu + + success, proof_cptr, hash_mptr := read_ec_point(success, proof_cptr, hash_mptr) // W + + challenge_mptr, hash_mptr := squeeze_challenge(challenge_mptr, hash_mptr) // mu + + success, proof_cptr, hash_mptr := read_ec_point(success, proof_cptr, hash_mptr) // W' + {%- when Gwc19 %} + // TODO + {%- endmatch %} + + // Copy full vk into memory (some parts were overwritten during the challenge generation) + extcodecopy(vk, vk_mptr, 0x00, vk_len) + + // Read accumulator from instances + if mload(add(vk_mptr, {{ vk_const_offsets["has_accumulator"]|hex() }})) { + let num_limbs := mload(add(vk_mptr, {{ vk_const_offsets["num_acc_limbs"]|hex() }})) + let num_limb_bits := mload(add(vk_mptr, {{ vk_const_offsets["num_acc_limb_bits"]|hex() }})) + + let cptr := add(mload(add(vk_mptr, {{ vk_const_offsets["instance_cptr"]|hex() }})), mul(mload(add(vk_mptr, {{ vk_const_offsets["acc_offset"]|hex() }})), 0x20)) + let lhs_y_off := mul(num_limbs, 0x20) + let rhs_x_off := mul(lhs_y_off, 2) + let rhs_y_off := mul(lhs_y_off, 3) + let lhs_x := calldataload(cptr) + let lhs_y := calldataload(add(cptr, lhs_y_off)) + let rhs_x := calldataload(add(cptr, rhs_x_off)) + let rhs_y := calldataload(add(cptr, rhs_y_off)) + for + { + let cptr_end := add(cptr, mul(0x20, num_limbs)) + let shift := num_limb_bits + } + lt(cptr, cptr_end) + {} + { + cptr := add(cptr, 0x20) + lhs_x := add(lhs_x, shl(shift, calldataload(cptr))) + lhs_y := add(lhs_y, shl(shift, calldataload(add(cptr, lhs_y_off)))) + rhs_x := add(rhs_x, shl(shift, calldataload(add(cptr, rhs_x_off)))) + rhs_y := add(rhs_y, shl(shift, calldataload(add(cptr, rhs_y_off)))) + shift := add(shift, num_limb_bits) + } + + success := and(success, eq(mulmod(lhs_y, lhs_y, Q), addmod(mulmod(lhs_x, mulmod(lhs_x, lhs_x, Q), Q), 3, Q))) + success := and(success, eq(mulmod(rhs_y, rhs_y, Q), addmod(mulmod(rhs_x, mulmod(rhs_x, rhs_x, Q), Q), 3, Q))) + + mstore(add(theta_mptr, 0x100), lhs_x) + mstore(add(theta_mptr, 0x120), lhs_y) + mstore(add(theta_mptr, 
0x140), rhs_x) + mstore(add(theta_mptr, 0x160), rhs_y) + } + + } + + // Revert earlier if anything from calldata is invalid + if iszero(success) { + revert(0, 0) + } + + + // Compute lagrange evaluations and instance evaluation + { + let k := mload(add(vk_mptr, {{ vk_const_offsets["k"]|hex() }})) + let x := mload(add(theta_mptr, 0x80)) + let x_n := x + for + { let idx := 0 } + lt(idx, k) + { idx := add(idx, 1) } + { + x_n := mulmod(x_n, x_n, R) + } + + let omega := mload(add(vk_mptr, {{ vk_const_offsets["omega"]|hex() }})) + let x_n_mptr := add(theta_mptr, 0x180) + let mptr := x_n_mptr + let num_instances := mload(add(vk_mptr, {{ vk_const_offsets["num_instances"]|hex() }})) + let num_neg_lagranges := mload(add(vk_mptr, {{ vk_const_offsets["num_neg_lagranges"]|hex() }})) + let mptr_end := add(mptr, mul(0x20, add(num_instances, num_neg_lagranges))) + if iszero(num_instances) { + mptr_end := add(mptr_end, 0x20) + } + for + { let pow_of_omega := mload(add(vk_mptr, {{ vk_const_offsets["omega_inv_to_l"]|hex() }})) } + lt(mptr, mptr_end) + { mptr := add(mptr, 0x20) } + { + mstore(mptr, addmod(x, sub(R, pow_of_omega),R)) + pow_of_omega := mulmod(pow_of_omega, omega,R) + } + let x_n_minus_1 := addmod(x_n, sub(R, 1),R) + mstore(mptr_end, x_n_minus_1) + success := batch_invert(success, x_n_mptr, add(mptr_end, 0x20)) + + mptr := x_n_mptr + let l_i_common := mulmod(x_n_minus_1, mload(add(vk_mptr, {{ vk_const_offsets["n_inv"]|hex() }})),R) + for + { let pow_of_omega := mload(add(vk_mptr, {{ vk_const_offsets["omega_inv_to_l"]|hex() }})) } + lt(mptr, mptr_end) + { mptr := add(mptr, 0x20) } + { + mstore(mptr, mulmod(l_i_common, mulmod(mload(mptr), pow_of_omega,R),R)) + pow_of_omega := mulmod(pow_of_omega, omega,R) + } + + let l_blind := mload(add(x_n_mptr, 0x20)) + let l_i_cptr := add(x_n_mptr, 0x40) + for + { let l_i_cptr_end := add(x_n_mptr, mul(0x20, num_neg_lagranges)) } + lt(l_i_cptr, l_i_cptr_end) + { l_i_cptr := add(l_i_cptr, 0x20) } + { + l_blind := addmod(l_blind, 
mload(l_i_cptr),R) + } + + let instance_eval := 0 + for + { + let instance_cptr := mload(add(vk_mptr, {{ vk_const_offsets["instance_cptr"]|hex() }})) + let instance_cptr_end := add(instance_cptr, mul(0x20, num_instances)) + } + lt(instance_cptr, instance_cptr_end) + { + instance_cptr := add(instance_cptr, 0x20) + l_i_cptr := add(l_i_cptr, 0x20) + } + { + instance_eval := addmod(instance_eval, mulmod(mload(l_i_cptr), calldataload(instance_cptr),R),R) + } + + let x_n_minus_1_inv := mload(mptr_end) + let l_last := mload(x_n_mptr) + let l_0 := mload(add(x_n_mptr, mul(0x20, num_neg_lagranges))) + + mstore(x_n_mptr, x_n) + mstore(add(theta_mptr, 0x1a0), x_n_minus_1_inv) + mstore(add(theta_mptr, 0x1c0), l_last) + mstore(add(theta_mptr, 0x1e0), l_blind) + mstore(add(theta_mptr, 0x200), l_0) + mstore(add(theta_mptr, 0x220), instance_eval) + } + + + // Compute quotient evaluation + { + let quotient_eval_numer + let y := mload(add(theta_mptr, 0x60)) + { + // Gate computations / expression evaluations. 
+ let computations_ptr, computations_len := soa_layout_metadata({{ vk_const_offsets["gate_computations_len_offset"]|hex() }}, vk_mptr) + let expressions_word := mload(computations_ptr) + let last_idx + // Load in the total number of code blocks from the vk constants, right after the number challenges + for { let code_block := 0 } lt(code_block, computations_len) { code_block := add(code_block, 0x20) } { + // call expression_evals to evaluate the expressions in the code block + computations_ptr, expressions_word, last_idx := expression_evals_packed(0x00, computations_ptr, expressions_word) + + // at the end of each code block we update `quotient_eval_numer` + // If this is the first code block, we set `quotient_eval_numer` to the last var in the code block + switch eq(code_block, 0) + case 1 { + quotient_eval_numer := mload(last_idx) + } + case 0 { + // Otherwise we add the last var in the code block to `quotient_eval_numer` mod r + quotient_eval_numer := addmod(mulmod(quotient_eval_numer, y, R), mload(last_idx), R) + } + } + } + { + // Permutation computations + let permutation_z_evals_ptr := add(vk_mptr, mload(add(vk_mptr, {{ vk_const_offsets["permutation_computations_len_offset"]|hex() }}))) + let permutation_z_evals := mload(permutation_z_evals_ptr) + // Last idx of permutation evals == permutation_evals.len() - 1 + let last_idx := and(permutation_z_evals, BYTE_FLAG_BITMASK) + permutation_z_evals := shr(8, permutation_z_evals) + // Num of words scaled by 0x20 that take up each permutation eval (permutation_z_eval + column evals) + // first and second LSG bytes contain the number of words for all of the permutation evals except the last. 
+ // The third and fourth LSG bytes contain the number of words for the last permutation eval + let num_words := and(permutation_z_evals, 0xFFFFFFFF) + permutation_z_evals := shr(32, permutation_z_evals) + permutation_z_evals_ptr := add(permutation_z_evals_ptr, 0x20) + permutation_z_evals := mload(permutation_z_evals_ptr) + let l_0 := mload(add(theta_mptr, 0x200)) + { + // Get the first and second LSG bytes from the first permutation_z_evals word to load in (z, _, _) + let eval := addmod(l_0, sub(R, mulmod(l_0, calldataload(and(permutation_z_evals, PTR_BITMASK)), R)), R) + quotient_eval_numer := addmod(mulmod(quotient_eval_numer, y, R), eval, R) + } + + { + // Load in the last permutation_z_evals word + let perm_z_last_ptr := add(mul(last_idx, and(num_words, PTR_BITMASK)), permutation_z_evals_ptr) + let perm_z_last := calldataload(and(mload(perm_z_last_ptr), PTR_BITMASK)) + quotient_eval_numer := addmod( + mulmod(quotient_eval_numer, y, R), + mulmod( + mload(add(theta_mptr, 0x1C0)), + addmod( + mulmod(perm_z_last, perm_z_last, R), + sub(R, perm_z_last), + R + ), + R + ), + R + ) + + mstore(0x00, mulmod(mload(add(theta_mptr, 0x20)), mload(add(theta_mptr, 0x80)), R)) + + quotient_eval_numer := z_evals( + permutation_z_evals, + num_words, + perm_z_last_ptr, + permutation_z_evals_ptr, + theta_mptr, + l_0, + y, + quotient_eval_numer + ) + } + } + { + // MV lookup computations + mstore(0x0, mload(add(theta_mptr, 0x1C0))) // l_last + mstore(0x20, mload(add(theta_mptr, 0x200))) // l_0 + mstore(0x40, mload(add(theta_mptr, 0x1E0))) // l_blind + mstore(0x60, mload(theta_mptr)) // theta + mstore(0x80, mload(add(theta_mptr, 0x20))) // beta + let evals_ptr, meta_data := soa_layout_metadata({{ + vk_const_offsets["lookup_computations_len_offset"]|hex() + }}, vk_mptr) + // lookup meta data contains 32 byte flags for indicating if we need to do a lookup table lines + // expression evaluation or we can use the previous one cached in the table var. 
+ if meta_data { + let table + let end_ptr := and(meta_data, PTR_BITMASK) + let mv := and(shr(16, meta_data), BYTE_FLAG_BITMASK) + switch mv + case 0x0 { + for { } lt(evals_ptr, end_ptr) { } { + evals_ptr, table, quotient_eval_numer := mv_lookup_evals(table, evals_ptr, quotient_eval_numer, y) + } + } + case 0x1 { + mstore(0xA0, mload(add(theta_mptr, 0x40))) // gamma + for { } lt(evals_ptr, end_ptr) { } { + evals_ptr, table, quotient_eval_numer := lookup_evals(table, evals_ptr, quotient_eval_numer, y) + } + } + } + } + + pop(y) + + mstore(add(theta_mptr, 0x240), mulmod(quotient_eval_numer, mload(add(theta_mptr, 0x1a0)), R)) + + } + + // Compute quotient commitment + { + mstore(0x00, calldataload(mload(add(vk_mptr, {{ vk_const_offsets["last_quotient_x_cptr"]|hex() }})))) + mstore(0x20, calldataload(add(mload(add(vk_mptr, {{ vk_const_offsets["last_quotient_x_cptr"]|hex() }})), 0x20))) + let x_n := mload(add(theta_mptr, 0x180)) + for + { + let cptr := sub(mload(add(vk_mptr, {{ vk_const_offsets["last_quotient_x_cptr"]|hex() }})), 0x40) + let cptr_end := sub(mload(add(vk_mptr, {{ vk_const_offsets["first_quotient_x_cptr"]|hex() }})), 0x40) + } + lt(cptr_end, cptr) + {} + { + success := ec_mul_acc(success, x_n) + success := ec_add_acc(success, calldataload(cptr), calldataload(add(cptr, 0x20))) + cptr := sub(cptr, 0x40) + } + mstore(add(theta_mptr, 0x260), mload(0x00)) + mstore(add(theta_mptr, 0x280), mload(0x20)) + } + + // Compute pairing lhs and rhs + { + // point_computations + let pcs_ptr := add(vk_mptr, mload(add(vk_mptr, {{ vk_const_offsets["pcs_computations_len_offset"]|hex() }}))) + { + let point_computations := mload(pcs_ptr) + let x := mload(add(theta_mptr, 0x80)) + let omega := mload(add(vk_mptr, {{ vk_const_offsets["omega"]|hex() }})) + let omega_inv := mload(add(vk_mptr, {{ vk_const_offsets["omega_inv"]|hex() }})) + let x_pow_of_omega := mulmod(x, omega, R) + x_pow_of_omega, pcs_ptr := point_rots(point_computations, pcs_ptr, 8, x_pow_of_omega, omega) + pcs_ptr 
:= add(pcs_ptr, 0x20) + point_computations := mload(pcs_ptr) + // Store interm point + mstore(and(point_computations, PTR_BITMASK), x) + x_pow_of_omega := mulmod(x, omega_inv, R) + point_computations := shr(16, point_computations) + x_pow_of_omega, pcs_ptr := point_rots(point_computations, pcs_ptr, 24, x_pow_of_omega, omega_inv) + pcs_ptr := add(pcs_ptr, 0x20) + pop(x_pow_of_omega) + } + + // vanishing_computations + { + let mu := mload(add(theta_mptr, 0xE0)) + let vanishing_computations := mload(pcs_ptr) + mstore(0x20, 1) + for + { + let mptr := and(vanishing_computations, PTR_BITMASK) + vanishing_computations := shr(16, vanishing_computations) + let mptr_end := and(vanishing_computations, PTR_BITMASK) + vanishing_computations := shr(16, vanishing_computations) + let point_mptr := and(vanishing_computations, PTR_BITMASK) + } + lt(mptr, mptr_end) + { + mptr := add(mptr, 0x20) + point_mptr := add(point_mptr, 0x20) + } + { + mstore(mptr, addmod(mu, sub(R, mload(point_mptr)), R)) + } + pop(mu) + vanishing_computations := shr(16, vanishing_computations) + let num_words := and(vanishing_computations, BYTE_FLAG_BITMASK) + vanishing_computations := shr(8, vanishing_computations) + let s := mload(and(vanishing_computations, PTR_BITMASK)) + vanishing_computations := shr(16, vanishing_computations) + for { let i } lt(i, num_words) { i := add(i, 1) } { + for { } vanishing_computations { } { + s := mulmod(s, mload(and(vanishing_computations, PTR_BITMASK)), R) + vanishing_computations := shr(16, vanishing_computations) + } + pcs_ptr := add(pcs_ptr, 0x20) + vanishing_computations := mload(pcs_ptr) + } + let diff_ptr := and(vanishing_computations, PTR_BITMASK) + mstore(diff_ptr, s) + vanishing_computations := shr(16, vanishing_computations) + let diff + let sets_len := and(vanishing_computations, PTR_BITMASK) + pcs_ptr := add(pcs_ptr, 0x20) + vanishing_computations := mload(pcs_ptr) + for { let i := 0 } lt(i, sets_len) { i := add(i, 1) } { + diff := 
mload(and(vanishing_computations, PTR_BITMASK)) + vanishing_computations := shr(16, vanishing_computations) + for { } vanishing_computations { } { + diff := mulmod(diff, mload(and(vanishing_computations, PTR_BITMASK)), R) + vanishing_computations := shr(16, vanishing_computations) + } + diff_ptr := add(0x20, diff_ptr) + mstore(diff_ptr, diff) + if eq(i, 0) { + mstore(0x00, diff) + } + pcs_ptr := add(pcs_ptr, 0x20) + vanishing_computations := mload(pcs_ptr) + } + } + // coeff_computations + { + let coeff_len_data := mload(pcs_ptr) + // Load in the least significant byte of the `coeff_len_data` word to get the total number of words we will need to load in + // that contains the packed Vec. + let end_ptr_packed_lens := add(pcs_ptr, mul(0x20, and(coeff_len_data, BYTE_FLAG_BITMASK))) + coeff_len_data := shr(8, coeff_len_data) + let i := pcs_ptr + pcs_ptr := end_ptr_packed_lens + for { } lt(i, end_ptr_packed_lens) { i := add(i, 0x20) } { + for { } coeff_len_data { } { + coeff_len_data := coeff_computations(coeff_len_data, mload(pcs_ptr)) + pcs_ptr := add(pcs_ptr, 0x20) + } + coeff_len_data := mload(add(i, 0x20)) + } + } + // normalized_coeff_computations + { + let norm_coeff_data := mload(pcs_ptr) + success := batch_invert(success, 0, and(norm_coeff_data, PTR_BITMASK)) + norm_coeff_data := shr(16, norm_coeff_data) + let diff_0_inv := mload(0x00) + let mptr0 := and(norm_coeff_data, PTR_BITMASK) + norm_coeff_data := shr(16, norm_coeff_data) + mstore(mptr0, diff_0_inv) + for + { + let mptr := add(mptr0, 0x20) + let mptr_end := add(mptr0, and(norm_coeff_data, PTR_BITMASK)) + } + lt(mptr, mptr_end) + { mptr := add(mptr, 0x20) } + { + mstore(mptr, mulmod(mload(mptr), diff_0_inv, R)) + } + pcs_ptr := add(pcs_ptr, 0x20) + } + let coeff_ptr := 0x20 + // r_evals_computations + { + let r_evals_meta_data := mload(pcs_ptr) + let end_ptr_packed_lens := add(pcs_ptr, mul(0x20, and(r_evals_meta_data, BYTE_FLAG_BITMASK))) + r_evals_meta_data := shr(8, r_evals_meta_data) + let set_coeff := 
and(r_evals_meta_data, PTR_BITMASK) + r_evals_meta_data := shr(16, r_evals_meta_data) + let r_eval_mptr := and(r_evals_meta_data, PTR_BITMASK) + r_evals_meta_data := shr(16, r_evals_meta_data) + let i := pcs_ptr + pcs_ptr := end_ptr_packed_lens + let zeta := mload(add(theta_mptr, 0xA0)) + let quotient_eval := mload(add(theta_mptr, 0x240)) + let not_first + let r_eval + for { } lt(i, end_ptr_packed_lens) { i := add(i, 0x20) } { + for { } r_evals_meta_data { } { + r_eval, pcs_ptr := r_evals_computation(and(r_evals_meta_data, BYTE_FLAG_BITMASK), pcs_ptr, zeta, quotient_eval, coeff_ptr) + coeff_ptr := add(coeff_ptr, and(r_evals_meta_data, BYTE_FLAG_BITMASK)) + r_evals_meta_data := shr(8, r_evals_meta_data) + if not_first { + r_eval := mulmod(r_eval, mload(set_coeff), R) + set_coeff := add(set_coeff, 0x20) + } + not_first := 1 + mstore(r_eval_mptr, r_eval) + r_eval_mptr := add(r_eval_mptr, 0x20) + } + r_evals_meta_data := mload(add(i, 0x20)) + } + } + // coeff_sums_computation + { + let coeff_sums_data := mload(pcs_ptr) + let end_ptr_packed_lens := add(pcs_ptr, mul(0x20, and(coeff_sums_data, BYTE_FLAG_BITMASK))) + coeff_sums_data := shr(8, coeff_sums_data) + coeff_ptr := 0x20 + let i := pcs_ptr + pcs_ptr := end_ptr_packed_lens + for { } lt(i, end_ptr_packed_lens) { i := add(i, 0x20) } { + for { } coeff_sums_data { } { + let sum := mload(coeff_ptr) + let len := and(coeff_sums_data, BYTE_FLAG_BITMASK) + coeff_sums_data := shr(8, coeff_sums_data) + for { let j := 0x20 } lt(j, len) { j := add(j, 0x20) } { + sum := addmod(sum, mload(add(coeff_ptr, j)), R) + } + coeff_ptr := add(coeff_ptr, len) + mstore(and(coeff_sums_data, PTR_BITMASK), sum) + coeff_sums_data := shr(16, coeff_sums_data) + } + coeff_sums_data := mload(add(i, 0x20)) + } + + } + // r_eval_computation + { + let r_eval_data := mload(pcs_ptr) + let mptr_end := and(r_eval_data, PTR_BITMASK) + for + { + let mptr := 0x00 + r_eval_data := shr(16, r_eval_data) + let sum_mptr := and(r_eval_data, PTR_BITMASK) + } + 
lt(mptr, mptr_end) + { + mptr := add(mptr, 0x20) + sum_mptr := add(sum_mptr, 0x20) + } + { + mstore(mptr, mload(sum_mptr)) + } + r_eval_data := shr(16, r_eval_data) + success := batch_invert(success, 0, mptr_end) + let r_eval_ptr := and(r_eval_data, PTR_BITMASK) + let r_eval := mulmod(mload(sub(mptr_end, 0x20)), mload(r_eval_ptr), R) + r_eval_data := shr(16, r_eval_data) + for + { + let sum_inv_mptr := sub(mptr_end, 0x40) + let sum_inv_mptr_end := mptr_end + let r_eval_mptr := sub(r_eval_ptr, 0x20) + } + lt(sum_inv_mptr, sum_inv_mptr_end) + { + sum_inv_mptr := sub(sum_inv_mptr, 0x20) + r_eval_mptr := sub(r_eval_mptr, 0x20) + } + { + r_eval := mulmod(r_eval, mload(add(theta_mptr, 0xC0)), R) + r_eval := addmod(r_eval, mulmod(mload(sum_inv_mptr), mload(r_eval_mptr), R), R) + } + mstore(add(theta_mptr, 0x2A0), r_eval) + pcs_ptr := add(pcs_ptr, 0x20) + } + // pairing_input_computations + let nu := mload(add(theta_mptr, 0xC0)) + { + let pairing_input_meta_data := mload(pcs_ptr) + let end_ptr_packed_lens := add(pcs_ptr, mul(0x20, and(pairing_input_meta_data, BYTE_FLAG_BITMASK))) + pairing_input_meta_data := shr(8, pairing_input_meta_data) + let set_coeff := and(pairing_input_meta_data, PTR_BITMASK) + pairing_input_meta_data := shr(16, pairing_input_meta_data) + let ec_points_cptr_packed := and(pairing_input_meta_data, 0xFFFFFFFFFFFFFFFFFFFF) + pairing_input_meta_data := shr(80, pairing_input_meta_data) + let i := pcs_ptr + pcs_ptr := end_ptr_packed_lens + let first := 1 + for { } lt(i, end_ptr_packed_lens) { i := add(i, 0x20) } { + for { } pairing_input_meta_data { } { + let len := and(pairing_input_meta_data, BYTE_FLAG_BITMASK) + pairing_input_meta_data := shr(8, pairing_input_meta_data) + if first { + first := 0 + success := pairing_input_computations_first(len, pcs_ptr, mload(pcs_ptr), theta_mptr, success) + pcs_ptr := add(pcs_ptr, len) + continue + } + success := pairing_input_computations(len, pcs_ptr, mload(pcs_ptr), theta_mptr, success) + pcs_ptr := add(pcs_ptr, 
len) + success := ec_mul_tmp(success, mulmod(nu, mload(set_coeff), R)) + set_coeff := add(set_coeff, 0x20) + success := ec_add_acc(success, mload(0x80), mload(0xa0)) + // execute this if statement if not the last set + if or(0x1, lt(i, sub(end_ptr_packed_lens, 0x20))) { + nu := mulmod(nu, mload(add(theta_mptr, 0xC0)), R) + } + } + pairing_input_meta_data := mload(add(i, 0x20)) + } + mstore(0x80, mload(add(vk_mptr, {{ vk_const_offsets["g1_x"]|hex() }}))) + mstore(0xa0, mload(add(vk_mptr, {{ vk_const_offsets["g1_y"]|hex() }}))) + success := ec_mul_tmp(success, sub(R, mload(add(theta_mptr, 0x2A0)))) + success := ec_add_acc(success, mload(0x80), mload(0xa0)) + mstore(0x80, calldataload(and(ec_points_cptr_packed, PTR_BITMASK))) + ec_points_cptr_packed := shr(16, ec_points_cptr_packed) + mstore(0xa0, calldataload(and(ec_points_cptr_packed, PTR_BITMASK))) + ec_points_cptr_packed := shr(16, ec_points_cptr_packed) + success := ec_mul_tmp(success, sub(R, mload(and(ec_points_cptr_packed, PTR_BITMASK)))) + ec_points_cptr_packed := shr(16, ec_points_cptr_packed) + success := ec_add_acc(success, mload(0x80), mload(0xa0)) + let w_prime_x := calldataload(and(ec_points_cptr_packed, PTR_BITMASK)) + ec_points_cptr_packed := shr(16, ec_points_cptr_packed) + let w_prime_y := calldataload(and(ec_points_cptr_packed, PTR_BITMASK)) + mstore(0x80, w_prime_x) + mstore(0xa0, w_prime_y) + success := ec_mul_tmp(success, mload(add(theta_mptr, 0xE0))) + success := ec_add_acc(success, mload(0x80), mload(0xa0)) + mstore(add(theta_mptr, 0x2C0), mload(0x00)) + mstore(add(theta_mptr, 0x2E0), mload(0x20)) + mstore(add(theta_mptr, 0x300), w_prime_x) + mstore(add(theta_mptr, 0x320), w_prime_y) + } + } + + // Random linear combine with accumulator + if mload(add(vk_mptr, {{ vk_const_offsets["has_accumulator"]|hex() }})) { + mstore(0x00, mload(add(theta_mptr, 0x100))) + mstore(0x20, mload(add(theta_mptr, 0x120))) + mstore(0x40, mload(add(theta_mptr, 0x140))) + mstore(0x60, mload(add(theta_mptr, 0x160))) + 
mstore(0x80, mload(add(theta_mptr, 0x2c0))) + mstore(0xa0, mload(add(theta_mptr, 0x2e0))) + mstore(0xc0, mload(add(theta_mptr, 0x300))) + mstore(0xe0, mload(add(theta_mptr, 0x320))) + let challenge := mod(keccak256(0x00, 0x100), R) + + // [pairing_lhs] += challenge * [acc_lhs] + success := ec_mul_acc(success, challenge) + success := ec_add_acc(success, mload(add(theta_mptr, 0x2c0)), mload(add(theta_mptr, 0x2e0))) + mstore(add(theta_mptr, 0x2c0), mload(0x00)) + mstore(add(theta_mptr, 0x2e0), mload(0x20)) + + // [pairing_rhs] += challenge * [acc_rhs] + mstore(0x00, mload(add(theta_mptr, 0x140))) + mstore(0x20, mload(add(theta_mptr, 0x160))) + success := ec_mul_acc(success, challenge) + success := ec_add_acc(success, mload(add(theta_mptr, 0x300)), mload(add(theta_mptr, 0x320))) + mstore(add(theta_mptr, 0x300), mload(0x00)) + mstore(add(theta_mptr, 0x320), mload(0x20)) + } + + // Perform pairing + success := ec_pairing( + success, + vk_mptr, + mload(add(theta_mptr, 0x2c0)), + mload(add(theta_mptr, 0x2e0)), + mload(add(theta_mptr, 0x300)), + mload(add(theta_mptr, 0x320)) + ) + + // Revert if anything fails + if iszero(success) { + revert(0x00, 0x00) + } + + // Return 1 as result if everything succeeds + mstore(0x00, 1) + return(0x00, 0x20) + } + } +} diff --git a/templates/Halo2VerifyingArtifact.sol b/templates/Halo2VerifyingArtifact.sol new file mode 100644 index 0000000..f8c81f1 --- /dev/null +++ b/templates/Halo2VerifyingArtifact.sol @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: MIT + +pragma solidity ^0.8.0; + +contract Halo2VerifyingArtifact { + constructor() { + assembly { + {%- for (name, chunk) in constants %} + mstore({{ (32 * loop.index0)|hex_padded(4) }}, {{ chunk|hex_padded(64) }}) // {{ name }} + {%- endfor %} + {%- let offset_0 = constants.len() %} + {%- for (x, y) in fixed_comms %} + mstore({{ (32 * (offset_0 + 2 * loop.index0))|hex_padded(4) }}, {{ x|hex_padded(64) }}) // fixed_comms[{{ loop.index0 }}].x + mstore({{ (32 * (offset_0 + 2 * loop.index0 + 
1))|hex_padded(4) }}, {{ y|hex_padded(64) }}) // fixed_comms[{{ loop.index0 }}].y + {%- endfor %} + {%- let offset_1 = offset_0 + 2 * fixed_comms.len() %} + {%- for (x, y) in permutation_comms %} + mstore({{ (32 * (offset_1 + 2 * loop.index0))|hex_padded(4) }}, {{ x|hex_padded(64) }}) // permutation_comms[{{ loop.index0 }}].x + mstore({{ (32 * (offset_1 + 2 * loop.index0 + 1))|hex_padded(4) }}, {{ y|hex_padded(64) }}) // permutation_comms[{{ loop.index0 }}].y + {%- endfor %} + {%- let offset_2 = offset_1 + 2 * permutation_comms.len() %} + {%- for const in const_expressions %} + mstore({{ (32 * (offset_2 + loop.index0))|hex_padded(4) }}, {{ const|hex_padded(64) }}) // const_expressions[{{ loop.index0 }}] + {%- endfor %} + {%- let offset_4 = offset_2 + const_expressions.len() %} + mstore({{ (32 * offset_4)|hex_padded(4) }}, {{ (32 * gate_computations.length)|hex_padded(64) }}) // gate_computations length + {%- let offset_5 = offset_4 + 1 %} + {%- for packed_expression_word in gate_computations.packed_expression_words %} + {%- let offset = offset_5 + loop.index0 %} + mstore({{ (32 * offset)|hex_padded(4) }}, {{ packed_expression_word|hex_padded(64) }}) // packed_expression_word [{{ loop.index0 }}] + {%- endfor %} + {%- let offset_6 = offset_4 + gate_computations.len() %} + mstore({{ (32 * offset_6)|hex_padded(4) }}, {{ permutation_computations.permutation_meta_data|hex_padded(64) }}) // permutation_meta_data + {%- for word in permutation_computations.permutation_data %} + {%- let offset = offset_6 + 1 + loop.index0 %} + mstore({{ (32 * offset)|hex_padded(4) }}, {{ word|hex_padded(64) }}) // permutation_data [{{ loop.index0 }}] + {%- endfor %} + {%- let offset_7 = offset_6 + permutation_computations.len() %} + mstore({{ (32 * offset_7)|hex_padded(4) }}, {{ lookup_computations.meta_data|hex_padded(64) }}) // meta_data of lookup_computations + {%- let base_offset = offset_7 + 1 %} + {%- for lookup in lookup_computations.lookups %} + {%- let offset = base_offset + 
lookup.acc %} + mstore({{ (32 * offset)|hex_padded(4) }}, {{ lookup.evals|hex_padded(64) }}) // lookup_evals[{{ loop.index0 }}] + {%- for table_line in lookup.table_lines %} + mstore({{ (32 * (offset + 1 + loop.index0))|hex_padded(4) }}, {{ table_line|hex_padded(64) }}) // lookup_table_line [{{ loop.index0 }}] + {%- endfor %} + {%- for input in lookup.inputs %} + {%- let offset = offset + 1 + lookup.table_lines.len() + input.acc %} + {%- for expression in input.expression %} + mstore({{ (32 * (offset + loop.index0))|hex_padded(4) }}, {{ expression|hex_padded(64) }}) // input_expression [{{ loop.index0 }}] + {%- endfor %} + {%- endfor %} + {%- endfor %} + {%- let offset_8 = offset_7 + lookup_computations.len() %} + {%- for point_word in pcs_computations.point_computations %} + mstore({{ (32 * (offset_8 + loop.index0))|hex_padded(4) }}, {{ point_word|hex_padded(64) }}) // point_computations[{{ loop.index0 }}] + {%- endfor %} + {%- let offset_9 = offset_8 + pcs_computations.point_computations.len() %} + {%- for vanishing_word in pcs_computations.vanishing_computations %} + mstore({{ (32 * (offset_9 + loop.index0))|hex_padded(4) }}, {{ vanishing_word|hex_padded(64) }}) // vanishing_computations[{{ loop.index0 }}] + {%- endfor %} + {%- let offset_10 = offset_9 + pcs_computations.vanishing_computations.len() %} + {%- for coeff_word in pcs_computations.coeff_computations %} + mstore({{ (32 * (offset_10 + loop.index0))|hex_padded(4) }}, {{ coeff_word|hex_padded(64) }}) // coeff_computations[{{ loop.index0 }}] + {%- endfor %} + {%- let offset_11 = offset_10 + pcs_computations.coeff_computations.len() %} + mstore({{ (32 * offset_11)|hex_padded(4) }}, {{ pcs_computations.normalized_coeff_computations|hex_padded(64) }}) // normalized_coeff_computations + {%- let offset_12 = offset_11 + 1 %} + {%- for r_eval_word in pcs_computations.r_evals_computations %} + mstore({{ (32 * (offset_12 + loop.index0))|hex_padded(4) }}, {{ r_eval_word|hex_padded(64) }}) // r_evals_computations[{{ 
loop.index0 }}] + {%- endfor %} + {%- let offset_13 = offset_12 + pcs_computations.r_evals_computations.len() %} + {%- for coeff_sum_word in pcs_computations.coeff_sums_computation %} + mstore({{ (32 * (offset_13 + loop.index0))|hex_padded(4) }}, {{ coeff_sum_word|hex_padded(64) }}) // coeff_sums_computations[{{ loop.index0 }}] + {%- endfor %} + {%- let offset_14 = offset_13 + pcs_computations.coeff_sums_computation.len() %} + mstore({{ (32 * offset_14)|hex_padded(4) }}, {{ pcs_computations.r_eval_computations|hex_padded(64) }}) // r_eval_computations + {%- let offset_15 = offset_14 + 1 %} + {%- for pairing_input_word in pcs_computations.pairing_input_computations %} + mstore({{ (32 * (offset_15 + loop.index0))|hex_padded(4) }}, {{ pairing_input_word|hex_padded(64) }}) // pairing_input_computations[{{ loop.index0 }}] + {%- endfor %} + return(0, {{ (32 * (offset_8 + pcs_computations.len()))|hex() }}) + } + } +} diff --git a/templates/Halo2VerifyingKey.sol b/templates/Halo2VerifyingKey.sol index c9edfce..91b3b73 100644 --- a/templates/Halo2VerifyingKey.sol +++ b/templates/Halo2VerifyingKey.sol @@ -8,18 +8,17 @@ contract Halo2VerifyingKey { {%- for (name, chunk) in constants %} mstore({{ (32 * loop.index0)|hex_padded(4) }}, {{ chunk|hex_padded(64) }}) // {{ name }} {%- endfor %} + {%- let offset_0 = constants.len() %} {%- for (x, y) in fixed_comms %} - {%- let offset = constants.len() %} - mstore({{ (32 * (offset + 2 * loop.index0))|hex_padded(4) }}, {{ x|hex_padded(64) }}) // fixed_comms[{{ loop.index0 }}].x - mstore({{ (32 * (offset + 2 * loop.index0 + 1))|hex_padded(4) }}, {{ y|hex_padded(64) }}) // fixed_comms[{{ loop.index0 }}].y + mstore({{ (32 * (offset_0 + 2 * loop.index0))|hex_padded(4) }}, {{ x|hex_padded(64) }}) // fixed_comms[{{ loop.index0 }}].x + mstore({{ (32 * (offset_0 + 2 * loop.index0 + 1))|hex_padded(4) }}, {{ y|hex_padded(64) }}) // fixed_comms[{{ loop.index0 }}].y {%- endfor %} + {%- let offset_1 = offset_0 + 2 * fixed_comms.len() %} {%- for (x, 
y) in permutation_comms %} - {%- let offset = constants.len() + 2 * fixed_comms.len() %} - mstore({{ (32 * (offset + 2 * loop.index0))|hex_padded(4) }}, {{ x|hex_padded(64) }}) // permutation_comms[{{ loop.index0 }}].x - mstore({{ (32 * (offset + 2 * loop.index0 + 1))|hex_padded(4) }}, {{ y|hex_padded(64) }}) // permutation_comms[{{ loop.index0 }}].y + mstore({{ (32 * (offset_1 + 2 * loop.index0))|hex_padded(4) }}, {{ x|hex_padded(64) }}) // permutation_comms[{{ loop.index0 }}].x + mstore({{ (32 * (offset_1 + 2 * loop.index0 + 1))|hex_padded(4) }}, {{ y|hex_padded(64) }}) // permutation_comms[{{ loop.index0 }}].y {%- endfor %} - - return(0, {{ (32 * (constants.len() + 2 * fixed_comms.len() + 2 * permutation_comms.len()))|hex() }}) + return(0, {{ (32 * (offset_1 + 2 * permutation_comms.len()))|hex() }}) } } }