From 9ee0ced481367744c60c8c92988ceb1b7325a1e8 Mon Sep 17 00:00:00 2001 From: "Eduard S." Date: Fri, 15 Dec 2023 17:11:24 +0100 Subject: [PATCH] Port verify_proof to fe-be split --- halo2_proofs/src/plonk.rs | 107 ++++- halo2_proofs/src/plonk/circuit.rs | 134 ++++-- halo2_proofs/src/plonk/evaluation.rs | 4 +- halo2_proofs/src/plonk/keygen.rs | 13 +- halo2_proofs/src/plonk/lookup/prover.rs | 8 +- halo2_proofs/src/plonk/lookup/verifier.rs | 45 +- halo2_proofs/src/plonk/permutation/prover.rs | 12 +- .../src/plonk/permutation/verifier.rs | 170 +++++++ halo2_proofs/src/plonk/prover.rs | 141 ++---- halo2_proofs/src/plonk/shuffle/prover.rs | 6 +- halo2_proofs/src/plonk/shuffle/verifier.rs | 26 +- halo2_proofs/src/plonk/vanishing/verifier.rs | 18 +- halo2_proofs/src/plonk/verifier.rs | 424 +++++++++++++++++- 13 files changed, 930 insertions(+), 178 deletions(-) diff --git a/halo2_proofs/src/plonk.rs b/halo2_proofs/src/plonk.rs index af9aac3f87..ad4dc16653 100644 --- a/halo2_proofs/src/plonk.rs +++ b/halo2_proofs/src/plonk.rs @@ -15,7 +15,7 @@ use crate::helpers::{ }; use crate::poly::{ Coeff, EvaluationDomain, ExtendedLagrangeCoeff, LagrangeCoeff, PinnedEvaluationDomain, - Polynomial, + Polynomial, Rotation, }; use crate::transcript::{ChallengeScalar, EncodedChallenge, Transcript}; use crate::SerdeFormat; @@ -43,6 +43,108 @@ pub use verifier::*; use evaluation::Evaluator; use std::io; +/// List of queries (columns and rotations) used by a circuit +#[derive(Debug, Clone)] +pub struct Queries { + /// List of unique advice queries + pub advice: Vec<(Column, Rotation)>, + /// List of unique instance queries + pub instance: Vec<(Column, Rotation)>, + /// List of unique fixed queries + pub fixed: Vec<(Column, Rotation)>, + /// Contains an integer for each advice column + /// identifying how many distinct queries it has + /// so far; should be same length as cs.num_advice_columns. + pub num_advice_queries: Vec, +} + +impl Queries { + /// Returns the minimum necessary rows that need to exist in order to + /// account for e.g. blinding factors. + pub fn minimum_rows(&self) -> usize { + self.blinding_factors() // m blinding factors + + 1 // for l_{-(m + 1)} (l_last) + + 1 // for l_0 (just for extra breathing room for the permutation + // argument, to essentially force a separation in the + // permutation polynomial between the roles of l_last, l_0 + // and the interstitial values.) + + 1 // for at least one row + } + + /// Compute the number of blinding factors necessary to perfectly blind + /// each of the prover's witness polynomials. + pub fn blinding_factors(&self) -> usize { + // All of the prover's advice columns are evaluated at no more than + let factors = *self.num_advice_queries.iter().max().unwrap_or(&1); + // distinct points during gate checks. + + // - The permutation argument witness polynomials are evaluated at most 3 times. + // - Each lookup argument has independent witness polynomials, and they are + // evaluated at most 2 times. + let factors = std::cmp::max(3, factors); + + // Each polynomial is evaluated at most an additional time during + // multiopen (at x_3 to produce q_evals): + let factors = factors + 1; + + // h(x) is derived by the other evaluations so it does not reveal + // anything; in fact it does not even appear in the proof. 
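// A minimal usage sketch (not part of this patch): the prover and the new
// verifier both turn these counts into a usable-row boundary, roughly as in
// the hypothetical helper below, where `params_n` stands in for
// `params.n() as usize`.
fn usable_rows_sketch(queries: &Queries, params_n: usize) -> std::ops::Range<usize> {
    // Rows at or above this index are reserved for the blinding rows plus the
    // single l_last row, mirroring minimum_rows() above.
    0..(params_n - (queries.blinding_factors() + 1))
}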
+ + // h(x_3) is also not revealed; the verifier only learns a single + // evaluation of a polynomial in x_1 which has h(x_3) and another random + // polynomial evaluated at x_3 as coefficients -- this random polynomial + // is "random_poly" in the vanishing argument. + + // Add an additional blinding factor as a slight defense against + // off-by-one errors. + factors + 1 + } + + pub(crate) fn get_advice_query_index(&self, column: Column, at: Rotation) -> usize { + for (index, advice_query) in self.advice.iter().enumerate() { + if advice_query == &(column, at) { + return index; + } + } + + panic!("get_advice_query_index called for non-existent query"); + } + + pub(crate) fn get_fixed_query_index(&self, column: Column, at: Rotation) -> usize { + for (index, fixed_query) in self.fixed.iter().enumerate() { + if fixed_query == &(column, at) { + return index; + } + } + + panic!("get_fixed_query_index called for non-existent query"); + } + + pub(crate) fn get_instance_query_index(&self, column: Column, at: Rotation) -> usize { + for (index, instance_query) in self.instance.iter().enumerate() { + if instance_query == &(column, at) { + return index; + } + } + + panic!("get_instance_query_index called for non-existent query"); + } + + pub(crate) fn get_any_query_index(&self, column: Column, at: Rotation) -> usize { + match column.column_type() { + Any::Advice(_) => { + self.get_advice_query_index(Column::::try_from(column).unwrap(), at) + } + Any::Fixed => { + self.get_fixed_query_index(Column::::try_from(column).unwrap(), at) + } + Any::Instance => { + self.get_instance_query_index(Column::::try_from(column).unwrap(), at) + } + } + } +} + /// This is a verifying key which allows for the verification of proofs for a /// particular circuit. #[derive(Clone, Debug)] @@ -51,6 +153,7 @@ pub struct VerifyingKeyV2 { fixed_commitments: Vec, permutation: permutation::VerifyingKey, cs: ConstraintSystemV2Backend, + queries: Queries, /// Cached maximum degree of `cs` (which doesn't change after construction). cs_degree: usize, /// The representative of this `VerifyingKey` in transcripts. @@ -69,12 +172,14 @@ impl VerifyingKeyV2 { { // Compute cached values. let cs_degree = cs.degree(); + let queries = cs.collect_queries(); let mut vk = Self { domain, fixed_commitments, permutation, cs, + queries, cs_degree, // Temporary, this is not pinned. 
transcript_repr: C::Scalar::ZERO, diff --git a/halo2_proofs/src/plonk/circuit.rs b/halo2_proofs/src/plonk/circuit.rs index b78acd8294..caa70bb029 100644 --- a/halo2_proofs/src/plonk/circuit.rs +++ b/halo2_proofs/src/plonk/circuit.rs @@ -1,4 +1,4 @@ -use super::{lookup, permutation, shuffle, Assigned, Error}; +use super::{lookup, permutation, shuffle, Assigned, Error, Queries}; use crate::circuit::layouter::SyncDeps; use crate::dev::metadata; use crate::{ @@ -9,6 +9,7 @@ use core::cmp::max; use core::ops::{Add, Mul}; use ff::Field; use sealed::SealedPhase; +use std::collections::BTreeSet; use std::collections::HashMap; use std::fmt::Debug; use std::iter::{Product, Sum}; @@ -1587,6 +1588,48 @@ pub struct CompiledCircuitV2 { pub(crate) cs: ConstraintSystemV2Backend, } +struct QueriesSet { + advice: BTreeSet<(Column, Rotation)>, + instance: BTreeSet<(Column, Rotation)>, + fixed: BTreeSet<(Column, Rotation)>, +} + +fn collect_queries(expr: &Expression, queries: &mut QueriesSet) { + match expr { + Expression::Constant(_) => (), + Expression::Selector(_selector) => { + panic!("no Selector should arrive to the Backend"); + } + Expression::Fixed(query) => { + queries + .fixed + .insert((Column::new(query.column_index, Fixed), query.rotation)); + } + Expression::Advice(query) => { + queries.advice.insert(( + Column::new(query.column_index, Advice { phase: query.phase }), + query.rotation, + )); + } + Expression::Instance(query) => { + queries + .instance + .insert((Column::new(query.column_index, Instance), query.rotation)); + } + Expression::Challenge(_) => (), + Expression::Negated(a) => collect_queries(a, queries), + Expression::Sum(a, b) => { + collect_queries(a, queries); + collect_queries(b, queries); + } + Expression::Product(a, b) => { + collect_queries(a, queries); + collect_queries(b, queries); + } + Expression::Scaled(a, _) => collect_queries(a, queries), + }; +} + /// This is a description of the circuit environment, such as the gate, column and /// permutation arrangements. #[derive(Debug, Clone)] @@ -1611,10 +1654,6 @@ pub struct ConstraintSystemV2Backend { // pub(crate) selector_map: Vec>, pub(crate) gates: Vec>, // pub(crate) advice_queries: Vec<(Column, Rotation)>, - // Contains an integer for each advice column - // identifying how many distinct queries it has - // so far; should be same length as num_advice_columns. - num_advice_queries: Vec, // pub(crate) instance_queries: Vec<(Column, Rotation)>, // pub(crate) fixed_queries: Vec<(Column, Rotation)>, @@ -1683,47 +1722,6 @@ impl ConstraintSystemV2Backend { degree } - /// Returns the minimum necessary rows that need to exist in order to - /// account for e.g. blinding factors. - pub fn minimum_rows(&self) -> usize { - self.blinding_factors() // m blinding factors - + 1 // for l_{-(m + 1)} (l_last) - + 1 // for l_0 (just for extra breathing room for the permutation - // argument, to essentially force a separation in the - // permutation polynomial between the roles of l_last, l_0 - // and the interstitial values.) - + 1 // for at least one row - } - - /// Compute the number of blinding factors necessary to perfectly blind - /// each of the prover's witness polynomials. - pub fn blinding_factors(&self) -> usize { - // All of the prover's advice columns are evaluated at no more than - let factors = *self.num_advice_queries.iter().max().unwrap_or(&1); - // distinct points during gate checks. - - // - The permutation argument witness polynomials are evaluated at most 3 times. 
- // - Each lookup argument has independent witness polynomials, and they are - // evaluated at most 2 times. - let factors = std::cmp::max(3, factors); - - // Each polynomial is evaluated at most an additional time during - // multiopen (at x_3 to produce q_evals): - let factors = factors + 1; - - // h(x) is derived by the other evaluations so it does not reveal - // anything; in fact it does not even appear in the proof. - - // h(x_3) is also not revealed; the verifier only learns a single - // evaluation of a polynomial in x_1 which has h(x_3) and another random - // polynomial evaluated at x_3 as coefficients -- this random polynomial - // is "random_poly" in the vanishing argument. - - // Add an additional blinding factor as a slight defense against - // off-by-one errors. - factors + 1 - } - pub(crate) fn phases(&self) -> Vec { let max_phase = self .advice_column_phase @@ -1733,6 +1731,50 @@ impl ConstraintSystemV2Backend { .unwrap_or_default(); (0..=max_phase).collect() } + + pub(crate) fn collect_queries(&self) -> Queries { + let mut queries = QueriesSet { + advice: BTreeSet::new(), + instance: BTreeSet::new(), + fixed: BTreeSet::new(), + }; + let mut num_advice_queries = vec![0; self.num_advice_columns]; + + for gate in &self.gates { + for expr in gate.polynomials() { + collect_queries(expr, &mut queries); + } + } + for lookup in &self.lookups { + for expr in lookup + .input_expressions + .iter() + .chain(lookup.table_expressions.iter()) + { + collect_queries(expr, &mut queries); + } + } + for shuffle in &self.shuffles { + for expr in shuffle + .input_expressions + .iter() + .chain(shuffle.shuffle_expressions.iter()) + { + collect_queries(expr, &mut queries); + } + } + + for (column, _) in queries.advice.iter() { + num_advice_queries[column.index()] += 1; + } + + Queries { + advice: queries.advice.into_iter().collect(), + instance: queries.instance.into_iter().collect(), + fixed: queries.fixed.into_iter().collect(), + num_advice_queries, + } + } } /// This is a description of the circuit environment, such as the gate, column and diff --git a/halo2_proofs/src/plonk/evaluation.rs b/halo2_proofs/src/plonk/evaluation.rs index efd4f6081f..83f52f16ac 100644 --- a/halo2_proofs/src/plonk/evaluation.rs +++ b/halo2_proofs/src/plonk/evaluation.rs @@ -295,6 +295,7 @@ impl Evaluator { } /// Creates a new evaluation structure + // TODO: Remove pub fn new(cs: &ConstraintSystem) -> Self { let mut ev = Evaluator::default(); @@ -475,7 +476,7 @@ impl Evaluator { // Permutations let sets = &permutation.sets; if !sets.is_empty() { - let blinding_factors = pk.vk.cs.blinding_factors(); + let blinding_factors = pk.vk.queries.blinding_factors(); let last_rotation = Rotation(-((blinding_factors + 1) as i32)); let chunk_len = pk.vk.cs.degree() - 2; let delta_start = beta * &C::Scalar::ZETA; @@ -695,6 +696,7 @@ impl Evaluator { } /// Evaluate h poly + // TODO: Remove #[allow(clippy::too_many_arguments)] pub(in crate::plonk) fn evaluate_h( &self, diff --git a/halo2_proofs/src/plonk/keygen.rs b/halo2_proofs/src/plonk/keygen.rs index 405e1c11f8..b8891ce26e 100644 --- a/halo2_proofs/src/plonk/keygen.rs +++ b/halo2_proofs/src/plonk/keygen.rs @@ -214,9 +214,10 @@ where C::Scalar: FromUniformBytes<64>, { let cs = &circuit.cs; + let queries = cs.collect_queries(); let domain = EvaluationDomain::new(cs.degree() as u32, params.k()); - if (params.n() as usize) < cs.minimum_rows() { + if (params.n() as usize) < queries.minimum_rows() { return Err(Error::not_enough_rows_available(params.k())); } @@ -343,7 +344,7 @@ where 
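// Hedged sketch of how collect_queries feeds key generation (hypothetical
// helper; `params_n` / `params_k` stand in for `params.n()` / `params.k()`):
fn check_circuit_fits<F: Field>(
    cs: &ConstraintSystemV2Backend<F>,
    params_n: u64,
    params_k: u32,
) -> Result<Queries, Error> {
    // Query collection happens once; the result is cached on the verifying key.
    let queries = cs.collect_queries();
    if (params_n as usize) < queries.minimum_rows() {
        return Err(Error::not_enough_rows_available(params_k));
    }
    Ok(queries)
}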
{ let cs = &circuit.cs; - if (params.n() as usize) < cs.minimum_rows() { + if (params.n() as usize) < vk.queries.minimum_rows() { return Err(Error::not_enough_rows_available(params.k())); } @@ -376,7 +377,11 @@ where // Compute l_blind(X) which evaluates to 1 for each blinding factor row // and 0 otherwise over the domain. let mut l_blind = vk.domain.empty_lagrange(); - for evaluation in l_blind[..].iter_mut().rev().take(cs.blinding_factors()) { + for evaluation in l_blind[..] + .iter_mut() + .rev() + .take(vk.queries.blinding_factors()) + { *evaluation = C::Scalar::ONE; } let l_blind = vk.domain.lagrange_to_coeff(l_blind); @@ -385,7 +390,7 @@ where // Compute l_last(X) which evaluates to 1 on the first inactive row (just // before the blinding factors) and 0 otherwise over the domain let mut l_last = vk.domain.empty_lagrange(); - l_last[params.n() as usize - cs.blinding_factors() - 1] = C::Scalar::ONE; + l_last[params.n() as usize - vk.queries.blinding_factors() - 1] = C::Scalar::ONE; let l_last = vk.domain.lagrange_to_coeff(l_last); let l_last = vk.domain.coeff_to_extended(l_last); diff --git a/halo2_proofs/src/plonk/lookup/prover.rs b/halo2_proofs/src/plonk/lookup/prover.rs index 203b554939..c6c9859046 100644 --- a/halo2_proofs/src/plonk/lookup/prover.rs +++ b/halo2_proofs/src/plonk/lookup/prover.rs @@ -166,6 +166,7 @@ impl> Argument { /// - constructs Permuted struct using permuted_input_value = A', and /// permuted_table_expression = S'. /// The Permuted struct is used to update the Lookup, and is then returned. + // TODO: Remove #[allow(clippy::too_many_arguments)] pub(in crate::plonk) fn commit_permuted< 'a, @@ -286,7 +287,7 @@ impl Permuted { mut rng: R, transcript: &mut T, ) -> Result, Error> { - let blinding_factors = pk.vk.cs.blinding_factors(); + let blinding_factors = pk.vk.queries.blinding_factors(); // Goal is to compute the products of fractions // // Numerator: (\theta^{m-1} a_0(\omega^i) + \theta^{m-2} a_1(\omega^i) + ... + \theta a_{m-2}(\omega^i) + a_{m-1}(\omega^i) + \beta) @@ -587,6 +588,7 @@ impl Committed { Ok(Evaluated { constructed: self }) } + // TODO: Remove pub(in crate::plonk) fn evaluate, T: TranscriptWrite>( self, pk: &ProvingKey, @@ -661,6 +663,7 @@ impl Evaluated { })) } + // TODO: Remove pub(in crate::plonk) fn open<'a>( &'a self, pk: &'a ProvingKey, @@ -720,7 +723,7 @@ fn permute_expression_pair_v2<'params, C: CurveAffine, P: Params<'params, C>, R: input_expression: &Polynomial, table_expression: &Polynomial, ) -> Result, Error> { - let blinding_factors = pk.vk.cs.blinding_factors(); + let blinding_factors = pk.vk.queries.blinding_factors(); let usable_rows = params.n() as usize - (blinding_factors + 1); let mut permuted_input_expression: Vec = input_expression.to_vec(); @@ -804,6 +807,7 @@ fn permute_expression_pair_v2<'params, C: CurveAffine, P: Params<'params, C>, R: /// - the first row in a sequence of like values in A' is the row /// that has the corresponding value in S'. /// This method returns (A', S') if no errors are encountered. 
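// A hedged sketch (not part of this patch) of the row-wise property described
// above: over the usable rows, every value of A' either equals S' at the same
// row or repeats the A' value from the previous row.
fn permuted_pair_invariant<F: PartialEq>(a_prime: &[F], s_prime: &[F]) -> bool {
    a_prime.len() == s_prime.len()
        && a_prime
            .iter()
            .enumerate()
            .all(|(i, a)| *a == s_prime[i] || (i > 0 && *a == a_prime[i - 1]))
}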
+// TODO: Remove fn permute_expression_pair<'params, C: CurveAffine, P: Params<'params, C>, R: RngCore>( pk: &ProvingKey, params: &P, diff --git a/halo2_proofs/src/plonk/lookup/verifier.rs b/halo2_proofs/src/plonk/lookup/verifier.rs index bbc86c8e9d..5667a54c5d 100644 --- a/halo2_proofs/src/plonk/lookup/verifier.rs +++ b/halo2_proofs/src/plonk/lookup/verifier.rs @@ -6,7 +6,7 @@ use super::super::{ use super::Argument; use crate::{ arithmetic::CurveAffine, - plonk::{Error, VerifyingKey}, + plonk::{Error, VerifyingKey, VerifyingKeyV2}, poly::{commitment::MSM, Rotation, VerifierQuery}, transcript::{EncodedChallenge, TranscriptRead}, }; @@ -168,6 +168,49 @@ impl Evaluated { )) } + // NOTE: Copy of queries with VerifyingKeyV2 + pub(in crate::plonk) fn queries_v2<'r, M: MSM + 'r>( + &'r self, + vk: &'r VerifyingKeyV2, + x: ChallengeX, + ) -> impl Iterator> + Clone { + let x_inv = vk.domain.rotate_omega(*x, Rotation::prev()); + let x_next = vk.domain.rotate_omega(*x, Rotation::next()); + + iter::empty() + // Open lookup product commitment at x + .chain(Some(VerifierQuery::new_commitment( + &self.committed.product_commitment, + *x, + self.product_eval, + ))) + // Open lookup input commitments at x + .chain(Some(VerifierQuery::new_commitment( + &self.committed.permuted.permuted_input_commitment, + *x, + self.permuted_input_eval, + ))) + // Open lookup table commitments at x + .chain(Some(VerifierQuery::new_commitment( + &self.committed.permuted.permuted_table_commitment, + *x, + self.permuted_table_eval, + ))) + // Open lookup input commitments at \omega^{-1} x + .chain(Some(VerifierQuery::new_commitment( + &self.committed.permuted.permuted_input_commitment, + x_inv, + self.permuted_input_inv_eval, + ))) + // Open lookup product commitment at \omega x + .chain(Some(VerifierQuery::new_commitment( + &self.committed.product_commitment, + x_next, + self.product_next_eval, + ))) + } + + // TODO: Remove pub(in crate::plonk) fn queries<'r, M: MSM + 'r>( &'r self, vk: &'r VerifyingKey, diff --git a/halo2_proofs/src/plonk/permutation/prover.rs b/halo2_proofs/src/plonk/permutation/prover.rs index 5bc3924708..d5683a815b 100644 --- a/halo2_proofs/src/plonk/permutation/prover.rs +++ b/halo2_proofs/src/plonk/permutation/prover.rs @@ -72,7 +72,7 @@ impl Argument { // 3 circuit for the permutation argument. assert!(pk.vk.cs_degree >= 3); let chunk_len = pk.vk.cs_degree - 2; - let blinding_factors = pk.vk.cs.blinding_factors(); + let blinding_factors = pk.vk.queries.blinding_factors(); // Each column gets its own delta power. 
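// (Concretely: the running `deltaomega` value below tags column i, counted
// across the whole argument, and row j with delta^i * omega^j, so every cell
// of the identity permutation maps to a distinct field element before beta
// and gamma are folded in.)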
let mut deltaomega = C::Scalar::ONE; @@ -191,6 +191,8 @@ impl Argument { Ok(Committed { sets }) } + + // TODO: Remove #[allow(clippy::too_many_arguments)] pub(in crate::plonk) fn commit< 'params, @@ -391,7 +393,7 @@ impl Constructed { transcript: &mut T, ) -> Result, Error> { let domain = &pk.vk.domain; - let blinding_factors = pk.vk.cs.blinding_factors(); + let blinding_factors = pk.vk.queries.blinding_factors(); { let mut sets = self.sets.iter(); @@ -428,6 +430,8 @@ impl Constructed { Ok(Evaluated { constructed: self }) } + + // TODO: Remove pub(in crate::plonk) fn evaluate, T: TranscriptWrite>( self, pk: &plonk::ProvingKey, @@ -481,7 +485,7 @@ impl Evaluated { pk: &'a plonk::ProvingKeyV2, x: ChallengeX, ) -> impl Iterator> + Clone { - let blinding_factors = pk.vk.cs.blinding_factors(); + let blinding_factors = pk.vk.queries.blinding_factors(); let x_next = pk.vk.domain.rotate_omega(*x, Rotation::next()); let x_last = pk .vk @@ -521,6 +525,8 @@ impl Evaluated { }), ) } + + // TODO: Remove pub(in crate::plonk) fn open<'a>( &'a self, pk: &'a plonk::ProvingKey, diff --git a/halo2_proofs/src/plonk/permutation/verifier.rs b/halo2_proofs/src/plonk/permutation/verifier.rs index a4637422ae..2cb6e6f925 100644 --- a/halo2_proofs/src/plonk/permutation/verifier.rs +++ b/halo2_proofs/src/plonk/permutation/verifier.rs @@ -30,6 +30,29 @@ pub struct Evaluated { } impl Argument { + pub(crate) fn read_product_commitments_v2< + C: CurveAffine, + E: EncodedChallenge, + T: TranscriptRead, + >( + &self, + vk: &plonk::VerifyingKeyV2, + transcript: &mut T, + ) -> Result, Error> { + let chunk_len = vk.cs_degree - 2; + + let permutation_product_commitments = self + .columns + .chunks(chunk_len) + .map(|_| transcript.read_point()) + .collect::, _>>()?; + + Ok(Committed { + permutation_product_commitments, + }) + } + + // TODO: Remove pub(crate) fn read_product_commitments< C: CurveAffine, E: EncodedChallenge, @@ -99,6 +122,114 @@ impl Committed { } impl Evaluated { + // NOTE: Copy of expressions with VerifyingKeyV2 + #[allow(clippy::too_many_arguments)] + pub(in crate::plonk) fn expressions_v2<'a>( + &'a self, + vk: &'a plonk::VerifyingKeyV2, + p: &'a Argument, + common: &'a CommonEvaluated, + advice_evals: &'a [C::Scalar], + fixed_evals: &'a [C::Scalar], + instance_evals: &'a [C::Scalar], + l_0: C::Scalar, + l_last: C::Scalar, + l_blind: C::Scalar, + beta: ChallengeBeta, + gamma: ChallengeGamma, + x: ChallengeX, + ) -> impl Iterator + 'a { + let chunk_len = vk.cs_degree - 2; + iter::empty() + // Enforce only for the first set. + // l_0(X) * (1 - z_0(X)) = 0 + .chain( + self.sets + .first() + .map(|first_set| l_0 * &(C::Scalar::ONE - &first_set.permutation_product_eval)), + ) + // Enforce only for the last set. + // l_last(X) * (z_l(X)^2 - z_l(X)) = 0 + .chain(self.sets.last().map(|last_set| { + (last_set.permutation_product_eval.square() - &last_set.permutation_product_eval) + * &l_last + })) + // Except for the first set, enforce. 
+ // l_0(X) * (z_i(X) - z_{i-1}(\omega^(last) X)) = 0 + .chain( + self.sets + .iter() + .skip(1) + .zip(self.sets.iter()) + .map(|(set, last_set)| { + ( + set.permutation_product_eval, + last_set.permutation_product_last_eval.unwrap(), + ) + }) + .map(move |(set, prev_last)| (set - &prev_last) * &l_0), + ) + // And for all the sets we enforce: + // (1 - (l_last(X) + l_blind(X))) * ( + // z_i(\omega X) \prod (p(X) + \beta s_i(X) + \gamma) + // - z_i(X) \prod (p(X) + \delta^i \beta X + \gamma) + // ) + .chain( + self.sets + .iter() + .zip(p.columns.chunks(chunk_len)) + .zip(common.permutation_evals.chunks(chunk_len)) + .enumerate() + .map(move |(chunk_index, ((set, columns), permutation_evals))| { + let mut left = set.permutation_product_next_eval; + for (eval, permutation_eval) in columns + .iter() + .map(|&column| match column.column_type() { + Any::Advice(_) => { + advice_evals + [vk.queries.get_any_query_index(column, Rotation::cur())] + } + Any::Fixed => { + fixed_evals + [vk.queries.get_any_query_index(column, Rotation::cur())] + } + Any::Instance => { + instance_evals + [vk.queries.get_any_query_index(column, Rotation::cur())] + } + }) + .zip(permutation_evals.iter()) + { + left *= &(eval + &(*beta * permutation_eval) + &*gamma); + } + + let mut right = set.permutation_product_eval; + let mut current_delta = (*beta * &*x) + * &(::DELTA + .pow_vartime([(chunk_index * chunk_len) as u64])); + for eval in columns.iter().map(|&column| match column.column_type() { + Any::Advice(_) => { + advice_evals + [vk.queries.get_any_query_index(column, Rotation::cur())] + } + Any::Fixed => { + fixed_evals[vk.queries.get_any_query_index(column, Rotation::cur())] + } + Any::Instance => { + instance_evals + [vk.queries.get_any_query_index(column, Rotation::cur())] + } + }) { + right *= &(eval + ¤t_delta + &*gamma); + current_delta *= &C::Scalar::DELTA; + } + + (left - &right) * (C::Scalar::ONE - &(l_last + &l_blind)) + }), + ) + } + + // TODO: Remove #[allow(clippy::too_many_arguments)] pub(in crate::plonk) fn expressions<'a>( &'a self, @@ -201,6 +332,45 @@ impl Evaluated { ) } + // NOTE: Copy of queries with VerifyingKeyV2 + pub(in crate::plonk) fn queries_v2<'r, M: MSM + 'r>( + &'r self, + vk: &'r plonk::VerifyingKeyV2, + x: ChallengeX, + ) -> impl Iterator> + Clone { + let blinding_factors = vk.queries.blinding_factors(); + let x_next = vk.domain.rotate_omega(*x, Rotation::next()); + let x_last = vk + .domain + .rotate_omega(*x, Rotation(-((blinding_factors + 1) as i32))); + + iter::empty() + .chain(self.sets.iter().flat_map(move |set| { + iter::empty() + // Open permutation product commitments at x and \omega^{-1} x + // Open permutation product commitments at x and \omega x + .chain(Some(VerifierQuery::new_commitment( + &set.permutation_product_commitment, + *x, + set.permutation_product_eval, + ))) + .chain(Some(VerifierQuery::new_commitment( + &set.permutation_product_commitment, + x_next, + set.permutation_product_next_eval, + ))) + })) + // Open it at \omega^{last} x for all but the last set + .chain(self.sets.iter().rev().skip(1).flat_map(move |set| { + Some(VerifierQuery::new_commitment( + &set.permutation_product_commitment, + x_last, + set.permutation_product_last_eval.unwrap(), + )) + })) + } + + // TODO: Remove pub(in crate::plonk) fn queries<'r, M: MSM + 'r>( &'r self, vk: &'r plonk::VerifyingKey, diff --git a/halo2_proofs/src/plonk/prover.rs b/halo2_proofs/src/plonk/prover.rs index 187b9f4972..1f3130dd75 100644 --- a/halo2_proofs/src/plonk/prover.rs +++ b/halo2_proofs/src/plonk/prover.rs 
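// Hedged sketch (hypothetical helper): read_product_commitments_v2 and
// expressions_v2 above both walk the permutation columns in chunks of
// chunk_len = cs_degree - 2, so the verifier expects one grand-product
// commitment z_i per chunk.
fn expected_permutation_products(num_columns: usize, cs_degree: usize) -> usize {
    let chunk_len = cs_degree - 2;
    // Ceiling division: a final partial chunk still carries its own product.
    (num_columns + chunk_len - 1) / chunk_len
}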
@@ -8,8 +8,8 @@ use std::{collections::HashMap, iter}; use super::{ circuit::{ sealed::{self}, - Advice, Any, Assignment, Challenge, Circuit, Column, ConstraintSystem, - ConstraintSystemV2Backend, Expression, Fixed, FloorPlanner, Instance, Selector, + Advice, Any, Assignment, Challenge, Circuit, Column, ConstraintSystem, Fixed, FloorPlanner, + Instance, Selector, }, lookup, permutation, shuffle, vanishing, ChallengeBeta, ChallengeGamma, ChallengeTheta, ChallengeX, ChallengeY, Error, ProvingKey, ProvingKeyV2, @@ -21,7 +21,7 @@ use crate::{ plonk::Assigned, poly::{ commitment::{Blind, CommitmentScheme, Params, Prover}, - Basis, Coeff, LagrangeCoeff, Polynomial, ProverQuery, Rotation, + Basis, Coeff, LagrangeCoeff, Polynomial, ProverQuery, }, }; use crate::{ @@ -58,9 +58,9 @@ pub struct ProverV2< // Circuit and setup fields params: &'params Scheme::ParamsProver, pk: &'a ProvingKeyV2, - advice_queries: Vec<(Column, Rotation)>, - instance_queries: Vec<(Column, Rotation)>, - fixed_queries: Vec<(Column, Rotation)>, + // advice_queries: Vec<(Column, Rotation)>, + // instance_queries: Vec<(Column, Rotation)>, + // fixed_queries: Vec<(Column, Rotation)>, phases: Vec, // State instance: Vec>, @@ -72,92 +72,6 @@ pub struct ProverV2< _marker: std::marker::PhantomData<(P, E)>, } -struct Queries { - advice: Vec<(Column, Rotation)>, - instance: Vec<(Column, Rotation)>, - fixed: Vec<(Column, Rotation)>, -} - -struct QueriesSet { - advice: BTreeSet<(Column, Rotation)>, - instance: BTreeSet<(Column, Rotation)>, - fixed: BTreeSet<(Column, Rotation)>, -} - -fn collect_queries(expr: &Expression, queries: &mut QueriesSet) { - match expr { - Expression::Constant(_) => (), - Expression::Selector(_selector) => { - panic!("no Selector should arrive to the Backend"); - } - Expression::Fixed(query) => { - queries - .fixed - .insert((Column::new(query.column_index, Fixed), query.rotation)); - } - Expression::Advice(query) => { - queries.advice.insert(( - Column::new(query.column_index, Advice { phase: query.phase }), - query.rotation, - )); - } - Expression::Instance(query) => { - queries - .instance - .insert((Column::new(query.column_index, Instance), query.rotation)); - } - Expression::Challenge(_) => (), - Expression::Negated(a) => collect_queries(a, queries), - Expression::Sum(a, b) => { - collect_queries(a, queries); - collect_queries(b, queries); - } - Expression::Product(a, b) => { - collect_queries(a, queries); - collect_queries(b, queries); - } - Expression::Scaled(a, _) => collect_queries(a, queries), - }; -} - -fn get_all_queries(cs: &ConstraintSystemV2Backend) -> Queries { - let mut queries = QueriesSet { - advice: BTreeSet::new(), - instance: BTreeSet::new(), - fixed: BTreeSet::new(), - }; - - for gate in &cs.gates { - for expr in gate.polynomials() { - collect_queries(expr, &mut queries); - } - } - for lookup in &cs.lookups { - for expr in lookup - .input_expressions - .iter() - .chain(lookup.table_expressions.iter()) - { - collect_queries(expr, &mut queries); - } - } - for shuffle in &cs.shuffles { - for expr in shuffle - .input_expressions - .iter() - .chain(shuffle.shuffle_expressions.iter()) - { - collect_queries(expr, &mut queries); - } - } - - Queries { - advice: queries.advice.into_iter().collect(), - instance: queries.instance.into_iter().collect(), - fixed: queries.fixed.into_iter().collect(), - } -} - impl< 'a, 'params, @@ -186,12 +100,11 @@ impl< } } - let queries = get_all_queries(&pk.vk.cs); - // Hash verification key into transcript pk.vk.hash_into(&mut transcript)?; let meta = &pk.vk.cs; + 
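// The query lists were collected once in collect_queries() and cached on the
// verifying key, so the prover below and verify_proof_v2 index advice,
// instance and fixed evaluations in the same order.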
let queries = &pk.vk.queries; let phases = meta.phases(); let domain = &pk.vk.domain; @@ -204,7 +117,7 @@ impl< .map(|values| { let mut poly = domain.empty_lagrange(); assert_eq!(poly.len(), params.n() as usize); - if values.len() > (poly.len() - (meta.blinding_factors() + 1)) { + if values.len() > (poly.len() - (queries.blinding_factors() + 1)) { return Err(Error::InstanceTooLarge); } for (poly, value) in poly.iter_mut().zip(values.iter()) { @@ -266,9 +179,6 @@ impl< Ok(ProverV2 { params, pk, - advice_queries: queries.advice, - instance_queries: queries.instance, - fixed_queries: queries.fixed, phases, instance, rng, @@ -303,6 +213,7 @@ impl< let params = self.params; let meta = &self.pk.vk.cs; + let queries = &self.pk.vk.queries; let transcript = &mut self.transcript; let mut rng = &mut self.rng; @@ -348,7 +259,7 @@ impl< Option, LagrangeCoeff>>, >| -> Result<(), Error> { - let unusable_rows_start = params.n() as usize - (meta.blinding_factors() + 1); + let unusable_rows_start = params.n() as usize - (queries.blinding_factors() + 1); let mut advice_values = batch_invert_assigned::(witness.into_iter().flatten().collect()); let unblinded_advice: HashSet = @@ -428,6 +339,7 @@ impl< { let params = self.params; let meta = &self.pk.vk.cs; + let queries = &self.pk.vk.queries; let pk = self.pk; let domain = &self.pk.vk.domain; @@ -598,8 +510,8 @@ impl< // Compute and hash instance evals for the circuit instance for instance in instance.iter() { // Evaluate polynomials at omega^i x - let instance_evals: Vec<_> = self - .instance_queries + let instance_evals: Vec<_> = queries + .instance .iter() .map(|&(column, at)| { eval_polynomial( @@ -619,8 +531,8 @@ impl< // Compute and hash advice evals for the circuit instance for advice in advice.iter() { // Evaluate polynomials at omega^i x - let advice_evals: Vec<_> = self - .advice_queries + let advice_evals: Vec<_> = queries + .advice .iter() .map(|&(column, at)| { eval_polynomial( @@ -637,8 +549,8 @@ impl< } // Compute and hash fixed evals - let fixed_evals: Vec<_> = self - .fixed_queries + let fixed_evals: Vec<_> = queries + .fixed .iter() .map(|&(column, at)| { eval_polynomial(&pk.fixed_polys[column.index()], domain.rotate_omega(*x, at)) @@ -695,7 +607,7 @@ impl< iter::empty() .chain( P::QUERY_INSTANCE - .then_some(self.instance_queries.iter().map(move |&(column, at)| { + .then_some(queries.instance.iter().map(move |&(column, at)| { ProverQuery { point: domain.rotate_omega(*x, at), poly: &instance.instance_polys[column.index()], @@ -705,20 +617,16 @@ impl< .into_iter() .flatten(), ) - .chain( - self.advice_queries - .iter() - .map(move |&(column, at)| ProverQuery { - point: domain.rotate_omega(*x, at), - poly: &advice.advice_polys[column.index()], - blind: advice.advice_blinds[column.index()], - }), - ) + .chain(queries.advice.iter().map(move |&(column, at)| ProverQuery { + point: domain.rotate_omega(*x, at), + poly: &advice.advice_polys[column.index()], + blind: advice.advice_blinds[column.index()], + })) .chain(permutation.open_v2(pk, x)) .chain(lookups.iter().flat_map(move |p| p.open_v2(pk, x))) .chain(shuffles.iter().flat_map(move |p| p.open_v2(pk, x))) }) - .chain(self.fixed_queries.iter().map(|&(column, at)| ProverQuery { + .chain(queries.fixed.iter().map(|&(column, at)| ProverQuery { point: domain.rotate_omega(*x, at), poly: &pk.fixed_polys[column.index()], blind: Blind::default(), @@ -740,6 +648,7 @@ impl< /// parameters `params` and the proving key [`ProvingKey`] that was /// generated previously for the same circuit. 
The provided `instances` /// are zero-padded internally. +// TODO: Remove pub fn create_proof< 'params, Scheme: CommitmentScheme, diff --git a/halo2_proofs/src/plonk/shuffle/prover.rs b/halo2_proofs/src/plonk/shuffle/prover.rs index 30b9768203..929acfe4fa 100644 --- a/halo2_proofs/src/plonk/shuffle/prover.rs +++ b/halo2_proofs/src/plonk/shuffle/prover.rs @@ -95,6 +95,7 @@ impl> Argument { /// [S_0, S_1, ..., S_{m-1}], this method /// - constructs A_compressed = \theta^{m-1} A_0 + theta^{m-2} A_1 + ... + \theta A_{m-2} + A_{m-1} /// and S_compressed = \theta^{m-1} S_0 + theta^{m-2} S_1 + ... + \theta S_{m-2} + S_{m-1}, + // TODO: Remove #[allow(clippy::too_many_arguments)] fn compress<'a, 'params: 'a, C, P: Params<'params, C>>( &self, @@ -187,7 +188,7 @@ impl> Argument { challenges, ); - let blinding_factors = pk.vk.cs.blinding_factors(); + let blinding_factors = pk.vk.queries.blinding_factors(); let mut shuffle_product = vec![C::Scalar::ZERO; params.n() as usize]; parallelize(&mut shuffle_product, |shuffle_product, start| { @@ -259,6 +260,7 @@ impl> Argument { /// constructs the grand product polynomial over the shuffle. /// The grand product polynomial is used to populate the Product struct. /// The Product struct is added to the Shuffle and finally returned by the method. + // TODO: Remove #[allow(clippy::too_many_arguments)] pub(in crate::plonk) fn commit_product< 'a, @@ -391,6 +393,7 @@ impl Committed { Ok(Evaluated { constructed: self }) } + // TODO: Remove pub(in crate::plonk) fn evaluate, T: TranscriptWrite>( self, pk: &ProvingKey, @@ -439,6 +442,7 @@ impl Evaluated { })) } + // TODO: Remove pub(in crate::plonk) fn open<'a>( &'a self, pk: &'a ProvingKey, diff --git a/halo2_proofs/src/plonk/shuffle/verifier.rs b/halo2_proofs/src/plonk/shuffle/verifier.rs index 379cc5c8a1..2f77b52d1d 100644 --- a/halo2_proofs/src/plonk/shuffle/verifier.rs +++ b/halo2_proofs/src/plonk/shuffle/verifier.rs @@ -4,7 +4,7 @@ use super::super::{circuit::Expression, ChallengeGamma, ChallengeTheta, Challeng use super::Argument; use crate::{ arithmetic::CurveAffine, - plonk::{Error, VerifyingKey}, + plonk::{Error, VerifyingKey, VerifyingKeyV2}, poly::{commitment::MSM, Rotation, VerifierQuery}, transcript::{EncodedChallenge, TranscriptRead}, }; @@ -114,6 +114,30 @@ impl Evaluated { ) } + // NOTE: Copy of queries with VerifyingKeyV2 + pub(in crate::plonk) fn queries_v2<'r, M: MSM + 'r>( + &'r self, + vk: &'r VerifyingKeyV2, + x: ChallengeX, + ) -> impl Iterator> + Clone { + let x_next = vk.domain.rotate_omega(*x, Rotation::next()); + + iter::empty() + // Open shuffle product commitment at x + .chain(Some(VerifierQuery::new_commitment( + &self.committed.product_commitment, + *x, + self.product_eval, + ))) + // Open shuffle product commitment at \omega x + .chain(Some(VerifierQuery::new_commitment( + &self.committed.product_commitment, + x_next, + self.product_next_eval, + ))) + } + + // TODO: Remove pub(in crate::plonk) fn queries<'r, M: MSM + 'r>( &'r self, vk: &'r VerifyingKey, diff --git a/halo2_proofs/src/plonk/vanishing/verifier.rs b/halo2_proofs/src/plonk/vanishing/verifier.rs index 0881dfb2c0..a179336e0d 100644 --- a/halo2_proofs/src/plonk/vanishing/verifier.rs +++ b/halo2_proofs/src/plonk/vanishing/verifier.rs @@ -4,7 +4,7 @@ use ff::Field; use crate::{ arithmetic::CurveAffine, - plonk::{Error, VerifyingKey}, + plonk::{Error, VerifyingKey, VerifyingKeyV2}, poly::{ commitment::{Params, MSM}, VerifierQuery, @@ -53,6 +53,22 @@ impl Argument { } impl Committed { + pub(in crate::plonk) fn 
read_commitments_after_y_v2< + E: EncodedChallenge, + T: TranscriptRead, + >( + self, + vk: &VerifyingKeyV2, + transcript: &mut T, + ) -> Result, Error> { + // Obtain a commitment to h(X) in the form of multiple pieces of degree n - 1 + let h_commitments = read_n_points(transcript, vk.domain.get_quotient_poly_degree())?; + + Ok(Constructed { + h_commitments, + random_poly_commitment: self.random_poly_commitment, + }) + } pub(in crate::plonk) fn read_commitments_after_y< E: EncodedChallenge, T: TranscriptRead, diff --git a/halo2_proofs/src/plonk/verifier.rs b/halo2_proofs/src/plonk/verifier.rs index 76675bcdfa..6efee590f2 100644 --- a/halo2_proofs/src/plonk/verifier.rs +++ b/halo2_proofs/src/plonk/verifier.rs @@ -4,7 +4,7 @@ use std::iter; use super::{ vanishing, ChallengeBeta, ChallengeGamma, ChallengeTheta, ChallengeX, ChallengeY, Error, - VerifyingKey, + VerifyingKey, VerifyingKeyV2, }; use crate::arithmetic::compute_inner_product; use crate::poly::commitment::{CommitmentScheme, Verifier}; @@ -20,6 +20,428 @@ mod batch; #[cfg(feature = "batch")] pub use batch::BatchVerifier; +/// Returns a boolean indicating whether or not the proof is valid +pub fn verify_proof_v2< + 'params, + Scheme: CommitmentScheme, + V: Verifier<'params, Scheme>, + E: EncodedChallenge, + T: TranscriptRead, + Strategy: VerificationStrategy<'params, Scheme, V>, +>( + params: &'params Scheme::ParamsVerifier, + vk: &VerifyingKeyV2, + strategy: Strategy, + instances: &[&[&[Scheme::Scalar]]], + transcript: &mut T, +) -> Result +where + Scheme::Scalar: WithSmallOrderMulGroup<3> + FromUniformBytes<64>, +{ + // Check that instances matches the expected number of instance columns + for instances in instances.iter() { + if instances.len() != vk.cs.num_instance_columns { + return Err(Error::InvalidInstances); + } + } + + let instance_commitments = if V::QUERY_INSTANCE { + instances + .iter() + .map(|instance| { + instance + .iter() + .map(|instance| { + if instance.len() + > params.n() as usize - (vk.queries.blinding_factors() + 1) + { + return Err(Error::InstanceTooLarge); + } + let mut poly = instance.to_vec(); + poly.resize(params.n() as usize, Scheme::Scalar::ZERO); + let poly = vk.domain.lagrange_from_vec(poly); + + Ok(params.commit_lagrange(&poly, Blind::default()).to_affine()) + }) + .collect::, _>>() + }) + .collect::, _>>()? + } else { + vec![vec![]; instances.len()] + }; + + let num_proofs = instance_commitments.len(); + + // Hash verification key into transcript + vk.hash_into(transcript)?; + + if V::QUERY_INSTANCE { + for instance_commitments in instance_commitments.iter() { + // Hash the instance (external) commitments into the transcript + for commitment in instance_commitments { + transcript.common_point(*commitment)? 
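// A hedged call-site sketch for this entry point. The concrete transcript and
// strategy types and the bindings `params`, `vk`, `proof`, `instance_values`
// are assumptions for illustration only, not defined by this patch:
//
//     let instance_cols: Vec<&[_]> = instance_values.iter().map(|v| &v[..]).collect();
//     let mut transcript = Blake2bRead::<_, _, Challenge255<_>>::init(&proof[..]);
//     verify_proof_v2(
//         &params,
//         &vk,
//         SingleStrategy::new(&params),
//         &[&instance_cols[..]],
//         &mut transcript,
//     )
//     .expect("valid proof");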
+ } + } + } else { + for instance in instances.iter() { + for instance in instance.iter() { + for value in instance.iter() { + transcript.common_scalar(*value)?; + } + } + } + } + + // Hash the prover's advice commitments into the transcript and squeeze challenges + let (advice_commitments, challenges) = { + let mut advice_commitments = + vec![vec![Scheme::Curve::default(); vk.cs.num_advice_columns]; num_proofs]; + let mut challenges = vec![Scheme::Scalar::ZERO; vk.cs.num_challenges]; + + for current_phase in vk.cs.phases() { + for advice_commitments in advice_commitments.iter_mut() { + for (phase, commitment) in vk + .cs + .advice_column_phase + .iter() + .zip(advice_commitments.iter_mut()) + { + if current_phase == *phase { + *commitment = transcript.read_point()?; + } + } + } + for (phase, challenge) in vk.cs.challenge_phase.iter().zip(challenges.iter_mut()) { + if current_phase == *phase { + *challenge = *transcript.squeeze_challenge_scalar::<()>(); + } + } + } + + (advice_commitments, challenges) + }; + + // Sample theta challenge for keeping lookup columns linearly independent + let theta: ChallengeTheta<_> = transcript.squeeze_challenge_scalar(); + + let lookups_permuted = (0..num_proofs) + .map(|_| -> Result, _> { + // Hash each lookup permuted commitment + vk.cs + .lookups + .iter() + .map(|argument| argument.read_permuted_commitments(transcript)) + .collect::, _>>() + }) + .collect::, _>>()?; + + // Sample beta challenge + let beta: ChallengeBeta<_> = transcript.squeeze_challenge_scalar(); + + // Sample gamma challenge + let gamma: ChallengeGamma<_> = transcript.squeeze_challenge_scalar(); + + let permutations_committed = (0..num_proofs) + .map(|_| { + // Hash each permutation product commitment + vk.cs + .permutation + .read_product_commitments_v2(vk, transcript) + }) + .collect::, _>>()?; + + let lookups_committed = lookups_permuted + .into_iter() + .map(|lookups| { + // Hash each lookup product commitment + lookups + .into_iter() + .map(|lookup| lookup.read_product_commitment(transcript)) + .collect::, _>>() + }) + .collect::, _>>()?; + + let shuffles_committed = (0..num_proofs) + .map(|_| -> Result, _> { + // Hash each shuffle product commitment + vk.cs + .shuffles + .iter() + .map(|argument| argument.read_product_commitment(transcript)) + .collect::, _>>() + }) + .collect::, _>>()?; + + let vanishing = vanishing::Argument::read_commitments_before_y(transcript)?; + + // Sample y challenge, which keeps the gates linearly independent. + let y: ChallengeY<_> = transcript.squeeze_challenge_scalar(); + + let vanishing = vanishing.read_commitments_after_y_v2(vk, transcript)?; + + // Sample x challenge, which is used to ensure the circuit is + // satisfied with high probability. + let x: ChallengeX<_> = transcript.squeeze_challenge_scalar(); + let instance_evals = if V::QUERY_INSTANCE { + (0..num_proofs) + .map(|_| -> Result, _> { read_n_scalars(transcript, vk.queries.instance.len()) }) + .collect::, _>>()? 
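// (When the commitment scheme does not treat instances as commitments, the
// branch below recomputes each claimed instance evaluation directly from the
// public values via compute_inner_product over the matching Lagrange slice.)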
+ } else { + let xn = x.pow([params.n()]); + let (min_rotation, max_rotation) = + vk.queries + .instance + .iter() + .fold((0, 0), |(min, max), (_, rotation)| { + if rotation.0 < min { + (rotation.0, max) + } else if rotation.0 > max { + (min, rotation.0) + } else { + (min, max) + } + }); + let max_instance_len = instances + .iter() + .flat_map(|instance| instance.iter().map(|instance| instance.len())) + .max_by(Ord::cmp) + .unwrap_or_default(); + let l_i_s = &vk.domain.l_i_range( + *x, + xn, + -max_rotation..max_instance_len as i32 + min_rotation.abs(), + ); + instances + .iter() + .map(|instances| { + vk.queries + .instance + .iter() + .map(|(column, rotation)| { + let instances = instances[column.index()]; + let offset = (max_rotation - rotation.0) as usize; + compute_inner_product(instances, &l_i_s[offset..offset + instances.len()]) + }) + .collect::>() + }) + .collect::>() + }; + + let advice_evals = (0..num_proofs) + .map(|_| -> Result, _> { read_n_scalars(transcript, vk.queries.advice.len()) }) + .collect::, _>>()?; + + let fixed_evals = read_n_scalars(transcript, vk.queries.fixed.len())?; + + let vanishing = vanishing.evaluate_after_x(transcript)?; + + let permutations_common = vk.permutation.evaluate(transcript)?; + + let permutations_evaluated = permutations_committed + .into_iter() + .map(|permutation| permutation.evaluate(transcript)) + .collect::, _>>()?; + + let lookups_evaluated = lookups_committed + .into_iter() + .map(|lookups| -> Result, _> { + lookups + .into_iter() + .map(|lookup| lookup.evaluate(transcript)) + .collect::, _>>() + }) + .collect::, _>>()?; + + let shuffles_evaluated = shuffles_committed + .into_iter() + .map(|shuffles| -> Result, _> { + shuffles + .into_iter() + .map(|shuffle| shuffle.evaluate(transcript)) + .collect::, _>>() + }) + .collect::, _>>()?; + + // This check ensures the circuit is satisfied so long as the polynomial + // commitments open to the correct values. 
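// In the block below, l_i_range is asked for rotations -(blinding_factors + 1)
// through 0: index 0 of the result is l_last, indices 1..=blinding_factors are
// the blinding rows (summed into l_blind), and the final entry is l_0.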
+ let vanishing = { + // x^n + let xn = x.pow([params.n()]); + + let blinding_factors = vk.queries.blinding_factors(); + let l_evals = vk + .domain + .l_i_range(*x, xn, (-((blinding_factors + 1) as i32))..=0); + assert_eq!(l_evals.len(), 2 + blinding_factors); + let l_last = l_evals[0]; + let l_blind: Scheme::Scalar = l_evals[1..(1 + blinding_factors)] + .iter() + .fold(Scheme::Scalar::ZERO, |acc, eval| acc + eval); + let l_0 = l_evals[1 + blinding_factors]; + + // Compute the expected value of h(x) + let expressions = advice_evals + .iter() + .zip(instance_evals.iter()) + .zip(permutations_evaluated.iter()) + .zip(lookups_evaluated.iter()) + .zip(shuffles_evaluated.iter()) + .flat_map( + |((((advice_evals, instance_evals), permutation), lookups), shuffles)| { + let challenges = &challenges; + let fixed_evals = &fixed_evals; + std::iter::empty() + // Evaluate the circuit using the custom gates provided + .chain(vk.cs.gates.iter().flat_map(move |gate| { + gate.polynomials().iter().map(move |poly| { + poly.evaluate( + &|scalar| scalar, + &|_| { + panic!("virtual selectors are removed during optimization") + }, + &|query| fixed_evals[query.index.unwrap()], + &|query| advice_evals[query.index.unwrap()], + &|query| instance_evals[query.index.unwrap()], + &|challenge| challenges[challenge.index()], + &|a| -a, + &|a, b| a + &b, + &|a, b| a * &b, + &|a, scalar| a * &scalar, + ) + }) + })) + .chain(permutation.expressions_v2( + vk, + &vk.cs.permutation, + &permutations_common, + advice_evals, + fixed_evals, + instance_evals, + l_0, + l_last, + l_blind, + beta, + gamma, + x, + )) + .chain(lookups.iter().zip(vk.cs.lookups.iter()).flat_map( + move |(p, argument)| { + p.expressions( + l_0, + l_last, + l_blind, + argument, + theta, + beta, + gamma, + advice_evals, + fixed_evals, + instance_evals, + challenges, + ) + }, + )) + .chain(shuffles.iter().zip(vk.cs.shuffles.iter()).flat_map( + move |(p, argument)| { + p.expressions( + l_0, + l_last, + l_blind, + argument, + theta, + gamma, + advice_evals, + fixed_evals, + instance_evals, + challenges, + ) + }, + )) + }, + ); + + vanishing.verify(params, expressions, y, xn) + }; + + let queries = instance_commitments + .iter() + .zip(instance_evals.iter()) + .zip(advice_commitments.iter()) + .zip(advice_evals.iter()) + .zip(permutations_evaluated.iter()) + .zip(lookups_evaluated.iter()) + .zip(shuffles_evaluated.iter()) + .flat_map( + |( + ( + ( + ( + ((instance_commitments, instance_evals), advice_commitments), + advice_evals, + ), + permutation, + ), + lookups, + ), + shuffles, + )| { + iter::empty() + .chain( + V::QUERY_INSTANCE + .then_some(vk.queries.instance.iter().enumerate().map( + move |(query_index, &(column, at))| { + VerifierQuery::new_commitment( + &instance_commitments[column.index()], + vk.domain.rotate_omega(*x, at), + instance_evals[query_index], + ) + }, + )) + .into_iter() + .flatten(), + ) + .chain(vk.queries.advice.iter().enumerate().map( + move |(query_index, &(column, at))| { + VerifierQuery::new_commitment( + &advice_commitments[column.index()], + vk.domain.rotate_omega(*x, at), + advice_evals[query_index], + ) + }, + )) + .chain(permutation.queries_v2(vk, x)) + .chain(lookups.iter().flat_map(move |p| p.queries_v2(vk, x))) + .chain(shuffles.iter().flat_map(move |p| p.queries_v2(vk, x))) + }, + ) + .chain( + vk.queries + .fixed + .iter() + .enumerate() + .map(|(query_index, &(column, at))| { + VerifierQuery::new_commitment( + &vk.fixed_commitments[column.index()], + vk.domain.rotate_omega(*x, at), + fixed_evals[query_index], + ) + }), + ) 
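// The fixed, permutation-common and vanishing queries chained below complete
// the set handed to the multiopen verifier, which checks all claimed openings
// jointly.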
+        .chain(permutations_common.queries(&vk.permutation, x))
+        .chain(vanishing.queries(x));
+
+    // We are now convinced the circuit is satisfied so long as the
+    // polynomial commitments open to the correct values.
+
+    let verifier = V::new(params);
+    strategy.process(|msm| {
+        verifier
+            .verify_proof(transcript, queries, msm)
+            .map_err(|_| Error::Opening)
+    })
+}
+
+// TODO: Remove
 /// Returns a boolean indicating whether or not the proof is valid
 pub fn verify_proof<
     'params,