diff --git a/halo2_proofs/src/plonk.rs b/halo2_proofs/src/plonk.rs index a6cdba70c1..85a544d585 100644 --- a/halo2_proofs/src/plonk.rs +++ b/halo2_proofs/src/plonk.rs @@ -101,78 +101,6 @@ impl Queries { } } -// TODO: Remove in favour of VerifyingKey -/// This is a verifying key which allows for the verification of proofs for a -/// particular circuit. -#[derive(Clone, Debug)] -pub struct VerifyingKeyV2 { - domain: EvaluationDomain, - fixed_commitments: Vec, - permutation: permutation::VerifyingKey, - cs: ConstraintSystem, - // queries: Queries, - /// Cached maximum degree of `cs` (which doesn't change after construction). - cs_degree: usize, - /// The representative of this `VerifyingKey` in transcripts. - transcript_repr: C::Scalar, -} - -impl VerifyingKeyV2 { - fn from_parts( - domain: EvaluationDomain, - fixed_commitments: Vec, - permutation: permutation::VerifyingKey, - cs: ConstraintSystem, - ) -> Self - where - C::ScalarExt: FromUniformBytes<64>, - { - // Compute cached values. - let cs_degree = cs.degree(); - // let queries = cs.collect_queries(); - - let mut vk = Self { - domain, - fixed_commitments, - permutation, - cs, - // queries, - cs_degree, - // Temporary, this is not pinned. - transcript_repr: C::Scalar::ZERO, - }; - - let mut hasher = Blake2bParams::new() - .hash_length(64) - .personal(b"Halo2-Verify-Key") - .to_state(); - - // let s = format!("{:?}", vk.pinned()); - // TODO(Edu): Is it Ok to not use the pinned Vk here? We removed a lot of stuff from Vk - // and Cs, so maybe we already have the same as in PinnedVerificationKey? - let s = format!("{:?}", vk); - - hasher.update(&(s.len() as u64).to_le_bytes()); - hasher.update(s.as_bytes()); - - // Hash in final Blake2bState - vk.transcript_repr = C::Scalar::from_uniform_bytes(hasher.finalize().as_array()); - // dbg!(&vk.transcript_repr); - - vk - } - - /// Hashes a verification key into a transcript. - pub fn hash_into, T: Transcript>( - &self, - transcript: &mut T, - ) -> io::Result<()> { - transcript.common_scalar(self.transcript_repr)?; - - Ok(()) - } -} - /// This is a verifying key which allows for the verification of proofs for a /// particular circuit. #[derive(Clone, Debug)] @@ -445,36 +373,6 @@ pub struct PinnedVerificationKey<'a, C: CurveAffine> { permutation: &'a permutation::VerifyingKey, } -/// This is a proving key which allows for the creation of proofs for a -/// particular circuit. -#[derive(Clone, Debug)] -pub struct ProvingKeyV2 { - vk: VerifyingKeyV2, - l0: Polynomial, - l_last: Polynomial, - l_active_row: Polynomial, - fixed_values: Vec>, - fixed_polys: Vec>, - fixed_cosets: Vec>, - permutation: permutation::ProvingKey, - ev: Evaluator, -} - -// impl ProvingKeyV2 -// where -// C::Scalar: FromUniformBytes<64>, -// { -// /// Hashes a verification key into a transcript. -// pub fn hash_into, T: Transcript>( -// &self, -// transcript: &mut T, -// ) -> io::Result<()> { -// transcript.common_scalar(self.transcript_repr)?; -// -// Ok(()) -// } -// } - /// This is a proving key which allows for the creation of proofs for a /// particular circuit. 
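// Illustrative sketch (not taken verbatim from this crate) of the pattern used by
// `from_parts` above to derive a key's `transcript_repr`: hash a length-prefixed
// Debug serialization with a personalized Blake2b-512 and map the digest to a
// scalar. Assumes the `blake2b_simd` and `ff` crates; the helper name
// `debug_repr_to_scalar` is hypothetical.
use blake2b_simd::Params as Blake2bParams;
use ff::FromUniformBytes;

fn debug_repr_to_scalar<F: FromUniformBytes<64>, T: std::fmt::Debug>(value: &T) -> F {
    let s = format!("{:?}", value);
    let mut hasher = Blake2bParams::new()
        .hash_length(64)
        .personal(b"Halo2-Verify-Key")
        .to_state();
    // Length-prefix the string so distinct inputs cannot collide by concatenation.
    hasher.update(&(s.len() as u64).to_le_bytes());
    hasher.update(s.as_bytes());
    // A 64-byte digest reduced with `from_uniform_bytes` yields a near-uniform scalar.
    F::from_uniform_bytes(hasher.finalize().as_array())
}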
#[derive(Clone, Debug)] diff --git a/halo2_proofs/src/plonk/evaluation.rs b/halo2_proofs/src/plonk/evaluation.rs index b865d516b6..e89359fa68 100644 --- a/halo2_proofs/src/plonk/evaluation.rs +++ b/halo2_proofs/src/plonk/evaluation.rs @@ -1,5 +1,5 @@ use crate::multicore; -use crate::plonk::{lookup, permutation, Any, ProvingKey, ProvingKeyV2}; +use crate::plonk::{lookup, permutation, Any, ProvingKey}; use crate::poly::Basis; use crate::{ arithmetic::{parallelize, CurveAffine}, @@ -387,318 +387,6 @@ impl Evaluator { } /// Evaluate h poly - // NOTE: Copy of evaluate_h with ProvingKeyV2 - #[allow(clippy::too_many_arguments)] - pub(in crate::plonk) fn evaluate_h_v2( - &self, - pk: &ProvingKeyV2, - advice_polys: &[&[Polynomial]], - instance_polys: &[&[Polynomial]], - challenges: &[C::ScalarExt], - y: C::ScalarExt, - beta: C::ScalarExt, - gamma: C::ScalarExt, - theta: C::ScalarExt, - lookups: &[Vec>], - shuffles: &[Vec>], - permutations: &[permutation::prover::Committed], - ) -> Polynomial { - let domain = &pk.vk.domain; - let size = domain.extended_len(); - let rot_scale = 1 << (domain.extended_k() - domain.k()); - let fixed = &pk.fixed_cosets[..]; - let extended_omega = domain.get_extended_omega(); - let isize = size as i32; - let one = C::ScalarExt::ONE; - let l0 = &pk.l0; - let l_last = &pk.l_last; - let l_active_row = &pk.l_active_row; - let p = &pk.vk.cs.permutation; - - // Calculate the advice and instance cosets - let advice: Vec>> = advice_polys - .iter() - .map(|advice_polys| { - advice_polys - .iter() - .map(|poly| domain.coeff_to_extended(poly.clone())) - .collect() - }) - .collect(); - let instance: Vec>> = instance_polys - .iter() - .map(|instance_polys| { - instance_polys - .iter() - .map(|poly| domain.coeff_to_extended(poly.clone())) - .collect() - }) - .collect(); - - let mut values = domain.empty_extended(); - - // Core expression evaluations - let num_threads = multicore::current_num_threads(); - for ((((advice, instance), lookups), shuffles), permutation) in advice - .iter() - .zip(instance.iter()) - .zip(lookups.iter()) - .zip(shuffles.iter()) - .zip(permutations.iter()) - { - // Custom gates - multicore::scope(|scope| { - let chunk_size = (size + num_threads - 1) / num_threads; - for (thread_idx, values) in values.chunks_mut(chunk_size).enumerate() { - let start = thread_idx * chunk_size; - scope.spawn(move |_| { - let mut eval_data = self.custom_gates.instance(); - for (i, value) in values.iter_mut().enumerate() { - let idx = start + i; - *value = self.custom_gates.evaluate( - &mut eval_data, - fixed, - advice, - instance, - challenges, - &beta, - &gamma, - &theta, - &y, - value, - idx, - rot_scale, - isize, - ); - } - }); - } - }); - - // Permutations - let sets = &permutation.sets; - if !sets.is_empty() { - let blinding_factors = pk.vk.cs.blinding_factors(); - let last_rotation = Rotation(-((blinding_factors + 1) as i32)); - let chunk_len = pk.vk.cs.degree() - 2; - let delta_start = beta * &C::Scalar::ZETA; - - let first_set = sets.first().unwrap(); - let last_set = sets.last().unwrap(); - - // Permutation constraints - parallelize(&mut values, |values, start| { - let mut beta_term = extended_omega.pow_vartime([start as u64, 0, 0, 0]); - for (i, value) in values.iter_mut().enumerate() { - let idx = start + i; - let r_next = get_rotation_idx(idx, 1, rot_scale, isize); - let r_last = get_rotation_idx(idx, last_rotation.0, rot_scale, isize); - - // Enforce only for the first set. 
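// Illustrative sketch of the accumulation used throughout `evaluate_h` for the
// constraint blocks below: at a fixed row, independent constraint values
// c_0, ..., c_{k-1} are folded by Horner's rule into
// y^{k-1}*c_0 + ... + y*c_{k-2} + c_{k-1}, which is what the repeated
// `*value = *value * y + (...)` steps compute, so one random challenge y keeps
// all constraints linearly independent. `F` stands in for the scalar field; the
// helper name is hypothetical.
fn fold_constraints<F: ff::Field>(y: F, constraints: &[F]) -> F {
    constraints.iter().fold(F::ZERO, |acc, &c| acc * y + c)
}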
- // l_0(X) * (1 - z_0(X)) = 0 - *value = *value * y - + ((one - first_set.permutation_product_coset[idx]) * l0[idx]); - // Enforce only for the last set. - // l_last(X) * (z_l(X)^2 - z_l(X)) = 0 - *value = *value * y - + ((last_set.permutation_product_coset[idx] - * last_set.permutation_product_coset[idx] - - last_set.permutation_product_coset[idx]) - * l_last[idx]); - // Except for the first set, enforce. - // l_0(X) * (z_i(X) - z_{i-1}(\omega^(last) X)) = 0 - for (set_idx, set) in sets.iter().enumerate() { - if set_idx != 0 { - *value = *value * y - + ((set.permutation_product_coset[idx] - - permutation.sets[set_idx - 1].permutation_product_coset - [r_last]) - * l0[idx]); - } - } - // And for all the sets we enforce: - // (1 - (l_last(X) + l_blind(X))) * ( - // z_i(\omega X) \prod_j (p(X) + \beta s_j(X) + \gamma) - // - z_i(X) \prod_j (p(X) + \delta^j \beta X + \gamma) - // ) - let mut current_delta = delta_start * beta_term; - for ((set, columns), cosets) in sets - .iter() - .zip(p.columns.chunks(chunk_len)) - .zip(pk.permutation.cosets.chunks(chunk_len)) - { - let mut left = set.permutation_product_coset[r_next]; - for (values, permutation) in columns - .iter() - .map(|&column| match column.column_type() { - Any::Advice(_) => &advice[column.index()], - Any::Fixed => &fixed[column.index()], - Any::Instance => &instance[column.index()], - }) - .zip(cosets.iter()) - { - left *= values[idx] + beta * permutation[idx] + gamma; - } - - let mut right = set.permutation_product_coset[idx]; - for values in columns.iter().map(|&column| match column.column_type() { - Any::Advice(_) => &advice[column.index()], - Any::Fixed => &fixed[column.index()], - Any::Instance => &instance[column.index()], - }) { - right *= values[idx] + current_delta + gamma; - current_delta *= &C::Scalar::DELTA; - } - - *value = *value * y + ((left - right) * l_active_row[idx]); - } - beta_term *= &extended_omega; - } - }); - } - - // Lookups - for (n, lookup) in lookups.iter().enumerate() { - // Polynomials required for this lookup. - // Calculated here so these only have to be kept in memory for the short time - // they are actually needed. - let product_coset = pk.vk.domain.coeff_to_extended(lookup.product_poly.clone()); - let permuted_input_coset = pk - .vk - .domain - .coeff_to_extended(lookup.permuted_input_poly.clone()); - let permuted_table_coset = pk - .vk - .domain - .coeff_to_extended(lookup.permuted_table_poly.clone()); - - // Lookup constraints - parallelize(&mut values, |values, start| { - let lookup_evaluator = &self.lookups[n]; - let mut eval_data = lookup_evaluator.instance(); - for (i, value) in values.iter_mut().enumerate() { - let idx = start + i; - - let table_value = lookup_evaluator.evaluate( - &mut eval_data, - fixed, - advice, - instance, - challenges, - &beta, - &gamma, - &theta, - &y, - &C::ScalarExt::ZERO, - idx, - rot_scale, - isize, - ); - - let r_next = get_rotation_idx(idx, 1, rot_scale, isize); - let r_prev = get_rotation_idx(idx, -1, rot_scale, isize); - - let a_minus_s = permuted_input_coset[idx] - permuted_table_coset[idx]; - // l_0(X) * (1 - z(X)) = 0 - *value = *value * y + ((one - product_coset[idx]) * l0[idx]); - // l_last(X) * (z(X)^2 - z(X)) = 0 - *value = *value * y - + ((product_coset[idx] * product_coset[idx] - product_coset[idx]) - * l_last[idx]); - // (1 - (l_last(X) + l_blind(X))) * ( - // z(\omega X) (a'(X) + \beta) (s'(X) + \gamma) - // - z(X) (\theta^{m-1} a_0(X) + ... + a_{m-1}(X) + \beta) - // (\theta^{m-1} s_0(X) + ... 
+ s_{m-1}(X) + \gamma) - // ) = 0 - *value = *value * y - + ((product_coset[r_next] - * (permuted_input_coset[idx] + beta) - * (permuted_table_coset[idx] + gamma) - - product_coset[idx] * table_value) - * l_active_row[idx]); - // Check that the first values in the permuted input expression and permuted - // fixed expression are the same. - // l_0(X) * (a'(X) - s'(X)) = 0 - *value = *value * y + (a_minus_s * l0[idx]); - // Check that each value in the permuted lookup input expression is either - // equal to the value above it, or the value at the same index in the - // permuted table expression. - // (1 - (l_last + l_blind)) * (a′(X) − s′(X))⋅(a′(X) − a′(\omega^{-1} X)) = 0 - *value = *value * y - + (a_minus_s - * (permuted_input_coset[idx] - permuted_input_coset[r_prev]) - * l_active_row[idx]); - } - }); - } - - // Shuffle constraints - for (n, shuffle) in shuffles.iter().enumerate() { - let product_coset = pk.vk.domain.coeff_to_extended(shuffle.product_poly.clone()); - - // Shuffle constraints - parallelize(&mut values, |values, start| { - let input_evaluator = &self.shuffles[2 * n]; - let shuffle_evaluator = &self.shuffles[2 * n + 1]; - let mut eval_data_input = shuffle_evaluator.instance(); - let mut eval_data_shuffle = shuffle_evaluator.instance(); - for (i, value) in values.iter_mut().enumerate() { - let idx = start + i; - - let input_value = input_evaluator.evaluate( - &mut eval_data_input, - fixed, - advice, - instance, - challenges, - &beta, - &gamma, - &theta, - &y, - &C::ScalarExt::ZERO, - idx, - rot_scale, - isize, - ); - - let shuffle_value = shuffle_evaluator.evaluate( - &mut eval_data_shuffle, - fixed, - advice, - instance, - challenges, - &beta, - &gamma, - &theta, - &y, - &C::ScalarExt::ZERO, - idx, - rot_scale, - isize, - ); - - let r_next = get_rotation_idx(idx, 1, rot_scale, isize); - - // l_0(X) * (1 - z(X)) = 0 - *value = *value * y + ((one - product_coset[idx]) * l0[idx]); - // l_last(X) * (z(X)^2 - z(X)) = 0 - *value = *value * y - + ((product_coset[idx] * product_coset[idx] - product_coset[idx]) - * l_last[idx]); - // (1 - (l_last(X) + l_blind(X))) * (z(\omega X) (s(X) + \gamma) - z(X) (a(X) + \gamma)) = 0 - *value = *value * y - + l_active_row[idx] - * (product_coset[r_next] * shuffle_value - - product_coset[idx] * input_value) - } - }); - } - } - values - } - - /// Evaluate h poly - // TODO: Remove #[allow(clippy::too_many_arguments)] pub(in crate::plonk) fn evaluate_h( &self, diff --git a/halo2_proofs/src/plonk/keygen.rs b/halo2_proofs/src/plonk/keygen.rs index b5b7b200fb..6329d83b08 100644 --- a/halo2_proofs/src/plonk/keygen.rs +++ b/halo2_proofs/src/plonk/keygen.rs @@ -11,8 +11,7 @@ use super::{ FloorPlanner, Instance, Selector, }, evaluation::Evaluator, - permutation, Assigned, Challenge, Error, LagrangeCoeff, Polynomial, ProvingKey, ProvingKeyV2, - VerifyingKey, VerifyingKeyV2, + permutation, Assigned, Challenge, Error, LagrangeCoeff, Polynomial, ProvingKey, VerifyingKey, }; use crate::{ arithmetic::{parallelize, CurveAffine}, @@ -207,7 +206,7 @@ impl Assignment for Assembly { pub fn keygen_vk_v2<'params, C, P>( params: &P, circuit: &CompiledCircuitV2, -) -> Result, Error> +) -> Result, Error> where C: CurveAffine, P: Params<'params, C>, @@ -236,11 +235,13 @@ where .map(|poly| params.commit_lagrange(poly, Blind::default()).to_affine()) .collect(); - Ok(VerifyingKeyV2::from_parts( + Ok(VerifyingKey::from_parts( domain, fixed_commitments, permutation_vk, cs, + Vec::new(), + false, )) } @@ -336,9 +337,9 @@ where /// Generate a `ProvingKey` from a `VerifyingKey` 
and an instance of `CompiledCircuit`. pub fn keygen_pk_v2<'params, C, P>( params: &P, - vk: VerifyingKeyV2, + vk: VerifyingKey, circuit: &CompiledCircuitV2, -) -> Result, Error> +) -> Result, Error> where C: CurveAffine, P: Params<'params, C>, @@ -404,7 +405,7 @@ where // Compute the optimized evaluation data structure let ev = Evaluator::new(&vk.cs); - Ok(ProvingKeyV2 { + Ok(ProvingKey { vk, l0, l_last, diff --git a/halo2_proofs/src/plonk/lookup/prover.rs b/halo2_proofs/src/plonk/lookup/prover.rs index 377773980b..028b298853 100644 --- a/halo2_proofs/src/plonk/lookup/prover.rs +++ b/halo2_proofs/src/plonk/lookup/prover.rs @@ -1,6 +1,6 @@ use super::super::{ circuit::Expression, ChallengeBeta, ChallengeGamma, ChallengeTheta, ChallengeX, Error, - ProvingKey, ProvingKeyV2, + ProvingKey, }; use super::Argument; use crate::plonk::evaluation::evaluate; @@ -60,113 +60,6 @@ impl> Argument { /// - constructs Permuted struct using permuted_input_value = A', and /// permuted_table_expression = S'. /// The Permuted struct is used to update the Lookup, and is then returned. - // NOTE: Copy of commit_permuted that uses ProvingKeyV2 - #[allow(clippy::too_many_arguments)] - pub(in crate::plonk) fn commit_permuted_v2< - 'a, - 'params: 'a, - C, - P: Params<'params, C>, - E: EncodedChallenge, - R: RngCore, - T: TranscriptWrite, - >( - &self, - pk: &ProvingKeyV2, - params: &P, - domain: &EvaluationDomain, - theta: ChallengeTheta, - advice_values: &'a [Polynomial], - fixed_values: &'a [Polynomial], - instance_values: &'a [Polynomial], - challenges: &'a [C::Scalar], - mut rng: R, - transcript: &mut T, - ) -> Result, Error> - where - C: CurveAffine, - C::Curve: Mul + MulAssign, - { - // Closure to get values of expressions and compress them - let compress_expressions = |expressions: &[Expression]| { - let compressed_expression = expressions - .iter() - .map(|expression| { - pk.vk.domain.lagrange_from_vec(evaluate( - expression, - params.n() as usize, - 1, - fixed_values, - advice_values, - instance_values, - challenges, - )) - }) - .fold(domain.empty_lagrange(), |acc, expression| { - acc * *theta + &expression - }); - compressed_expression - }; - - // Get values of input expressions involved in the lookup and compress them - let compressed_input_expression = compress_expressions(&self.input_expressions); - - // Get values of table expressions involved in the lookup and compress them - let compressed_table_expression = compress_expressions(&self.table_expressions); - - // Permute compressed (InputExpression, TableExpression) pair - let (permuted_input_expression, permuted_table_expression) = permute_expression_pair_v2( - pk, - params, - domain, - &mut rng, - &compressed_input_expression, - &compressed_table_expression, - )?; - - // Closure to construct commitment to vector of values - let mut commit_values = |values: &Polynomial| { - let poly = pk.vk.domain.lagrange_to_coeff(values.clone()); - let blind = Blind(C::Scalar::random(&mut rng)); - let commitment = params.commit_lagrange(values, blind).to_affine(); - (poly, blind, commitment) - }; - - // Commit to permuted input expression - let (permuted_input_poly, permuted_input_blind, permuted_input_commitment) = - commit_values(&permuted_input_expression); - - // Commit to permuted table expression - let (permuted_table_poly, permuted_table_blind, permuted_table_commitment) = - commit_values(&permuted_table_expression); - - // Hash permuted input commitment - transcript.write_point(permuted_input_commitment)?; - - // Hash permuted table commitment - 
transcript.write_point(permuted_table_commitment)?; - - Ok(Permuted { - compressed_input_expression, - permuted_input_expression, - permuted_input_poly, - permuted_input_blind, - compressed_table_expression, - permuted_table_expression, - permuted_table_poly, - permuted_table_blind, - }) - } - /// Given a Lookup with input expressions [A_0, A_1, ..., A_{m-1}] and table expressions - /// [S_0, S_1, ..., S_{m-1}], this method - /// - constructs A_compressed = \theta^{m-1} A_0 + theta^{m-2} A_1 + ... + \theta A_{m-2} + A_{m-1} - /// and S_compressed = \theta^{m-1} S_0 + theta^{m-2} S_1 + ... + \theta S_{m-2} + S_{m-1}, - /// - permutes A_compressed and S_compressed using permute_expression_pair() helper, - /// obtaining A' and S', and - /// - constructs Permuted struct using permuted_input_value = A', and - /// permuted_table_expression = S'. - /// The Permuted struct is used to update the Lookup, and is then returned. - // TODO: Remove #[allow(clippy::too_many_arguments)] pub(in crate::plonk) fn commit_permuted< 'a, @@ -266,151 +159,6 @@ impl> Argument { } impl Permuted { - /// Given a Lookup with input expressions, table expressions, and the permuted - /// input expression and permuted table expression, this method constructs the - /// grand product polynomial over the lookup. The grand product polynomial - /// is used to populate the Product struct. The Product struct is - /// added to the Lookup and finally returned by the method. - // NOTE: Copy of commit_permuted with ProvingKeyV2 - pub(in crate::plonk) fn commit_product_v2< - 'params, - P: Params<'params, C>, - E: EncodedChallenge, - R: RngCore, - T: TranscriptWrite, - >( - self, - pk: &ProvingKeyV2, - params: &P, - beta: ChallengeBeta, - gamma: ChallengeGamma, - mut rng: R, - transcript: &mut T, - ) -> Result, Error> { - let blinding_factors = pk.vk.cs.blinding_factors(); - // Goal is to compute the products of fractions - // - // Numerator: (\theta^{m-1} a_0(\omega^i) + \theta^{m-2} a_1(\omega^i) + ... + \theta a_{m-2}(\omega^i) + a_{m-1}(\omega^i) + \beta) - // * (\theta^{m-1} s_0(\omega^i) + \theta^{m-2} s_1(\omega^i) + ... + \theta s_{m-2}(\omega^i) + s_{m-1}(\omega^i) + \gamma) - // Denominator: (a'(\omega^i) + \beta) (s'(\omega^i) + \gamma) - // - // where a_j(X) is the jth input expression in this lookup, - // where a'(X) is the compression of the permuted input expressions, - // s_j(X) is the jth table expression in this lookup, - // s'(X) is the compression of the permuted table expressions, - // and i is the ith row of the expression. - let mut lookup_product = vec![C::Scalar::ZERO; params.n() as usize]; - // Denominator uses the permuted input expression and permuted table expression - parallelize(&mut lookup_product, |lookup_product, start| { - for ((lookup_product, permuted_input_value), permuted_table_value) in lookup_product - .iter_mut() - .zip(self.permuted_input_expression[start..].iter()) - .zip(self.permuted_table_expression[start..].iter()) - { - *lookup_product = (*beta + permuted_input_value) * &(*gamma + permuted_table_value); - } - }); - - // Batch invert to obtain the denominators for the lookup product - // polynomials - lookup_product.iter_mut().batch_invert(); - - // Finish the computation of the entire fraction by computing the numerators - // (\theta^{m-1} a_0(\omega^i) + \theta^{m-2} a_1(\omega^i) + ... + \theta a_{m-2}(\omega^i) + a_{m-1}(\omega^i) + \beta) - // * (\theta^{m-1} s_0(\omega^i) + \theta^{m-2} s_1(\omega^i) + ... 
+ \theta s_{m-2}(\omega^i) + s_{m-1}(\omega^i) + \gamma) - parallelize(&mut lookup_product, |product, start| { - for (i, product) in product.iter_mut().enumerate() { - let i = i + start; - - *product *= &(self.compressed_input_expression[i] + &*beta); - *product *= &(self.compressed_table_expression[i] + &*gamma); - } - }); - - // The product vector is a vector of products of fractions of the form - // - // Numerator: (\theta^{m-1} a_0(\omega^i) + \theta^{m-2} a_1(\omega^i) + ... + \theta a_{m-2}(\omega^i) + a_{m-1}(\omega^i) + \beta) - // * (\theta^{m-1} s_0(\omega^i) + \theta^{m-2} s_1(\omega^i) + ... + \theta s_{m-2}(\omega^i) + s_{m-1}(\omega^i) + \gamma) - // Denominator: (a'(\omega^i) + \beta) (s'(\omega^i) + \gamma) - // - // where there are m input expressions and m table expressions, - // a_j(\omega^i) is the jth input expression in this lookup, - // a'j(\omega^i) is the permuted input expression, - // s_j(\omega^i) is the jth table expression in this lookup, - // s'(\omega^i) is the permuted table expression, - // and i is the ith row of the expression. - - // Compute the evaluations of the lookup product polynomial - // over our domain, starting with z[0] = 1 - let z = iter::once(C::Scalar::ONE) - .chain(lookup_product) - .scan(C::Scalar::ONE, |state, cur| { - *state *= &cur; - Some(*state) - }) - // Take all rows including the "last" row which should - // be a boolean (and ideally 1, else soundness is broken) - .take(params.n() as usize - blinding_factors) - // Chain random blinding factors. - .chain((0..blinding_factors).map(|_| C::Scalar::random(&mut rng))) - .collect::>(); - assert_eq!(z.len(), params.n() as usize); - let z = pk.vk.domain.lagrange_from_vec(z); - - #[cfg(feature = "sanity-checks")] - // This test works only with intermediate representations in this method. - // It can be used for debugging purposes. - { - // While in Lagrange basis, check that product is correctly constructed - let u = (params.n() as usize) - (blinding_factors + 1); - - // l_0(X) * (1 - z(X)) = 0 - assert_eq!(z[0], C::Scalar::ONE); - - // z(\omega X) (a'(X) + \beta) (s'(X) + \gamma) - // - z(X) (\theta^{m-1} a_0(X) + ... + a_{m-1}(X) + \beta) (\theta^{m-1} s_0(X) + ... + s_{m-1}(X) + \gamma) - for i in 0..u { - let mut left = z[i + 1]; - let permuted_input_value = &self.permuted_input_expression[i]; - - let permuted_table_value = &self.permuted_table_expression[i]; - - left *= &(*beta + permuted_input_value); - left *= &(*gamma + permuted_table_value); - - let mut right = z[i]; - let mut input_term = self.compressed_input_expression[i]; - let mut table_term = self.compressed_table_expression[i]; - - input_term += &(*beta); - table_term += &(*gamma); - right *= &(input_term * &table_term); - - assert_eq!(left, right); - } - - // l_last(X) * (z(X)^2 - z(X)) = 0 - // Assertion will fail only when soundness is broken, in which - // case this z[u] value will be zero. (bad!) 
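// Illustrative sketch of the grand-product construction performed by
// `commit_product` above: every denominator is inverted with a single batch
// inversion, then the fractions are accumulated as z[0] = 1 and
// z[i+1] = z[i] * num_i / den_i (the real code afterwards replaces the tail
// rows with random blinding values). `F` stands in for the scalar field; the
// helper name is hypothetical.
use ff::{BatchInvert, Field};

fn grand_product<F: Field>(numerators: &[F], denominators: &[F]) -> Vec<F> {
    assert_eq!(numerators.len(), denominators.len());
    // One field inversion plus O(n) multiplications instead of n inversions.
    let mut fractions = denominators.to_vec();
    fractions.iter_mut().batch_invert();
    for (f, num) in fractions.iter_mut().zip(numerators) {
        *f *= num;
    }
    // Running product starting from 1.
    std::iter::once(F::ONE)
        .chain(fractions)
        .scan(F::ONE, |state, cur| {
            *state *= cur;
            Some(*state)
        })
        .collect()
}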
- assert_eq!(z[u], C::Scalar::ONE); - } - - let product_blind = Blind(C::Scalar::random(rng)); - let product_commitment = params.commit_lagrange(&z, product_blind).to_affine(); - let z = pk.vk.domain.lagrange_to_coeff(z); - - // Hash product commitment - transcript.write_point(product_commitment)?; - - Ok(Committed:: { - permuted_input_poly: self.permuted_input_poly, - permuted_input_blind: self.permuted_input_blind, - permuted_table_poly: self.permuted_table_poly, - permuted_table_blind: self.permuted_table_blind, - product_poly: z, - product_blind, - }) - } /// Given a Lookup with input expressions, table expressions, and the permuted /// input expression and permuted table expression, this method constructs the /// grand product polynomial over the lookup. The grand product polynomial @@ -558,37 +306,6 @@ impl Permuted { } impl Committed { - pub(in crate::plonk) fn evaluate_v2, T: TranscriptWrite>( - self, - pk: &ProvingKeyV2, - x: ChallengeX, - transcript: &mut T, - ) -> Result, Error> { - let domain = &pk.vk.domain; - let x_inv = domain.rotate_omega(*x, Rotation::prev()); - let x_next = domain.rotate_omega(*x, Rotation::next()); - - let product_eval = eval_polynomial(&self.product_poly, *x); - let product_next_eval = eval_polynomial(&self.product_poly, x_next); - let permuted_input_eval = eval_polynomial(&self.permuted_input_poly, *x); - let permuted_input_inv_eval = eval_polynomial(&self.permuted_input_poly, x_inv); - let permuted_table_eval = eval_polynomial(&self.permuted_table_poly, *x); - - // Hash each advice evaluation - for eval in iter::empty() - .chain(Some(product_eval)) - .chain(Some(product_next_eval)) - .chain(Some(permuted_input_eval)) - .chain(Some(permuted_input_inv_eval)) - .chain(Some(permuted_table_eval)) - { - transcript.write_scalar(eval)?; - } - - Ok(Evaluated { constructed: self }) - } - - // TODO: Remove pub(in crate::plonk) fn evaluate, T: TranscriptWrite>( self, pk: &ProvingKey, @@ -621,49 +338,6 @@ impl Committed { } impl Evaluated { - // NOTE: Copy of open with ProvingKeyV2 - pub(in crate::plonk) fn open_v2<'a>( - &'a self, - pk: &'a ProvingKeyV2, - x: ChallengeX, - ) -> impl Iterator> + Clone { - let x_inv = pk.vk.domain.rotate_omega(*x, Rotation::prev()); - let x_next = pk.vk.domain.rotate_omega(*x, Rotation::next()); - - iter::empty() - // Open lookup product commitments at x - .chain(Some(ProverQuery { - point: *x, - poly: &self.constructed.product_poly, - blind: self.constructed.product_blind, - })) - // Open lookup input commitments at x - .chain(Some(ProverQuery { - point: *x, - poly: &self.constructed.permuted_input_poly, - blind: self.constructed.permuted_input_blind, - })) - // Open lookup table commitments at x - .chain(Some(ProverQuery { - point: *x, - poly: &self.constructed.permuted_table_poly, - blind: self.constructed.permuted_table_blind, - })) - // Open lookup input commitments at x_inv - .chain(Some(ProverQuery { - point: x_inv, - poly: &self.constructed.permuted_input_poly, - blind: self.constructed.permuted_input_blind, - })) - // Open lookup product commitments at x_next - .chain(Some(ProverQuery { - point: x_next, - poly: &self.constructed.product_poly, - blind: self.constructed.product_blind, - })) - } - - // TODO: Remove pub(in crate::plonk) fn open<'a>( &'a self, pk: &'a ProvingKey, @@ -714,100 +388,6 @@ type ExpressionPair = (Polynomial, Polynomial, R: RngCore>( - pk: &ProvingKeyV2, - params: &P, - domain: &EvaluationDomain, - mut rng: R, - input_expression: &Polynomial, - table_expression: &Polynomial, -) -> Result, Error> { - 
let blinding_factors = pk.vk.cs.blinding_factors(); - let usable_rows = params.n() as usize - (blinding_factors + 1); - - let mut permuted_input_expression: Vec = input_expression.to_vec(); - permuted_input_expression.truncate(usable_rows); - - // Sort input lookup expression values - permuted_input_expression.sort(); - - // A BTreeMap of each unique element in the table expression and its count - let mut leftover_table_map: BTreeMap = table_expression - .iter() - .take(usable_rows) - .fold(BTreeMap::new(), |mut acc, coeff| { - *acc.entry(*coeff).or_insert(0) += 1; - acc - }); - let mut permuted_table_coeffs = vec![C::Scalar::ZERO; usable_rows]; - - let mut repeated_input_rows = permuted_input_expression - .iter() - .zip(permuted_table_coeffs.iter_mut()) - .enumerate() - .filter_map(|(row, (input_value, table_value))| { - // If this is the first occurrence of `input_value` in the input expression - if row == 0 || *input_value != permuted_input_expression[row - 1] { - *table_value = *input_value; - // Remove one instance of input_value from leftover_table_map - if let Some(count) = leftover_table_map.get_mut(input_value) { - assert!(*count > 0); - *count -= 1; - None - } else { - // Return error if input_value not found - Some(Err(Error::ConstraintSystemFailure)) - } - // If input value is repeated - } else { - Some(Ok(row)) - } - }) - .collect::, _>>()?; - - // Populate permuted table at unfilled rows with leftover table elements - for (coeff, count) in leftover_table_map.iter() { - for _ in 0..*count { - permuted_table_coeffs[repeated_input_rows.pop().unwrap()] = *coeff; - } - } - assert!(repeated_input_rows.is_empty()); - - permuted_input_expression - .extend((0..(blinding_factors + 1)).map(|_| C::Scalar::random(&mut rng))); - permuted_table_coeffs.extend((0..(blinding_factors + 1)).map(|_| C::Scalar::random(&mut rng))); - assert_eq!(permuted_input_expression.len(), params.n() as usize); - assert_eq!(permuted_table_coeffs.len(), params.n() as usize); - - #[cfg(feature = "sanity-checks")] - { - let mut last = None; - for (a, b) in permuted_input_expression - .iter() - .zip(permuted_table_coeffs.iter()) - .take(usable_rows) - { - if *a != *b { - assert_eq!(*a, last.unwrap()); - } - last = Some(*a); - } - } - - Ok(( - domain.lagrange_from_vec(permuted_input_expression), - domain.lagrange_from_vec(permuted_table_coeffs), - )) -} - -/// Given a vector of input values A and a vector of table values S, -/// this method permutes A and S to produce A' and S', such that: -/// - like values in A' are vertically adjacent to each other; and -/// - the first row in a sequence of like values in A' is the row -/// that has the corresponding value in S'. -/// This method returns (A', S') if no errors are encountered. 
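// Standalone sketch of the (A, S) -> (A', S') permutation implemented above, on
// plain u64 values and without the random blinding rows: sort A so equal values
// are adjacent, place each distinct value's table match at the first row of its
// run, and fill the remaining rows with the leftover table entries. Returns
// None where the real code returns Error::ConstraintSystemFailure; the helper
// name is hypothetical.
use std::collections::BTreeMap;

fn permute_pair(input: &[u64], table: &[u64]) -> Option<(Vec<u64>, Vec<u64>)> {
    assert_eq!(input.len(), table.len());

    let mut permuted_input = input.to_vec();
    permuted_input.sort();

    // Multiset of table values that have not been matched to an input value yet.
    let mut leftover: BTreeMap<u64, usize> = BTreeMap::new();
    for &t in table {
        *leftover.entry(t).or_insert(0) += 1;
    }

    let mut permuted_table = vec![0u64; table.len()];
    let mut repeated_rows = Vec::new();
    for (row, &a) in permuted_input.iter().enumerate() {
        if row == 0 || a != permuted_input[row - 1] {
            // First occurrence of `a`: the table must still contain it.
            let count = leftover.get_mut(&a).filter(|c| **c > 0)?;
            *count -= 1;
            permuted_table[row] = a;
        } else {
            repeated_rows.push(row);
        }
    }
    // Distribute whatever is left of the table into the repeated-input rows.
    for (&value, &count) in leftover.iter() {
        for _ in 0..count {
            permuted_table[repeated_rows.pop()?] = value;
        }
    }
    Some((permuted_input, permuted_table))
}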
-// TODO: Remove fn permute_expression_pair<'params, C: CurveAffine, P: Params<'params, C>, R: RngCore>( pk: &ProvingKey, params: &P, diff --git a/halo2_proofs/src/plonk/lookup/verifier.rs b/halo2_proofs/src/plonk/lookup/verifier.rs index 5667a54c5d..bbc86c8e9d 100644 --- a/halo2_proofs/src/plonk/lookup/verifier.rs +++ b/halo2_proofs/src/plonk/lookup/verifier.rs @@ -6,7 +6,7 @@ use super::super::{ use super::Argument; use crate::{ arithmetic::CurveAffine, - plonk::{Error, VerifyingKey, VerifyingKeyV2}, + plonk::{Error, VerifyingKey}, poly::{commitment::MSM, Rotation, VerifierQuery}, transcript::{EncodedChallenge, TranscriptRead}, }; @@ -168,49 +168,6 @@ impl Evaluated { )) } - // NOTE: Copy of queries with VerifyingKeyV2 - pub(in crate::plonk) fn queries_v2<'r, M: MSM + 'r>( - &'r self, - vk: &'r VerifyingKeyV2, - x: ChallengeX, - ) -> impl Iterator> + Clone { - let x_inv = vk.domain.rotate_omega(*x, Rotation::prev()); - let x_next = vk.domain.rotate_omega(*x, Rotation::next()); - - iter::empty() - // Open lookup product commitment at x - .chain(Some(VerifierQuery::new_commitment( - &self.committed.product_commitment, - *x, - self.product_eval, - ))) - // Open lookup input commitments at x - .chain(Some(VerifierQuery::new_commitment( - &self.committed.permuted.permuted_input_commitment, - *x, - self.permuted_input_eval, - ))) - // Open lookup table commitments at x - .chain(Some(VerifierQuery::new_commitment( - &self.committed.permuted.permuted_table_commitment, - *x, - self.permuted_table_eval, - ))) - // Open lookup input commitments at \omega^{-1} x - .chain(Some(VerifierQuery::new_commitment( - &self.committed.permuted.permuted_input_commitment, - x_inv, - self.permuted_input_inv_eval, - ))) - // Open lookup product commitment at \omega x - .chain(Some(VerifierQuery::new_commitment( - &self.committed.product_commitment, - x_next, - self.product_next_eval, - ))) - } - - // TODO: Remove pub(in crate::plonk) fn queries<'r, M: MSM + 'r>( &'r self, vk: &'r VerifyingKey, diff --git a/halo2_proofs/src/plonk/permutation/prover.rs b/halo2_proofs/src/plonk/permutation/prover.rs index 560a047d60..d6b108554d 100644 --- a/halo2_proofs/src/plonk/permutation/prover.rs +++ b/halo2_proofs/src/plonk/permutation/prover.rs @@ -42,157 +42,6 @@ pub(crate) struct Evaluated { } impl Argument { - // NOTE: Copy of commit with ProvingKeyV2 - #[allow(clippy::too_many_arguments)] - pub(in crate::plonk) fn commit_v2< - 'params, - C: CurveAffine, - P: Params<'params, C>, - E: EncodedChallenge, - R: RngCore, - T: TranscriptWrite, - >( - &self, - params: &P, - pk: &plonk::ProvingKeyV2, - pkey: &ProvingKey, - advice: &[Polynomial], - fixed: &[Polynomial], - instance: &[Polynomial], - beta: ChallengeBeta, - gamma: ChallengeGamma, - mut rng: R, - transcript: &mut T, - ) -> Result, Error> { - let domain = &pk.vk.domain; - - // How many columns can be included in a single permutation polynomial? - // We need to multiply by z(X) and (1 - (l_last(X) + l_blind(X))). This - // will never underflow because of the requirement of at least a degree - // 3 circuit for the permutation argument. - assert!(pk.vk.cs_degree >= 3); - let chunk_len = pk.vk.cs_degree - 2; - let blinding_factors = pk.vk.cs.blinding_factors(); - - // Each column gets its own delta power. 
- let mut deltaomega = C::Scalar::ONE; - - // Track the "last" value from the previous column set - let mut last_z = C::Scalar::ONE; - - let mut sets = vec![]; - - for (columns, permutations) in self - .columns - .chunks(chunk_len) - .zip(pkey.permutations.chunks(chunk_len)) - { - // Goal is to compute the products of fractions - // - // (p_j(\omega^i) + \delta^j \omega^i \beta + \gamma) / - // (p_j(\omega^i) + \beta s_j(\omega^i) + \gamma) - // - // where p_j(X) is the jth column in this permutation, - // and i is the ith row of the column. - - let mut modified_values = vec![C::Scalar::ONE; params.n() as usize]; - - // Iterate over each column of the permutation - for (&column, permuted_column_values) in columns.iter().zip(permutations.iter()) { - let values = match column.column_type() { - Any::Advice(_) => advice, - Any::Fixed => fixed, - Any::Instance => instance, - }; - parallelize(&mut modified_values, |modified_values, start| { - for ((modified_values, value), permuted_value) in modified_values - .iter_mut() - .zip(values[column.index()][start..].iter()) - .zip(permuted_column_values[start..].iter()) - { - *modified_values *= &(*beta * permuted_value + &*gamma + value); - } - }); - } - - // Invert to obtain the denominator for the permutation product polynomial - modified_values.batch_invert(); - - // Iterate over each column again, this time finishing the computation - // of the entire fraction by computing the numerators - for &column in columns.iter() { - let omega = domain.get_omega(); - let values = match column.column_type() { - Any::Advice(_) => advice, - Any::Fixed => fixed, - Any::Instance => instance, - }; - parallelize(&mut modified_values, |modified_values, start| { - let mut deltaomega = deltaomega * &omega.pow_vartime([start as u64, 0, 0, 0]); - for (modified_values, value) in modified_values - .iter_mut() - .zip(values[column.index()][start..].iter()) - { - // Multiply by p_j(\omega^i) + \delta^j \omega^i \beta - *modified_values *= &(deltaomega * &*beta + &*gamma + value); - deltaomega *= ω - } - }); - deltaomega *= &::DELTA; - } - - // The modified_values vector is a vector of products of fractions - // of the form - // - // (p_j(\omega^i) + \delta^j \omega^i \beta + \gamma) / - // (p_j(\omega^i) + \beta s_j(\omega^i) + \gamma) - // - // where i is the index into modified_values, for the jth column in - // the permutation - - // Compute the evaluations of the permutation product polynomial - // over our domain, starting with z[0] = 1 - let mut z = vec![last_z]; - for row in 1..(params.n() as usize) { - let mut tmp = z[row - 1]; - - tmp *= &modified_values[row - 1]; - z.push(tmp); - } - let mut z = domain.lagrange_from_vec(z); - // Set blinding factors - for z in &mut z[params.n() as usize - blinding_factors..] 
{ - *z = C::Scalar::random(&mut rng); - } - // Set new last_z - last_z = z[params.n() as usize - (blinding_factors + 1)]; - - let blind = Blind(C::Scalar::random(&mut rng)); - - let permutation_product_commitment_projective = params.commit_lagrange(&z, blind); - let permutation_product_blind = blind; - let z = domain.lagrange_to_coeff(z); - let permutation_product_poly = z.clone(); - - let permutation_product_coset = domain.coeff_to_extended(z.clone()); - - let permutation_product_commitment = - permutation_product_commitment_projective.to_affine(); - - // Hash the permutation product commitment - transcript.write_point(permutation_product_commitment)?; - - sets.push(CommittedSet { - permutation_product_poly, - permutation_product_coset, - permutation_product_blind, - }); - } - - Ok(Committed { sets }) - } - - // TODO: Remove #[allow(clippy::too_many_arguments)] pub(in crate::plonk) fn commit< 'params, @@ -385,53 +234,6 @@ impl super::ProvingKey { } impl Constructed { - // NOTE: Copy of evaluate with ProvingKeyV2 - pub(in crate::plonk) fn evaluate_v2, T: TranscriptWrite>( - self, - pk: &plonk::ProvingKeyV2, - x: ChallengeX, - transcript: &mut T, - ) -> Result, Error> { - let domain = &pk.vk.domain; - let blinding_factors = pk.vk.cs.blinding_factors(); - - { - let mut sets = self.sets.iter(); - - while let Some(set) = sets.next() { - let permutation_product_eval = eval_polynomial(&set.permutation_product_poly, *x); - - let permutation_product_next_eval = eval_polynomial( - &set.permutation_product_poly, - domain.rotate_omega(*x, Rotation::next()), - ); - - // Hash permutation product evals - for eval in iter::empty() - .chain(Some(&permutation_product_eval)) - .chain(Some(&permutation_product_next_eval)) - { - transcript.write_scalar(*eval)?; - } - - // If we have any remaining sets to process, evaluate this set at omega^u - // so we can constrain the last value of its running product to equal the - // first value of the next set's running product, chaining them together. - if sets.len() > 0 { - let permutation_product_last_eval = eval_polynomial( - &set.permutation_product_poly, - domain.rotate_omega(*x, Rotation(-((blinding_factors + 1) as i32))), - ); - - transcript.write_scalar(permutation_product_last_eval)?; - } - } - } - - Ok(Evaluated { constructed: self }) - } - - // TODO: Remove pub(in crate::plonk) fn evaluate, T: TranscriptWrite>( self, pk: &plonk::ProvingKey, @@ -479,54 +281,6 @@ impl Constructed { } impl Evaluated { - // NOTE: Copy of open with ProvingKeyV2 - pub(in crate::plonk) fn open_v2<'a>( - &'a self, - pk: &'a plonk::ProvingKeyV2, - x: ChallengeX, - ) -> impl Iterator> + Clone { - let blinding_factors = pk.vk.cs.blinding_factors(); - let x_next = pk.vk.domain.rotate_omega(*x, Rotation::next()); - let x_last = pk - .vk - .domain - .rotate_omega(*x, Rotation(-((blinding_factors + 1) as i32))); - - iter::empty() - .chain(self.constructed.sets.iter().flat_map(move |set| { - iter::empty() - // Open permutation product commitments at x and \omega x - .chain(Some(ProverQuery { - point: *x, - poly: &set.permutation_product_poly, - blind: set.permutation_product_blind, - })) - .chain(Some(ProverQuery { - point: x_next, - poly: &set.permutation_product_poly, - blind: set.permutation_product_blind, - })) - })) - // Open it at \omega^{last} x for all but the last set. This rotation is only - // sensical for the first row, but we only use this rotation in a constraint - // that is gated on l_0. 
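// Illustrative sketch of what the `rotate_omega` calls above and below compute:
// evaluating a polynomial "rotated by i rows" at x means evaluating it at
// x * omega^i, where omega generates the evaluation domain and negative
// rotations (Rotation::prev(), or the -(blinding_factors + 1) rotation used for
// the last set) use the inverse of omega. `F` stands in for the scalar field;
// the free-function form is hypothetical.
fn rotate_omega<F: ff::Field>(omega: F, omega_inv: F, x: F, rotation: i32) -> F {
    let multiplier = if rotation >= 0 {
        omega.pow_vartime([rotation as u64])
    } else {
        omega_inv.pow_vartime([rotation.unsigned_abs() as u64])
    };
    x * multiplier
}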
- .chain( - self.constructed - .sets - .iter() - .rev() - .skip(1) - .flat_map(move |set| { - Some(ProverQuery { - point: x_last, - poly: &set.permutation_product_poly, - blind: set.permutation_product_blind, - }) - }), - ) - } - - // TODO: Remove pub(in crate::plonk) fn open<'a>( &'a self, pk: &'a plonk::ProvingKey, diff --git a/halo2_proofs/src/plonk/permutation/verifier.rs b/halo2_proofs/src/plonk/permutation/verifier.rs index ac2f944298..a4637422ae 100644 --- a/halo2_proofs/src/plonk/permutation/verifier.rs +++ b/halo2_proofs/src/plonk/permutation/verifier.rs @@ -30,29 +30,6 @@ pub struct Evaluated { } impl Argument { - pub(crate) fn read_product_commitments_v2< - C: CurveAffine, - E: EncodedChallenge, - T: TranscriptRead, - >( - &self, - vk: &plonk::VerifyingKeyV2, - transcript: &mut T, - ) -> Result, Error> { - let chunk_len = vk.cs_degree - 2; - - let permutation_product_commitments = self - .columns - .chunks(chunk_len) - .map(|_| transcript.read_point()) - .collect::, _>>()?; - - Ok(Committed { - permutation_product_commitments, - }) - } - - // TODO: Remove pub(crate) fn read_product_commitments< C: CurveAffine, E: EncodedChallenge, @@ -122,110 +99,6 @@ impl Committed { } impl Evaluated { - // NOTE: Copy of expressions with VerifyingKeyV2 - #[allow(clippy::too_many_arguments)] - pub(in crate::plonk) fn expressions_v2<'a>( - &'a self, - vk: &'a plonk::VerifyingKeyV2, - p: &'a Argument, - common: &'a CommonEvaluated, - advice_evals: &'a [C::Scalar], - fixed_evals: &'a [C::Scalar], - instance_evals: &'a [C::Scalar], - l_0: C::Scalar, - l_last: C::Scalar, - l_blind: C::Scalar, - beta: ChallengeBeta, - gamma: ChallengeGamma, - x: ChallengeX, - ) -> impl Iterator + 'a { - let chunk_len = vk.cs_degree - 2; - iter::empty() - // Enforce only for the first set. - // l_0(X) * (1 - z_0(X)) = 0 - .chain( - self.sets - .first() - .map(|first_set| l_0 * &(C::Scalar::ONE - &first_set.permutation_product_eval)), - ) - // Enforce only for the last set. - // l_last(X) * (z_l(X)^2 - z_l(X)) = 0 - .chain(self.sets.last().map(|last_set| { - (last_set.permutation_product_eval.square() - &last_set.permutation_product_eval) - * &l_last - })) - // Except for the first set, enforce. 
- // l_0(X) * (z_i(X) - z_{i-1}(\omega^(last) X)) = 0 - .chain( - self.sets - .iter() - .skip(1) - .zip(self.sets.iter()) - .map(|(set, last_set)| { - ( - set.permutation_product_eval, - last_set.permutation_product_last_eval.unwrap(), - ) - }) - .map(move |(set, prev_last)| (set - &prev_last) * &l_0), - ) - // And for all the sets we enforce: - // (1 - (l_last(X) + l_blind(X))) * ( - // z_i(\omega X) \prod (p(X) + \beta s_i(X) + \gamma) - // - z_i(X) \prod (p(X) + \delta^i \beta X + \gamma) - // ) - .chain( - self.sets - .iter() - .zip(p.columns.chunks(chunk_len)) - .zip(common.permutation_evals.chunks(chunk_len)) - .enumerate() - .map(move |(chunk_index, ((set, columns), permutation_evals))| { - let mut left = set.permutation_product_next_eval; - for (eval, permutation_eval) in columns - .iter() - .map(|&column| match column.column_type() { - Any::Advice(_) => { - advice_evals[vk.cs.get_any_query_index(column, Rotation::cur())] - } - Any::Fixed => { - fixed_evals[vk.cs.get_any_query_index(column, Rotation::cur())] - } - Any::Instance => { - instance_evals - [vk.cs.get_any_query_index(column, Rotation::cur())] - } - }) - .zip(permutation_evals.iter()) - { - left *= &(eval + &(*beta * permutation_eval) + &*gamma); - } - - let mut right = set.permutation_product_eval; - let mut current_delta = (*beta * &*x) - * &(::DELTA - .pow_vartime([(chunk_index * chunk_len) as u64])); - for eval in columns.iter().map(|&column| match column.column_type() { - Any::Advice(_) => { - advice_evals[vk.cs.get_any_query_index(column, Rotation::cur())] - } - Any::Fixed => { - fixed_evals[vk.cs.get_any_query_index(column, Rotation::cur())] - } - Any::Instance => { - instance_evals[vk.cs.get_any_query_index(column, Rotation::cur())] - } - }) { - right *= &(eval + ¤t_delta + &*gamma); - current_delta *= &C::Scalar::DELTA; - } - - (left - &right) * (C::Scalar::ONE - &(l_last + &l_blind)) - }), - ) - } - - // TODO: Remove #[allow(clippy::too_many_arguments)] pub(in crate::plonk) fn expressions<'a>( &'a self, @@ -328,45 +201,6 @@ impl Evaluated { ) } - // NOTE: Copy of queries with VerifyingKeyV2 - pub(in crate::plonk) fn queries_v2<'r, M: MSM + 'r>( - &'r self, - vk: &'r plonk::VerifyingKeyV2, - x: ChallengeX, - ) -> impl Iterator> + Clone { - let blinding_factors = vk.cs.blinding_factors(); - let x_next = vk.domain.rotate_omega(*x, Rotation::next()); - let x_last = vk - .domain - .rotate_omega(*x, Rotation(-((blinding_factors + 1) as i32))); - - iter::empty() - .chain(self.sets.iter().flat_map(move |set| { - iter::empty() - // Open permutation product commitments at x and \omega^{-1} x - // Open permutation product commitments at x and \omega x - .chain(Some(VerifierQuery::new_commitment( - &set.permutation_product_commitment, - *x, - set.permutation_product_eval, - ))) - .chain(Some(VerifierQuery::new_commitment( - &set.permutation_product_commitment, - x_next, - set.permutation_product_next_eval, - ))) - })) - // Open it at \omega^{last} x for all but the last set - .chain(self.sets.iter().rev().skip(1).flat_map(move |set| { - Some(VerifierQuery::new_commitment( - &set.permutation_product_commitment, - x_last, - set.permutation_product_last_eval.unwrap(), - )) - })) - } - - // TODO: Remove pub(in crate::plonk) fn queries<'r, M: MSM + 'r>( &'r self, vk: &'r plonk::VerifyingKey, diff --git a/halo2_proofs/src/plonk/prover.rs b/halo2_proofs/src/plonk/prover.rs index 2c17f83e49..95cbdef5dc 100644 --- a/halo2_proofs/src/plonk/prover.rs +++ b/halo2_proofs/src/plonk/prover.rs @@ -12,7 +12,7 @@ use super::{ Instance, 
Selector, }, lookup, permutation, shuffle, vanishing, ChallengeBeta, ChallengeGamma, ChallengeTheta, - ChallengeX, ChallengeY, Error, ProvingKey, ProvingKeyV2, + ChallengeX, ChallengeY, Error, ProvingKey, }; use crate::{ @@ -57,7 +57,7 @@ pub struct ProverV2< > { // Circuit and setup fields params: &'params Scheme::ParamsProver, - pk: &'a ProvingKeyV2, + pk: &'a ProvingKey, // advice_queries: Vec<(Column, Rotation)>, // instance_queries: Vec<(Column, Rotation)>, // fixed_queries: Vec<(Column, Rotation)>, @@ -85,7 +85,7 @@ impl< /// Create a new prover object pub fn new( params: &'params Scheme::ParamsProver, - pk: &'a ProvingKeyV2, + pk: &'a ProvingKey, // TODO: If this was a vector the usage would be simpler instances: &[&[&[Scheme::Scalar]]], rng: R, @@ -384,7 +384,7 @@ impl< meta.lookups .iter() .map(|lookup| { - lookup.commit_permuted_v2( + lookup.commit_permuted( pk, params, &domain, @@ -419,7 +419,7 @@ impl< .iter() .zip(advice.iter()) .map(|(instance, advice)| { - meta.permutation.commit_v2( + meta.permutation.commit( params, pk, &pk.permutation, @@ -441,7 +441,7 @@ impl< lookups .into_iter() .map(|lookup| { - lookup.commit_product_v2(pk, params, beta, gamma, &mut rng, &mut transcript) + lookup.commit_product(pk, params, beta, gamma, &mut rng, &mut transcript) }) .collect::, _>>() }) @@ -455,7 +455,7 @@ impl< meta.shuffles .iter() .map(|shuffle| { - shuffle.commit_product_v2( + shuffle.commit_product( pk, params, domain, @@ -499,7 +499,7 @@ impl< .collect(); // Evaluate the h(X) polynomial - let h_poly = pk.ev.evaluate_h_v2( + let h_poly = pk.ev.evaluate_h( pk, &advice .iter() @@ -591,7 +591,7 @@ impl< let permutations: Vec> = permutations .into_iter() .map(|permutation| -> Result<_, _> { - permutation.construct().evaluate_v2(pk, x, &mut transcript) + permutation.construct().evaluate(pk, x, &mut transcript) }) .collect::, _>>()?; @@ -601,7 +601,7 @@ impl< .map(|lookups| -> Result, _> { lookups .into_iter() - .map(|p| p.evaluate_v2(pk, x, &mut transcript)) + .map(|p| p.evaluate(pk, x, &mut transcript)) .collect::, _>>() }) .collect::, _>>()?; @@ -612,7 +612,7 @@ impl< .map(|shuffles| -> Result, _> { shuffles .into_iter() - .map(|p| p.evaluate_v2(pk, x, &mut transcript)) + .map(|p| p.evaluate(pk, x, &mut transcript)) .collect::, _>>() }) .collect::, _>>()?; @@ -646,9 +646,9 @@ impl< blind: advice.advice_blinds[column.index()], }), ) - .chain(permutation.open_v2(pk, x)) - .chain(lookups.iter().flat_map(move |p| p.open_v2(pk, x))) - .chain(shuffles.iter().flat_map(move |p| p.open_v2(pk, x))) + .chain(permutation.open(pk, x)) + .chain(lookups.iter().flat_map(move |p| p.open(pk, x))) + .chain(shuffles.iter().flat_map(move |p| p.open(pk, x))) }) .chain(meta.fixed_queries.iter().map(|&(column, at)| ProverQuery { point: domain.rotate_omega(*x, at), diff --git a/halo2_proofs/src/plonk/shuffle/prover.rs b/halo2_proofs/src/plonk/shuffle/prover.rs index 59ccef29f3..fd30436a47 100644 --- a/halo2_proofs/src/plonk/shuffle/prover.rs +++ b/halo2_proofs/src/plonk/shuffle/prover.rs @@ -1,6 +1,5 @@ use super::super::{ circuit::Expression, ChallengeGamma, ChallengeTheta, ChallengeX, Error, ProvingKey, - ProvingKeyV2, }; use super::Argument; use crate::plonk::evaluation::evaluate; @@ -41,61 +40,6 @@ impl> Argument { /// [S_0, S_1, ..., S_{m-1}], this method /// - constructs A_compressed = \theta^{m-1} A_0 + theta^{m-2} A_1 + ... + \theta A_{m-2} + A_{m-1} /// and S_compressed = \theta^{m-1} S_0 + theta^{m-2} S_1 + ... 
+ \theta S_{m-2} + S_{m-1}, - // NOTE: Copy of compress with ProvingKeyV2 - #[allow(clippy::too_many_arguments)] - fn compress_v2<'a, 'params: 'a, C, P: Params<'params, C>>( - &self, - pk: &ProvingKeyV2, - params: &P, - domain: &EvaluationDomain, - theta: ChallengeTheta, - advice_values: &'a [Polynomial], - fixed_values: &'a [Polynomial], - instance_values: &'a [Polynomial], - challenges: &'a [C::Scalar], - ) -> Compressed - where - C: CurveAffine, - C::Curve: Mul + MulAssign, - { - // Closure to get values of expressions and compress them - let compress_expressions = |expressions: &[Expression]| { - let compressed_expression = expressions - .iter() - .map(|expression| { - pk.vk.domain.lagrange_from_vec(evaluate( - expression, - params.n() as usize, - 1, - fixed_values, - advice_values, - instance_values, - challenges, - )) - }) - .fold(domain.empty_lagrange(), |acc, expression| { - acc * *theta + &expression - }); - compressed_expression - }; - - // Get values of input expressions involved in the shuffle and compress them - let input_expression = compress_expressions(&self.input_expressions); - - // Get values of table expressions involved in the shuffle and compress them - let shuffle_expression = compress_expressions(&self.shuffle_expressions); - - Compressed { - input_expression, - shuffle_expression, - } - } - - /// Given a Shuffle with input expressions [A_0, A_1, ..., A_{m-1}] and table expressions - /// [S_0, S_1, ..., S_{m-1}], this method - /// - constructs A_compressed = \theta^{m-1} A_0 + theta^{m-2} A_1 + ... + \theta A_{m-2} + A_{m-1} - /// and S_compressed = \theta^{m-1} S_0 + theta^{m-2} S_1 + ... + \theta S_{m-2} + S_{m-1}, - // TODO: Remove #[allow(clippy::too_many_arguments)] fn compress<'a, 'params: 'a, C, P: Params<'params, C>>( &self, @@ -149,118 +93,6 @@ impl> Argument { /// constructs the grand product polynomial over the shuffle. /// The grand product polynomial is used to populate the Product struct. /// The Product struct is added to the Shuffle and finally returned by the method. 
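// Illustrative sketch of the theta-compression performed by `compress` above:
// m columns of equal length are folded row-wise into a single column, row i
// becoming theta^{m-1}*col_0[i] + ... + theta*col_{m-2}[i] + col_{m-1}[i], the
// same Horner-style fold as `acc * *theta + &expression`. `F` stands in for the
// scalar field; the helper name is hypothetical.
fn compress_columns<F: ff::Field>(theta: F, columns: &[Vec<F>]) -> Vec<F> {
    let n = columns.first().map_or(0, |col| col.len());
    columns.iter().fold(vec![F::ZERO; n], |mut acc, col| {
        for (a, v) in acc.iter_mut().zip(col.iter()) {
            *a = *a * theta + v;
        }
        acc
    })
}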
- // NOTE: Copy of commit_product with ProvingKeyV2 - #[allow(clippy::too_many_arguments)] - pub(in crate::plonk) fn commit_product_v2< - 'a, - 'params: 'a, - C, - P: Params<'params, C>, - E: EncodedChallenge, - R: RngCore, - T: TranscriptWrite, - >( - &self, - pk: &ProvingKeyV2, - params: &P, - domain: &EvaluationDomain, - theta: ChallengeTheta, - gamma: ChallengeGamma, - advice_values: &'a [Polynomial], - fixed_values: &'a [Polynomial], - instance_values: &'a [Polynomial], - challenges: &'a [C::Scalar], - mut rng: R, - transcript: &mut T, - ) -> Result, Error> - where - C: CurveAffine, - C::Curve: Mul + MulAssign, - { - let compressed = self.compress_v2( - pk, - params, - domain, - theta, - advice_values, - fixed_values, - instance_values, - challenges, - ); - - let blinding_factors = pk.vk.cs.blinding_factors(); - - let mut shuffle_product = vec![C::Scalar::ZERO; params.n() as usize]; - parallelize(&mut shuffle_product, |shuffle_product, start| { - for (shuffle_product, shuffle_value) in shuffle_product - .iter_mut() - .zip(compressed.shuffle_expression[start..].iter()) - { - *shuffle_product = *gamma + shuffle_value; - } - }); - - shuffle_product.iter_mut().batch_invert(); - - parallelize(&mut shuffle_product, |product, start| { - for (i, product) in product.iter_mut().enumerate() { - let i = i + start; - *product *= &(*gamma + compressed.input_expression[i]); - } - }); - - // Compute the evaluations of the shuffle product polynomial - // over our domain, starting with z[0] = 1 - let z = iter::once(C::Scalar::ONE) - .chain(shuffle_product) - .scan(C::Scalar::ONE, |state, cur| { - *state *= &cur; - Some(*state) - }) - // Take all rows including the "last" row which should - // be a boolean (and ideally 1, else soundness is broken) - .take(params.n() as usize - blinding_factors) - // Chain random blinding factors. - .chain((0..blinding_factors).map(|_| C::Scalar::random(&mut rng))) - .collect::>(); - assert_eq!(z.len(), params.n() as usize); - let z = pk.vk.domain.lagrange_from_vec(z); - - #[cfg(feature = "sanity-checks")] - { - // While in Lagrange basis, check that product is correctly constructed - let u = (params.n() as usize) - (blinding_factors + 1); - assert_eq!(z[0], C::Scalar::ONE); - for i in 0..u { - let mut left = z[i + 1]; - let input_value = &compressed.input_expression[i]; - let shuffle_value = &compressed.shuffle_expression[i]; - left *= &(*gamma + shuffle_value); - let mut right = z[i]; - right *= &(*gamma + input_value); - assert_eq!(left, right); - } - assert_eq!(z[u], C::Scalar::ONE); - } - - let product_blind = Blind(C::Scalar::random(rng)); - let product_commitment = params.commit_lagrange(&z, product_blind).to_affine(); - let z = pk.vk.domain.lagrange_to_coeff(z); - - // Hash product commitment - transcript.write_point(product_commitment)?; - - Ok(Committed:: { - product_poly: z, - product_blind, - }) - } - - /// Given a Shuffle with input expressions and table expressions this method - /// constructs the grand product polynomial over the shuffle. - /// The grand product polynomial is used to populate the Product struct. - /// The Product struct is added to the Shuffle and finally returned by the method. 
- // TODO: Remove #[allow(clippy::too_many_arguments)] pub(in crate::plonk) fn commit_product< 'a, @@ -369,31 +201,6 @@ impl> Argument { } impl Committed { - // NOTE: Copy of evaluate with ProvingKeyV2 - pub(in crate::plonk) fn evaluate_v2, T: TranscriptWrite>( - self, - pk: &ProvingKeyV2, - x: ChallengeX, - transcript: &mut T, - ) -> Result, Error> { - let domain = &pk.vk.domain; - let x_next = domain.rotate_omega(*x, Rotation::next()); - - let product_eval = eval_polynomial(&self.product_poly, *x); - let product_next_eval = eval_polynomial(&self.product_poly, x_next); - - // Hash each advice evaluation - for eval in iter::empty() - .chain(Some(product_eval)) - .chain(Some(product_next_eval)) - { - transcript.write_scalar(eval)?; - } - - Ok(Evaluated { constructed: self }) - } - - // TODO: Remove pub(in crate::plonk) fn evaluate, T: TranscriptWrite>( self, pk: &ProvingKey, @@ -419,30 +226,6 @@ impl Committed { } impl Evaluated { - // NOTE: Copy of open with ProvingKeyV2 - pub(in crate::plonk) fn open_v2<'a>( - &'a self, - pk: &'a ProvingKeyV2, - x: ChallengeX, - ) -> impl Iterator> + Clone { - let x_next = pk.vk.domain.rotate_omega(*x, Rotation::next()); - - iter::empty() - // Open shuffle product commitments at x - .chain(Some(ProverQuery { - point: *x, - poly: &self.constructed.product_poly, - blind: self.constructed.product_blind, - })) - // Open shuffle product commitments at x_next - .chain(Some(ProverQuery { - point: x_next, - poly: &self.constructed.product_poly, - blind: self.constructed.product_blind, - })) - } - - // TODO: Remove pub(in crate::plonk) fn open<'a>( &'a self, pk: &'a ProvingKey, diff --git a/halo2_proofs/src/plonk/shuffle/verifier.rs b/halo2_proofs/src/plonk/shuffle/verifier.rs index 2f77b52d1d..379cc5c8a1 100644 --- a/halo2_proofs/src/plonk/shuffle/verifier.rs +++ b/halo2_proofs/src/plonk/shuffle/verifier.rs @@ -4,7 +4,7 @@ use super::super::{circuit::Expression, ChallengeGamma, ChallengeTheta, Challeng use super::Argument; use crate::{ arithmetic::CurveAffine, - plonk::{Error, VerifyingKey, VerifyingKeyV2}, + plonk::{Error, VerifyingKey}, poly::{commitment::MSM, Rotation, VerifierQuery}, transcript::{EncodedChallenge, TranscriptRead}, }; @@ -114,30 +114,6 @@ impl Evaluated { ) } - // NOTE: Copy of queries with VerifyingKeyV2 - pub(in crate::plonk) fn queries_v2<'r, M: MSM + 'r>( - &'r self, - vk: &'r VerifyingKeyV2, - x: ChallengeX, - ) -> impl Iterator> + Clone { - let x_next = vk.domain.rotate_omega(*x, Rotation::next()); - - iter::empty() - // Open shuffle product commitment at x - .chain(Some(VerifierQuery::new_commitment( - &self.committed.product_commitment, - *x, - self.product_eval, - ))) - // Open shuffle product commitment at \omega x - .chain(Some(VerifierQuery::new_commitment( - &self.committed.product_commitment, - x_next, - self.product_next_eval, - ))) - } - - // TODO: Remove pub(in crate::plonk) fn queries<'r, M: MSM + 'r>( &'r self, vk: &'r VerifyingKey, diff --git a/halo2_proofs/src/plonk/vanishing/verifier.rs b/halo2_proofs/src/plonk/vanishing/verifier.rs index a179336e0d..0881dfb2c0 100644 --- a/halo2_proofs/src/plonk/vanishing/verifier.rs +++ b/halo2_proofs/src/plonk/vanishing/verifier.rs @@ -4,7 +4,7 @@ use ff::Field; use crate::{ arithmetic::CurveAffine, - plonk::{Error, VerifyingKey, VerifyingKeyV2}, + plonk::{Error, VerifyingKey}, poly::{ commitment::{Params, MSM}, VerifierQuery, @@ -53,22 +53,6 @@ impl Argument { } impl Committed { - pub(in crate::plonk) fn read_commitments_after_y_v2< - E: EncodedChallenge, - T: TranscriptRead, - >( - 
-        self,
-        vk: &VerifyingKeyV2<C>,
-        transcript: &mut T,
-    ) -> Result<Constructed<C>, Error> {
-        // Obtain a commitment to h(X) in the form of multiple pieces of degree n - 1
-        let h_commitments = read_n_points(transcript, vk.domain.get_quotient_poly_degree())?;
-
-        Ok(Constructed {
-            h_commitments,
-            random_poly_commitment: self.random_poly_commitment,
-        })
-    }
     pub(in crate::plonk) fn read_commitments_after_y<
         E: EncodedChallenge<C>,
         T: TranscriptRead<C, E>,
     >(
diff --git a/halo2_proofs/src/plonk/verifier.rs b/halo2_proofs/src/plonk/verifier.rs
index ace4ff31d2..531c43355a 100644
--- a/halo2_proofs/src/plonk/verifier.rs
+++ b/halo2_proofs/src/plonk/verifier.rs
@@ -4,7 +4,7 @@ use std::iter;
 use super::{
     vanishing, ChallengeBeta, ChallengeGamma, ChallengeTheta, ChallengeX, ChallengeY, Error,
-    VerifyingKey, VerifyingKeyV2,
+    VerifyingKey,
 };
 use crate::arithmetic::compute_inner_product;
 use crate::poly::commitment::{CommitmentScheme, Verifier};
@@ -20,435 +20,6 @@ mod batch;
 #[cfg(feature = "batch")]
 pub use batch::BatchVerifier;
 
-/// Returns a boolean indicating whether or not the proof is valid
-pub fn verify_proof_v2<
-    'params,
-    Scheme: CommitmentScheme,
-    V: Verifier<'params, Scheme>,
-    E: EncodedChallenge<Scheme::Curve>,
-    T: TranscriptRead<Scheme::Curve, E>,
-    Strategy: VerificationStrategy<'params, Scheme, V>,
->(
-    params: &'params Scheme::ParamsVerifier,
-    vk: &VerifyingKeyV2<Scheme::Curve>,
-    strategy: Strategy,
-    instances: &[&[&[Scheme::Scalar]]],
-    transcript: &mut T,
-) -> Result<Strategy::Output, Error>
-where
-    Scheme::Scalar: WithSmallOrderMulGroup<3> + FromUniformBytes<64>,
-{
-    // println!("DBG verify vk.cs.advice_queriess {:?}", vk.cs.advice_queries);
-    // Check that instances matches the expected number of instance columns
-    for instances in instances.iter() {
-        if instances.len() != vk.cs.num_instance_columns {
-            return Err(Error::InvalidInstances);
-        }
-    }
-
-    let instance_commitments = if V::QUERY_INSTANCE {
-        instances
-            .iter()
-            .map(|instance| {
-                instance
-                    .iter()
-                    .map(|instance| {
-                        if instance.len() > params.n() as usize - (vk.cs.blinding_factors() + 1) {
-                            return Err(Error::InstanceTooLarge);
-                        }
-                        let mut poly = instance.to_vec();
-                        poly.resize(params.n() as usize, Scheme::Scalar::ZERO);
-                        let poly = vk.domain.lagrange_from_vec(poly);
-
-                        Ok(params.commit_lagrange(&poly, Blind::default()).to_affine())
-                    })
-                    .collect::<Result<Vec<_>, _>>()
-            })
-            .collect::<Result<Vec<_>, _>>()?
-    } else {
-        vec![vec![]; instances.len()]
-    };
-
-    let num_proofs = instance_commitments.len();
-
-    // Hash verification key into transcript
-    vk.hash_into(transcript)?;
-
-    if V::QUERY_INSTANCE {
-        for instance_commitments in instance_commitments.iter() {
-            // Hash the instance (external) commitments into the transcript
-            for commitment in instance_commitments {
-                // dbg!(2, commitment);
-                transcript.common_point(*commitment)?
-            }
-        }
-    } else {
-        for instance in instances.iter() {
-            for instance in instance.iter() {
-                for value in instance.iter() {
-                    // dbg!(1, value);
-                    transcript.common_scalar(*value)?;
-                }
-            }
-        }
-    }
-
-    // Hash the prover's advice commitments into the transcript and squeeze challenges
-    let (advice_commitments, challenges) = {
-        let mut advice_commitments =
-            vec![vec![Scheme::Curve::default(); vk.cs.num_advice_columns]; num_proofs];
-        let mut challenges = vec![Scheme::Scalar::ZERO; vk.cs.num_challenges];
-
-        for current_phase in vk.cs.phases() {
-            for advice_commitments in advice_commitments.iter_mut() {
-                for (phase, commitment) in vk
-                    .cs
-                    .advice_column_phase
-                    .iter()
-                    .zip(advice_commitments.iter_mut())
-                {
-                    if current_phase == *phase {
-                        *commitment = transcript.read_point()?;
-                    }
-                }
-            }
-            for (phase, challenge) in vk.cs.challenge_phase.iter().zip(challenges.iter_mut()) {
-                if current_phase == *phase {
-                    *challenge = *transcript.squeeze_challenge_scalar::<()>();
-                }
-            }
-        }
-
-        (advice_commitments, challenges)
-    };
-
-    // Sample theta challenge for keeping lookup columns linearly independent
-    let theta: ChallengeTheta<_> = transcript.squeeze_challenge_scalar();
-
-    let lookups_permuted = (0..num_proofs)
-        .map(|_| -> Result<Vec<_>, _> {
-            // Hash each lookup permuted commitment
-            vk.cs
-                .lookups
-                .iter()
-                .map(|argument| argument.read_permuted_commitments(transcript))
-                .collect::<Result<Vec<_>, _>>()
-        })
-        .collect::<Result<Vec<_>, _>>()?;
-
-    // Sample beta challenge
-    let beta: ChallengeBeta<_> = transcript.squeeze_challenge_scalar();
-
-    // Sample gamma challenge
-    let gamma: ChallengeGamma<_> = transcript.squeeze_challenge_scalar();
-
-    let permutations_committed = (0..num_proofs)
-        .map(|_| {
-            // Hash each permutation product commitment
-            vk.cs
-                .permutation
-                .read_product_commitments_v2(vk, transcript)
-        })
-        .collect::<Result<Vec<_>, _>>()?;
-
-    let lookups_committed = lookups_permuted
-        .into_iter()
-        .map(|lookups| {
-            // Hash each lookup product commitment
-            lookups
-                .into_iter()
-                .map(|lookup| lookup.read_product_commitment(transcript))
-                .collect::<Result<Vec<_>, _>>()
-        })
-        .collect::<Result<Vec<_>, _>>()?;
-
-    let shuffles_committed = (0..num_proofs)
-        .map(|_| -> Result<Vec<_>, _> {
-            // Hash each shuffle product commitment
-            vk.cs
-                .shuffles
-                .iter()
-                .map(|argument| argument.read_product_commitment(transcript))
-                .collect::<Result<Vec<_>, _>>()
-        })
-        .collect::<Result<Vec<_>, _>>()?;
-
-    let vanishing = vanishing::Argument::read_commitments_before_y(transcript)?;
-
-    // Sample y challenge, which keeps the gates linearly independent.
-    let y: ChallengeY<_> = transcript.squeeze_challenge_scalar();
-
-    let vanishing = vanishing.read_commitments_after_y_v2(vk, transcript)?;
-
-    // Sample x challenge, which is used to ensure the circuit is
-    // satisfied with high probability.
-    let x: ChallengeX<_> = transcript.squeeze_challenge_scalar();
-    let instance_evals = if V::QUERY_INSTANCE {
-        (0..num_proofs)
-            .map(|_| -> Result<Vec<_>, _> {
-                read_n_scalars(transcript, vk.cs.instance_queries.len())
-            })
-            .collect::<Result<Vec<_>, _>>()?
-    } else {
-        let xn = x.pow([params.n()]);
-        let (min_rotation, max_rotation) =
-            vk.cs
-                .instance_queries
-                .iter()
-                .fold((0, 0), |(min, max), (_, rotation)| {
-                    if rotation.0 < min {
-                        (rotation.0, max)
-                    } else if rotation.0 > max {
-                        (min, rotation.0)
-                    } else {
-                        (min, max)
-                    }
-                });
-        let max_instance_len = instances
-            .iter()
-            .flat_map(|instance| instance.iter().map(|instance| instance.len()))
-            .max_by(Ord::cmp)
-            .unwrap_or_default();
-        let l_i_s = &vk.domain.l_i_range(
-            *x,
-            xn,
-            -max_rotation..max_instance_len as i32 + min_rotation.abs(),
-        );
-        instances
-            .iter()
-            .map(|instances| {
-                vk.cs
-                    .instance_queries
-                    .iter()
-                    .map(|(column, rotation)| {
-                        let instances = instances[column.index()];
-                        let offset = (max_rotation - rotation.0) as usize;
-                        compute_inner_product(instances, &l_i_s[offset..offset + instances.len()])
-                    })
-                    .collect::<Vec<_>>()
-            })
-            .collect::<Vec<_>>()
-    };
-
-    let advice_evals = (0..num_proofs)
-        .map(|_| -> Result<Vec<_>, _> { read_n_scalars(transcript, vk.cs.advice_queries.len()) })
-        .collect::<Result<Vec<_>, _>>()?;
-    // dbg!(&advice_evals);
-
-    let fixed_evals = read_n_scalars(transcript, vk.cs.fixed_queries.len())?;
-
-    let vanishing = vanishing.evaluate_after_x(transcript)?;
-
-    let permutations_common = vk.permutation.evaluate(transcript)?;
-
-    let permutations_evaluated = permutations_committed
-        .into_iter()
-        .map(|permutation| permutation.evaluate(transcript))
-        .collect::<Result<Vec<_>, _>>()?;
-
-    let lookups_evaluated = lookups_committed
-        .into_iter()
-        .map(|lookups| -> Result<Vec<_>, _> {
-            lookups
-                .into_iter()
-                .map(|lookup| lookup.evaluate(transcript))
-                .collect::<Result<Vec<_>, _>>()
-        })
-        .collect::<Result<Vec<_>, _>>()?;
-
-    let shuffles_evaluated = shuffles_committed
-        .into_iter()
-        .map(|shuffles| -> Result<Vec<_>, _> {
-            shuffles
-                .into_iter()
-                .map(|shuffle| shuffle.evaluate(transcript))
-                .collect::<Result<Vec<_>, _>>()
-        })
-        .collect::<Result<Vec<_>, _>>()?;
-
-    // This check ensures the circuit is satisfied so long as the polynomial
-    // commitments open to the correct values.
-    let vanishing = {
-        // x^n
-        let xn = x.pow([params.n()]);
-
-        let blinding_factors = vk.cs.blinding_factors();
-        let l_evals = vk
-            .domain
-            .l_i_range(*x, xn, (-((blinding_factors + 1) as i32))..=0);
-        assert_eq!(l_evals.len(), 2 + blinding_factors);
-        let l_last = l_evals[0];
-        let l_blind: Scheme::Scalar = l_evals[1..(1 + blinding_factors)]
-            .iter()
-            .fold(Scheme::Scalar::ZERO, |acc, eval| acc + eval);
-        let l_0 = l_evals[1 + blinding_factors];
-
-        // Compute the expected value of h(x)
-        let expressions = advice_evals
-            .iter()
-            .zip(instance_evals.iter())
-            .zip(permutations_evaluated.iter())
-            .zip(lookups_evaluated.iter())
-            .zip(shuffles_evaluated.iter())
-            .flat_map(
-                |((((advice_evals, instance_evals), permutation), lookups), shuffles)| {
-                    let challenges = &challenges;
-                    let fixed_evals = &fixed_evals;
-                    std::iter::empty()
-                        // Evaluate the circuit using the custom gates provided
-                        .chain(vk.cs.gates.iter().flat_map(move |gate| {
-                            gate.polynomials().iter().map(move |poly| {
-                                poly.evaluate(
-                                    &|scalar| scalar,
-                                    &|_| {
-                                        panic!("virtual selectors are removed during optimization")
-                                    },
-                                    &|query| fixed_evals[query.index.unwrap()],
-                                    &|query| advice_evals[query.index.unwrap()],
-                                    &|query| instance_evals[query.index.unwrap()],
-                                    &|challenge| challenges[challenge.index()],
-                                    &|a| -a,
-                                    &|a, b| a + &b,
-                                    &|a, b| a * &b,
-                                    &|a, scalar| a * &scalar,
-                                )
-                            })
-                        }))
-                        .chain(permutation.expressions_v2(
-                            vk,
-                            &vk.cs.permutation,
-                            &permutations_common,
-                            advice_evals,
-                            fixed_evals,
-                            instance_evals,
-                            l_0,
-                            l_last,
-                            l_blind,
-                            beta,
-                            gamma,
-                            x,
-                        ))
-                        .chain(lookups.iter().zip(vk.cs.lookups.iter()).flat_map(
-                            move |(p, argument)| {
-                                p.expressions(
-                                    l_0,
-                                    l_last,
-                                    l_blind,
-                                    argument,
-                                    theta,
-                                    beta,
-                                    gamma,
-                                    advice_evals,
-                                    fixed_evals,
-                                    instance_evals,
-                                    challenges,
-                                )
-                            },
-                        ))
-                        .chain(shuffles.iter().zip(vk.cs.shuffles.iter()).flat_map(
-                            move |(p, argument)| {
-                                p.expressions(
-                                    l_0,
-                                    l_last,
-                                    l_blind,
-                                    argument,
-                                    theta,
-                                    gamma,
-                                    advice_evals,
-                                    fixed_evals,
-                                    instance_evals,
-                                    challenges,
-                                )
-                            },
-                        ))
-                },
-            );
-
-        vanishing.verify(params, expressions, y, xn)
-    };
-
-    let queries = instance_commitments
-        .iter()
-        .zip(instance_evals.iter())
-        .zip(advice_commitments.iter())
-        .zip(advice_evals.iter())
-        .zip(permutations_evaluated.iter())
-        .zip(lookups_evaluated.iter())
-        .zip(shuffles_evaluated.iter())
-        .flat_map(
-            |(
-                (
-                    (
-                        (
-                            ((instance_commitments, instance_evals), advice_commitments),
-                            advice_evals,
-                        ),
-                        permutation,
-                    ),
-                    lookups,
-                ),
-                shuffles,
-            )| {
-                iter::empty()
-                    .chain(
-                        V::QUERY_INSTANCE
-                            .then_some(vk.cs.instance_queries.iter().enumerate().map(
-                                move |(query_index, &(column, at))| {
-                                    VerifierQuery::new_commitment(
-                                        &instance_commitments[column.index()],
-                                        vk.domain.rotate_omega(*x, at),
-                                        instance_evals[query_index],
-                                    )
-                                },
-                            ))
-                            .into_iter()
-                            .flatten(),
-                    )
-                    .chain(vk.cs.advice_queries.iter().enumerate().map(
-                        move |(query_index, &(column, at))| {
-                            VerifierQuery::new_commitment(
-                                &advice_commitments[column.index()],
-                                vk.domain.rotate_omega(*x, at),
-                                advice_evals[query_index],
-                            )
-                        },
-                    ))
-                    .chain(permutation.queries_v2(vk, x))
-                    .chain(lookups.iter().flat_map(move |p| p.queries_v2(vk, x)))
-                    .chain(shuffles.iter().flat_map(move |p| p.queries_v2(vk, x)))
-            },
-        )
-        .chain(
-            vk.cs
-                .fixed_queries
-                .iter()
-                .enumerate()
-                .map(|(query_index, &(column, at))| {
-                    VerifierQuery::new_commitment(
-                        &vk.fixed_commitments[column.index()],
-                        vk.domain.rotate_omega(*x, at),
-                        fixed_evals[query_index],
-                    )
-                }),
-        )
-        .chain(permutations_common.queries(&vk.permutation, x))
-        .chain(vanishing.queries(x));
-
-    // We are now convinced the circuit is satisfied so long as the
-    // polynomial commitments open to the correct values.
-
-    let verifier = V::new(params);
-    Ok(strategy
-        .process(|msm| {
-            println!("ONE");
-            verifier
-                .verify_proof(transcript, queries, msm)
-                .map_err(|_| Error::Opening)
-        })
-        .expect("todo"))
-}
-
-// TODO: Remove
 /// Returns a boolean indicating whether or not the proof is valid
 pub fn verify_proof<
     'params,
diff --git a/halo2_proofs/tests/frontend_backend_split.rs b/halo2_proofs/tests/frontend_backend_split.rs
index b46b16001f..dc9c12efae 100644
--- a/halo2_proofs/tests/frontend_backend_split.rs
+++ b/halo2_proofs/tests/frontend_backend_split.rs
@@ -8,9 +8,9 @@ use halo2_proofs::circuit::{AssignedCell, Cell, Layouter, Region, SimpleFloorPla
 use halo2_proofs::dev::MockProver;
 use halo2_proofs::plonk::{
     compile_circuit, create_proof, keygen_pk, keygen_pk_v2, keygen_vk, keygen_vk_v2, verify_proof,
-    verify_proof_v2, Advice, Assigned, Challenge, Circuit, Column, CompiledCircuitV2,
-    ConstraintSystem, ConstraintSystemV2Backend, Error, Expression, FirstPhase, Fixed, Instance,
-    ProverV2, ProvingKey, SecondPhase, Selector, TableColumn, VerifyingKey, WitnessCalculator,
+    Advice, Assigned, Challenge, Circuit, Column, CompiledCircuitV2, ConstraintSystem,
+    ConstraintSystemV2Backend, Error, Expression, FirstPhase, Fixed, Instance, ProverV2,
+    ProvingKey, SecondPhase, Selector, TableColumn, VerifyingKey, WitnessCalculator,
 };
 use halo2_proofs::poly::commitment::{CommitmentScheme, ParamsProver, Prover, Verifier};
 use halo2_proofs::poly::Rotation;
@@ -577,7 +577,7 @@ fn test_mycircuit_full_split() {
         Blake2bRead::<_, G1Affine, Challenge255<_>>::init(proof.as_slice());
 
     let strategy = SingleStrategy::new(&verifier_params);
-    verify_proof_v2::<KZGCommitmentScheme<Bn256>, VerifierSHPLONK<'_, Bn256>, _, _, _>(
+    verify_proof::<KZGCommitmentScheme<Bn256>, VerifierSHPLONK<'_, Bn256>, _, _, _>(
         &params,
         &vk,
         strategy,