diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
new file mode 100644
index 0000000..fa79d12
--- /dev/null
+++ b/.github/workflows/test.yml
@@ -0,0 +1,14 @@
+name: Test
+on: [push, pull_request]
+env:
+  CARGO_TERM_COLOR: always
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - name: Build
+        run: cargo build --verbose
+      - name: Run tests
+        run: |
+          cargo test --verbose
diff --git a/README.md b/README.md
index ef9f9a7..7537e3d 100644
--- a/README.md
+++ b/README.md
@@ -5,3 +5,76 @@ Proof of concept implementation of ProtoGalaxy (https://eprint.iacr.org/2023/110
 
 > Do not use in production.
 
 Thanks to [Liam Eagen](https://twitter.com/LiamEagen) and [Ariel Gabizon](https://twitter.com/rel_zeta_tech) for their kind explanations.
+
+This code has been written in the context of research on folding schemes at [0xPARC](https://0xparc.org).
+
+![protogalaxy img from Wikipedia](https://upload.wikimedia.org/wikipedia/commons/thumb/4/49/Stellar_Fireworks_Finale.jpg/303px-Stellar_Fireworks_Finale.jpg)
+
+(img: protogalaxies colliding, [from Wikipedia](https://en.wikipedia.org/wiki/File:Stellar_Fireworks_Finale.jpg))
+
+## Details
+Implementation of ProtoGalaxy's scheme as described in section 4 of the paper.
+
+The current version implements the folding for both prover and verifier; it works, but it is not yet optimized. Next steps on the implementation side include:
+- construct F(X) in O(n) following Claim 4.4
+- compute K(X) in O(kd log(kd)M + ndkC) as described in Claim 4.5
+- add tests that fold over multiple iterations and in a tree approach
+- add the decider
+- integrate with existing R1CS tooling for the R1CS & witness generation
+
+### Usage
+
+Example of folding k+1 instances:
+```rust
+// assume we have:
+// - an R1CS instance 'r1cs'
+// - a valid witness 'witness' from our running instance
+// - k valid 'witnesses' to be folded
+
+// set the initial random betas
+let beta = Fr::rand(&mut rng);
+let betas = powers_of_beta(beta, t);
+
+// compute the committed instance for our running witness
+let phi = Pedersen::<G1Projective>::commit(&pedersen_params, &witness.w, &witness.r_w);
+let instance = CommittedInstance::<G1Projective> {
+    phi,
+    betas: betas.clone(),
+    e: Fr::zero(),
+};
+
+// compute the k committed instances to be folded
+let mut instances: Vec<CommittedInstance<G1Projective>> = Vec::new();
+for i in 0..k {
+    let phi_i =
+        Pedersen::<G1Projective>::commit(&pedersen_params, &witnesses[i].w, &witnesses[i].r_w);
+    let instance_i = CommittedInstance::<G1Projective> {
+        phi: phi_i,
+        betas: betas.clone(),
+        e: Fr::zero(),
+    };
+    instances.push(instance_i);
+}
+
+// Prover folds the instances and witnesses
+let (F_coeffs, K_coeffs, folded_instance, folded_witness) = Folding::<G1Projective>::prover(
+    &mut transcript_p,
+    &r1cs,
+    instance.clone(),
+    witness,
+    instances.clone(),
+    witnesses,
+);
+
+// Verifier folds the instances
+let folded_instance_v = Folding::<G1Projective>::verifier(
+    &mut transcript_v,
+    &r1cs,
+    instance,
+    instances,
+    F_coeffs,
+    K_coeffs,
+);
+
+// check that the folded instance satisfies the relation
+assert!(check_instance(&r1cs, folded_instance, folded_witness));
+```
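+
+The example above also assumes some setup that the tests in `src/protogalaxy.rs` perform: an rng, the Pedersen parameters, a Poseidon config with one transcript per party, and `t = log2(n)`. A minimal sketch of that setup follows (crate-internal imports are omitted, and the exact `Transcript::new` signature is an assumption; see `src/transcript.rs` and the tests for the actual API):
+
+```rust
+use ark_bls12_381::{Fr, G1Projective};
+use ark_std::log2;
+
+let mut rng = ark_std::test_rng();
+
+// Pedersen parameters (the size 100 is WIP, as in the tests)
+let pedersen_params = Pedersen::<G1Projective>::new_params(&mut rng, 100);
+
+// Poseidon-based transcripts for prover and verifier
+let poseidon_config = poseidon_test_config::<Fr>();
+let mut transcript_p = Transcript::<Fr, G1Projective>::new(&poseidon_config);
+let mut transcript_v = Transcript::<Fr, G1Projective>::new(&poseidon_config);
+
+// t = log2(n), with n the R1CS width (= witness length)
+let t = log2(witness.w.len()) as usize;
+```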
+(see the actual code for more details)
diff --git a/src/lib.rs b/src/lib.rs
index e24a480..6695526 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,7 +1,7 @@
 #![allow(non_snake_case)]
 #![allow(non_upper_case_globals)]
-#![allow(unused)] // TMP
-#![allow(dead_code)] // TMP
+// #![allow(unused)] // TMP
+// #![allow(dead_code)] // TMP
 
 pub mod pedersen;
 pub mod protogalaxy;
diff --git a/src/pedersen.rs b/src/pedersen.rs
index 70ff1ec..48d58bd 100644
--- a/src/pedersen.rs
+++ b/src/pedersen.rs
@@ -1,4 +1,4 @@
-/// pedersen.rs file and adapted from https://github.com/arnaucube/nova-study
+/// pedersen.rs file adapted from https://github.com/arnaucube/nova-study
 use ark_ec::{CurveGroup, Group};
 use ark_std::{
     rand::{Rng, RngCore},
diff --git a/src/protogalaxy.rs b/src/protogalaxy.rs
index b42c583..f10b420 100644
--- a/src/protogalaxy.rs
+++ b/src/protogalaxy.rs
@@ -2,17 +2,16 @@
 use ark_crypto_primitives::sponge::Absorb;
 use ark_ec::{CurveGroup, Group};
 use ark_ff::fields::PrimeField;
 use ark_std::log2;
-use ark_std::{cfg_into_iter, One, Zero};
+use ark_std::{cfg_into_iter, Zero};
 use std::marker::PhantomData;
-use std::ops::{Add, Mul};
+use std::ops::Add;
 
-use ark_ff::{batch_inversion, FftField};
 use ark_poly::{
     univariate::{DensePolynomial, SparsePolynomial},
     DenseUVPolynomial, EvaluationDomain, Evaluations, GeneralEvaluationDomain, Polynomial,
 };
 
-use crate::pedersen::{Commitment, Params as PedersenParams, Pedersen, Proof as PedersenProof};
+use crate::pedersen::Commitment;
 use crate::transcript::Transcript;
 use crate::utils::*;
@@ -40,8 +39,7 @@ where
 {
     // WIP naming of functions
     pub fn prover(
-        tr: &mut Transcript<C::ScalarField, C>,
-        pedersen_params: &PedersenParams<C>,
+        transcript: &mut Transcript<C::ScalarField, C>,
         r1cs: &R1CS<C::ScalarField>,
         // running instance
         instance: CommittedInstance<C>,
@@ -57,13 +55,14 @@
     ) {
         let t = instance.betas.len();
         let n = r1cs.A[0].len();
+        assert_eq!(w.w.len(), n);
+        assert_eq!(log2(n) as usize, t);
 
         // TODO initialize transcript
-        let delta = tr.get_challenge();
+        let delta = transcript.get_challenge();
         let deltas = powers_of_beta(delta, t);
 
-        let f_w = eval_f(&r1cs, &w.w);
-        // println!("is f(w) {:?}", f_w);
+        let f_w = eval_f(r1cs, &w.w);
 
         // F(X)
         let mut F_X: SparsePolynomial<C::ScalarField> = SparsePolynomial::zero();
@@ -74,9 +73,9 @@
         }
 
         let F_X_dense = DensePolynomial::from(F_X.clone());
-        tr.add_vec(&F_X_dense.coeffs);
+        transcript.add_vec(&F_X_dense.coeffs);
 
-        let alpha = tr.get_challenge();
+        let alpha = transcript.get_challenge();
 
         // eval F(alpha)
         let F_alpha = F_X.evaluate(&alpha);
@@ -106,31 +105,78 @@
             w.clone(),
         ));
 
-        let gamma = tr.get_challenge();
+        let gamma = transcript.get_challenge();
 
-        // TODO WIP compute G(X) & K(X)
-        let G_evals: Vec<C::ScalarField> = vec![C::ScalarField::zero(); n];
-        let G_X: DensePolynomial<C::ScalarField> =
-            Evaluations::<C::ScalarField>::from_vec_and_domain(G_evals.clone(), H).interpolate();
-        // dbg!(&G_X);
-        let (K_X, remainder) = G_X.divide_by_vanishing_poly(H).unwrap();
-        // dbg!(&K_X);
-        assert!(remainder.is_zero());
+        let mut ws: Vec<Vec<C::ScalarField>> = Vec::new();
+        ws.push(w.w.clone());
+        for wj in vec_w.iter() {
+            assert_eq!(wj.w.len(), n);
+            ws.push(wj.w.clone());
+        }
+
+        let k = vec_instances.len();
+        let H = GeneralEvaluationDomain::<C::ScalarField>::new(k + 1).unwrap();
+        let EH = GeneralEvaluationDomain::<C::ScalarField>::new(t * k + 1).unwrap();
+        let L_X: Vec<DensePolynomial<C::ScalarField>> = lagrange_polys(H);
+
+        // Compute K(X) in a naive way; next iterations will compute K(X) as described in
+        // Claim 4.5 of the paper.
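+        // The loop below evaluates
+        //   G(X) = \sum_{i in [n]} pow_i(betas_star) * f_i( \sum_{j=0}^{k} L_j(X) * w_j )
+        // pointwise over the extended domain EH (with w_0 the running witness), and K(X) is
+        // then recovered from the relation G(X) = F(alpha)*L_0(X) + Z(X)*K(X).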
+        let mut G_evals: Vec<C::ScalarField> = vec![C::ScalarField::zero(); EH.size()];
+        for (hi, h) in EH.elements().enumerate() {
+            // each iteration evaluates G(h)
+            // inner = L_0(h) * w + \sum_{j in [k]} L_j(h) * w_j
+            let mut inner: Vec<C::ScalarField> = vec![C::ScalarField::zero(); ws[0].len()];
+            for (i, w) in ws.iter().enumerate() {
+                // Li_w = L_i(X) * w
+                let mut Li_w: Vec<DensePolynomial<C::ScalarField>> =
+                    vec![DensePolynomial::<C::ScalarField>::zero(); w.len()];
+                for (j, wj) in w.iter().enumerate() {
+                    let Li_wj = &L_X[i] * *wj;
+                    Li_w[j] = Li_wj;
+                }
+                // Liw_h = Li_w(h) = L_i(h) * w
+                let mut Liw_h: Vec<C::ScalarField> = vec![C::ScalarField::zero(); w.len()];
+                for (j, _) in Li_w.iter().enumerate() {
+                    Liw_h[j] = Li_w[j].evaluate(&h);
+                }
+
+                for j in 0..inner.len() {
+                    inner[j] += Liw_h[j];
+                }
+            }
+            let f_ev = eval_f(r1cs, &inner);
+            let mut Gsum = C::ScalarField::zero();
+            for i in 0..n {
+                let pow_i_betas = pow_i(i, &betas_star);
+                let curr = pow_i_betas * f_ev[i];
+                Gsum += curr;
+            }
+            // G_evals[hi] = Gsum / Z_X.evaluate(&h); // WIP
+            G_evals[hi] = Gsum;
+        }
+        let G_X: DensePolynomial<C::ScalarField> =
+            Evaluations::<C::ScalarField>::from_vec_and_domain(G_evals.clone(), EH).interpolate();
         let Z_X: DensePolynomial<C::ScalarField> = H.vanishing_polynomial().into();
+        // K(X) = (G(X) - F(alpha)*L_0(X)) / Z(X)
+        let L0_e = &L_X[0] * F_alpha; // L_0(X)*F(alpha) is 0 in the native case (f(w) = 0)
+        let G_L0e = &G_X - &L0_e;
+        // TODO move division by Z_X to the prev loop
+        let (K_X, remainder) = G_L0e.divide_by_vanishing_poly(H).unwrap();
+        assert!(remainder.is_zero());
 
         let e_star =
             F_alpha * L_X[0].evaluate(&gamma) + Z_X.evaluate(&gamma) * K_X.evaluate(&gamma);
 
         let mut phi_star: C = instance.phi.0 * L_X[0].evaluate(&gamma);
         for i in 0..k {
-            phi_star += vec_instances[i].phi.0 * L_X[i].evaluate(&gamma);
+            phi_star += vec_instances[i].phi.0 * L_X[i + 1].evaluate(&gamma);
         }
         let mut w_star: Vec<C::ScalarField> = vec_scalar_mul(&w.w, &L_X[0].evaluate(&gamma));
         for i in 0..k {
             w_star = vec_add(
                 &w_star,
-                &vec_scalar_mul(&vec_w[i].w, &L_X[i].evaluate(&gamma)),
+                &vec_scalar_mul(&vec_w[i].w, &L_X[i + 1].evaluate(&gamma)),
             );
         }
@@ -144,15 +190,14 @@
             },
             Witness {
                 w: w_star,
-                r_w: w.r_w,
+                r_w: w.r_w, // WIP: also fold r_w (the blinding factor used for the w commitment)
             },
         )
     }
 
     pub fn verifier(
-        tr: &mut Transcript<C::ScalarField, C>,
-        pedersen_params: &PedersenParams<C>,
-        r1cs: R1CS<C::ScalarField>,
+        transcript: &mut Transcript<C::ScalarField, C>,
+        r1cs: &R1CS<C::ScalarField>,
         // running instance
         instance: CommittedInstance<C>,
         // incoming instances
@@ -164,17 +209,14 @@
         let t = instance.betas.len();
         let n = r1cs.A[0].len();
 
-        let delta = tr.get_challenge();
+        let delta = transcript.get_challenge();
         let deltas = powers_of_beta(delta, t);
 
-        tr.add_vec(&F_coeffs);
+        transcript.add_vec(&F_coeffs);
 
-        let alpha = tr.get_challenge();
+        let alpha = transcript.get_challenge();
         let alphas = all_powers(alpha, n);
 
-        // dbg!(instance.e);
-        // dbg!(F_coeffs[0]);
-
         // F(alpha) = e + \sum_t F_i * alpha^i
         let mut F_alpha = instance.e;
         for (i, F_i) in F_coeffs.iter().enumerate() {
@@ -193,20 +235,21 @@
             .map(|(beta_i, delta_i_alpha)| *beta_i + delta_i_alpha)
             .collect();
 
-        let gamma = tr.get_challenge();
+        let gamma = transcript.get_challenge();
 
         let k = vec_instances.len();
-        let domain_k = GeneralEvaluationDomain::<C::ScalarField>::new(k).unwrap();
-        let L_X: Vec<DensePolynomial<C::ScalarField>> = lagrange_polys(domain_k);
-        let Z_X: DensePolynomial<C::ScalarField> = domain_k.vanishing_polynomial().into();
+        let H = GeneralEvaluationDomain::<C::ScalarField>::new(k + 1).unwrap();
+        let L_X: Vec<DensePolynomial<C::ScalarField>> = lagrange_polys(H);
+        let Z_X: DensePolynomial<C::ScalarField> = H.vanishing_polynomial().into();
         let K_X: DensePolynomial<C::ScalarField> =
             DensePolynomial::<C::ScalarField>::from_coefficients_vec(K_coeffs);
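+        // recompute the folded instance from the transcript:
+        //   e*   = F(alpha)*L_0(gamma) + Z(gamma)*K(gamma)
+        //   phi* = \sum_{i=0}^{k} L_i(gamma) * phi_i   (phi_0 is the running instance's phi)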
+        let e_star =
+            F_alpha * L_X[0].evaluate(&gamma) + Z_X.evaluate(&gamma) * K_X.evaluate(&gamma);
 
         let mut phi_star: C = instance.phi.0 * L_X[0].evaluate(&gamma);
         for i in 0..k {
-            phi_star += vec_instances[i].phi.0 * L_X[i].evaluate(&gamma);
+            phi_star += vec_instances[i].phi.0 * L_X[i + 1].evaluate(&gamma);
         }
 
         // return the folded instance
@@ -226,12 +269,12 @@ fn pow_i<F: PrimeField>(i: usize, betas: &Vec<F>) -> F {
     let b = bit_decompose(i as u64, n as usize);
     let mut r: F = F::one();
-    for (j, beta_i) in betas.iter().enumerate() {
+    for (j, beta_j) in betas.iter().enumerate() {
         let mut b_j = F::zero();
         if b[j] {
             b_j = F::one();
         }
-        r *= (F::one() - b_j) + b_j * betas[j];
+        r *= (F::one() - b_j) + b_j * beta_j;
     }
     r
 }
@@ -244,20 +287,19 @@ fn pow_i_over_x<F: PrimeField>(i: usize, betas: &Vec<F>, deltas: &Vec<F>) -> SparsePolynomial<F>
     let mut r: SparsePolynomial<F> =
         SparsePolynomial::<F>::from_coefficients_vec(vec![(0, F::one())]); // start with r(x) = 1
-    for (j, beta_i) in betas.iter().enumerate() {
+    for (j, beta_j) in betas.iter().enumerate() {
         if b[j] {
             let curr: SparsePolynomial<F> =
-                SparsePolynomial::<F>::from_coefficients_vec(vec![(0, betas[j]), (1, deltas[j])]);
+                SparsePolynomial::<F>::from_coefficients_vec(vec![(0, *beta_j), (1, deltas[j])]);
             r = r.mul(&curr);
         }
     }
     r
 }
 
-// method from caulk: https://github.com/caulk-crypto/caulk/tree/8210b51fb8a9eef4335505d1695c44ddc7bf8170/src/multi/setup.rs#L300
+// lagrange_polys method from caulk: https://github.com/caulk-crypto/caulk/tree/8210b51fb8a9eef4335505d1695c44ddc7bf8170/src/multi/setup.rs#L300
 fn lagrange_polys<F: PrimeField>(domain_n: GeneralEvaluationDomain<F>) -> Vec<DensePolynomial<F>> {
     let mut lagrange_polynomials: Vec<DensePolynomial<F>> = Vec::new();
-
     for i in 0..domain_n.size() {
         let evals: Vec<F> = cfg_into_iter!(0..domain_n.size())
             .map(|k| if k == i { F::one() } else { F::zero() })
@@ -275,11 +317,10 @@ pub struct R1CS<F: PrimeField> {
 }
 
 // f(w) in R1CS context
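+// f(w) = (A*w) ∘ (B*w) - C*w, where ∘ is the entry-wise (Hadamard) product;
+// w satisfies the R1CS iff f(w) is the zero vector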
-fn eval_f<F: PrimeField>(r1cs: &R1CS<F>, w: &Vec<F>) -> Vec<F> {
-    let AzBz = hadamard(&mat_vec_mul(&r1cs.A, &w), &mat_vec_mul(&r1cs.B, &w));
-    let Cz = mat_vec_mul(&r1cs.C, &w);
-    let f_w = vec_sub(&AzBz, &Cz);
-    f_w
+fn eval_f<F: PrimeField>(r1cs: &R1CS<F>, w: &[F]) -> Vec<F> {
+    let AzBz = hadamard(&mat_vec_mul(&r1cs.A, w), &mat_vec_mul(&r1cs.B, w));
+    let Cz = mat_vec_mul(&r1cs.C, w);
+    vec_sub(&AzBz, &Cz)
 }
 
 fn check_instance<C: CurveGroup>(
@@ -288,11 +329,8 @@
     w: Witness<C::ScalarField>,
 ) -> bool {
     let n = 2_u64.pow(instance.betas.len() as u32) as usize;
-    dbg!(n);
-    dbg!(w.w.len());
 
-    let f_w = eval_f(&r1cs, &w.w); // f(w)
-    dbg!(f_w.len());
+    let f_w = eval_f(r1cs, &w.w); // f(w)
 
     let mut r = C::ScalarField::zero();
     for i in 0..n {
@@ -310,7 +348,6 @@ mod tests {
     use super::*;
     use crate::pedersen::Pedersen;
    use crate::transcript::poseidon_test_config;
     use ark_bls12_381::{Fr, G1Projective};
-    use ark_std::One;
     use ark_std::UniformRand;
 
     pub fn to_F_matrix<F: PrimeField>(M: Vec<Vec<usize>>) -> Vec<Vec<F>> {
@@ -405,16 +442,22 @@
         let mut rng = ark_std::test_rng();
         let t = 3;
         let n = 8;
-        // let beta = Fr::rand(&mut rng);
-        // let delta = Fr::rand(&mut rng);
-        let beta = Fr::from(3);
-        let delta = Fr::from(5);
+        let beta = Fr::rand(&mut rng);
+        let delta = Fr::rand(&mut rng);
         let betas = powers_of_beta(beta, t);
         let deltas = powers_of_beta(delta, t);
 
-        // for i in 0..n {
-        //     dbg!(pow_i_over_x(i, &betas, &deltas));
-        // }
+        // compute b + X*d, with X = rand
+        let x = Fr::rand(&mut rng);
+        let bxd = vec_add(&betas, &vec_scalar_mul(&deltas, &x));
+
+        // assert that computing pow_i_over_x over (betas, deltas) is equivalent to first
+        // computing the vector betas + x*deltas and then computing pow_i over it
+        for i in 0..n {
+            let pow_i1 = pow_i_over_x(i, &betas, &deltas);
+            let pow_i2 = pow_i(i, &bxd);
+            assert_eq!(pow_i1.evaluate(&x), pow_i2);
+        }
     }
 
     #[test]
@@ -431,15 +474,15 @@
     }
 
     #[test]
-    fn test_fold() {
+    fn test_fold_native_case() {
         let mut rng = ark_std::test_rng();
         let pedersen_params = Pedersen::<G1Projective>::new_params(&mut rng, 100); // 100 is WIP, will be taken from the actual vector length
         let poseidon_config = poseidon_test_config::<Fr>();
 
-        let k = 5;
+        let k = 6;
         let r1cs = get_test_r1cs::<Fr>();
-        let mut z = get_test_z::<Fr>(3);
+        let z = get_test_z::<Fr>(3);
         let mut zs: Vec<Vec<Fr>> = Vec::new();
         for i in 0..k {
             let z_i = get_test_z::<Fr>(i + 4);
             zs.push(z_i);
         }
@@ -452,8 +495,6 @@
         let n = z.len();
         let t = log2(n) as usize;
-        dbg!(n);
-        dbg!(t);
 
         let beta = Fr::rand(&mut rng);
         let betas = powers_of_beta(beta, t);
@@ -489,30 +530,30 @@
         let (F_coeffs, K_coeffs, folded_instance, folded_witness) = Folding::<G1Projective>::prover(
             &mut transcript_p,
-            &pedersen_params,
             &r1cs,
             instance.clone(),
             witness,
             instances.clone(),
             witnesses,
         );
-        dbg!(&F_coeffs);
 
-        // veriier
+        // verifier
         let folded_instance_v = Folding::<G1Projective>::verifier(
             &mut transcript_v,
-            &pedersen_params,
-            r1cs.clone(), // TODO rm clone do borrow
+            &r1cs,
             instance,
             instances,
             F_coeffs,
             K_coeffs,
         );
 
+        // check that prover & verifier folded instances are the same values
         assert_eq!(folded_instance.phi.0, folded_instance_v.phi.0);
         assert_eq!(folded_instance.betas, folded_instance_v.betas);
         assert_eq!(folded_instance.e, folded_instance_v.e);
+        assert!(!folded_instance.e.is_zero());
 
-        // assert!(check_instance(&r1cs, folded_instance, folded_witness));
+        // check that the folded instance satisfies the relation
+        assert!(check_instance(&r1cs, folded_instance, folded_witness));
     }
 }
diff --git a/src/transcript.rs b/src/transcript.rs
index c750472..53b93c9 100644
--- a/src/transcript.rs
+++ b/src/transcript.rs
@@ -1,4 +1,4 @@
-/// transcript.rs file and adapted from https://github.com/arnaucube/nova-study
+/// transcript.rs file adapted from https://github.com/arnaucube/nova-study
 use ark_ec::{AffineRepr, CurveGroup};
 use ark_ff::PrimeField;
 use std::marker::PhantomData;
diff --git a/src/utils.rs b/src/utils.rs
index 7e6044f..1c13634 100644
--- a/src/utils.rs
+++ b/src/utils.rs
@@ -1,7 +1,7 @@
 use ark_ff::fields::PrimeField;
 use ark_std::cfg_iter;
 
-pub fn vec_add<F: PrimeField>(a: &Vec<F>, b: &[F]) -> Vec<F> {
+pub fn vec_add<F: PrimeField>(a: &[F], b: &[F]) -> Vec<F> {
     let mut r: Vec<F> = vec![F::zero(); a.len()];
     for i in 0..a.len() {
         r[i] = a[i] + b[i];
@@ -9,7 +9,7 @@
     r
 }
 
-pub fn vec_sub<F: PrimeField>(a: &Vec<F>, b: &Vec<F>) -> Vec<F> {
+pub fn vec_sub<F: PrimeField>(a: &[F], b: &[F]) -> Vec<F> {
     let mut r: Vec<F> = vec![F::zero(); a.len()];
     for i in 0..a.len() {
         r[i] = a[i] - b[i];
@@ -45,7 +45,7 @@ pub fn mat_vec_mul<F: PrimeField>(M: &Vec<Vec<F>>, z: &[F]) -> Vec<F> {
     r
 }
 
-pub fn hadamard<F: PrimeField>(a: &Vec<F>, b: &Vec<F>) -> Vec<F> {
+pub fn hadamard<F: PrimeField>(a: &[F], b: &[F]) -> Vec<F> {
     cfg_iter!(a).zip(b).map(|(a, b)| *a * b).collect()
 }
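+
+// note: the helpers in this file assume that their two slice arguments have the same
+// length; the callers in protogalaxy.rs uphold this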