Skip to content

Commit

Permalink
fix!: cleanup felt serialization language in python and wasm (#724)
Browse files Browse the repository at this point in the history
BREAKING CHANGE: python and wasm felt utilities have new names
  • Loading branch information
alexander-camuto authored Feb 25, 2024
1 parent 6c0c17c commit 88098b8
Show file tree
Hide file tree
Showing 24 changed files with 524 additions and 382 deletions.
6 changes: 3 additions & 3 deletions .github/workflows/rust.yml
Original file line number Diff line number Diff line change
Expand Up @@ -500,7 +500,7 @@ jobs:

prove-and-verify-aggr-tests:
runs-on: large-self-hosted
needs: [build, library-tests, python-tests]
needs: [build, library-tests]
steps:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
Expand All @@ -513,11 +513,11 @@ jobs:
crate: cargo-nextest
locked: true
- name: KZG tests
run: cargo nextest run --release --verbose tests_aggr::kzg_aggr_prove_and_verify_ --test-threads 8 -- --include-ignored
run: cargo nextest run --release --verbose tests_aggr::kzg_aggr_prove_and_verify_ --test-threads 4 -- --include-ignored

prove-and-verify-aggr-evm-tests:
runs-on: large-self-hosted
needs: [build, library-tests, python-tests]
needs: [build, library-tests]
steps:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
Expand Down
8 changes: 4 additions & 4 deletions examples/notebooks/data_attest_hashed.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -309,7 +309,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(ezkl.string_to_felt(res['processed_outputs']['poseidon_hash'][0]))"
"print(ezkl.felt_to_big_endian(res['processed_outputs']['poseidon_hash'][0]))"
]
},
{
Expand All @@ -325,7 +325,7 @@
"metadata": {},
"outputs": [],
"source": [
"from web3 import Web3, HTTPProvider, utils\n",
"from web3 import Web3, HTTPProvider\n",
"from solcx import compile_standard\n",
"from decimal import Decimal\n",
"import json\n",
Expand All @@ -338,7 +338,7 @@
"\n",
"def test_on_chain_data(res):\n",
" # Step 0: Convert the tensor to a flat list\n",
" data = [int(ezkl.string_to_felt(res['processed_outputs']['poseidon_hash'][0]), 0)]\n",
" data = [int(ezkl.felt_to_big_endian(res['processed_outputs']['poseidon_hash'][0]), 0)]\n",
"\n",
" # Step 1: Prepare the data\n",
" # Step 2: Prepare and compile the contract.\n",
Expand Down Expand Up @@ -648,7 +648,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.13"
"version": "3.9.15"
},
"orig_nbformat": 4
},
Expand Down
4 changes: 2 additions & 2 deletions examples/notebooks/ezkl_demo.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -695,7 +695,7 @@
"formatted_output = \"[\"\n",
"for i, value in enumerate(proof[\"instances\"]):\n",
" for j, field_element in enumerate(value):\n",
" onchain_input_array.append(ezkl.string_to_felt(field_element))\n",
" onchain_input_array.append(ezkl.felt_to_big_endian(field_element))\n",
" formatted_output += str(onchain_input_array[-1])\n",
" if j != len(value) - 1:\n",
" formatted_output += \", \"\n",
Expand All @@ -705,7 +705,7 @@
"# copy them over to remix and see if they verify\n",
"# What happens when you change a value?\n",
"print(\"pubInputs: \", formatted_output)\n",
"print(\"proof: \", \"0x\" + proof[\"proof\"])"
"print(\"proof: \", proof[\"proof\"])"
]
},
{
Expand Down
4 changes: 2 additions & 2 deletions examples/notebooks/set_membership.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -122,8 +122,8 @@
"# Loop through each element in the y tensor\n",
"for e in y_input:\n",
" # Apply the custom function and append the result to the list\n",
" print(ezkl.float_to_string(e,7))\n",
" result.append(ezkl.poseidon_hash([ezkl.float_to_string(e, 7)])[0])\n",
" print(ezkl.float_to_felt(e,7))\n",
" result.append(ezkl.poseidon_hash([ezkl.float_to_felt(e, 7)])[0])\n",
"\n",
"y = y.unsqueeze(0)\n",
"y = y.reshape(1, 9)\n",
Expand Down
8 changes: 4 additions & 4 deletions examples/notebooks/solvency.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -126,7 +126,7 @@
"# Loop through each element in the y tensor\n",
"for e in user_preimages:\n",
" # Apply the custom function and append the result to the list\n",
" users.append(ezkl.poseidon_hash([ezkl.float_to_string(e, 0)])[0])\n",
" users.append(ezkl.poseidon_hash([ezkl.float_to_felt(e, 0)])[0])\n",
"\n",
"users_t = torch.tensor(user_preimages)\n",
"users_t = users_t.reshape(1, 6)\n",
Expand Down Expand Up @@ -303,7 +303,7 @@
"# we force the output to be 1 this corresponds to the solvency test being true -- and we set this to a fixed vis output\n",
"# this means that the output is fixed and the verifier can see it but that if the input is not in the set the output will not be 0 and the verifier will reject\n",
"witness = json.load(open(witness_path, \"r\"))\n",
"witness[\"outputs\"][0] = [ezkl.float_to_string(1.0, 0)]\n",
"witness[\"outputs\"][0] = [ezkl.float_to_felt(1.0, 0)]\n",
"json.dump(witness, open(witness_path, \"w\"))"
]
},
Expand Down Expand Up @@ -417,7 +417,7 @@
"# we force the output to be 1 this corresponds to the solvency test being true -- and we set this to a fixed vis output\n",
"# this means that the output is fixed and the verifier can see it but that if the input is not in the set the output will not be 0 and the verifier will reject\n",
"witness = json.load(open(witness_path, \"r\"))\n",
"witness[\"outputs\"][0] = [ezkl.float_to_string(1.0, 0)]\n",
"witness[\"outputs\"][0] = [ezkl.float_to_felt(1.0, 0)]\n",
"json.dump(witness, open(witness_path, \"w\"))\n"
]
},
Expand Down Expand Up @@ -510,7 +510,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.13"
"version": "3.9.15"
}
},
"nbformat": 4,
Expand Down
8 changes: 4 additions & 4 deletions examples/notebooks/world_rotation.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -503,11 +503,11 @@
"pyplot.arrow(0, 0, 1, 0, width=0.02, alpha=0.5)\n",
"pyplot.arrow(0, 0, 0, 1, width=0.02, alpha=0.5)\n",
"\n",
"arrow_x = ezkl.string_to_float(witness['outputs'][0][0], out_scale)\n",
"arrow_y = ezkl.string_to_float(witness['outputs'][0][1], out_scale)\n",
"arrow_x = ezkl.felt_to_float(witness['outputs'][0][0], out_scale)\n",
"arrow_y = ezkl.felt_to_float(witness['outputs'][0][1], out_scale)\n",
"pyplot.arrow(0, 0, arrow_x, arrow_y, width=0.02)\n",
"arrow_x = ezkl.string_to_float(witness['outputs'][0][2], out_scale)\n",
"arrow_y = ezkl.string_to_float(witness['outputs'][0][3], out_scale)\n",
"arrow_x = ezkl.felt_to_float(witness['outputs'][0][2], out_scale)\n",
"arrow_y = ezkl.felt_to_float(witness['outputs'][0][3], out_scale)\n",
"pyplot.arrow(0, 0, arrow_x, arrow_y, width=0.02)"
]
}
Expand Down
79 changes: 57 additions & 22 deletions src/circuit/ops/chip.rs
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,8 @@ use serde::{Deserialize, Serialize};
use tosubcommand::ToFlags;

use crate::{
circuit::ops::base::BaseOp,
circuit::{
ops::base::BaseOp,
table::{Range, RangeCheck, Table},
utils,
},
Expand Down Expand Up @@ -540,7 +540,9 @@ impl<F: PrimeField + TensorType + PartialOrd> BaseConfig<F> {
&mut self,
cs: &mut ConstraintSystem<F>,
input: &VarTensor,
index: &VarTensor,
range: Range,
logrows: usize,
) -> Result<(), Box<dyn Error>>
where
F: Field,
Expand All @@ -556,7 +558,7 @@ impl<F: PrimeField + TensorType + PartialOrd> BaseConfig<F> {
let range_check =
if let std::collections::btree_map::Entry::Vacant(e) = self.range_checks.entry(range) {
// as all tables have the same input we see if there's another table who's input we can reuse
let range_check = RangeCheck::<F>::configure(cs, range);
let range_check = RangeCheck::<F>::configure(cs, range, logrows);
e.insert(range_check.clone());
range_check
} else {
Expand All @@ -565,32 +567,60 @@ impl<F: PrimeField + TensorType + PartialOrd> BaseConfig<F> {

for x in 0..input.num_blocks() {
for y in 0..input.num_inner_cols() {
let single_col_sel = cs.complex_selector();
let len = range_check.selector_constructor.degree;
let multi_col_selector = cs.complex_selector();

for (col_idx, input_col) in range_check.inputs.iter().enumerate() {
cs.lookup("", |cs| {
let mut res = vec![];
let sel = cs.query_selector(multi_col_selector);

let synthetic_sel = match len {
1 => Expression::Constant(F::from(1)),
_ => match index {
VarTensor::Advice { inner: advices, .. } => {
cs.query_advice(advices[x][y], Rotation(0))
}
_ => unreachable!(),
},
};

let input_query = match &input {
VarTensor::Advice { inner: advices, .. } => {
cs.query_advice(advices[x][y], Rotation(0))
}
_ => unreachable!(),
};

cs.lookup("", |cs| {
let mut res = vec![];
let sel = cs.query_selector(single_col_sel);
let default_x = range_check.get_first_element(col_idx);

let input_query = match &input {
VarTensor::Advice { inner: advices, .. } => {
cs.query_advice(advices[x][y], Rotation(0))
}
_ => unreachable!(),
};
let col_expr = sel.clone()
* range_check
.selector_constructor
.get_expr_at_idx(col_idx, synthetic_sel);

let default_x = range_check.get_first_element();
let multiplier = range_check
.selector_constructor
.get_selector_val_at_idx(col_idx);

let not_sel = Expression::Constant(F::ONE) - sel.clone();
let not_expr = Expression::Constant(multiplier) - col_expr.clone();

res.extend([(
sel.clone() * input_query.clone()
+ not_sel.clone() * Expression::Constant(default_x),
range_check.input,
)]);
res.extend([(
col_expr.clone() * input_query.clone()
+ not_expr.clone() * Expression::Constant(default_x),
*input_col,
)]);

res
});
selectors.insert((range, x, y), single_col_sel);
log::trace!("---------------- col {:?} ------------------", col_idx,);
log::trace!("expr: {:?}", col_expr,);
log::trace!("multiplier: {:?}", multiplier);
log::trace!("not_expr: {:?}", not_expr);
log::trace!("default x: {:?}", default_x);

res
});
}
selectors.insert((range, x, y), multi_col_selector);
}
}
self.range_check_selectors.extend(selectors);
Expand All @@ -600,6 +630,11 @@ impl<F: PrimeField + TensorType + PartialOrd> BaseConfig<F> {
self.lookup_input = input.clone();
}

if let VarTensor::Empty = self.lookup_index {
debug!("assigning lookup index");
self.lookup_index = index.clone();
}

Ok(())
}

Expand Down
66 changes: 34 additions & 32 deletions src/circuit/ops/layouts.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ use super::{
use crate::{
circuit::{
ops::base::BaseOp,
utils::{self},
utils::{self, F32},
},
fieldutils::{felt_to_i128, i128_to_felt},
tensor::{
Expand Down Expand Up @@ -105,6 +105,8 @@ pub fn div<F: PrimeField + TensorType + PartialOrd>(
BaseOp::Sub,
)?;

log::debug!("diff_with_input: {:?}", diff_with_input.get_int_evals()?);

range_check(
config,
region,
Expand Down Expand Up @@ -2482,6 +2484,28 @@ pub fn range_check<F: PrimeField + TensorType + PartialOrd>(

let is_dummy = region.is_dummy();

let table_index: ValTensor<F> = w
.get_inner_tensor()?
.par_enum_map(|_, e| {
Ok::<ValType<F>, TensorError>(if let Some(f) = e.get_felt_eval() {
let col_idx = if !is_dummy {
let table = config
.range_checks
.get(range)
.ok_or(TensorError::TableLookupError)?;
table.get_col_index(f)
} else {
F::ZERO
};
Value::known(col_idx).into()
} else {
Value::<F>::unknown().into()
})
})?
.into();

region.assign(&config.lookup_index, &table_index)?;

if !is_dummy {
(0..assigned_len)
.map(|i| {
Expand Down Expand Up @@ -2972,44 +2996,22 @@ pub fn range_check_percent<F: PrimeField + TensorType + PartialOrd>(
&[values[0].clone()],
&LookupOp::Recip {
input_scale: scale,
output_scale: scale,
// multiply by 100 to get the percent error
output_scale: F32(scale.0 * 100.0),
},
)?;

// Multiply the difference by the recip
let product = pairwise(config, region, &[diff, recip], BaseOp::Mult)?;
let rebased_product = div(config, region, &[product], F::from(scale.0 as u64))?;

let scale_squared = scale.0 * scale.0;

// Use the greater than look up table to check if the percent error is within the tolerance for upper bound
let tol = tol / 100.0;
let upper_bound = nonlinearity(
config,
region,
&[product.clone()],
&LookupOp::GreaterThan {
a: utils::F32(tol * scale_squared),
},
)?;

// Negate the product
let neg_product = neg(config, region, &[product])?;
let scaled_tol = (tol * scale.0) as i128;

// Use the greater than look up table to check if the percent error is within the tolerance for lower bound
let lower_bound = nonlinearity(
// check that it is within the tolerance range
range_check(
config,
region,
&[neg_product],
&LookupOp::GreaterThan {
a: utils::F32(tol * scale_squared),
},
)?;

// Add the lower_bound and upper_bound
let sum = pairwise(config, region, &[lower_bound, upper_bound], BaseOp::Add)?;

// Constrain the sum to be all zeros
is_zero_identity(config, region, &[sum.clone()], false)?;

Ok(sum)
&[rebased_product],
&(-scaled_tol, scaled_tol),
)
}
Loading

0 comments on commit 88098b8

Please sign in to comment.