Fix OOM while building MST (#164)
* Prepare for benchmarking

* fix: applied 'rayon' instead of 'thread' for OOM

* fix: update benchmark result

---------

Co-authored-by: Alex Kuzmin <[email protected]>
sifnoc and alxkzmn authored Oct 26, 2023
1 parent f11d633 commit ffd8259
Showing 7 changed files with 36 additions and 67 deletions.
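The gist of the change: the tree builder previously fanned work out with `std::thread`, copying each chunk of entries into its own `Vec` (`chunk.to_vec()`) before moving it into a spawned thread, which at 2^28 entries is a plausible source of the OOM. The fix switches to rayon's work-stealing iterators, which borrow the input in place. A minimal sketch of the new pattern, using a stand-in `compute_leaf` rather than the crate's real `Entry` type:

```rust
use rayon::prelude::*;

// Stand-in for Entry::compute_leaf; the real hashing lives in the crate.
fn compute_leaf(entry: &u64) -> u64 {
    entry.wrapping_mul(0x9e37_79b9)
}

fn main() {
    let entries: Vec<u64> = (0..1_000_000).collect();

    // Old approach: each chunk was cloned and moved into thread::spawn, so
    // peak memory held the input, the clones, and the per-thread results
    // all at once. New approach: par_iter() borrows `entries` in place;
    // only the output vector is allocated.
    let leaves: Vec<u64> = entries.par_iter().map(compute_leaf).collect();

    assert_eq!(leaves.len(), entries.len());
}
```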
11 changes: 5 additions & 6 deletions backend/Cargo.lock

Some generated files are not rendered by default.

11 changes: 5 additions & 6 deletions zk_prover/Cargo.lock

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions zk_prover/Cargo.toml
@@ -32,6 +32,7 @@ nova-scotia = { git = "https://github.com/nalinbhardwaj/Nova-Scotia" }
 poseidon-rs = { git = "https://github.com/arnaucube/poseidon-rs" }
 ff = {package="ff_ce" , version="0.11", features = ["derive"]}
 num-traits = "0.2.16"
+rayon = "1.8.0"

 [dev-dependencies]
 criterion= "0.3"
10 changes: 5 additions & 5 deletions zk_prover/README.md
@@ -118,27 +118,27 @@ Furthermore the benchmarking function `verify_zk_proof_benchmark` will also prin

 ## Current Benches

-Run on MacBook Pro 2023, M2 Pro, 32GB RAM, 12 cores
+Run on AWS EC2 instance `m7a.8xlarge` with 32 vcores and 128GB RAM

 Benches run after PR #80 (`add solidity verifier`). In order to achieve small proof size, to be cheap to verify on-chain.

-2^15 entries (32768) users, 2 assets. Range is 14 bytes, considering SHIBA INU token supply (110 bits) as the upper bound.
+2^28 entries (268435456) users, one asset. Range is 14 bytes, considering SHIBA INU token supply (110 bits) as the upper bound.

 | MST init |
 | -------- |
-| 4.2 s |
+| 7143.9 s |

 For Merkle Sum Tree Proof of Inclusion circuit

 | VK Gen | Pk Gen | Proof Generation | Proof Verification | Proof Size (bytes) |
 | --------- | --------- | ---------------- | ------------------ | ------------------ |
-| 176.05 ms | 122.75 ms | 473.98 ms | 3.8 ms | 1632 |
+| 88.92 ms | 135.96 ms | 369.31 ms | 3.65 ms | 1632 |

 For Proof of Solvency circuit

 | VK Gen | Pk Gen | Proof Generation | Proof Verification | Proof Size (bytes) |
 | -------- | --------- | ---------------- | ------------------ | ------------------ |
-| 63.22 ms | 27.075 ms | 133.82 ms | 3.4476 ms | 1568 |
+| 32.86 ms | 31.76 ms | 139.60 ms | 4.09 ms | 1568 |

 Gas cost to verify proof of solvency
4 changes: 2 additions & 2 deletions zk_prover/benches/full_solvency_flow.rs
@@ -16,8 +16,8 @@ use summa_solvency::{

 const SAMPLE_SIZE: usize = 10;
 const LEVELS: usize = 15;
-const N_ASSETS: usize = 2;
-const PATH_NAME: &str = "two_assets";
+const N_ASSETS: usize = 1;
+const PATH_NAME: &str = "one_asset";
 const N_BYTES: usize = 14;

 fn build_mstree(_c: &mut Criterion) {
2 changes: 1 addition & 1 deletion zk_prover/src/merkle_sum_tree/mst.rs
@@ -26,7 +26,7 @@ pub struct MerkleSumTree<const N_ASSETS: usize, const N_BYTES: usize> {
 }

 impl<const N_ASSETS: usize, const N_BYTES: usize> MerkleSumTree<N_ASSETS, N_BYTES> {
-    pub const MAX_DEPTH: usize = 27;
+    pub const MAX_DEPTH: usize = 29;

     /// Builds a Merkle Sum Tree from a CSV file stored at `path`. The CSV file must be formatted as follows:
     ///
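For context on the constant bump: a tree of depth `d` holds up to 2^d leaves, so the 2^28-user benchmark needs depth 28, and `MAX_DEPTH = 29` leaves one level of headroom. A quick sanity check (plain Rust, not code from the crate):

```rust
// Smallest depth d with 2^d >= n_entries.
fn required_depth(n_entries: usize) -> u32 {
    n_entries.next_power_of_two().trailing_zeros()
}

fn main() {
    assert_eq!(required_depth(1 << 28), 28); // 268435456 users
    assert!(required_depth(1 << 28) <= 29); // within the new MAX_DEPTH
}
```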
64 changes: 17 additions & 47 deletions zk_prover/src/merkle_sum_tree/utils/build_tree.rs
@@ -1,6 +1,6 @@
 use crate::merkle_sum_tree::{Entry, Node};
 use halo2_proofs::halo2curves::bn256::Fr as Fp;
-use std::thread;
+use rayon::prelude::*;

 pub fn build_merkle_tree_from_entries<const N_ASSETS: usize>(
     entries: &[Entry<N_ASSETS>],
@@ -39,7 +39,7 @@ where
     build_leaves_level(entries, &mut tree);

     for level in 1..=depth {
-        build_middle_level(level, &mut tree, n)
+        build_middle_level(level, &mut tree)
     }

     let root = tree[depth][0].clone();
@@ -53,57 +53,27 @@ fn build_leaves_level<const N_ASSETS: usize>(
 ) where
     [usize; N_ASSETS + 1]: Sized,
 {
-    // Compute the leaves in parallel
-    let mut handles = vec![];
-    let chunk_size = (entries.len() + num_cpus::get() - 1) / num_cpus::get();
-    for chunk in entries.chunks(chunk_size) {
-        let chunk = chunk.to_vec();
-        handles.push(thread::spawn(move || {
-            chunk
-                .into_iter()
-                .map(|entry| entry.compute_leaf())
-                .collect::<Vec<_>>()
-        }));
-    }
+    let results = entries
+        .par_iter()
+        .map(|entry| entry.compute_leaf())
+        .collect::<Vec<_>>();

-    let mut index = 0;
-    for handle in handles {
-        let result = handle.join().unwrap();
-        for leaf in result {
-            tree[0][index] = leaf;
-            index += 1;
-        }
+    for (index, node) in results.iter().enumerate() {
+        tree[0][index] = node.clone();
     }
 }

-fn build_middle_level<const N_ASSETS: usize>(
-    level: usize,
-    tree: &mut [Vec<Node<N_ASSETS>>],
-    n: usize,
-) where
+fn build_middle_level<const N_ASSETS: usize>(level: usize, tree: &mut [Vec<Node<N_ASSETS>>])
+where
     [usize; 2 * (1 + N_ASSETS)]: Sized,
 {
-    let nodes_in_level = (n + (1 << level) - 1) / (1 << level);
-
-    let mut handles = vec![];
-    let chunk_size = (nodes_in_level + num_cpus::get() - 1) / num_cpus::get();
-
-    for chunk in tree[level - 1].chunks(chunk_size * 2) {
-        let chunk = chunk.to_vec();
-        handles.push(thread::spawn(move || {
-            chunk
-                .chunks(2)
-                .map(|pair| Node::middle(&pair[0], &pair[1]))
-                .collect::<Vec<_>>()
-        }));
-    }
+    let results: Vec<Node<N_ASSETS>> = (0..tree[level - 1].len())
+        .into_par_iter()
+        .step_by(2)
+        .map(|index| Node::middle(&tree[level - 1][index], &tree[level - 1][index + 1]))
+        .collect();

-    let mut index = 0;
-    for handle in handles {
-        let result = handle.join().unwrap();
-        for node in result {
-            tree[level][index] = node;
-            index += 1;
-        }
+    for (index, new_node) in results.into_iter().enumerate() {
+        tree[level][index] = new_node;
     }
 }
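The rewritten `build_middle_level` pairs adjacent children by stepping a parallel index range by two, then writes the collected parents into `tree[level]`; collecting first appears to sidestep borrowing two levels of `tree` at once inside the parallel closure. The pairing pattern in isolation, with integer addition standing in for `Node::middle`:

```rust
use rayon::prelude::*;

fn main() {
    let children: Vec<u64> = (0..8).collect();

    // Combine index 0 with 1, 2 with 3, and so on, in parallel.
    let parents: Vec<u64> = (0..children.len())
        .into_par_iter()
        .step_by(2)
        .map(|i| children[i] + children[i + 1]) // stand-in for Node::middle
        .collect();

    assert_eq!(parents, vec![1, 5, 9, 13]);
}
```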
