From e95a3f9822e56377dc547c21fa21139c1cc08ba4 Mon Sep 17 00:00:00 2001 From: FiveMovesAhead Date: Tue, 23 Apr 2024 13:04:31 +0800 Subject: [PATCH] Initial version --- .github/workflows/build_algorithm.yml | 71 ++ .github/workflows/build_benchmarker.yml | 40 + .github/workflows/test_workspace.yml | 41 + .gitignore | 2 + Cargo.toml | 24 + tig-algorithms/Cargo.toml | 15 + tig-algorithms/src/knapsack/mod.rs | 100 ++ tig-algorithms/src/lib.rs | 3 + tig-algorithms/src/satisfiability/mod.rs | 100 ++ tig-algorithms/src/vehicle_routing/mod.rs | 100 ++ tig-api/Cargo.toml | 22 + tig-api/src/lib.rs | 143 +++ tig-benchmarker/Cargo.toml | 43 + tig-benchmarker/src/benchmarker.rs | 969 ++++++++++++++++++ tig-benchmarker/src/future_utils.rs | 66 ++ tig-benchmarker/src/lib.rs | 34 + tig-challenges/.gitignore | 1 + tig-challenges/Cargo.toml | 13 + tig-challenges/src/knapsack.rs | 130 +++ tig-challenges/src/lib.rs | 37 + tig-challenges/src/satisfiability.rs | 116 +++ tig-challenges/src/vehicle_routing.rs | 207 ++++ tig-protocol/.gitignore | 1 + tig-protocol/Cargo.toml | 17 + tig-protocol/src/add_block.rs | 1099 +++++++++++++++++++++ tig-protocol/src/context.rs | 210 ++++ tig-protocol/src/error.rs | 252 +++++ tig-protocol/src/lib.rs | 66 ++ tig-protocol/src/submit_algorithm.rs | 95 ++ tig-protocol/src/submit_benchmark.rs | 284 ++++++ tig-protocol/src/submit_proof.rs | 158 +++ tig-protocol/src/verify_proof.rs | 79 ++ tig-structs/.gitignore | 1 + tig-structs/Cargo.toml | 14 + tig-structs/src/api.rs | 171 ++++ tig-structs/src/config.rs | 95 ++ tig-structs/src/core.rs | 270 +++++ tig-structs/src/lib.rs | 42 + tig-utils/Cargo.toml | 45 + tig-utils/src/eth.rs | 53 + tig-utils/src/frontiers.rs | 206 ++++ tig-utils/src/hash.rs | 15 + tig-utils/src/json.rs | 60 ++ tig-utils/src/lib.rs | 14 + tig-utils/src/number.rs | 355 +++++++ tig-utils/src/request.rs | 220 +++++ tig-wasm/Cargo.toml | 19 + tig-wasm/build.rs | 24 + tig-wasm/src/entry_point_template.rs | 26 + tig-wasm/src/lib.rs | 2 + tig-worker/Cargo.toml | 20 + tig-worker/src/lib.rs | 2 + tig-worker/src/main.rs | 161 +++ tig-worker/src/worker.rs | 165 ++++ 54 files changed, 6518 insertions(+) create mode 100644 .github/workflows/build_algorithm.yml create mode 100644 .github/workflows/build_benchmarker.yml create mode 100644 .github/workflows/test_workspace.yml create mode 100644 Cargo.toml create mode 100644 tig-algorithms/Cargo.toml create mode 100644 tig-algorithms/src/knapsack/mod.rs create mode 100644 tig-algorithms/src/lib.rs create mode 100644 tig-algorithms/src/satisfiability/mod.rs create mode 100644 tig-algorithms/src/vehicle_routing/mod.rs create mode 100644 tig-api/Cargo.toml create mode 100644 tig-api/src/lib.rs create mode 100644 tig-benchmarker/Cargo.toml create mode 100644 tig-benchmarker/src/benchmarker.rs create mode 100644 tig-benchmarker/src/future_utils.rs create mode 100644 tig-benchmarker/src/lib.rs create mode 100644 tig-challenges/.gitignore create mode 100644 tig-challenges/Cargo.toml create mode 100644 tig-challenges/src/knapsack.rs create mode 100644 tig-challenges/src/lib.rs create mode 100644 tig-challenges/src/satisfiability.rs create mode 100644 tig-challenges/src/vehicle_routing.rs create mode 100644 tig-protocol/.gitignore create mode 100644 tig-protocol/Cargo.toml create mode 100644 tig-protocol/src/add_block.rs create mode 100644 tig-protocol/src/context.rs create mode 100644 tig-protocol/src/error.rs create mode 100644 tig-protocol/src/lib.rs create mode 100644 tig-protocol/src/submit_algorithm.rs create mode 100644 
tig-protocol/src/submit_benchmark.rs
 create mode 100644 tig-protocol/src/submit_proof.rs
 create mode 100644 tig-protocol/src/verify_proof.rs
 create mode 100644 tig-structs/.gitignore
 create mode 100644 tig-structs/Cargo.toml
 create mode 100644 tig-structs/src/api.rs
 create mode 100644 tig-structs/src/config.rs
 create mode 100644 tig-structs/src/core.rs
 create mode 100644 tig-structs/src/lib.rs
 create mode 100644 tig-utils/Cargo.toml
 create mode 100644 tig-utils/src/eth.rs
 create mode 100644 tig-utils/src/frontiers.rs
 create mode 100644 tig-utils/src/hash.rs
 create mode 100644 tig-utils/src/json.rs
 create mode 100644 tig-utils/src/lib.rs
 create mode 100644 tig-utils/src/number.rs
 create mode 100644 tig-utils/src/request.rs
 create mode 100644 tig-wasm/Cargo.toml
 create mode 100644 tig-wasm/build.rs
 create mode 100644 tig-wasm/src/entry_point_template.rs
 create mode 100644 tig-wasm/src/lib.rs
 create mode 100644 tig-worker/Cargo.toml
 create mode 100644 tig-worker/src/lib.rs
 create mode 100644 tig-worker/src/main.rs
 create mode 100644 tig-worker/src/worker.rs

diff --git a/.github/workflows/build_algorithm.yml b/.github/workflows/build_algorithm.yml
new file mode 100644
index 00000000..4206162a
--- /dev/null
+++ b/.github/workflows/build_algorithm.yml
@@ -0,0 +1,71 @@
+name: Build Algorithm
+
+on:
+  push:
+    branches:
+      - 'satisfiability/*'
+      - 'vehicle_routing/*'
+      - 'knapsack/*'
+      - 'test/satisfiability/*'
+      - 'test/vehicle_routing/*'
+      - 'test/knapsack/*'
+      - 'dev/satisfiability/*'
+      - 'dev/vehicle_routing/*'
+      - 'dev/knapsack/*'
+
+jobs:
+  build_wasm:
+    name: Compile Algorithm to WASM
+    runs-on: ubuntu-latest
+    permissions:
+      contents: write
+      statuses: write
+    steps:
+      - uses: actions/checkout@v4
+      - name: Set Env Vars
+        run: |
+          CHALLENGE=`echo $GITHUB_REF_NAME | rev | cut -d/ -f2 | rev`
+          ALGORITHM=`echo $GITHUB_REF_NAME | rev | cut -d/ -f1 | rev`
+          WASM_PATH=tig-algorithms/wasm/${CHALLENGE}/${ALGORITHM}.wasm
+          if [ -f $WASM_PATH ]; then
+            echo "SKIP_JOB=true" >> $GITHUB_ENV
+          else
+            echo "SKIP_JOB=false" >> $GITHUB_ENV
+          fi
+          echo "CHALLENGE=$CHALLENGE" >> $GITHUB_ENV
+          echo "ALGORITHM=$ALGORITHM" >> $GITHUB_ENV
+          echo "WASM_PATH=$WASM_PATH" >> $GITHUB_ENV
+      - uses: dtolnay/rust-toolchain@stable
+        with:
+          targets: wasm32-wasi
+      - name: Cargo Build
+        if: env.SKIP_JOB != 'true'
+        run: >
+          CHALLENGE=${{ env.CHALLENGE }}
+          ALGORITHM=${{ env.ALGORITHM }}
+          cargo build -p tig-wasm --target wasm32-wasi --release --features entry-point;
+          mkdir -p tig-algorithms/wasm/${{ env.CHALLENGE }};
+      - name: Optimize WASM
+        if: env.SKIP_JOB != 'true'
+        uses: NiklasEi/wasm-opt-action@v2
+        with:
+          file: target/wasm32-wasi/release/tig_wasm.wasm
+          output: tig-algorithms/wasm/${{ env.CHALLENGE }}/${{ env.ALGORITHM }}.wasm
+          options: -O2 --remove-imports
+      - name: Auto commit
+        if: env.SKIP_JOB != 'true'
+        id: auto_commit
+        uses: stefanzweifel/git-auto-commit-action@v5
+        with:
+          commit_message: Compiled algorithm ${{ env.ALGORITHM }} into WASM
+      - name: Update Commit Status (Success)
+        if: env.SKIP_JOB != 'true' && success()
+        uses: myrotvorets/set-commit-status-action@master
+        with:
+          status: 'success'
+          sha: ${{ steps.auto_commit.outputs.commit_hash }}
+      - name: Update Commit Status (Failure)
+        if: env.SKIP_JOB != 'true' && failure()
+        uses: myrotvorets/set-commit-status-action@master
+        with:
+          status: 'failure'
diff --git a/.github/workflows/build_benchmarker.yml b/.github/workflows/build_benchmarker.yml
new file mode 100644
index 00000000..20aeba3b
--- /dev/null
+++ b/.github/workflows/build_benchmarker.yml
@@ -0,0 +1,40 @@ +name: Build Benchmarker + +on: + push: + branches: + - 'benchmarker/*' + - 'test/benchmarker/*' + - 'dev/benchmarker/*' + +jobs: + build_wasm: + name: Compile Benchmarker to WASM + runs-on: ubuntu-latest + permissions: + contents: write + statuses: write + steps: + - uses: actions/checkout@v4 + - name: Install + run: curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh + - name: Cargo Build + run: > + wasm-pack build tig-benchmarker --release --target web; + rm tig-benchmarker/wasm/.gitignore; + - name: Auto commit + id: auto_commit + uses: stefanzweifel/git-auto-commit-action@v5 + with: + commit_message: Add compiled WASM for benchmarker + - name: Update Commit Status (Success) + if: success() + uses: myrotvorets/set-commit-status-action@master + with: + status: 'success' + sha: ${{ steps.auto_commit.outputs.commit_hash }} + - name: Update Commit Status (Failure) + if: failure() + uses: myrotvorets/set-commit-status-action@master + with: + status: 'failure' diff --git a/.github/workflows/test_workspace.yml b/.github/workflows/test_workspace.yml new file mode 100644 index 00000000..46209aee --- /dev/null +++ b/.github/workflows/test_workspace.yml @@ -0,0 +1,41 @@ +name: Test Workspace + +on: + push: + branches-ignore: + - 'benchmarker/*' + - 'satisfiability/*' + - 'vehicle_routing/*' + - 'knapsack/*' + - 'test/benchmarker/*' + - 'test/satisfiability/*' + - 'test/vehicle_routing/*' + - 'test/knapsack/*' + - 'dev/benchmarker/*' + - 'dev/satisfiability/*' + - 'dev/vehicle_routing/*' + - 'dev/knapsack/*' + +jobs: + test_workspace: + name: Test Workspace + permissions: + statuses: write + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + with: + targets: x86_64-unknown-linux-gnu + - name: Cargo Test + run: cargo test + - name: Update Commit Status (Success) + if: success() + uses: myrotvorets/set-commit-status-action@master + with: + status: 'success' + - name: Update Commit Status (Failure) + if: failure() + uses: myrotvorets/set-commit-status-action@master + with: + status: 'failure' \ No newline at end of file diff --git a/.gitignore b/.gitignore index 6985cf1b..99f32d54 100644 --- a/.gitignore +++ b/.gitignore @@ -12,3 +12,5 @@ Cargo.lock # MSVC Windows builds of rustc generate these, which store debugging information *.pdb + +.vscode/ \ No newline at end of file diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 00000000..cf67c127 --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,24 @@ +[workspace] +members = [ + "tig-algorithms", + "tig-api", + "tig-benchmarker", + "tig-challenges", + "tig-protocol", + "tig-structs", + "tig-utils", + "tig-wasm", + "tig-worker", +] +exclude = [] +resolver = "2" + +[workspace.package] +authors = ["TIG UG ", "Ying Chan "] +repository = "https://github.com/tig-foundation/tig-monorepo" +edition = "2021" +readme = "README.md" + +[profile.release] +lto = true +codegen-units = 1 diff --git a/tig-algorithms/Cargo.toml b/tig-algorithms/Cargo.toml new file mode 100644 index 00000000..3f85cf10 --- /dev/null +++ b/tig-algorithms/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "tig-algorithms" +version = "0.1.0" +authors.workspace = true +repository.workspace = true +edition.workspace = true + +[dependencies] +anyhow = "1.0.81" +ndarray = "0.15.6" +rand = { version = "0.8.5", default-features = false, features = ["std_rng"] } +tig-challenges = { path = "../tig-challenges" } + +[lib] +crate-type = ["cdylib", "rlib"] diff --git a/tig-algorithms/src/knapsack/mod.rs 
b/tig-algorithms/src/knapsack/mod.rs new file mode 100644 index 00000000..a6baab13 --- /dev/null +++ b/tig-algorithms/src/knapsack/mod.rs @@ -0,0 +1,100 @@ +// c003_a001 placeholder +// c003_a002 placeholder +// c003_a003 placeholder +// c003_a004 placeholder +// c003_a005 placeholder +// c003_a006 placeholder +// c003_a007 placeholder +// c003_a008 placeholder +// c003_a009 placeholder +// c003_a010 placeholder +// c003_a011 placeholder +// c003_a012 placeholder +// c003_a013 placeholder +// c003_a014 placeholder +// c003_a015 placeholder +// c003_a016 placeholder +// c003_a017 placeholder +// c003_a018 placeholder +// c003_a019 placeholder +// c003_a020 placeholder +// c003_a021 placeholder +// c003_a022 placeholder +// c003_a023 placeholder +// c003_a024 placeholder +// c003_a025 placeholder +// c003_a026 placeholder +// c003_a027 placeholder +// c003_a028 placeholder +// c003_a029 placeholder +// c003_a030 placeholder +// c003_a031 placeholder +// c003_a032 placeholder +// c003_a033 placeholder +// c003_a034 placeholder +// c003_a035 placeholder +// c003_a036 placeholder +// c003_a037 placeholder +// c003_a038 placeholder +// c003_a039 placeholder +// c003_a040 placeholder +// c003_a041 placeholder +// c003_a042 placeholder +// c003_a043 placeholder +// c003_a044 placeholder +// c003_a045 placeholder +// c003_a046 placeholder +// c003_a047 placeholder +// c003_a048 placeholder +// c003_a049 placeholder +// c003_a050 placeholder +// c003_a051 placeholder +// c003_a052 placeholder +// c003_a053 placeholder +// c003_a054 placeholder +// c003_a055 placeholder +// c003_a056 placeholder +// c003_a057 placeholder +// c003_a058 placeholder +// c003_a059 placeholder +// c003_a060 placeholder +// c003_a061 placeholder +// c003_a062 placeholder +// c003_a063 placeholder +// c003_a064 placeholder +// c003_a065 placeholder +// c003_a066 placeholder +// c003_a067 placeholder +// c003_a068 placeholder +// c003_a069 placeholder +// c003_a070 placeholder +// c003_a071 placeholder +// c003_a072 placeholder +// c003_a073 placeholder +// c003_a074 placeholder +// c003_a075 placeholder +// c003_a076 placeholder +// c003_a077 placeholder +// c003_a078 placeholder +// c003_a079 placeholder +// c003_a080 placeholder +// c003_a081 placeholder +// c003_a082 placeholder +// c003_a083 placeholder +// c003_a084 placeholder +// c003_a085 placeholder +// c003_a086 placeholder +// c003_a087 placeholder +// c003_a088 placeholder +// c003_a089 placeholder +// c003_a090 placeholder +// c003_a091 placeholder +// c003_a092 placeholder +// c003_a093 placeholder +// c003_a094 placeholder +// c003_a095 placeholder +// c003_a096 placeholder +// c003_a097 placeholder +// c003_a098 placeholder +// c003_a099 placeholder +// c003_a100 placeholder diff --git a/tig-algorithms/src/lib.rs b/tig-algorithms/src/lib.rs new file mode 100644 index 00000000..6181d659 --- /dev/null +++ b/tig-algorithms/src/lib.rs @@ -0,0 +1,3 @@ +pub mod knapsack; +pub mod satisfiability; +pub mod vehicle_routing; diff --git a/tig-algorithms/src/satisfiability/mod.rs b/tig-algorithms/src/satisfiability/mod.rs new file mode 100644 index 00000000..71a70334 --- /dev/null +++ b/tig-algorithms/src/satisfiability/mod.rs @@ -0,0 +1,100 @@ +// c001_a001 placeholder +// c001_a002 placeholder +// c001_a003 placeholder +// c001_a004 placeholder +// c001_a005 placeholder +// c001_a006 placeholder +// c001_a007 placeholder +// c001_a008 placeholder +// c001_a009 placeholder +// c001_a010 placeholder +// c001_a011 placeholder +// c001_a012 placeholder +// c001_a013 
placeholder +// c001_a014 placeholder +// c001_a015 placeholder +// c001_a016 placeholder +// c001_a017 placeholder +// c001_a018 placeholder +// c001_a019 placeholder +// c001_a020 placeholder +// c001_a021 placeholder +// c001_a022 placeholder +// c001_a023 placeholder +// c001_a024 placeholder +// c001_a025 placeholder +// c001_a026 placeholder +// c001_a027 placeholder +// c001_a028 placeholder +// c001_a029 placeholder +// c001_a030 placeholder +// c001_a031 placeholder +// c001_a032 placeholder +// c001_a033 placeholder +// c001_a034 placeholder +// c001_a035 placeholder +// c001_a036 placeholder +// c001_a037 placeholder +// c001_a038 placeholder +// c001_a039 placeholder +// c001_a040 placeholder +// c001_a041 placeholder +// c001_a042 placeholder +// c001_a043 placeholder +// c001_a044 placeholder +// c001_a045 placeholder +// c001_a046 placeholder +// c001_a047 placeholder +// c001_a048 placeholder +// c001_a049 placeholder +// c001_a050 placeholder +// c001_a051 placeholder +// c001_a052 placeholder +// c001_a053 placeholder +// c001_a054 placeholder +// c001_a055 placeholder +// c001_a056 placeholder +// c001_a057 placeholder +// c001_a058 placeholder +// c001_a059 placeholder +// c001_a060 placeholder +// c001_a061 placeholder +// c001_a062 placeholder +// c001_a063 placeholder +// c001_a064 placeholder +// c001_a065 placeholder +// c001_a066 placeholder +// c001_a067 placeholder +// c001_a068 placeholder +// c001_a069 placeholder +// c001_a070 placeholder +// c001_a071 placeholder +// c001_a072 placeholder +// c001_a073 placeholder +// c001_a074 placeholder +// c001_a075 placeholder +// c001_a076 placeholder +// c001_a077 placeholder +// c001_a078 placeholder +// c001_a079 placeholder +// c001_a080 placeholder +// c001_a081 placeholder +// c001_a082 placeholder +// c001_a083 placeholder +// c001_a084 placeholder +// c001_a085 placeholder +// c001_a086 placeholder +// c001_a087 placeholder +// c001_a088 placeholder +// c001_a089 placeholder +// c001_a090 placeholder +// c001_a091 placeholder +// c001_a092 placeholder +// c001_a093 placeholder +// c001_a094 placeholder +// c001_a095 placeholder +// c001_a096 placeholder +// c001_a097 placeholder +// c001_a098 placeholder +// c001_a099 placeholder +// c001_a100 placeholder diff --git a/tig-algorithms/src/vehicle_routing/mod.rs b/tig-algorithms/src/vehicle_routing/mod.rs new file mode 100644 index 00000000..62bb1af6 --- /dev/null +++ b/tig-algorithms/src/vehicle_routing/mod.rs @@ -0,0 +1,100 @@ +// c002_a001 placeholder +// c002_a002 placeholder +// c002_a003 placeholder +// c002_a004 placeholder +// c002_a005 placeholder +// c002_a006 placeholder +// c002_a007 placeholder +// c002_a008 placeholder +// c002_a009 placeholder +// c002_a010 placeholder +// c002_a011 placeholder +// c002_a012 placeholder +// c002_a013 placeholder +// c002_a014 placeholder +// c002_a015 placeholder +// c002_a016 placeholder +// c002_a017 placeholder +// c002_a018 placeholder +// c002_a019 placeholder +// c002_a020 placeholder +// c002_a021 placeholder +// c002_a022 placeholder +// c002_a023 placeholder +// c002_a024 placeholder +// c002_a025 placeholder +// c002_a026 placeholder +// c002_a027 placeholder +// c002_a028 placeholder +// c002_a029 placeholder +// c002_a030 placeholder +// c002_a031 placeholder +// c002_a032 placeholder +// c002_a033 placeholder +// c002_a034 placeholder +// c002_a035 placeholder +// c002_a036 placeholder +// c002_a037 placeholder +// c002_a038 placeholder +// c002_a039 placeholder +// c002_a040 placeholder +// c002_a041 
placeholder +// c002_a042 placeholder +// c002_a043 placeholder +// c002_a044 placeholder +// c002_a045 placeholder +// c002_a046 placeholder +// c002_a047 placeholder +// c002_a048 placeholder +// c002_a049 placeholder +// c002_a050 placeholder +// c002_a051 placeholder +// c002_a052 placeholder +// c002_a053 placeholder +// c002_a054 placeholder +// c002_a055 placeholder +// c002_a056 placeholder +// c002_a057 placeholder +// c002_a058 placeholder +// c002_a059 placeholder +// c002_a060 placeholder +// c002_a061 placeholder +// c002_a062 placeholder +// c002_a063 placeholder +// c002_a064 placeholder +// c002_a065 placeholder +// c002_a066 placeholder +// c002_a067 placeholder +// c002_a068 placeholder +// c002_a069 placeholder +// c002_a070 placeholder +// c002_a071 placeholder +// c002_a072 placeholder +// c002_a073 placeholder +// c002_a074 placeholder +// c002_a075 placeholder +// c002_a076 placeholder +// c002_a077 placeholder +// c002_a078 placeholder +// c002_a079 placeholder +// c002_a080 placeholder +// c002_a081 placeholder +// c002_a082 placeholder +// c002_a083 placeholder +// c002_a084 placeholder +// c002_a085 placeholder +// c002_a086 placeholder +// c002_a087 placeholder +// c002_a088 placeholder +// c002_a089 placeholder +// c002_a090 placeholder +// c002_a091 placeholder +// c002_a092 placeholder +// c002_a093 placeholder +// c002_a094 placeholder +// c002_a095 placeholder +// c002_a096 placeholder +// c002_a097 placeholder +// c002_a098 placeholder +// c002_a099 placeholder +// c002_a100 placeholder diff --git a/tig-api/Cargo.toml b/tig-api/Cargo.toml new file mode 100644 index 00000000..4808aea8 --- /dev/null +++ b/tig-api/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "tig-api" +version = "0.1.0" +authors.workspace = true +repository.workspace = true +edition.workspace = true +readme.workspace = true + +[lib] +crate-type = ["cdylib", "rlib"] + +[dependencies] +anyhow = "1.0.81" +query_map = { version = "0.7.0", features = ["url-query"] } +serde = { version = "1.0.196", features = ["derive"] } +serde_json = { version = "1.0.113" } +tig-utils = { path = "../tig-utils" } +tig-structs = { path = "../tig-structs" } + +[features] +request = ["tig-utils/request"] +request-js = ["tig-utils/request-js"] diff --git a/tig-api/src/lib.rs b/tig-api/src/lib.rs new file mode 100644 index 00000000..567f3c8a --- /dev/null +++ b/tig-api/src/lib.rs @@ -0,0 +1,143 @@ +#[cfg(not(any(feature = "request", feature = "request-js")))] +compile_error!("Either feature `request` or `request-js` must be enabled"); +#[cfg(all(feature = "request", feature = "request-js"))] +compile_error!("features `request` and `request-js` are mutually exclusive"); + +use anyhow::{anyhow, Result}; +use query_map::QueryMap; +use serde::de::DeserializeOwned; +use std::{collections::HashMap, vec}; +pub use tig_structs::api::*; +use tig_utils::{dejsonify, get, jsonify, post}; + +pub struct Api { + api_url: String, + api_key: String, +} + +impl Api { + pub fn new(api_url: String, api_key: String) -> Self { + Self { api_url, api_key } + } + + async fn get(&self, path: String) -> Result + where + T: DeserializeOwned, + { + let resp = get::( + format!("{}/{}", self.api_url, path).as_str(), + Some( + vec![ + ("x-api-key".to_string(), self.api_key.clone()), + ("user-agent".to_string(), "TIG API".to_string()), + ] + .into_iter() + .collect(), + ), + ) + .await?; + dejsonify::(&resp).map_err(|e| anyhow!("Failed to dejsonify: {}", e)) + } + async fn post(&self, path: String, body: String) -> Result + where + T: 
DeserializeOwned, + { + let resp = post::( + format!("{}/{}", self.api_url, path).as_str(), + body.as_str(), + Some( + vec![ + ("x-api-key".to_string(), self.api_key.clone()), + ("user-agent".to_string(), "TIG API".to_string()), + ] + .into_iter() + .collect(), + ), + ) + .await?; + dejsonify::(&resp).map_err(|e| anyhow!("Failed to dejsonify: {}", e)) + } + + pub async fn get_challenges(&self, req: GetChallengesReq) -> Result { + let mut query = HashMap::::new(); + if let Some(block_id) = req.block_id { + query.insert("block_id".to_string(), block_id); + } + let query = QueryMap::from(query); + self.get(format!("get-challenges?{}", query.to_query_string())) + .await + } + + pub async fn get_algorithms(&self, req: GetAlgorithmsReq) -> Result { + let mut query = HashMap::::new(); + if let Some(block_id) = req.block_id { + query.insert("block_id".to_string(), block_id); + } + let query = QueryMap::from(query); + self.get(format!("get-algorithms?{}", query.to_query_string())) + .await + } + + pub async fn get_players(&self, req: GetPlayersReq) -> Result { + let mut query = HashMap::::new(); + if let Some(block_id) = req.block_id { + query.insert("block_id".to_string(), block_id); + } + query.insert("player_type".to_string(), req.player_type.to_string()); + let query = QueryMap::from(query); + self.get(format!("get-players?{}", query.to_query_string())) + .await + } + + pub async fn get_benchmarks(&self, req: GetBenchmarksReq) -> Result { + let mut query = HashMap::::new(); + if let Some(block_id) = req.block_id { + query.insert("block_id".to_string(), block_id); + } + query.insert("player_id".to_string(), req.player_id); + let query = QueryMap::from(query); + self.get(format!("get-benchmarks?{}", query.to_query_string())) + .await + } + + pub async fn get_benchmark_data( + &self, + req: GetBenchmarkDataReq, + ) -> Result { + let mut query = HashMap::::new(); + query.insert("benchmark_id".to_string(), req.benchmark_id); + let query = QueryMap::from(query); + self.get(format!("get-benchmark-data?{}", query.to_query_string())) + .await + } + + pub async fn get_block(&self, req: GetBlockReq) -> Result { + let mut query = HashMap::::new(); + if let Some(id) = req.id { + query.insert("id".to_string(), id); + } + if let Some(height) = req.height { + query.insert("height".to_string(), height.to_string()); + } + if let Some(round) = req.round { + query.insert("round".to_string(), round.to_string()); + } + let query = QueryMap::from(query); + self.get(format!("get-block?{}", query.to_query_string())) + .await + } + + pub async fn submit_algorithm(&self, req: SubmitAlgorithmReq) -> Result { + self.post("submit-algorithm".to_string(), jsonify(&req)) + .await + } + + pub async fn submit_benchmark(&self, req: SubmitBenchmarkReq) -> Result { + self.post("submit-benchmark".to_string(), jsonify(&req)) + .await + } + + pub async fn submit_proof(&self, req: SubmitProofReq) -> Result { + self.post("submit-proof".to_string(), jsonify(&req)).await + } +} diff --git a/tig-benchmarker/Cargo.toml b/tig-benchmarker/Cargo.toml new file mode 100644 index 00000000..9be9897d --- /dev/null +++ b/tig-benchmarker/Cargo.toml @@ -0,0 +1,43 @@ +[package] +name = "tig-benchmarker" +version = "0.1.0" +authors.workspace = true +repository.workspace = true +edition.workspace = true + +[dependencies] +anyhow = "1.0.81" +futures = { version = "0.3.30" } +gloo-timers = { version = "0.3.0", optional = true, features = ["futures"] } +js-sys = { version = "0.3.68", optional = true } +once_cell = "1.19.0" +rand = { version = "0.8.5", 
default-features = false, features = ["std_rng"] }
+rand_distr = { version = "0.4.3", default-features = false, features = [
+    "alloc",
+] }
+serde = { version = "1.0", features = ["derive"] }
+serde-wasm-bindgen = { version = "0.6.5", optional = true }
+tig-api = { path = "../tig-api" }
+tig-structs = { path = "../tig-structs" }
+tig-utils = { path = "../tig-utils" }
+tig-worker = { path = "../tig-worker" }
+wasm-bindgen = { version = "0.2.91", features = [
+    "serde-serialize",
+], optional = true }
+wasm-bindgen-futures = { version = "0.4.41", optional = true }
+web-sys = { version = "0.3.68", features = ['console'], optional = true }
+
+[lib]
+crate-type = ["cdylib", "rlib"]
+
+[features]
+default = ["browser"]
+browser = [
+    "dep:gloo-timers",
+    "dep:wasm-bindgen",
+    "dep:js-sys",
+    "dep:serde-wasm-bindgen",
+    "dep:wasm-bindgen-futures",
+    "dep:web-sys",
+    "tig-api/request-js",
+]
diff --git a/tig-benchmarker/src/benchmarker.rs b/tig-benchmarker/src/benchmarker.rs
new file mode 100644
index 00000000..733722d9
--- /dev/null
+++ b/tig-benchmarker/src/benchmarker.rs
@@ -0,0 +1,969 @@
+use crate::future_utils::{self, time, Mutex};
+use once_cell::sync::OnceCell;
+use rand::{
+    distributions::{Alphanumeric, DistString, WeightedIndex},
+    rngs::StdRng,
+    SeedableRng,
+};
+use rand_distr::Distribution;
+use serde::Serialize;
+use std::collections::{HashMap, HashSet};
+use tig_api::*;
+use tig_structs::{config::WasmVMConfig, core::*};
+use tig_utils::*;
+use tig_worker::compute_solution;
+
+type Result<T> = std::result::Result<T, String>;
+
+#[cfg_attr(feature = "browser", wasm_bindgen::prelude::wasm_bindgen)]
+#[derive(Serialize, Clone, Debug)]
+pub struct Duration {
+    pub start: u64,
+    pub end: u64,
+    pub now: u64,
+}
+
+#[cfg_attr(feature = "browser", wasm_bindgen::prelude::wasm_bindgen)]
+#[derive(Serialize, Debug, Clone)]
+pub struct Job {
+    benchmark_id: String,
+    settings: BenchmarkSettings,
+    duration: Duration,
+    solution_signature_threshold: u32,
+    wasm_blob: Vec<u8>,
+    nonce_iter: NonceIterator,
+    wasm_vm_config: WasmVMConfig,
+}
+
+#[cfg_attr(feature = "browser", wasm_bindgen::prelude::wasm_bindgen)]
+#[derive(Serialize, Debug, Clone)]
+pub struct State {
+    running: bool,
+    status: HashMap<String, String>,
+    latest_block: Option<Block>,
+    benchmarker_data: Option<PlayerBlockData>,
+    challenges: Vec<Challenge>,
+    download_urls: HashMap<String, String>,
+    algorithms_by_challenge: HashMap<String, Vec<Algorithm>>,
+    selected_algorithms: HashMap<String, String>,
+    benchmarks: HashMap<String, Benchmark>,
+    proofs: HashMap<String, Proof>,
+    frauds: HashMap<String, Fraud>,
+    job: Option<Job>,
+}
+
+impl State {
+    pub fn new() -> State {
+        State {
+            running: false,
+            status: HashMap::new(),
+            latest_block: None,
+            algorithms_by_challenge: HashMap::new(),
+            selected_algorithms: HashMap::new(),
+            benchmarks: HashMap::new(),
+            proofs: HashMap::new(),
+            frauds: HashMap::new(),
+            benchmarker_data: None,
+            challenges: Vec::new(),
+            download_urls: HashMap::new(),
+            job: None,
+        }
+    }
+}
+
+#[cfg_attr(feature = "browser", wasm_bindgen::prelude::wasm_bindgen)]
+#[derive(Serialize, Debug, Clone)]
+pub struct NonceIterator {
+    nonces: Option<Vec<u32>>,
+    current: u32,
+    attempts: u32,
+}
+
+impl NonceIterator {
+    pub fn new(nonces: Option<Vec<u32>>) -> Self {
+        Self {
+            nonces,
+            current: 0,
+            attempts: 0,
+        }
+    }
+    pub fn attempts(&self) -> u32 {
+        self.attempts
+    }
+    pub fn is_recompute(&self) -> bool {
+        self.nonces.is_some()
+    }
+    pub fn is_finished(&self) -> bool {
+        self.nonces.as_ref().is_some_and(|x| x.is_empty()) || self.current == u32::MAX
+    }
+}
+impl Iterator for NonceIterator {
+    type Item = u32;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        if let Some(nonces) = self.nonces.as_mut() {
+            let value = nonces.pop();
+            self.attempts += value.is_some() as u32;
+            value
+        } else if self.current < u32::MAX {
+            let value = Some(self.current);
+            self.attempts += 1;
+            self.current += 1;
+            value
+        } else {
+            None
+        }
+    }
+}
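+
+// Illustrative behavior sketch (an added example, assuming a native non-browser
+// build of this crate): a fresh iterator counts nonces up from 0, while a
+// recompute iterator drains the sampled nonces it was given from the back;
+// `attempts` counts how many nonces were handed out in either mode.
+#[cfg(test)]
+mod nonce_iterator_example {
+    use super::NonceIterator;
+
+    #[test]
+    fn recompute_drains_sampled_nonces() {
+        let mut iter = NonceIterator::new(Some(vec![7, 3]));
+        assert!(iter.is_recompute());
+        assert_eq!(iter.next(), Some(3)); // pops from the end of the vec
+        assert_eq!(iter.next(), Some(7));
+        assert_eq!(iter.next(), None);
+        assert_eq!(iter.attempts(), 2);
+        assert!(iter.is_finished());
+    }
+}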
+
+static STATE: OnceCell<Mutex<State>> = OnceCell::new();
+static API: OnceCell<Api> = OnceCell::new();
+static PLAYER_ID: OnceCell<String> = OnceCell::new();
+const BLOCK_DATA_POLLER_ID: &'static str = "Block data poller";
+const WORKER_ID: &'static str = "Benchmark worker";
+const MANAGER_ID: &'static str = "Benchmark manager";
+const B_SUBMITTER_ID: &'static str = "Benchmark submitter";
+const P_SUBMITTER_ID: &'static str = "Proof submitter";
+
+pub fn mutex() -> &'static Mutex<State> {
+    STATE.get().unwrap_or_else(|| {
+        web_sys::console::error_1(&"mutex".to_string().into());
+        panic!("FIXME");
+    })
+}
+
+pub async fn start() {
+    let mut state = mutex().lock().await;
+    (*state).running = true;
+}
+
+pub async fn stop() {
+    let mut state = mutex().lock().await;
+    (*state).running = false;
+}
+
+pub async fn select_algorithm(challenge_id: String, algorithm_id: String) {
+    let mut state = mutex().lock().await;
+    (*state)
+        .selected_algorithms
+        .insert(challenge_id, algorithm_id);
+}
+
+pub async fn setup(api_url: String, api_key: String, player_id: String, num_workers: u32) {
+    STATE.get_or_init(|| Mutex::new(State::new()));
+    API.get_or_init(|| Api::new(api_url, api_key));
+    PLAYER_ID.get_or_init(|| player_id);
+
+    update_block_data().await.unwrap_or_else(|_| {
+        web_sys::console::error_1(&"setup".to_string().into());
+        panic!("FIXME");
+    });
+    future_utils::spawn(async {
+        update_status(BLOCK_DATA_POLLER_ID, "Running").await;
+        loop {
+            future_utils::sleep(30000).await;
+            if let Err(e) = update_block_data().await {
+                update_status(BLOCK_DATA_POLLER_ID, &e).await;
+            }
+        }
+    });
+
+    for _ in 0..num_workers {
+        future_utils::spawn(async {
+            update_status(WORKER_ID, "Stopped").await;
+            let mut curr_running: bool = false;
+            loop {
+                let next_running = { mutex().lock().await.running };
+                if curr_running != next_running {
+                    curr_running = next_running;
+                    if curr_running {
+                        update_status(WORKER_ID, "Starting").await;
+                    } else {
+                        update_status(WORKER_ID, "Stopped").await;
+                    }
+                }
+                if !curr_running {
+                    future_utils::sleep(5000).await;
+                    continue;
+                }
+                if let Err(e) = do_benchmark().await {
+                    update_status(WORKER_ID, &e.to_string()).await;
+                }
+                future_utils::sleep(1000).await;
+            }
+        });
+    }
+    future_utils::spawn(async {
+        update_status(MANAGER_ID, "Stopped").await;
+        let mut curr_running: bool = false;
+        loop {
+            let next_running = { mutex().lock().await.running };
+            if curr_running != next_running {
+                curr_running = next_running;
+                if curr_running {
+                    update_status(MANAGER_ID, "Starting").await;
+                } else {
+                    update_status(MANAGER_ID, "Stopped").await;
+                }
+            }
+            if !curr_running {
+                future_utils::sleep(5000).await;
+                continue;
+            }
+            if let Err(e) = do_manage_benchmark().await {
+                update_status(MANAGER_ID, &e.to_string()).await;
+                future_utils::sleep(5000).await;
+            }
+        }
+    });
+    future_utils::spawn(async {
+        update_status(B_SUBMITTER_ID, "Stopped").await;
+        let mut curr_running: bool = false;
+        loop {
+            let next_running = { mutex().lock().await.running };
+            if curr_running != next_running {
+                curr_running = next_running;
+                if curr_running {
+                    update_status(B_SUBMITTER_ID, "Starting").await;
+                } else {
+                    update_status(B_SUBMITTER_ID, "Stopped").await;
+                }
+            }
+            if !curr_running {
+                future_utils::sleep(5000).await;
+                continue;
+            }
+            if let Err(e) = do_submit_benchmark().await {
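+                // surface the failure in the status map and keep the submitter loop alive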
update_status(B_SUBMITTER_ID, &e.to_string()).await; + } + future_utils::sleep(5000).await; + } + }); + future_utils::spawn(async { + update_status(P_SUBMITTER_ID, "Stopped").await; + let mut curr_running: bool = false; + loop { + let next_running = { mutex().lock().await.running }; + if curr_running != next_running { + curr_running = next_running; + if curr_running { + update_status(P_SUBMITTER_ID, "Starting").await; + } else { + update_status(P_SUBMITTER_ID, "Stopped").await; + } + } + if !curr_running { + future_utils::sleep(5000).await; + continue; + } + if let Err(e) = do_submit_proof().await { + update_status(P_SUBMITTER_ID, &e.to_string()).await; + } + future_utils::sleep(5000).await; + } + }); +} + +async fn get_latest_block_id() -> String { + let state = mutex().lock().await; + state + .latest_block + .as_ref() + .unwrap_or_else(|| { + web_sys::console::error_1(&"get_latest_block_id".to_string().into()); + panic!("FIXME"); + }) + .id + .clone() +} + +async fn update_status(id: &str, status: &str) { + let s = format!("[{}]: {}", id, status); + println!("{}", s); + #[cfg(feature = "browser")] + web_sys::console::log_1(&s.into()); + let mut state = mutex().lock().await; + (*state).status.insert(id.to_string(), status.to_string()); +} + +async fn get_latest_block() -> Result { + let GetBlockResp { block, .. } = API + .get() + .unwrap_or_else(|| { + web_sys::console::error_1(&"get_latest_block".to_string().into()); + panic!("FIXME"); + }) + .get_block(GetBlockReq { + id: None, + round: None, + height: None, + include_data: false, + }) + .await + .map_err(|e| format!("Failed to get latest block: {:?}", e))?; + Ok(block.unwrap_or_else(|| { + web_sys::console::error_1(&"get_latest_block2".to_string().into()); + panic!("FIXME"); + })) +} + +async fn get_benchmarks() -> Result<(Vec, Vec, Vec)> { + let GetBenchmarksResp { + benchmarks, + proofs, + frauds, + .. + } = API + .get() + .unwrap_or_else(|| { + web_sys::console::error_1(&"get_benchmarks".to_string().into()); + panic!("FIXME"); + }) + .get_benchmarks(GetBenchmarksReq { + block_id: Some(get_latest_block_id().await), + player_id: PLAYER_ID + .get() + .unwrap_or_else(|| { + web_sys::console::error_1(&"get_benchmarks".to_string().into()); + panic!("FIXME"); + }) + .clone(), + }) + .await + .map_err(|e| format!("Failed to get benchmarks: {:?}", e))?; + Ok((benchmarks, proofs, frauds)) +} + +async fn get_benchmarker_data() -> Result> { + let GetPlayersResp { players, .. } = API + .get() + .unwrap_or_else(|| { + web_sys::console::error_1(&"get_benchmarker_data".to_string().into()); + panic!("FIXME"); + }) + .get_players(GetPlayersReq { + block_id: Some(get_latest_block_id().await), + player_type: PlayerType::Benchmarker, + }) + .await + .map_err(|e| format!("Failed to get players: {:?}", e))?; + Ok(players + .into_iter() + .find(|x| { + x.id == *PLAYER_ID.get().unwrap_or_else(|| { + web_sys::console::error_1(&"get_benchmarker_data2".to_string().into()); + panic!("FIXME"); + }) + }) + .map(|x| { + x.block_data.unwrap_or_else(|| { + web_sys::console::error_1(&"get_benchmarker_data3".to_string().into()); + panic!("FIXME"); + }) + })) +} + +async fn get_challenges() -> Result> { + let GetChallengesResp { challenges, .. 
} = API + .get() + .unwrap_or_else(|| { + web_sys::console::error_1(&"get_challenges".to_string().into()); + panic!("FIXME"); + }) + .get_challenges(GetChallengesReq { + block_id: Some(get_latest_block_id().await), + }) + .await + .map_err(|e| format!("Failed to get challenges: {:?}", e))?; + Ok(challenges) +} + +async fn get_algorithms() -> Result<(HashMap>, HashMap)> { + let GetAlgorithmsResp { + algorithms, wasms, .. + } = API + .get() + .unwrap_or_else(|| { + web_sys::console::error_1(&"get_algorithms".to_string().into()); + panic!("FIXME"); + }) + .get_algorithms(GetAlgorithmsReq { + block_id: Some(get_latest_block_id().await), + }) + .await + .map_err(|e| format!("Failed to get algorithms: {:?}", e))?; + let algorithms_by_challenge: HashMap> = + algorithms.into_iter().fold(HashMap::new(), |mut acc, x| { + acc.entry(x.details.challenge_id.clone()) + .or_default() + .push(x.clone()); + acc + }); + let download_urls = wasms + .into_iter() + .filter(|x| x.details.download_url.is_some()) + .map(|x| { + ( + x.algorithm_id, + x.details.download_url.unwrap_or_else(|| { + web_sys::console::error_1(&"get_algorithms2".to_string().into()); + panic!("FIXME"); + }), + ) + }) + .collect(); + Ok((algorithms_by_challenge, download_urls)) +} + +async fn update_block_data() -> Result<()> { + let block = get_latest_block().await?; + { + let mut state = mutex().lock().await; + (*state).latest_block = Some(block.clone()); + } + let results = future_utils::join( + get_algorithms(), + get_benchmarker_data(), + get_benchmarks(), + get_challenges(), + ) + .await?; + let mut state = mutex().lock().await; + let (algorithms_by_challenge, download_urls) = results.0?; + (*state).algorithms_by_challenge = algorithms_by_challenge; + (*state).download_urls = download_urls; + + let benchmarker_data = results.1?; + (*state).benchmarker_data = benchmarker_data; + + let (benchmarks, proofs, frauds) = results.2?; + (*state).benchmarks.retain(|_, x| { + x.details.block_started + >= block + .details + .height + .saturating_sub(block.config().benchmark_submissions.lifespan_period) + }); + let keys_to_keep: HashSet = state.benchmarks.keys().cloned().collect(); + (*state) + .proofs + .retain(|_, x| keys_to_keep.contains(&x.benchmark_id)); + (*state) + .frauds + .retain(|_, x| keys_to_keep.contains(&x.benchmark_id)); + for x in benchmarks { + (*state).benchmarks.insert(x.id.clone(), x); + } + for x in proofs { + (*state).proofs.insert(x.benchmark_id.clone(), x); + } + for x in frauds { + (*state).frauds.insert(x.benchmark_id.clone(), x); + } + + let challenges = results.3?; + (*state).challenges = challenges; + Ok(()) +} + +async fn find_settings_to_recompute() -> Option<(String, BenchmarkSettings, NonceIterator)> { + let state = mutex().lock().await; + for (benchmark_id, benchmark) in state.benchmarks.iter() { + if !state.proofs.contains_key(benchmark_id) && benchmark.state.is_some() { + let sampled_nonces = benchmark.state().sampled_nonces.clone().unwrap_or_else(|| { + web_sys::console::error_1(&"find_settings_to_recompute".to_string().into()); + panic!("FIXME"); + }); + println!("Sampled nonces: {:?}", sampled_nonces); + return Some(( + benchmark_id.clone(), + benchmark.settings.clone(), + NonceIterator::new(Some(sampled_nonces)), + )); + } + } + None +} + +async fn pick_settings_to_benchmark() -> (String, BenchmarkSettings, NonceIterator) { + let block_id = get_latest_block_id().await; + let state = mutex().lock().await; + let num_qualifiers_by_challenge = match &state.benchmarker_data { + Some(data) => 
data.num_qualifiers_by_challenge.clone().unwrap_or_else(|| { + web_sys::console::error_1(&"pick_settings_to_benchmark".to_string().into()); + panic!("FIXME"); + }), + None => HashMap::new(), + }; + let percent_qualifiers_by_challenge: HashMap = state + .challenges + .iter() + .map(|c| { + let player_num_qualifiers = *num_qualifiers_by_challenge.get(&c.id).unwrap_or(&0); + let challenge_num_qualifiers = *c.block_data().num_qualifiers(); + let percent = if player_num_qualifiers == 0 || challenge_num_qualifiers == 0 { + 0f64 + } else { + (player_num_qualifiers as f64) / (challenge_num_qualifiers as f64) + }; + (c.id.clone(), percent) + }) + .collect(); + let mut rng = StdRng::seed_from_u64(time() as u64); + let challenge_weights: Vec<(String, f64)> = state + .selected_algorithms + .keys() + .map(|challenge_id| { + ( + challenge_id.clone(), + 1f64 - percent_qualifiers_by_challenge[challenge_id] + 1e-10f64, + ) + }) + .collect(); + let dist = WeightedIndex::new( + &challenge_weights + .iter() + .map(|w| w.1.clone()) + .collect::>(), + ) + .unwrap_or_else(|_| { + web_sys::console::error_1(&"pick_settings_to_benchmark2".to_string().into()); + panic!("FIXME"); + }); + let index = dist.sample(&mut rng); + + let random_challenge_id = challenge_weights[index].0.clone(); + let selected_algorithm_id = state + .selected_algorithms + .get(&random_challenge_id) + .unwrap_or_else(|| { + web_sys::console::error_1(&"pick_settings_to_benchmark3".to_string().into()); + panic!("FIXME"); + }) + .clone(); + + let challenge = state + .challenges + .iter() + .find(|c| c.id == random_challenge_id) + .unwrap_or_else(|| { + web_sys::console::error_1(&"pick_settings_to_benchmark4".to_string().into()); + panic!("FIXME"); + }); + let min_difficulty = challenge.details.min_difficulty(); + let max_difficulty = challenge.details.max_difficulty(); + let block_data = &challenge.block_data(); + let random_difficulty = block_data.base_frontier().sample(&mut rng).scale( + &min_difficulty, + &max_difficulty, + *block_data.scaling_factor(), + ); + ( + Alphanumeric.sample_string(&mut rng, 32), + BenchmarkSettings { + player_id: PLAYER_ID + .get() + .unwrap_or_else(|| { + web_sys::console::error_1(&"pick_settings_to_benchmark5".to_string().into()); + panic!("FIXME"); + }) + .clone(), + block_id, + challenge_id: random_challenge_id, + algorithm_id: selected_algorithm_id, + difficulty: random_difficulty, + }, + NonceIterator::new(None), + ) +} + +async fn download_wasm_blob(algorithm_id: &String) -> Result> { + let state = mutex().lock().await; + let download_url = state + .download_urls + .get(algorithm_id.as_str()) + .ok_or_else(|| format!("Algorithm {} does not have wasm download_url", algorithm_id))?; + let wasm = get::>(download_url.as_str(), None) + .await + .map_err(|e| format!("Failed to download wasm from {}: {:?}", download_url, e))?; + Ok(wasm) +} + +async fn do_benchmark() -> Result<()> { + while let Some((job, Some(nonce))) = { + let mut state = mutex().lock().await; + if state.running { + (*state) + .job + .as_mut() + .map(|x| (x.clone(), x.nonce_iter.next())) + } else { + None + } + } { + if let Ok(solution_data) = compute_solution( + &job.settings, + nonce, + job.wasm_blob.as_slice(), + job.wasm_vm_config.max_memory, + job.wasm_vm_config.max_fuel, + ) + .map_err(|e| e.to_string())? 
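+        // keep the computed result only if its signature clears the job's threshold (checked below)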
+ { + if solution_data.calc_solution_signature() <= job.solution_signature_threshold { + let mut state = mutex().lock().await; + if let Some(Some(solutions_meta_data)) = (*state) + .benchmarks + .get_mut(&job.benchmark_id) + .map(|x| x.solutions_meta_data.as_mut()) + { + solutions_meta_data.push(solution_data.clone().into()); + } + if let Some(Some(solutions_data)) = (*state) + .proofs + .get_mut(&job.benchmark_id) + .map(|x| x.solutions_data.as_mut()) + { + solutions_data.push(solution_data); + if !job.nonce_iter.is_recompute() { + (*state) + .benchmarks + .get_mut(&job.benchmark_id) + .unwrap_or_else(|| { + web_sys::console::error_1(&"do_benchmark7".to_string().into()); + panic!("FIXME"); + }) + .details + .num_solutions += 1; + } + } else { + return Ok(()); + } + } + } + future_utils::sleep(1).await; + } + update_status(WORKER_ID, "Finished job").await; + Ok(()) +} + +async fn do_manage_benchmark() -> Result<()> { + update_status(MANAGER_ID, "Checking for any benchmarks to recompute").await; + let (benchmark_id, settings, nonce_iter) = if let Some(x) = find_settings_to_recompute().await { + update_status(MANAGER_ID, "Found benchmark to recompute").await; + x + } else { + update_status(MANAGER_ID, "Picking new settings to benchmark").await; + pick_settings_to_benchmark().await + }; + update_status(MANAGER_ID, &format!("{:?}", settings)).await; + + update_status( + MANAGER_ID, + &format!("Downloading algorithm: {}", settings.algorithm_id), + ) + .await; + let wasm_blob = download_wasm_blob(&settings.algorithm_id).await?; + + update_status(MANAGER_ID, &format!("Setting up benchmark")).await; + let mut state = mutex().lock().await; + let solution_signature_threshold = *state + .challenges + .iter() + .find(|x| x.id == settings.challenge_id) + .unwrap_or_else(|| { + web_sys::console::error_1(&"do_manage_benchmark".to_string().into()); + panic!("FIXME"); + }) + .block_data() + .solution_signature_threshold(); + let block = state.latest_block.as_ref().unwrap_or_else(|| { + web_sys::console::error_1(&"do_manage_benchmark2".to_string().into()); + panic!("FIXME"); + }); + let block_started = block.details.height; + let wasm_vm_config = block.config().wasm_vm.clone(); + if !nonce_iter.is_recompute() { + (*state).benchmarks.insert( + benchmark_id.clone(), + Benchmark { + id: benchmark_id.clone(), + settings: settings.clone(), + details: BenchmarkDetails { + block_started, + num_solutions: 0, + }, + state: None, + solutions_meta_data: Some(Vec::new()), + solution_data: None, + }, + ); + } + (*state).proofs.insert( + benchmark_id.clone(), + Proof { + benchmark_id: benchmark_id.clone(), + state: None, + solutions_data: Some(Vec::new()), + }, + ); + (*state).job = Some(Job { + benchmark_id: benchmark_id.clone(), + settings, + duration: Duration { + start: time(), + end: time() + 20000, + now: time(), + }, + solution_signature_threshold, + wasm_blob, + nonce_iter, + wasm_vm_config, + }); + drop(state); + + loop { + let mut state = mutex().lock().await; + if !state.running { + break; + } + let job = (*state).job.as_mut().unwrap_or_else(|| { + web_sys::console::error_1(&"do_manage_benchmark3".to_string().into()); + panic!("FIXME"); + }); + job.duration.now = time(); + if job.duration.now > job.duration.end { + break; + } + + if job.nonce_iter.is_finished() { + break; + } + let num_attempts = job.nonce_iter.attempts(); + let num_solutions = state + .proofs + .get(&benchmark_id) + .unwrap_or_else(|| { + web_sys::console::error_1(&"do_manage_benchmark4".to_string().into()); + panic!("FIXME"); + }) + 
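+            // count the solutions recorded so far for this benchmark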
.solutions_data() + .len(); + drop(state); + update_status( + MANAGER_ID, + &format!( + "Computed {} solutions out of {} instances", + num_solutions, num_attempts + ), + ) + .await; + + future_utils::sleep(200).await; + } + + let mut state = mutex().lock().await; + let num_solutions = state + .proofs + .get(&benchmark_id) + .unwrap_or_else(|| { + web_sys::console::error_1(&"do_manage_benchmark5".to_string().into()); + panic!("FIXME"); + }) + .solutions_data() + .len(); + let num_attempts = state + .job + .as_ref() + .unwrap_or_else(|| { + web_sys::console::error_1(&"do_manage_benchmark6".to_string().into()); + panic!("FIXME"); + }) + .nonce_iter + .attempts(); + (*state).job = None; + if num_solutions == 0 { + (*state).benchmarks.remove(&benchmark_id); + (*state).proofs.remove(&benchmark_id); + } + drop(state); + update_status( + MANAGER_ID, + &format!( + "Finished. Computed {} solutions out of {} instances", + num_solutions, num_attempts + ), + ) + .await; + Ok(()) +} + +async fn do_submit_benchmark() -> Result<()> { + update_status(B_SUBMITTER_ID, "Finding benchmark to submit").await; + let benchmark_to_submit = { + let mut state = mutex().lock().await; + let State { + ref mut benchmarks, + ref proofs, + ref job, + .. + } = *state; + let job_benchmark_id = job.as_ref().map(|x| &x.benchmark_id); + benchmarks + .values_mut() + .find(|benchmark| { + job_benchmark_id != Some(&benchmark.id) && benchmark.solutions_meta_data.is_some() + }) + .map(|benchmark| { + ( + benchmark.id.clone(), + benchmark.settings.clone(), + benchmark.solutions_meta_data.take().unwrap_or_else(|| { + web_sys::console::error_1(&"run_benchmark_submitter".to_string().into()); + panic!("FIXME"); + }), + proofs + .get(&benchmark.id) + .unwrap_or_else(|| { + web_sys::console::error_1( + &"run_benchmark_submitter".to_string().into(), + ); + panic!("FIXME"); + }) + .solutions_data() + .first() + .unwrap() + .clone(), + ) + }) + }; + if let Some((old_benchmark_id, settings, solutions_meta_data, solution_data)) = + benchmark_to_submit + { + update_status( + B_SUBMITTER_ID, + &format!("Submitting benchmark: {:?}", settings), + ) + .await; + let resp = API + .get() + .unwrap_or_else(|| { + web_sys::console::error_1(&"run_benchmark_submitter2".to_string().into()); + panic!("FIXME"); + }) + .submit_benchmark(SubmitBenchmarkReq { + settings, + solutions_meta_data, + solution_data, + }) + .await + .map_err(|e| format!("Failed to submit benchmark: {:?}", e))?; + update_status(B_SUBMITTER_ID, &format!("{:?}", resp)).await; + if resp.benchmark_id != old_benchmark_id { + let mut state = mutex().lock().await; + let mut benchmark = (*state) + .benchmarks + .remove(&old_benchmark_id) + .unwrap_or_else(|| { + web_sys::console::error_1(&"run_benchmark_submitter3".to_string().into()); + panic!("FIXME"); + }); + let mut proof = (*state) + .proofs + .remove(&old_benchmark_id) + .unwrap_or_else(|| { + web_sys::console::error_1(&"run_benchmark_submitter4".to_string().into()); + panic!("FIXME"); + }); + benchmark.id = resp.benchmark_id.clone(); + proof.benchmark_id = resp.benchmark_id.clone(); + (*state) + .benchmarks + .insert(resp.benchmark_id.clone(), benchmark); + (*state).proofs.insert(resp.benchmark_id.clone(), proof); + } + } else { + update_status(B_SUBMITTER_ID, "No benchmark to submit").await; + } + Ok(()) +} + +async fn do_submit_proof() -> Result<()> { + update_status(P_SUBMITTER_ID, "Finding proof to submit").await; + let proof_to_submit = { + let mut state = mutex().lock().await; + let State { + ref benchmarks, + ref mut proofs, + ref 
job, + .. + } = *state; + let job_benchmark_id = job.as_ref().map(|x| &x.benchmark_id); + proofs + .values_mut() + .find(|x| { + job_benchmark_id != Some(&x.benchmark_id) + && x.solutions_data.is_some() + && benchmarks + .get(&x.benchmark_id) + .is_some_and(|x| x.state.is_some()) + }) + .map(|x| { + let state = benchmarks + .get(&x.benchmark_id) + .unwrap_or_else(|| { + web_sys::console::error_1(&"do_submit_proof".to_string().into()); + panic!("FIXME"); + }) + .state + .as_ref() + .unwrap_or_else(|| { + web_sys::console::error_1(&"do_submit_proof2".to_string().into()); + panic!("FIXME"); + }); + let sampled_nonces: HashSet = state + .sampled_nonces + .clone() + .unwrap_or_else(|| { + web_sys::console::error_1(&"do_submit_proof3".to_string().into()); + panic!("FIXME"); + }) + .into_iter() + .collect(); + let mut solutions_data = x.solutions_data.take().unwrap_or_else(|| { + web_sys::console::error_1(&"do_submit_proof4".to_string().into()); + panic!("FIXME"); + }); + solutions_data.retain(|x| sampled_nonces.contains(&x.nonce)); + (x.benchmark_id.clone(), solutions_data) + }) + }; + if let Some((benchmark_id, solutions_data)) = proof_to_submit { + update_status( + P_SUBMITTER_ID, + &format!("Submitting proof for benchmark {}", benchmark_id), + ) + .await; + let resp = API + .get() + .unwrap_or_else(|| { + web_sys::console::error_1(&"do_submit_proof5".to_string().into()); + panic!("FIXME"); + }) + .submit_proof(SubmitProofReq { + benchmark_id: benchmark_id.clone(), + solutions_data: solutions_data.into(), + }) + .await + .map_err(|e| { + format!( + "Failed to submit proof for benchmark {}: {:?}", + benchmark_id, e + ) + })?; + update_status(P_SUBMITTER_ID, &format!("{:?}", resp)).await; + } else { + update_status(P_SUBMITTER_ID, "No proof to submit").await; + } + Ok(()) +} diff --git a/tig-benchmarker/src/future_utils.rs b/tig-benchmarker/src/future_utils.rs new file mode 100644 index 00000000..49c27fab --- /dev/null +++ b/tig-benchmarker/src/future_utils.rs @@ -0,0 +1,66 @@ +use serde::{de::DeserializeOwned, Serialize}; +use std::future::Future; + +use super::*; +pub use futures::lock::Mutex; +use gloo_timers::future::TimeoutFuture; +use js_sys::{Array, Date, Promise}; +use serde_wasm_bindgen::{from_value, to_value}; +use wasm_bindgen::prelude::*; +use wasm_bindgen_futures::future_to_promise; +use wasm_bindgen_futures::JsFuture; + +fn to_string(e: T) -> String { + format!("{:?}", e) +} +pub async fn join( + a: impl Future + 'static, + b: impl Future + 'static, + c: impl Future + 'static, + d: impl Future + 'static, +) -> Result<(T, U, V, W), String> +where + T: Serialize + DeserializeOwned + 'static, + U: Serialize + DeserializeOwned + 'static, + V: Serialize + DeserializeOwned + 'static, + W: Serialize + DeserializeOwned + 'static, +{ + let a = future_to_promise(async move { Ok(to_value(&a.await)?) }); + let b = future_to_promise(async move { Ok(to_value(&b.await)?) }); + let c = future_to_promise(async move { Ok(to_value(&c.await)?) }); + let d = future_to_promise(async move { Ok(to_value(&d.await)?) 
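+        // to_value serializes each result into a JsValue so Promise::all below can carry it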
});
+
+    let promises = Array::new();
+    promises.push(&a);
+    promises.push(&b);
+    promises.push(&c);
+    promises.push(&d);
+
+    let js_promise = Promise::all(&promises);
+    let js_values = JsFuture::from(js_promise).await.map_err(to_string)?;
+
+    let values = js_values.dyn_into::<Array>().map_err(to_string)?;
+    let results = (
+        from_value(values.get(0)).map_err(to_string)?,
+        from_value(values.get(1)).map_err(to_string)?,
+        from_value(values.get(2)).map_err(to_string)?,
+        from_value(values.get(3)).map_err(to_string)?,
+    );
+
+    Ok(results)
+}
+
+pub fn spawn(f: impl Future<Output = ()> + 'static) {
+    let _ = future_to_promise(async move {
+        f.await;
+        Ok(JsValue::undefined())
+    });
+}
+
+pub async fn sleep(ms: u32) {
+    TimeoutFuture::new(ms).await;
+}
+
+pub fn time() -> u64 {
+    Date::now() as u64
+}
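+
+// Usage sketch (illustrative only): `update_block_data` awaits four fetches
+// concurrently via a JS `Promise.all`, failing as a unit if any one rejects:
+//
+//     let (algorithms, player_data, benchmarks, challenges) = join(
+//         get_algorithms(),
+//         get_benchmarker_data(),
+//         get_benchmarks(),
+//         get_challenges(),
+//     )
+//     .await?;
+//
+// Each future's output must be serde-serializable because results round-trip
+// through `JsValue` on their way in and out of the promise.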
diff --git a/tig-benchmarker/src/lib.rs b/tig-benchmarker/src/lib.rs
new file mode 100644
index 00000000..237b9aeb
--- /dev/null
+++ b/tig-benchmarker/src/lib.rs
@@ -0,0 +1,34 @@
+mod benchmarker;
+mod future_utils;
+
+#[cfg(feature = "browser")]
+mod exports {
+    use super::*;
+    use wasm_bindgen::prelude::*;
+
+    #[wasm_bindgen]
+    pub async fn state() -> JsValue {
+        let state = benchmarker::mutex().lock().await.clone();
+        serde_wasm_bindgen::to_value(&state).unwrap()
+    }
+
+    #[wasm_bindgen]
+    pub async fn start() {
+        benchmarker::start().await;
+    }
+
+    #[wasm_bindgen]
+    pub async fn stop() {
+        benchmarker::stop().await;
+    }
+
+    #[wasm_bindgen]
+    pub async fn select_algorithm(challenge_id: String, algorithm_id: String) {
+        benchmarker::select_algorithm(challenge_id, algorithm_id).await;
+    }
+
+    #[wasm_bindgen]
+    pub async fn setup(api_url: String, api_key: String, player_id: String, num_workers: u32) {
+        benchmarker::setup(api_url, api_key, player_id.to_string(), num_workers).await;
+    }
+}
diff --git a/tig-challenges/.gitignore b/tig-challenges/.gitignore
new file mode 100644
index 00000000..ea8c4bf7
--- /dev/null
+++ b/tig-challenges/.gitignore
@@ -0,0 +1 @@
+/target
diff --git a/tig-challenges/Cargo.toml b/tig-challenges/Cargo.toml
new file mode 100644
index 00000000..33ad5222
--- /dev/null
+++ b/tig-challenges/Cargo.toml
@@ -0,0 +1,13 @@
+[package]
+name = "tig-challenges"
+version = "0.1.0"
+authors.workspace = true
+repository.workspace = true
+edition.workspace = true
+
+[dependencies]
+anyhow = "1.0.81"
+ndarray = "0.15.6"
+rand = { version = "0.8.5", default-features = false, features = ["std_rng"] }
+serde = { version = "1.0.196", features = ["derive"] }
+serde_json = { version = "1.0.113" }
diff --git a/tig-challenges/src/knapsack.rs b/tig-challenges/src/knapsack.rs
new file mode 100644
index 00000000..eca34506
--- /dev/null
+++ b/tig-challenges/src/knapsack.rs
@@ -0,0 +1,130 @@
+use anyhow::{anyhow, Result};
+use rand::{rngs::StdRng, Rng, SeedableRng};
+use serde::{Deserialize, Serialize};
+use serde_json::{from_value, Map, Value};
+use std::collections::HashSet;
+
+#[derive(Serialize, Deserialize, Debug, Clone)]
+pub struct Difficulty {
+    pub num_items: usize,
+    pub better_than_baseline: u32,
+}
+
+impl crate::DifficultyTrait<2> for Difficulty {
+    fn from_arr(arr: &[i32; 2]) -> Self {
+        Self {
+            num_items: arr[0] as usize,
+            better_than_baseline: arr[1] as u32,
+        }
+    }
+
+    fn to_arr(&self) -> [i32; 2] {
+        [self.num_items as i32, self.better_than_baseline as i32]
+    }
+}
+
+#[derive(Serialize, Deserialize, Debug)]
+pub struct Solution {
+    pub items: Vec<usize>,
+}
+
+impl crate::SolutionTrait for Solution {}
+
+impl TryFrom<Map<String, Value>> for Solution {
+    type Error = serde_json::Error;
+
+    fn try_from(v: Map<String, Value>) -> Result<Self, Self::Error> {
+        from_value(Value::Object(v))
+    }
+}
+
+#[derive(Serialize, Deserialize, Debug)]
+pub struct Challenge {
+    pub seed: u32,
+    pub difficulty: Difficulty,
+    pub weights: Vec<u32>,
+    pub values: Vec<u32>,
+    pub max_weight: u32,
+    pub min_value: u32,
+}
+
+impl crate::ChallengeTrait<Solution, Difficulty, 2> for Challenge {
+    fn generate_instance(seed: u32, difficulty: &Difficulty) -> Result<Self> {
+        let mut rng: StdRng = StdRng::seed_from_u64(seed as u64);
+
+        let weights: Vec<u32> = (0..difficulty.num_items)
+            .map(|_| rng.gen_range(1..50))
+            .collect();
+        let values: Vec<u32> = (0..difficulty.num_items)
+            .map(|_| rng.gen_range(1..50))
+            .collect();
+        let max_weight: u32 = weights.iter().sum::<u32>() / 2;
+
+        // Baseline greedy algorithm
+        let mut sorted_value_to_weight_ratio: Vec<usize> = (0..difficulty.num_items).collect();
+        sorted_value_to_weight_ratio.sort_by(|&a, &b| {
+            let ratio_a = values[a] as f64 / weights[a] as f64;
+            let ratio_b = values[b] as f64 / weights[b] as f64;
+            ratio_b.partial_cmp(&ratio_a).unwrap()
+        });
+
+        let mut total_weight = 0;
+        let mut min_value = 0;
+        for &item in &sorted_value_to_weight_ratio {
+            if total_weight + weights[item] > max_weight {
+                continue;
+            }
+            min_value += values[item];
+            total_weight += weights[item];
+        }
+        min_value = (min_value as f64 * (1.0 + difficulty.better_than_baseline as f64 / 1000.0))
+            .round() as u32;
+
+        Ok(Challenge {
+            seed,
+            difficulty: difficulty.clone(),
+            weights,
+            values,
+            max_weight,
+            min_value,
+        })
+    }
+
+    fn verify_solution(&self, solution: &Solution) -> Result<()> {
+        let selected_items: HashSet<usize> = solution.items.iter().cloned().collect();
+        if selected_items.len() != solution.items.len() {
+            return Err(anyhow!("Duplicate items selected."));
+        }
+        if let Some(item) = selected_items
+            .iter()
+            .find(|&&item| item >= self.weights.len())
+        {
+            return Err(anyhow!("Item ({}) is out of bounds", item));
+        }
+
+        let total_weight = selected_items
+            .iter()
+            .map(|&item| self.weights[item])
+            .sum::<u32>();
+        if total_weight > self.max_weight {
+            return Err(anyhow!(
+                "Total weight ({}) exceeded max weight ({})",
+                total_weight,
+                self.max_weight
+            ));
+        }
+        let total_value = selected_items
+            .iter()
+            .map(|&item| self.values[item])
+            .sum::<u32>();
+        if total_value < self.min_value {
+            Err(anyhow!(
+                "Total value ({}) does not reach minimum value ({})",
+                total_value,
+                self.min_value
+            ))
+        } else {
+            Ok(())
+        }
+    }
+}
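+
+// Illustrative usage (an added sketch with hypothetical seed and difficulty
+// values): `better_than_baseline` is expressed in tenths of a percent, so 10
+// asks for a knapsack worth 1% more than the greedy baseline. An empty
+// selection can never reach `min_value`, so verification must reject it.
+#[cfg(test)]
+mod usage_example {
+    use super::*;
+    use crate::ChallengeTrait;
+
+    #[test]
+    fn empty_selection_is_rejected() {
+        let difficulty = Difficulty {
+            num_items: 50,
+            better_than_baseline: 10,
+        };
+        let challenge = Challenge::generate_instance(1337, &difficulty).unwrap();
+        assert!(challenge.min_value > 0);
+        assert!(challenge
+            .verify_solution(&Solution { items: vec![] })
+            .is_err());
+    }
+}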
+ } + fn generate_instance_from_vec(seed: u32, difficulty: &Vec) -> Result { + match difficulty.as_slice().try_into() { + Ok(difficulty) => Self::generate_instance_from_arr(seed, &difficulty), + Err(_) => Err(anyhow!("Invalid difficulty length")), + } + } + fn generate_instance_from_arr(seed: u32, difficulty: &[i32; N]) -> Result { + Self::generate_instance(seed, &U::from_arr(difficulty)) + } + + fn verify_solution(&self, solution: &T) -> Result<()>; + fn verify_solution_from_json(&self, solution: &str) -> Result> { + Ok(self.verify_solution(&serde_json::from_str(solution)?)) + } +} + +pub mod knapsack; +pub mod satisfiability; +pub mod vehicle_routing; diff --git a/tig-challenges/src/satisfiability.rs b/tig-challenges/src/satisfiability.rs new file mode 100644 index 00000000..09cb473f --- /dev/null +++ b/tig-challenges/src/satisfiability.rs @@ -0,0 +1,116 @@ +use anyhow::{anyhow, Result}; +use ndarray::{Array2, Axis}; +use rand::{ + distributions::{Distribution, Uniform}, + rngs::StdRng, + SeedableRng, +}; +use serde::{Deserialize, Serialize}; +use serde_json::{from_value, Map, Value}; + +#[derive(Serialize, Deserialize, Debug, Copy, Clone)] +pub struct Difficulty { + pub num_variables: usize, + pub clauses_to_variables_percent: u32, +} + +impl crate::DifficultyTrait<2> for Difficulty { + fn from_arr(arr: &[i32; 2]) -> Self { + Self { + num_variables: arr[0] as usize, + clauses_to_variables_percent: arr[1] as u32, + } + } + + fn to_arr(&self) -> [i32; 2] { + [ + self.num_variables as i32, + self.clauses_to_variables_percent as i32, + ] + } +} + +#[derive(Serialize, Deserialize, Debug)] +pub struct Solution { + pub variables: Vec, +} + +impl crate::SolutionTrait for Solution {} + +impl TryFrom> for Solution { + type Error = serde_json::Error; + + fn try_from(v: Map) -> Result { + from_value(Value::Object(v)) + } +} + +#[derive(Serialize, Deserialize, Debug)] +pub struct Challenge { + pub seed: u32, + pub difficulty: Difficulty, + pub clauses: Vec>, +} + +impl crate::ChallengeTrait for Challenge { + fn generate_instance(seed: u32, difficulty: &Difficulty) -> Result { + let mut rng = StdRng::seed_from_u64(seed as u64); + let num_clauses = (difficulty.num_variables as f64 + * difficulty.clauses_to_variables_percent as f64 + / 100.0) + .floor() as usize; + + let var_distr = Uniform::new(1, difficulty.num_variables as i32 + 1); + // Create a uniform distribution for negations. + let neg_distr = Uniform::new(0, 2); + + // Generate the clauses array. + let clauses_array = Array2::from_shape_fn((num_clauses, 3), |_| var_distr.sample(&mut rng)); + + // Generate the negations array. + let negations = Array2::from_shape_fn((num_clauses, 3), |_| { + if neg_distr.sample(&mut rng) == 0 { + -1 + } else { + 1 + } + }); + + // Combine clauses array with negations. + let clauses_array = clauses_array * negations; + + // Convert Array2 to Vec> + let clauses = clauses_array + .axis_iter(Axis(0)) + .map(|row| row.to_vec()) + .collect(); + + Ok(Self { + seed, + difficulty: difficulty.clone(), + clauses, + }) + } + + fn verify_solution(&self, solution: &Solution) -> Result<()> { + if solution.variables.len() != self.difficulty.num_variables { + return Err(anyhow!( + "Invalid number of variables. 
Expected: {}, Actual: {}",
+                self.difficulty.num_variables,
+                solution.variables.len()
+            ));
+        }
+
+        if let Some((idx, _)) = self.clauses.iter().enumerate().find(|(_, clause)| {
+            !clause.iter().any(|&literal| {
+                let var_idx = literal.abs() as usize - 1;
+                let var_value = solution.variables[var_idx];
+                (literal > 0 && var_value) || (literal < 0 && !var_value)
+            })
+        }) {
+            Err(anyhow!("Clause '{}' not satisfied", idx))
+        } else {
+            Ok(())
+        }
+    }
+}
diff --git a/tig-challenges/src/vehicle_routing.rs b/tig-challenges/src/vehicle_routing.rs
new file mode 100644
index 00000000..b468634d
--- /dev/null
+++ b/tig-challenges/src/vehicle_routing.rs
@@ -0,0 +1,207 @@
+use anyhow::{anyhow, Result};
+use rand::{rngs::StdRng, Rng, SeedableRng};
+use serde::{Deserialize, Serialize};
+use serde_json::{from_value, Map, Value};
+
+#[derive(Serialize, Deserialize, Debug, Copy, Clone)]
+pub struct Difficulty {
+    pub num_nodes: usize,
+    pub better_than_baseline: u32,
+}
+
+impl crate::DifficultyTrait<2> for Difficulty {
+    fn from_arr(arr: &[i32; 2]) -> Self {
+        Self {
+            num_nodes: arr[0] as usize,
+            better_than_baseline: arr[1] as u32,
+        }
+    }
+
+    fn to_arr(&self) -> [i32; 2] {
+        [self.num_nodes as i32, self.better_than_baseline as i32]
+    }
+}
+
+#[derive(Serialize, Deserialize, Debug)]
+pub struct Solution {
+    pub routes: Vec<Vec<usize>>,
+}
+
+impl crate::SolutionTrait for Solution {}
+
+impl TryFrom<Map<String, Value>> for Solution {
+    type Error = serde_json::Error;
+
+    fn try_from(v: Map<String, Value>) -> Result<Self, Self::Error> {
+        from_value(Value::Object(v))
+    }
+}
+
+#[derive(Serialize, Deserialize, Debug)]
+pub struct Challenge {
+    pub seed: u32,
+    pub difficulty: Difficulty,
+    pub demands: Vec<i32>,
+    pub distance_matrix: Vec<Vec<i32>>,
+    pub max_total_distance: i32,
+    pub max_capacity: i32,
+}
+
+impl crate::ChallengeTrait<Solution, Difficulty, 2> for Challenge {
+    fn generate_instance(seed: u32, difficulty: &Difficulty) -> Result<Self> {
+        let mut rng: StdRng = StdRng::seed_from_u64(seed as u64);
+
+        let num_nodes = difficulty.num_nodes;
+        let max_capacity = 100;
+
+        let mut node_positions: Vec<(f64, f64)> = (0..num_nodes)
+            .map(|_| (rng.gen::<f64>() * 500.0, rng.gen::<f64>() * 500.0))
+            .collect();
+        node_positions[0] = (250.0, 250.0); // Depot is node 0, and in the center
+
+        let mut demands: Vec<i32> = (0..num_nodes).map(|_| rng.gen_range(15..30)).collect();
+        demands[0] = 0; // Depot demand is 0
+
+        let distance_matrix: Vec<Vec<i32>> = node_positions
+            .iter()
+            .map(|&from| {
+                node_positions
+                    .iter()
+                    .map(|&to| {
+                        let dx = from.0 - to.0;
+                        let dy = from.1 - to.1;
+                        dx.hypot(dy).round() as i32
+                    })
+                    .collect()
+            })
+            .collect();
+
+        let baseline_routes =
+            calc_baseline_routes(num_nodes, max_capacity, &demands, &distance_matrix)?;
+        let baseline_routes_total_distance = calc_routes_total_distance(
+            num_nodes,
+            max_capacity,
+            &demands,
+            &distance_matrix,
+            &baseline_routes,
+        )?;
+        let max_total_distance = (baseline_routes_total_distance
+            * (1000 - difficulty.better_than_baseline as i32)
+            / 1000) as i32;
+
+        Ok(Challenge {
+            seed,
+            difficulty: difficulty.clone(),
+            demands,
+            distance_matrix,
+            max_total_distance,
+            max_capacity,
+        })
+    }
+
+    fn verify_solution(&self, solution: &Solution) -> Result<()> {
+        let total_distance = calc_routes_total_distance(
+            self.difficulty.num_nodes,
+            self.max_capacity,
+            &self.demands,
+            &self.distance_matrix,
+            &solution.routes,
+        )?;
+        if total_distance <= self.max_total_distance {
+            Ok(())
+        } else {
+            Err(anyhow!(
+                "Total distance ({}) exceeds max total distance ({})",
+                total_distance,
+                self.max_total_distance
+            ))
+        }
+    }
+}
+
+pub fn calc_baseline_routes(
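// Two minimal sketches of the SAT conventions above; helper names are
// hypothetical. Literals use the DIMACS-style encoding: literal k > 0 is
// satisfied when variables[k - 1] is true, literal k < 0 when
// variables[-k - 1] is false, and a clause fails only if none of its three
// literals is satisfied.
fn literal_satisfied(literal: i32, variables: &[bool]) -> bool {
    let value = variables[literal.unsigned_abs() as usize - 1];
    if literal > 0 { value } else { !value }
}

// Difficulty arrives from callers as a Vec<i32> routed through the trait
// helpers in lib.rs: [4, 250] means 4 variables and floor(4 * 250 / 100) = 10
// clauses.
fn generate_sat_example() -> anyhow::Result<()> {
    use tig_challenges::{satisfiability::Challenge, ChallengeTrait};
    let challenge = Challenge::generate_instance_from_vec(1337, &vec![4, 250])?;
    assert_eq!(challenge.clauses.len(), 10);
    Ok(())
}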
num_nodes: usize, + max_capacity: i32, + demands: &Vec, + distance_matrix: &Vec>, +) -> Result>> { + let mut routes = Vec::new(); + let mut visited = vec![false; num_nodes]; + visited[0] = true; + + while visited.iter().any(|&v| !v) { + let mut route = vec![0]; + let mut current_node = 0; + let mut capacity = max_capacity; + + while capacity > 0 && visited.iter().any(|&v| !v) { + let eligible_nodes: Vec = (0..num_nodes) + .filter(|&node| !visited[node] && demands[node] <= capacity) + .collect(); + + if !eligible_nodes.is_empty() { + let &closest_node = eligible_nodes + .iter() + .min_by_key(|&&node| distance_matrix[current_node][node]) + .unwrap(); + capacity -= demands[closest_node]; + route.push(closest_node); + visited[closest_node] = true; + current_node = closest_node; + } else { + break; + } + } + + route.push(0); + routes.push(route); + } + + Ok(routes) +} + +pub fn calc_routes_total_distance( + num_nodes: usize, + max_capacity: i32, + demands: &Vec, + distance_matrix: &Vec>, + routes: &Vec>, +) -> Result { + let mut total_distance = 0; + let mut visited = vec![false; num_nodes]; + visited[0] = true; + + for route in routes { + if route.len() <= 2 || route[0] != 0 || route[route.len() - 1] != 0 { + return Err(anyhow!("Each route must start and end at node 0 (the depot), and visit at least one non-depot node")); + } + + let mut capacity = max_capacity; + let mut current_node = 0; + + for &node in &route[1..route.len() - 1] { + if visited[node] { + return Err(anyhow!( + "The same non-depot node cannot be visited more than once" + )); + } + if demands[node] > capacity { + return Err(anyhow!( + "The total demand on each route must not exceed max capacity" + )); + } + visited[node] = true; + capacity -= demands[node]; + total_distance += distance_matrix[current_node][node]; + current_node = node; + } + + total_distance += distance_matrix[current_node][0]; + } + + if visited.iter().any(|&v| !v) { + return Err(anyhow!("All nodes must be visited")); + } + + Ok(total_distance) +} diff --git a/tig-protocol/.gitignore b/tig-protocol/.gitignore new file mode 100644 index 00000000..ea8c4bf7 --- /dev/null +++ b/tig-protocol/.gitignore @@ -0,0 +1 @@ +/target diff --git a/tig-protocol/Cargo.toml b/tig-protocol/Cargo.toml new file mode 100644 index 00000000..8029dab0 --- /dev/null +++ b/tig-protocol/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "tig-protocol" +version = "0.1.0" +authors.workspace = true +repository.workspace = true +edition.workspace = true + +[dependencies] +anyhow = { version = "1.0.81" } +rand = "0.8.4" +serde = { version = "1.0.196", features = ["derive"] } +serde_json = { version = "1.0.113" } +tig-structs = { path = "../tig-structs" } +tig-utils = { path = "../tig-utils" } + +[lib] +crate-type = ["cdylib", "rlib"] diff --git a/tig-protocol/src/add_block.rs b/tig-protocol/src/add_block.rs new file mode 100644 index 00000000..daf014ac --- /dev/null +++ b/tig-protocol/src/add_block.rs @@ -0,0 +1,1099 @@ +use crate::context::*; +use rand::{prelude::SliceRandom, rngs::StdRng, SeedableRng}; +use std::{ + collections::{HashMap, HashSet}, + ops::Mul, +}; +use tig_structs::core::*; +use tig_utils::*; + +pub(crate) async fn execute(ctx: &mut T) -> String { + let block = create_block(ctx).await; + confirm_mempool_challenges(ctx, &block).await; + confirm_mempool_algorithms(ctx, &block).await; + confirm_mempool_benchmarks(ctx, &block).await; + confirm_mempool_proofs(ctx, &block).await; + confirm_mempool_frauds(ctx, &block).await; + confirm_mempool_wasms(ctx, &block).await; + 
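// A minimal sketch of the distance budget set by the vehicle-routing generator
// above: `better_than_baseline` is in tenths of a percent, so a greedy
// baseline of 2000 with better_than_baseline = 50 gives a budget of
// 2000 * (1000 - 50) / 1000 = 1900. Hypothetical helper and numbers.
fn distance_budget(baseline_total: i32, better_than_baseline: u32) -> i32 {
    baseline_total * (1000 - better_than_baseline as i32) / 1000
}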
update_cutoffs(ctx, &block).await; + update_solution_signature_thresholds(ctx, &block).await; + update_qualifiers(ctx, &block).await; + update_frontiers(ctx, &block).await; + update_influence(ctx, &block).await; + update_adoption(ctx, &block).await; + update_innovator_rewards(ctx, &block).await; + update_benchmarker_rewards(ctx, &block).await; + update_merge_points(ctx, &block).await; + update_merges(ctx, &block).await; + block.id +} + +async fn create_block(ctx: &mut T) -> Block { + let latest_block = ctx + .get_block(BlockFilter::Latest, false) + .await + .unwrap_or_else(|e| panic!("get_block error: {:?}", e)) + .expect("No latest block found"); + let config = ctx + .get_config() + .await + .unwrap_or_else(|e| panic!("get_config error: {:?}", e)); + let details = BlockDetails { + prev_block_id: latest_block.id.clone(), + height: latest_block.details.height + 1, + round: latest_block.details.height / config.rounds.blocks_per_round + 1, + }; + let from_block_started = details + .height + .saturating_sub(config.benchmark_submissions.lifespan_period); + let mut data = BlockData { + mempool_challenge_ids: HashSet::::new(), + mempool_algorithm_ids: HashSet::::new(), + mempool_benchmark_ids: HashSet::::new(), + mempool_fraud_ids: HashSet::::new(), + mempool_proof_ids: HashSet::::new(), + mempool_wasm_ids: HashSet::::new(), + active_challenge_ids: HashSet::::new(), + active_algorithm_ids: HashSet::::new(), + active_benchmark_ids: HashSet::::new(), + active_player_ids: HashSet::::new(), + }; + for challenge in ctx + .get_challenges(ChallengesFilter::Mempool, None) + .await + .unwrap_or_else(|e| panic!("get_challenges error: {:?}", e)) + .iter() + { + data.mempool_challenge_ids.insert(challenge.id.clone()); + } + for algorithm in ctx + .get_algorithms(AlgorithmsFilter::Mempool, None, false) + .await + .unwrap_or_else(|e| panic!("get_algorithms error: {:?}", e)) + .iter() + { + data.mempool_algorithm_ids.insert(algorithm.id.clone()); + } + for benchmark in ctx + .get_benchmarks(BenchmarksFilter::Mempool { from_block_started }, true) + .await + .unwrap_or_else(|e| panic!("get_benchmarks error: {:?}", e)) + .iter() + { + data.mempool_benchmark_ids.insert(benchmark.id.clone()); + } + for proof in ctx + .get_proofs(ProofsFilter::Mempool { from_block_started }, false) + .await + .unwrap_or_else(|e| panic!("get_proofs error: {:?}", e)) + .iter() + { + data.mempool_proof_ids.insert(proof.benchmark_id.clone()); + } + for fraud in ctx + .get_frauds(FraudsFilter::Mempool { from_block_started }, false) + .await + .unwrap_or_else(|e| panic!("get_frauds error: {:?}", e)) + .iter() + { + data.mempool_fraud_ids.insert(fraud.benchmark_id.clone()); + } + for wasm in ctx + .get_wasms(WasmsFilter::Mempool, false) + .await + .unwrap_or_else(|e| panic!("get_wasms error: {:?}", e)) + .iter() + { + data.mempool_wasm_ids.insert(wasm.algorithm_id.clone()); + } + + for challenge in ctx + .get_challenges(ChallengesFilter::Confirmed, None) + .await + .unwrap_or_else(|e| panic!("get_challenges error: {:?}", e)) + { + let round_active = challenge.state.unwrap().round_active; + if round_active.is_some_and(|x| details.round >= x) { + data.active_challenge_ids.insert(challenge.id); + } + } + let wasms: HashMap = ctx + .get_wasms(WasmsFilter::Confirmed, false) + .await + .unwrap_or_else(|e| panic!("get_wasms error: {:?}", e)) + .into_iter() + .map(|x| (x.algorithm_id.clone(), x)) + .collect(); + for algorithm in ctx + .get_algorithms(AlgorithmsFilter::Confirmed, None, false) + .await + .unwrap_or_else(|e| panic!("get_algorithms 
error: {:?}", e)) + { + let mut state = algorithm.state.unwrap(); + let round_pushed = state + .round_pushed + .unwrap_or(state.round_submitted() + config.algorithm_submissions.push_delay); + if details.round >= round_pushed + && wasms + .get(&algorithm.id) + .is_some_and(|w| w.details.compile_success) + { + data.active_algorithm_ids.insert(algorithm.id.clone()); + if state.round_pushed.is_none() { + state.round_pushed = Some(round_pushed); + ctx.update_algorithm_state(&algorithm.id, &state) + .await + .unwrap_or_else(|e| panic!("update_algorithm_state error: {:?}", e)); + } + } + } + let confirmed_proofs = ctx + .get_proofs(ProofsFilter::Confirmed { from_block_started }, false) + .await + .unwrap_or_else(|e| panic!("get_proofs error: {:?}", e)) + .into_iter() + .map(|x| (x.benchmark_id.clone(), x)) + .collect::>(); + let confirmed_frauds = ctx + .get_frauds(FraudsFilter::Confirmed { from_block_started }, false) + .await + .unwrap_or_else(|e| panic!("get_frauds error: {:?}", e)) + .into_iter() + .map(|x| (x.benchmark_id.clone(), x)) + .collect::>(); + for benchmark in ctx + .get_benchmarks(BenchmarksFilter::Confirmed { from_block_started }, false) + .await + .unwrap_or_else(|e| panic!("get_benchmarks error: {:?}", e)) + { + let proof = confirmed_proofs.get(&benchmark.id); + if proof.is_none() || confirmed_frauds.contains_key(&benchmark.id) { + continue; + } + // TODO check player state + let _player = ctx + .get_players( + PlayersFilter::Id(benchmark.settings.player_id.clone()), + None, + ) + .await + .unwrap_or_else(|e| panic!("get_players error: {:?}", e)) + .pop(); + let proof_state = proof.unwrap().state(); + let submission_delay = proof_state.submission_delay(); + let block_confirmed = proof_state.block_confirmed(); + let block_active = block_confirmed + + submission_delay * config.benchmark_submissions.submission_delay_multiplier; + if details.height >= block_active { + data.active_benchmark_ids.insert(benchmark.id.clone()); + data.active_player_ids + .insert(benchmark.settings.player_id.clone()); + } + } + + let block_id = ctx + .add_block(&details, &data, &config) + .await + .unwrap_or_else(|e| panic!("add_block error: {:?}", e)); + for challenge_id in data.mempool_challenge_ids.iter() { + let state = ChallengeState { + block_confirmed: None, + round_submitted: None, + round_active: None, + round_inactive: None, + }; + ctx.update_challenge_state(challenge_id, &state) + .await + .unwrap_or_else(|e| panic!("update_challenge_state error: {:?}", e)); + } + for algorithm_id in data.mempool_algorithm_ids.iter() { + let state = AlgorithmState { + block_confirmed: None, + round_submitted: None, + round_pushed: None, + round_merged: None, + }; + ctx.update_algorithm_state(algorithm_id, &state) + .await + .unwrap_or_else(|e| panic!("update_algorithm_state error: {:?}", e)); + } + for benchmark_id in data.mempool_benchmark_ids.iter() { + let state = BenchmarkState { + block_confirmed: None, + sampled_nonces: None, + }; + ctx.update_benchmark_state(benchmark_id, &state) + .await + .unwrap_or_else(|e| panic!("update_benchmark_state error: {:?}", e)); + } + for proof_id in data.mempool_proof_ids.iter() { + let state = ProofState { + block_confirmed: None, + submission_delay: None, + }; + ctx.update_proof_state(proof_id, &state) + .await + .unwrap_or_else(|e| panic!("update_proof_state error: {:?}", e)); + } + for fraud_id in data.mempool_fraud_ids.iter() { + let state = FraudState { + block_confirmed: None, + }; + ctx.update_fraud_state(fraud_id, &state) + .await + .unwrap_or_else(|e| 
panic!("get_benchmarks error: {:?}", e)); + } + + for challenge_id in data.active_challenge_ids.iter() { + let data = ChallengeBlockData { + num_qualifiers: None, + solution_signature_threshold: None, + scaled_frontier: None, + base_frontier: None, + scaling_factor: None, + qualifier_difficulties: None, + }; + ctx.update_challenge_block_data(challenge_id, &block_id, &data) + .await + .unwrap_or_else(|e| panic!("update_challenge_block_data error: {:?}", e)); + } + for algorithm_id in data.active_algorithm_ids.iter() { + let data = AlgorithmBlockData { + reward: None, + adoption: None, + merge_points: None, + num_qualifiers_by_player: None, + round_earnings: None, + }; + ctx.update_algorithm_block_data(algorithm_id, &block_id, &data) + .await + .unwrap_or_else(|e| panic!("update_algorithm_block_data error: {:?}", e)); + } + for player_id in data.active_player_ids.iter() { + let data = PlayerBlockData { + reward: None, + influence: None, + cutoff: None, + imbalance: None, + imbalance_penalty: None, + num_qualifiers_by_challenge: None, + round_earnings: None, + }; + ctx.update_player_block_data(player_id, &block_id, &data) + .await + .unwrap_or_else(|e| panic!("update_player_block_data error: {:?}", e)); + } + + Block { + id: block_id, + config: Some(config.clone()), + details, + data: Some(data), + } +} + +async fn confirm_mempool_challenges(ctx: &mut T, block: &Block) { + for challenge_id in block.data().mempool_challenge_ids.iter() { + let challenge = get_challenge_by_id(ctx, challenge_id, None) + .await + .unwrap_or_else(|e| panic!("get_challenge_by_id error: {:?}", e)); + let mut state = challenge.state().clone(); + state.block_confirmed = Some(block.details.height); + state.round_submitted = Some(block.details.round); + ctx.update_challenge_state(challenge_id, &state) + .await + .unwrap_or_else(|e| panic!("update_challenge_state error: {:?}", e)); + } +} + +async fn confirm_mempool_algorithms(ctx: &mut T, block: &Block) { + for algorithm_id in block.data().mempool_algorithm_ids.iter() { + let algorithm = get_algorithm_by_id(ctx, algorithm_id, None) + .await + .unwrap_or_else(|e| panic!("get_algorithm_by_id error: {:?}", e)); + let mut state = algorithm.state().clone(); + state.block_confirmed = Some(block.details.height); + state.round_submitted = Some(block.details.round); + ctx.update_algorithm_state(algorithm_id, &state) + .await + .unwrap_or_else(|e| panic!("update_algorithm_state error: {:?}", e)); + } +} + +async fn confirm_mempool_benchmarks(ctx: &mut T, block: &Block) { + let config = block.config(); + + for benchmark_id in block.data().mempool_benchmark_ids.iter() { + let benchmark = get_benchmark_by_id(ctx, benchmark_id, true) + .await + .unwrap_or_else(|e| panic!("get_benchmark_by_id error: {:?}", e)); + + let seed = u32_from_str(format!("{:?}|{:?}", block.id, benchmark_id).as_str()); + let mut rng = StdRng::seed_from_u64(seed as u64); + let solutions_meta_data = benchmark.solutions_meta_data(); + let mut indexes: Vec = (0..solutions_meta_data.len()).collect(); + indexes.shuffle(&mut rng); + + let mut state = benchmark.state().clone(); + state.sampled_nonces = Some( + indexes + .into_iter() + .take(config.benchmark_submissions.max_samples) + .map(|i| solutions_meta_data[i].nonce) + .collect(), + ); + state.block_confirmed = Some(block.details.height); + + ctx.update_benchmark_state(&benchmark_id, &state) + .await + .unwrap_or_else(|e| panic!("update_benchmark_state error: {:?}", e)); + } +} + +async fn confirm_mempool_proofs(ctx: &mut T, block: &Block) { + for benchmark_id in 
block.data().mempool_proof_ids.iter() { + let benchmark = get_benchmark_by_id(ctx, &benchmark_id, false) + .await + .unwrap_or_else(|e| panic!("get_benchmark_by_id error: {:?}", e)); + let proof = get_proof_by_benchmark_id(ctx, &benchmark_id, false) + .await + .unwrap_or_else(|e| panic!("get_proof_by_benchmark_id error: {:?}", e)); + let mut state = proof.state().clone(); + state.block_confirmed = Some(block.details.height); + state.submission_delay = Some(block.details.height - benchmark.details.block_started); + ctx.update_proof_state(benchmark_id, &state) + .await + .unwrap_or_else(|e| panic!("update_proof_state error: {:?}", e)); + } +} + +async fn confirm_mempool_frauds(ctx: &mut T, block: &Block) { + // Future Todo: slash player's rewards from past day + for benchmark_id in block.data().mempool_fraud_ids.iter() { + let fraud = get_fraud_by_id(ctx, &benchmark_id, false) + .await + .unwrap_or_else(|e| panic!("get_fraud_by_id error: {:?}", e)); + let mut state = fraud.state().clone(); + state.block_confirmed = Some(block.details.height); + ctx.update_fraud_state(benchmark_id, &state) + .await + .unwrap_or_else(|e| panic!("update_fraud_state error: {:?}", e)); + } +} + +async fn confirm_mempool_wasms(ctx: &mut T, block: &Block) { + for algorithm_id in block.data().mempool_wasm_ids.iter() { + let wasm = get_wasm_by_id(ctx, &algorithm_id, false) + .await + .unwrap_or_else(|e| panic!("get_benchmarks error: {:?}", e)); + let mut state = wasm.state().clone(); + state.block_confirmed = Some(block.details.height); + ctx.update_wasm_state(algorithm_id, &state) + .await + .unwrap_or_else(|e| panic!("update_wasm_state error: {:?}", e)); + } +} + +async fn update_cutoffs(ctx: &mut T, block: &Block) { + let config = block.config(); + let num_challenges = block.data().active_challenge_ids.len() as f64; + + let mut total_solutions_by_player = HashMap::::new(); + for benchmark_id in block.data().active_benchmark_ids.iter() { + let benchmark = get_benchmark_by_id(ctx, benchmark_id, false) + .await + .unwrap_or_else(|e| panic!("get_benchmark_by_id error: {:?}", e)); + *total_solutions_by_player + .entry(benchmark.settings.player_id.clone()) + .or_default() += benchmark.details.num_solutions as f64; + } + + for (player_id, total_solutions) in total_solutions_by_player.iter() { + let player = &get_player_by_id(ctx, player_id, Some(&block.id)) + .await + .unwrap_or_else(|e| panic!("get_player_by_id error: {:?}", e)); + let mut data = player.block_data().clone(); + + data.cutoff = + Some((total_solutions / num_challenges * config.qualifiers.cutoff_multiplier) as u32); + + ctx.update_player_block_data(player_id, &block.id, &data) + .await + .unwrap_or_else(|e| panic!("update_player_block_data error: {:?}", e)); + } +} + +async fn update_solution_signature_thresholds(ctx: &mut T, block: &Block) { + let config = block.config(); + + let mut num_new_solutions_by_challenge = HashMap::::new(); + for benchmark_id in block.data().mempool_proof_ids.iter() { + let benchmark = get_benchmark_by_id(ctx, benchmark_id, false) + .await + .unwrap_or_else(|e| panic!("get_benchmark_by_id error: {:?}", e)); + *num_new_solutions_by_challenge + .entry(benchmark.settings.challenge_id.clone()) + .or_default() += benchmark.details.num_solutions; + } + + for challenge_id in block.data().active_challenge_ids.iter() { + let num_new_solutions = *num_new_solutions_by_challenge + .get(challenge_id) + .unwrap_or(&0) as f64; + let equilibrium_rate = config.qualifiers.total_qualifiers_threshold as f64 + / 
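// A minimal sketch of the cutoff above: a player's qualifier cap scales with
// their average solutions per active challenge. With 30 solutions across 3
// challenges and a cutoff multiplier of 1.5, the cutoff is 30 / 3 * 1.5 = 15.
// Hypothetical helper and numbers.
fn cutoff(total_solutions: f64, num_challenges: f64, cutoff_multiplier: f64) -> u32 {
    (total_solutions / num_challenges * cutoff_multiplier) as u32
}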
config.benchmark_submissions.lifespan_period as f64; + let percentage_error = 1f64 + - num_new_solutions + / (config.solution_signature.equilibrium_rate_multiplier * equilibrium_rate); + let max_threshold = u32::MAX as f64; + let percent_delta = (percentage_error * config.solution_signature.percent_error_multiplier) + .abs() + .clamp(0f64, config.solution_signature.max_percent_delta) + .mul(if percentage_error < 0f64 { -1f64 } else { 1f64 }); + + let prev_solution_signature_threshold = + match get_challenge_by_id(ctx, challenge_id, Some(&block.details.prev_block_id)) + .await + .unwrap_or_else(|e| panic!("get_challenge_by_id error: {:?}", e)) + .block_data + { + Some(data) => *data.solution_signature_threshold() as f64, + None => max_threshold, + }; + let mut block_data = get_challenge_by_id(ctx, challenge_id, Some(&block.id)) + .await + .unwrap_or_else(|e| panic!("get_challenge_by_id error: {:?}", e)) + .block_data() + .clone(); + block_data.solution_signature_threshold = Some( + (prev_solution_signature_threshold + percent_delta * max_threshold) + .clamp(0f64, max_threshold) as u32, + ); + + ctx.update_challenge_block_data(challenge_id, &block.id, &block_data) + .await + .unwrap_or_else(|e| panic!("update_challenge_block_data error: {:?}", e)); + } +} + +async fn update_qualifiers(ctx: &mut T, block: &Block) { + let config = block.config(); + let BlockData { + active_benchmark_ids, + active_algorithm_ids, + active_challenge_ids, + active_player_ids, + .. + } = block.data(); + + let mut benchmarks_by_challenge = HashMap::>::new(); + for benchmark_id in active_benchmark_ids.iter() { + let benchmark = get_benchmark_by_id(ctx, benchmark_id, false) + .await + .unwrap_or_else(|e| panic!("get_benchmark_by_id error: {:?}", e)); + benchmarks_by_challenge + .entry(benchmark.settings.challenge_id.clone()) + .or_default() + .push(benchmark); + } + + let mut data_by_challenge = HashMap::::new(); + let mut data_by_player = HashMap::::new(); + let mut data_by_algorithm = HashMap::::new(); + let mut max_qualifiers_by_player = HashMap::::new(); + for challenge_id in active_challenge_ids.iter() { + let mut block_data = get_challenge_by_id(ctx, challenge_id, Some(&block.id)) + .await + .unwrap_or_else(|e| panic!("get_challenge_by_id error: {:?}", e)) + .block_data() + .clone(); + block_data.num_qualifiers = Some(0); + block_data.qualifier_difficulties = Some(HashSet::new()); + data_by_challenge.insert(challenge_id.clone(), block_data); + } + for algorithm_id in active_algorithm_ids.iter() { + let mut block_data = get_algorithm_by_id(ctx, algorithm_id, Some(&block.id)) + .await + .unwrap_or_else(|e| panic!("get_algorithm_by_id error: {:?}", e)) + .block_data() + .clone(); + block_data.num_qualifiers_by_player = Some(HashMap::new()); + data_by_algorithm.insert(algorithm_id.clone(), block_data); + } + for player_id in active_player_ids.iter() { + let mut block_data = get_player_by_id(ctx, player_id, Some(&block.id)) + .await + .unwrap_or_else(|e| panic!("get_player_by_id error: {:?}", e)) + .block_data() + .clone(); + max_qualifiers_by_player.insert(player_id.clone(), block_data.cutoff().clone()); + block_data.num_qualifiers_by_challenge = Some(HashMap::new()); + data_by_player.insert(player_id.clone(), block_data); + } + + for challenge_id in active_challenge_ids.iter() { + if !benchmarks_by_challenge.contains_key(challenge_id) { + continue; + } + let benchmarks = benchmarks_by_challenge.get_mut(challenge_id).unwrap(); + let mut points = benchmarks + .iter() + .map(|b| b.settings.difficulty.clone()) + 
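// An f64 sketch of the controller above (hypothetical parameter names): the
// signature threshold moves by a clamped percentage of u32::MAX, nudging the
// observed solution rate toward the configured equilibrium rate.
fn next_threshold(
    prev: f64,
    num_new_solutions: f64,
    target_rate: f64,
    error_multiplier: f64,
    max_percent_delta: f64,
) -> u32 {
    let max_threshold = u32::MAX as f64;
    let percentage_error = 1.0 - num_new_solutions / target_rate;
    let percent_delta = (percentage_error * error_multiplier)
        .abs()
        .clamp(0.0, max_percent_delta)
        * if percentage_error < 0.0 { -1.0 } else { 1.0 };
    (prev + percent_delta * max_threshold).clamp(0.0, max_threshold) as u32
}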
.collect::(); + let mut frontier_indexes = HashMap::::new(); + let mut frontier_index = 0; + while !points.is_empty() { + let frontier = points.pareto_frontier(); + points = points.difference(&frontier).cloned().collect(); + frontier.iter().for_each(|p| { + frontier_indexes.insert(p.clone(), frontier_index); + }); + frontier_index += 1; + } + benchmarks.sort_by(|a, b| { + let a_index = frontier_indexes[&a.settings.difficulty]; + let b_index = frontier_indexes[&b.settings.difficulty]; + a_index.cmp(&b_index) + }); + + let mut max_qualifiers_by_player = max_qualifiers_by_player.clone(); + let mut curr_frontier_index = 0; + let challenge_data = data_by_challenge.get_mut(challenge_id).unwrap(); + for benchmark in benchmarks.iter() { + let BenchmarkSettings { + player_id, + algorithm_id, + difficulty, + .. + } = &benchmark.settings; + + if curr_frontier_index != frontier_indexes[difficulty] + && *challenge_data.num_qualifiers() > config.qualifiers.total_qualifiers_threshold + { + break; + } + curr_frontier_index = frontier_indexes[difficulty]; + let player_data = data_by_player.get_mut(player_id).unwrap(); + let algorithm_data = data_by_algorithm.get_mut(algorithm_id).unwrap(); + + let max_qualifiers = max_qualifiers_by_player.get(player_id).unwrap().clone(); + let num_qualifiers = benchmark.details.num_solutions.min(max_qualifiers); + max_qualifiers_by_player.insert(player_id.clone(), max_qualifiers - num_qualifiers); + + *player_data + .num_qualifiers_by_challenge + .as_mut() + .unwrap() + .entry(challenge_id.clone()) + .or_default() += num_qualifiers; + *algorithm_data + .num_qualifiers_by_player + .as_mut() + .unwrap() + .entry(player_id.clone()) + .or_default() += num_qualifiers; + *challenge_data.num_qualifiers.as_mut().unwrap() += num_qualifiers; + challenge_data + .qualifier_difficulties + .as_mut() + .unwrap() + .insert(difficulty.clone()); + } + } + + for (id, data) in data_by_challenge.iter() { + ctx.update_challenge_block_data(id, &block.id, data) + .await + .unwrap_or_else(|e| panic!("update_challenge_block_data error: {:?}", e)); + } + for (id, data) in data_by_algorithm.iter() { + ctx.update_algorithm_block_data(id, &block.id, data) + .await + .unwrap_or_else(|e| panic!("update_algorithm_block_data error: {:?}", e)); + } + for (id, data) in data_by_player.iter() { + ctx.update_player_block_data(id, &block.id, data) + .await + .unwrap_or_else(|e| panic!("update_player_block_data error: {:?}", e)); + } +} + +async fn update_frontiers(ctx: &mut T, block: &Block) { + let config = block.config(); + + for challenge_id in block.data().active_challenge_ids.iter() { + let challenge = get_challenge_by_id(ctx, challenge_id, Some(&block.id)) + .await + .unwrap_or_else(|e| panic!("get_challenge_by_id error: {:?}", e)); + let mut block_data = challenge.block_data().clone(); + + let min_difficulty = challenge.details.min_difficulty(); + let max_difficulty = challenge.details.max_difficulty(); + + let base_frontier = block_data + .qualifier_difficulties() + .iter() + .map(|d| d.iter().map(|x| -x).collect()) // mirror the points so easiest difficulties are first + .collect::() + .pareto_frontier() + .iter() + .map(|d| d.iter().map(|x| -x).collect()) + .collect::() // mirror the points back; + .extend(&min_difficulty, &max_difficulty); + + let multiplier = (*block_data.num_qualifiers() as f64 + / config.qualifiers.total_qualifiers_threshold as f64) + .clamp(0.0, config.difficulty_bounds.max_multiplier); + let scaled_frontier = base_frontier.scale(&min_difficulty, &max_difficulty, multiplier); + + 
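// A minimal sketch of the frontier stratification used by update_qualifiers
// above, assuming greater difficulty values are harder: layer 0 holds the
// non-dominated (hardest) difficulties, layer 1 the next band, and so on.
// Hypothetical helper over 2-D points.
fn pareto_layers(mut points: Vec<[i32; 2]>) -> Vec<Vec<[i32; 2]>> {
    let mut layers = Vec::new();
    while !points.is_empty() {
        let frontier: Vec<[i32; 2]> = points
            .iter()
            .filter(|p| !points.iter().any(|q| q != *p && q[0] >= p[0] && q[1] >= p[1]))
            .cloned()
            .collect();
        points.retain(|p| !frontier.contains(p));
        layers.push(frontier);
    }
    layers
}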
block_data.base_frontier = Some(base_frontier); + block_data.scaled_frontier = Some(scaled_frontier); + block_data.scaling_factor = Some(multiplier); + + ctx.update_challenge_block_data(challenge_id, &block.id, &block_data) + .await + .unwrap_or_else(|e| panic!("update_challenge_block_data error: {:?}", e)); + } +} + +async fn update_influence(ctx: &mut T, block: &Block) { + let config = block.config(); + let BlockData { + active_challenge_ids, + active_player_ids, + .. + } = block.data(); + + if active_player_ids.len() == 0 { + return; + } + + let mut num_qualifiers_by_challenge = HashMap::::new(); + for challenge_id in active_challenge_ids.iter() { + num_qualifiers_by_challenge.insert( + challenge_id.clone(), + *get_challenge_by_id(ctx, challenge_id, Some(&block.id)) + .await + .unwrap_or_else(|e| panic!("get_challenge_by_id error: {:?}", e)) + .block_data() + .num_qualifiers(), + ); + } + + let mut player_data = HashMap::::new(); + for player_id in active_player_ids.iter() { + player_data.insert( + player_id.clone(), + get_player_by_id(ctx, player_id, Some(&block.id)) + .await + .unwrap_or_else(|e| panic!("get_player_by_id error: {:?}", e)) + .block_data() + .clone(), + ); + } + + let zero = PreciseNumber::from(0); + let one = PreciseNumber::from(1); + let imbalance_multiplier = + PreciseNumber::from_f64(config.multi_factor_proof_of_work.imbalance_multiplier); + let num_challenges = PreciseNumber::from(active_challenge_ids.len()); + + let mut weights = Vec::::new(); + for player_id in active_player_ids.iter() { + let data = player_data.get_mut(player_id).unwrap(); + + let mut percent_qualifiers = Vec::::new(); + for challenge_id in active_challenge_ids.iter() { + let num_qualifiers = num_qualifiers_by_challenge[challenge_id]; + let num_qualifiers_by_player = *data + .num_qualifiers_by_challenge() + .get(challenge_id) + .unwrap_or(&0); + + percent_qualifiers.push(if num_qualifiers_by_player == 0 { + PreciseNumber::from(0) + } else { + PreciseNumber::from(num_qualifiers_by_player) / PreciseNumber::from(num_qualifiers) + }); + } + + let mean = percent_qualifiers.arithmetic_mean(); + let std = percent_qualifiers.standard_deviation(); + let cv = if mean == zero { + zero.clone() + } else { + std / mean + }; + + let imbalance = cv * cv / (num_challenges - one); + let imbalance_penalty = one - one / (one + imbalance_multiplier * imbalance); + + weights.push(mean * (one - imbalance_penalty)); + + data.imbalance = Some(imbalance); + data.imbalance_penalty = Some(imbalance_penalty); + } + + let influences = weights.l1_normalise(); + for (player_id, &influence) in active_player_ids.iter().zip(influences.iter()) { + let data = player_data.get_mut(player_id).unwrap(); + + data.influence = Some(influence); + + ctx.update_player_block_data(player_id, &block.id, &data) + .await + .unwrap_or_else(|e| panic!("update_player_block_data error: {:?}", e)); + } +} + +async fn update_adoption(ctx: &mut T, block: &Block) { + let BlockData { + active_algorithm_ids, + active_challenge_ids, + .. 
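// An f64 sketch of the influence weighting above (the real code uses
// PreciseNumber, and population standard deviation is assumed): cv is the
// coefficient of variation of a player's qualifier share across challenges,
// so balanced players keep more of their mean share.
fn influence_weight(percent_qualifiers: &[f64], imbalance_multiplier: f64) -> f64 {
    let n = percent_qualifiers.len() as f64;
    let mean = percent_qualifiers.iter().sum::<f64>() / n;
    if mean == 0.0 {
        return 0.0;
    }
    let var = percent_qualifiers.iter().map(|x| (x - mean).powi(2)).sum::<f64>() / n;
    let cv = var.sqrt() / mean;
    let imbalance = cv * cv / (n - 1.0);
    let penalty = 1.0 - 1.0 / (1.0 + imbalance_multiplier * imbalance);
    mean * (1.0 - penalty)
}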
+ } = block.data(); + + let mut algorithms_by_challenge = HashMap::>::new(); + for algorithm_id in active_algorithm_ids.iter() { + algorithms_by_challenge + .entry( + get_algorithm_by_id(ctx, algorithm_id, Some(&block.id)) + .await + .unwrap_or_else(|e| panic!("get_algorithm_by_id error: {:?}", e)) + .details + .challenge_id + .clone(), + ) + .or_default() + .push(algorithm_id); + } + + for challenge_id in active_challenge_ids.iter() { + if !algorithms_by_challenge.contains_key(challenge_id) { + continue; + } + + let mut algorithm_data = HashMap::<&String, AlgorithmBlockData>::new(); + for algorithm_id in algorithms_by_challenge[challenge_id].iter() { + algorithm_data.insert( + algorithm_id, + get_algorithm_by_id(ctx, algorithm_id, Some(&block.id)) + .await + .unwrap_or_else(|e| panic!("get_algorithm_by_id error: {:?}", e)) + .block_data() + .clone(), + ); + } + + let mut weights = Vec::::new(); + for (_, data) in algorithm_data.iter() { + let mut weight = PreciseNumber::from(0); + for (player_id, &num_qualifiers) in data.num_qualifiers_by_player().iter() { + let num_qualifiers = PreciseNumber::from(num_qualifiers); + let influence = *get_player_by_id(ctx, player_id, Some(&block.id)) + .await + .unwrap_or_else(|e| panic!("get_player_by_id error: {:?}", e)) + .block_data() + .influence(); + + weight += influence * num_qualifiers; + } + weights.push(weight); + } + + let adoption = weights.l1_normalise(); + for ((algorithm_id, data), adoption) in algorithm_data.iter_mut().zip(adoption) { + data.adoption = Some(adoption); + ctx.update_algorithm_block_data(algorithm_id, &block.id, data) + .await + .unwrap_or_else(|e| panic!("update_algorithm_block_data error: {:?}", e)); + } + } +} + +async fn update_innovator_rewards(ctx: &mut T, block: &Block) { + let config = block.config(); + + let adoption_threshold = + PreciseNumber::from_f64(config.algorithm_submissions.adoption_threshold); + let zero = PreciseNumber::from(0); + let mut eligible_algorithms_by_challenge = HashMap::>::new(); + for algorithm_id in block.data().active_algorithm_ids.iter() { + let algorithm = get_algorithm_by_id(ctx, algorithm_id, Some(&block.id)) + .await + .unwrap_or_else(|e| panic!("get_algorithm_by_id error: {:?}", e)); + let mut data = algorithm.block_data().clone(); + + if *data.adoption() >= adoption_threshold + || (algorithm.state().round_merged.is_some() && *data.adoption() > zero) + { + eligible_algorithms_by_challenge + .entry(algorithm.details.challenge_id.clone()) + .or_default() + .push(algorithm); + } + + data.reward = Some(zero.clone()); + ctx.update_algorithm_block_data(algorithm_id, &block.id, &data) + .await + .unwrap_or_else(|e| panic!("update_algorithm_block_data error: {:?}", e)); + } + if eligible_algorithms_by_challenge.len() == 0 { + return; + } + + let reward_pool_per_challenge = PreciseNumber::from_f64(get_block_reward(block)) + * PreciseNumber::from_f64(config.rewards.distribution.optimisations) + / PreciseNumber::from(eligible_algorithms_by_challenge.len()); + + let zero = PreciseNumber::from(0); + for (_, algorithms) in eligible_algorithms_by_challenge.iter() { + let mut total_adoption = zero.clone(); + for algorithm in algorithms.iter() { + total_adoption += *algorithm.block_data().adoption(); + } + + for algorithm in algorithms.iter() { + let mut data = algorithm.block_data().clone(); + let adoption = *data.adoption(); + + data.reward = Some(reward_pool_per_challenge * adoption / total_adoption); + + ctx.update_algorithm_block_data(&algorithm.id, &block.id, &data) + .await + .unwrap_or_else(|e| 
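// An f64 sketch of the L1 normalisation used for adoption above: an
// algorithm's weight is the influence-weighted count of its qualifiers, and
// adoptions across a challenge sum to 1. Hypothetical helper.
fn l1_normalise(weights: &[f64]) -> Vec<f64> {
    let total: f64 = weights.iter().sum();
    if total == 0.0 {
        vec![0.0; weights.len()]
    } else {
        weights.iter().map(|w| w / total).collect()
    }
}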
panic!("update_algorithm_block_data error: {:?}", e)); + } + } +} + +async fn update_benchmarker_rewards(ctx: &mut T, block: &Block) { + let config = block.config(); + + let reward_pool = PreciseNumber::from_f64(get_block_reward(block)) + * PreciseNumber::from_f64(config.rewards.distribution.benchmarkers); + + for player_id in block.data().active_player_ids.iter() { + let mut data = get_player_by_id(ctx, player_id, Some(&block.id)) + .await + .unwrap_or_else(|e| panic!("get_player_by_id error: {:?}", e)) + .block_data() + .clone(); + let influence = *data.influence(); + + data.reward = Some(influence * reward_pool); + + ctx.update_player_block_data(player_id, &block.id, &data) + .await + .unwrap_or_else(|e| panic!("update_player_block_data error: {:?}", e)); + } +} + +async fn update_merge_points(ctx: &mut T, block: &Block) { + let config = block.config(); + + let adoption_threshold = + PreciseNumber::from_f64(config.algorithm_submissions.adoption_threshold); + for algorithm_id in block.data().active_algorithm_ids.iter() { + let algorithm = get_algorithm_by_id(ctx, algorithm_id, Some(&block.id)) + .await + .unwrap_or_else(|e| panic!("get_algorithm_by_id error: {:?}", e)); + let mut data = algorithm.block_data().clone(); + + let prev_merge_points = + match get_algorithm_by_id(ctx, algorithm_id, Some(&block.details.prev_block_id)) + .await + .unwrap_or_else(|e| panic!("update_merge_points error: {:?}", e)) + .block_data + { + Some(data) => *data.merge_points(), + None => 0, + }; + data.merge_points = Some( + if algorithm.state().round_merged.is_some() || *data.adoption() < adoption_threshold { + prev_merge_points + } else { + prev_merge_points + 1 + }, + ); + ctx.update_algorithm_block_data(algorithm_id, &block.id, &data) + .await + .unwrap_or_else(|e| panic!("update_algorithm_block_data error: {:?}", e)); + } +} + +async fn update_merges(ctx: &mut T, block: &Block) { + let config = block.config(); + + if block.details.height % config.rounds.blocks_per_round != 0 { + return; + } + + let mut merge_algorithm_by_challenge = HashMap::::new(); + for algorithm_id in block.data().active_algorithm_ids.iter() { + let algorithm = get_algorithm_by_id(ctx, algorithm_id, Some(&block.id)) + .await + .unwrap_or_else(|e| panic!("get_algorithm_by_id error: {:?}", e)); + let challenge_id = algorithm.details.challenge_id.clone(); + let data = algorithm.block_data(); + + if algorithm.state().round_merged.is_some() + || *data.merge_points() < config.algorithm_submissions.merge_points_threshold + { + continue; + } + if !merge_algorithm_by_challenge.contains_key(&challenge_id) + || merge_algorithm_by_challenge[&challenge_id] + .block_data() + .merge_points + < data.merge_points + { + merge_algorithm_by_challenge.insert(challenge_id, algorithm); + } + } + + let round_merged = block.details.round + 1; + for (_, algorithm) in merge_algorithm_by_challenge.iter() { + let mut state = algorithm.state().clone(); + + state.round_merged = Some(round_merged); + + ctx.update_algorithm_state(&algorithm.id, &state) + .await + .unwrap_or_else(|e| panic!("update_algorithm_state error: {:?}", e)); + } +} + +fn get_block_reward(block: &Block) -> f64 { + let config = block.config(); + + config + .rewards + .schedule + .iter() + .filter(|s| s.round_start <= block.details.round) + .last() + .unwrap_or_else(|| { + panic!( + "get_block_reward error: Expecting a reward schedule for round {}", + block.details.round + ) + }) + .block_reward +} + +async fn get_player_by_id( + ctx: &mut T, + player_id: &String, + block_id: Option<&String>, 
+) -> anyhow::Result { + Ok(ctx + .get_players( + PlayersFilter::Id(player_id.clone()), + match block_id { + Some(block_id) => Some(BlockFilter::Id(block_id.clone())), + None => None, + }, + ) + .await? + .first() + .map(|x| x.to_owned()) + .expect(format!("Expecting player {} to exist", player_id).as_str())) +} + +async fn get_proof_by_benchmark_id( + ctx: &mut T, + benchmark_id: &String, + include_data: bool, +) -> anyhow::Result { + Ok(ctx + .get_proofs( + ProofsFilter::BenchmarkId(benchmark_id.clone()), + include_data, + ) + .await + .unwrap_or_else(|e| panic!("get_proofs error: {:?}", e)) + .first() + .map(|x| x.to_owned()) + .expect(format!("Expecting proof for benchmark {} to exist", benchmark_id).as_str())) +} + +async fn get_benchmark_by_id( + ctx: &mut T, + benchmark_id: &String, + include_data: bool, +) -> anyhow::Result { + Ok(ctx + .get_benchmarks(BenchmarksFilter::Id(benchmark_id.clone()), include_data) + .await? + .first() + .map(|x| x.to_owned()) + .expect(format!("Expecting benchmark {} to exist", benchmark_id).as_str())) +} + +async fn get_fraud_by_id( + ctx: &mut T, + benchmark_id: &String, + include_data: bool, +) -> anyhow::Result { + Ok(ctx + .get_frauds( + FraudsFilter::BenchmarkId(benchmark_id.clone()), + include_data, + ) + .await? + .first() + .map(|x| x.to_owned()) + .expect(format!("Expecting fraud {} to exist", benchmark_id).as_str())) +} + +async fn get_wasm_by_id( + ctx: &mut T, + algorithm_id: &String, + include_data: bool, +) -> anyhow::Result { + Ok(ctx + .get_wasms(WasmsFilter::AlgorithmId(algorithm_id.clone()), include_data) + .await? + .first() + .map(|x| x.to_owned()) + .expect(format!("Expecting wasm {} to exist", algorithm_id).as_str())) +} + +async fn get_algorithm_by_id( + ctx: &mut T, + algorithm_id: &String, + block_id: Option<&String>, +) -> anyhow::Result { + Ok(ctx + .get_algorithms( + AlgorithmsFilter::Id(algorithm_id.clone()), + match block_id { + Some(block_id) => Some(BlockFilter::Id(block_id.clone())), + None => None, + }, + false, + ) + .await? + .first() + .map(|x| x.to_owned()) + .expect(format!("Expecting algorithm {} to exist", algorithm_id).as_str())) +} + +async fn get_challenge_by_id( + ctx: &mut T, + challenge_id: &String, + block_id: Option<&String>, +) -> anyhow::Result { + Ok(ctx + .get_challenges( + ChallengesFilter::Id(challenge_id.clone()), + match block_id { + Some(block_id) => Some(BlockFilter::Id(block_id.clone())), + None => None, + }, + ) + .await? 
+ .first() + .map(|x| x.to_owned()) + .expect(format!("Expecting challenge {} to exist", challenge_id).as_str())) +} diff --git a/tig-protocol/src/context.rs b/tig-protocol/src/context.rs new file mode 100644 index 00000000..6a93cf2b --- /dev/null +++ b/tig-protocol/src/context.rs @@ -0,0 +1,210 @@ +pub use anyhow::{Error as ContextError, Result as ContextResult}; +use tig_structs::{config::*, core::*}; + +pub enum SubmissionType { + Algorithm, + Benchmark, + Proof, +} + +#[derive(Debug, Clone, PartialEq)] +pub enum AlgorithmsFilter { + Id(String), + Name(String), + TxHash(String), + Mempool, + Confirmed, +} +#[derive(Debug, Clone, PartialEq)] +pub enum BenchmarksFilter { + Id(String), + Settings(BenchmarkSettings), + Mempool { from_block_started: u32 }, + Confirmed { from_block_started: u32 }, +} +#[derive(Debug, Clone, PartialEq)] +pub enum BlockFilter { + Latest, + Height(u32), + Id(String), + Round(u32), +} +#[derive(Debug, Clone, PartialEq)] +pub enum ChallengesFilter { + Id(String), + Name(String), + Mempool, + Confirmed, +} +#[derive(Debug, Clone, PartialEq)] +pub enum FraudsFilter { + BenchmarkId(String), + Mempool { from_block_started: u32 }, + Confirmed { from_block_started: u32 }, +} +#[derive(Debug, Clone, PartialEq)] +pub enum PlayersFilter { + Id(String), + Name(String), + Benchmarkers, + Innovators, +} +#[derive(Debug, Clone, PartialEq)] +pub enum ProofsFilter { + BenchmarkId(String), + Mempool { from_block_started: u32 }, + Confirmed { from_block_started: u32 }, +} +#[derive(Debug, Clone, PartialEq)] +pub enum WasmsFilter { + AlgorithmId(String), + Mempool, + Confirmed, +} +#[allow(async_fn_in_trait)] +pub trait Context { + async fn get_algorithms( + &mut self, + filter: AlgorithmsFilter, + block_data: Option, + include_data: bool, + ) -> ContextResult>; + async fn get_benchmarks( + &mut self, + filter: BenchmarksFilter, + include_data: bool, + ) -> ContextResult>; + async fn get_block( + &mut self, + filter: BlockFilter, + include_data: bool, + ) -> ContextResult>; + async fn get_challenges( + &mut self, + filter: ChallengesFilter, + block_data: Option, + ) -> ContextResult>; + async fn get_config(&mut self) -> ContextResult; + async fn get_frauds( + &mut self, + filter: FraudsFilter, + include_data: bool, + ) -> ContextResult>; + async fn get_players( + &mut self, + filter: PlayersFilter, + block_data: Option, + ) -> ContextResult>; + async fn get_proofs( + &mut self, + filter: ProofsFilter, + include_data: bool, + ) -> ContextResult>; + async fn get_wasms( + &mut self, + filter: WasmsFilter, + include_data: bool, + ) -> ContextResult>; + async fn verify_solution( + &mut self, + settings: &BenchmarkSettings, + nonce: u32, + solution: &Solution, + ) -> ContextResult>; + async fn compute_solution( + &mut self, + settings: &BenchmarkSettings, + nonce: u32, + wasm_vm_config: &WasmVMConfig, + ) -> ContextResult>; + async fn get_transaction(&mut self, tx_hash: &String) -> ContextResult; + + // Mempool + async fn add_block( + &mut self, + details: &BlockDetails, + data: &BlockData, + config: &ProtocolConfig, + ) -> ContextResult; + async fn add_challenge_to_mempool( + &mut self, + details: &ChallengeDetails, + ) -> ContextResult; + async fn add_algorithm_to_mempool( + &mut self, + details: &AlgorithmDetails, + code: &String, + ) -> ContextResult; + async fn add_benchmark_to_mempool( + &mut self, + settings: &BenchmarkSettings, + details: &BenchmarkDetails, + solutions_metadata: &Vec, + ) -> ContextResult; + async fn add_proof_to_mempool( + &mut self, + benchmark_id: &String, + 
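// A minimal usage sketch, not part of the patch: all of tig-protocol is
// generic over the Context trait above, so a node backend only implements
// these getters, mempool writers and state updaters. A toy read:
async fn count_active_challenges<T: Context>(ctx: &mut T) -> ContextResult<usize> {
    let block = ctx
        .get_block(BlockFilter::Latest, false)
        .await?
        .expect("no blocks yet");
    let challenges = ctx
        .get_challenges(ChallengesFilter::Confirmed, Some(BlockFilter::Id(block.id)))
        .await?;
    Ok(challenges.len())
}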
solutions_data: &Vec, + ) -> ContextResult<()>; + async fn add_fraud_to_mempool( + &mut self, + benchmark_id: &String, + allegation: &String, + ) -> ContextResult<()>; + async fn add_wasm_to_mempool( + &mut self, + algorithm_id: &String, + details: &WasmDetails, + wasm_blob: &Option>, + ) -> ContextResult<()>; + + // Updates + async fn update_challenge_state( + &mut self, + challenge_id: &String, + state: &ChallengeState, + ) -> ContextResult<()>; + async fn update_challenge_block_data( + &mut self, + challenge_id: &String, + block_id: &String, + block_data: &ChallengeBlockData, + ) -> ContextResult<()>; + async fn update_algorithm_state( + &mut self, + algorithm_id: &String, + state: &AlgorithmState, + ) -> ContextResult<()>; + async fn update_algorithm_block_data( + &mut self, + algorithm_id: &String, + block_id: &String, + block_data: &AlgorithmBlockData, + ) -> ContextResult<()>; + async fn update_benchmark_state( + &mut self, + benchmark_id: &String, + state: &BenchmarkState, + ) -> ContextResult<()>; + async fn update_proof_state( + &mut self, + benchmark_id: &String, + state: &ProofState, + ) -> ContextResult<()>; + async fn update_fraud_state( + &mut self, + benchmark_id: &String, + state: &FraudState, + ) -> ContextResult<()>; + async fn update_player_block_data( + &mut self, + player_id: &String, + block_id: &String, + block_data: &PlayerBlockData, + ) -> ContextResult<()>; + async fn update_wasm_state( + &mut self, + algorithm_id: &String, + state: &WasmState, + ) -> ContextResult<()>; +} diff --git a/tig-protocol/src/error.rs b/tig-protocol/src/error.rs new file mode 100644 index 00000000..41849958 --- /dev/null +++ b/tig-protocol/src/error.rs @@ -0,0 +1,252 @@ +use tig_structs::core::{BenchmarkSettings, DifficultyParameter}; + +#[derive(Debug, PartialEq)] +pub enum ProtocolError { + DifficultyAboveHardestFrontier { + difficulty: Vec, + }, + DifficultyBelowEasiestFrontier { + difficulty: Vec, + }, + DuplicateAlgorithmName { + algorithm_name: String, + }, + DuplicateBenchmarkSettings { + settings: BenchmarkSettings, + }, + DuplicateNonce { + nonce: u32, + }, + DuplicateProof { + benchmark_id: String, + }, + DuplicateSubmissionFeeTx { + tx_hash: String, + }, + FlaggedAsFraud { + benchmark_id: String, + }, + InsufficientLifespan, + InsufficientSolutions { + min_num_solutions: usize, + num_solutions: usize, + }, + InvalidAlgorithm { + algorithm_id: String, + }, + InvalidBenchmark { + benchmark_id: String, + }, + InvalidBenchmarkNonce { + nonce: u32, + }, + InvalidBlock { + block_id: String, + }, + InvalidChallenge { + challenge_id: String, + }, + InvalidDifficulty { + difficulty: Vec, + difficulty_parameters: Vec, + }, + InvalidProofNonces { + expected_nonces: Vec, + submitted_nonces: Vec, + }, + InvalidSignatureFromSolutionData { + actual_signature: u32, + nonce: u32, + expected_signature: u32, + }, + InvalidSolution { + nonce: u32, + }, + InvalidSolutionData { + algorithm_id: String, + nonce: u32, + }, + InvalidSolutionSignature { + nonce: u32, + solution_signature: u32, + threshold: u32, + }, + InvalidSubmittingPlayer { + expected_player_id: String, + actual_player_id: String, + }, + InvalidSubmissionFeeAmount { + expected_amount: String, + actual_amount: String, + tx_hash: String, + }, + InvalidSubmissionFeeReceiver { + tx_hash: String, + expected_receiver: String, + actual_receiver: String, + }, + InvalidSubmissionFeeSender { + tx_hash: String, + expected_sender: String, + actual_sender: String, + }, + InvalidTransaction { + tx_hash: String, + }, +} + +impl std::fmt::Display 
for ProtocolError {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            ProtocolError::DifficultyAboveHardestFrontier {
+                difficulty,
+            } => write!(
+                f,
+                "Difficulty '{:?}' is above the hardest allowed frontier",
+                difficulty
+            ),
+            ProtocolError::DifficultyBelowEasiestFrontier {
+                difficulty,
+            } => write!(
+                f,
+                "Difficulty '{:?}' is below the easiest allowed frontier",
+                difficulty
+            ),
+            ProtocolError::DuplicateAlgorithmName { algorithm_name } => {
+                write!(f, "Algorithm name '{}' is already used", algorithm_name)
+            }
+            ProtocolError::DuplicateBenchmarkSettings { settings } => {
+                write!(f, "A benchmark with settings '{:?}' has been submitted before.", settings)
+            }
+            ProtocolError::DuplicateNonce { nonce } => write!(
+                f,
+                "Nonce '{}' is submitted more than once",
+                nonce
+            ),
+            ProtocolError::DuplicateProof { benchmark_id } => {
+                write!(f, "Proof already submitted for benchmark '{}'", benchmark_id)
+            }
+            ProtocolError::DuplicateSubmissionFeeTx { tx_hash } => write!(
+                f,
+                "Transaction '{}' is already used",
+                tx_hash
+            ),
+            ProtocolError::FlaggedAsFraud { benchmark_id } => {
+                write!(f, "Benchmark '{}' is flagged as fraud", benchmark_id)
+            }
+            ProtocolError::InsufficientLifespan => {
+                write!(f, "Benchmark will have no lifespan remaining after submission delay penalty is applied.")
+            }
+            ProtocolError::InsufficientSolutions {
+                num_solutions,
+                min_num_solutions,
+            } => write!(
+                f,
+                "Insufficient number of solutions. Expected: '{}', Actual: '{}'",
+                min_num_solutions, num_solutions
+            ),
+            ProtocolError::InvalidAlgorithm { algorithm_id } => {
+                write!(f, "Algorithm '{}' does not exist or is not yet active", algorithm_id)
+            }
+            ProtocolError::InvalidBenchmark { benchmark_id } => {
+                write!(f, "Benchmark '{}' does not exist", benchmark_id)
+            }
+            ProtocolError::InvalidBenchmarkNonce { nonce } => {
+                write!(f, "Benchmark nonce '{}' is invalid. Must exist in solutions_meta_data", nonce)
+            }
+            ProtocolError::InvalidBlock { block_id } => {
+                write!(f, "Block '{}' does not exist", block_id)
+            }
+            ProtocolError::InvalidChallenge { challenge_id } => {
+                write!(f, "Challenge '{}' either does not exist or is not yet active", challenge_id)
+            }
+            ProtocolError::InvalidDifficulty {
+                difficulty,
+                difficulty_parameters,
+            } => write!(
+                f,
+                "Difficulty '{:?}' is invalid. Must match difficulty parameters '{:?}'",
+                difficulty, difficulty_parameters
+            ),
+            ProtocolError::InvalidProofNonces {
+                submitted_nonces,
+                expected_nonces: sampled_nonces,
+            } => write!(
+                f,
+                "Submitted nonces are invalid. Expected: '{:?}', Submitted: '{:?}'",
+                sampled_nonces, submitted_nonces
+            ),
+            ProtocolError::InvalidSignatureFromSolutionData {
+                nonce,
+                expected_signature,
+                actual_signature,
+            } => write!(
+                f,
+                "Solution data for nonce '{}' produces invalid solution signature. Expected: '{}', Actual: '{}'",
+                nonce, expected_signature, actual_signature
+            ),
+            ProtocolError::InvalidSolution { nonce } => {
+                write!(f, "Solution for nonce '{}' is invalid", nonce)
+            }
+            ProtocolError::InvalidSolutionData {
+                algorithm_id,
+                nonce,
+            } => write!(
+                f,
+                "The solution data for nonce '{}' is invalid. Does not match the solution data re-computed using algorithm '{}'.",
+                nonce, algorithm_id
+            ),
+            ProtocolError::InvalidSolutionSignature {
+                nonce,
+                solution_signature,
+                threshold,
+            } => write!(
+                f,
+                "Solution signature '{}' for nonce '{}' is invalid.
Must be less than or equal to threshold '{}'", + solution_signature, nonce, threshold + ), + ProtocolError::InvalidSubmittingPlayer { + expected_player_id, + actual_player_id, + } => write!( + f, + "Submission made by the invalid player. Expected: '{}', Actual: '{}'", + expected_player_id, actual_player_id + ), + ProtocolError::InvalidSubmissionFeeAmount { + expected_amount, + actual_amount, + tx_hash, + } => write!( + f, + "Transaction '{}' paid an invalid amount of submission fee. Expected: '{}', Actual: '{}'", + tx_hash, expected_amount, actual_amount + ), + ProtocolError::InvalidSubmissionFeeReceiver { tx_hash, expected_receiver, actual_receiver } => write!( + f, + "Transaction '{}' has invalid receiver. Expected: '{}', Actual: '{}'", + tx_hash, expected_receiver, actual_receiver + ), + ProtocolError::InvalidSubmissionFeeSender { + tx_hash, + expected_sender, + actual_sender, + } => write!( + f, + "Transaction '{}' has invalid sender. Expected: '{}', Actual: '{}'", + tx_hash, expected_sender, actual_sender + ), + ProtocolError::InvalidTransaction { + tx_hash, + } => write!( + f, + "Transaction '{}' is invalid", + tx_hash + ), + } + } +} + +impl std::error::Error for ProtocolError {} + +pub type ProtocolResult = std::result::Result; diff --git a/tig-protocol/src/lib.rs b/tig-protocol/src/lib.rs new file mode 100644 index 00000000..82887846 --- /dev/null +++ b/tig-protocol/src/lib.rs @@ -0,0 +1,66 @@ +mod add_block; +pub mod context; +mod error; +mod submit_algorithm; +mod submit_benchmark; +mod submit_proof; +mod verify_proof; +use context::*; +pub use error::*; +use tig_structs::core::*; + +pub struct Protocol { + pub ctx: T, +} + +impl<'a, T: Context> Protocol { + pub fn new(ctx: T) -> Self { + Self { ctx } + } + + pub async fn submit_algorithm( + &mut self, + player: &Player, + details: &AlgorithmDetails, + code: &String, + ) -> ProtocolResult { + submit_algorithm::execute(&mut self.ctx, player, details, code).await + } + + pub async fn submit_benchmark( + &mut self, + player: &Player, + settings: &BenchmarkSettings, + solutions_meta_data: &Vec, + solution_data: &SolutionData, + ) -> ProtocolResult<(String, Result<(), String>)> { + submit_benchmark::execute( + &mut self.ctx, + player, + settings, + solutions_meta_data, + solution_data, + ) + .await + } + + pub async fn submit_proof( + &mut self, + player: &Player, + benchmark_id: &String, + solutions_data: &Vec, + ) -> ProtocolResult> { + submit_proof::execute(&mut self.ctx, player, benchmark_id, solutions_data).await + } + + pub async fn verify_proof( + &mut self, + benchmark_id: &String, + ) -> ProtocolResult> { + verify_proof::execute(&mut self.ctx, benchmark_id).await + } + + pub async fn add_block(&mut self) -> String { + add_block::execute(&mut self.ctx).await + } +} diff --git a/tig-protocol/src/submit_algorithm.rs b/tig-protocol/src/submit_algorithm.rs new file mode 100644 index 00000000..33c97cd3 --- /dev/null +++ b/tig-protocol/src/submit_algorithm.rs @@ -0,0 +1,95 @@ +use crate::{context::*, error::*}; +use tig_structs::core::*; +use tig_utils::*; + +pub(crate) async fn execute( + ctx: &mut T, + player: &Player, + details: &AlgorithmDetails, + code: &String, +) -> ProtocolResult { + verify_algorithm_name_is_unique(ctx, details).await?; + verify_submission_fee(ctx, player, details).await?; + let algorithm_id = ctx + .add_algorithm_to_mempool(details, code) + .await + .unwrap_or_else(|e| panic!("add_algorithm_to_mempool error: {:?}", e)); + Ok(algorithm_id) +} + +async fn verify_algorithm_name_is_unique( + ctx: &mut T, + 
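// A hypothetical usage sketch of the Protocol facade defined in
// tig-protocol/src/lib.rs above; any Context implementation can be wrapped.
async fn submit_example<C: Context>(
    ctx: C,
    player: Player,
    details: AlgorithmDetails,
    code: String,
) -> ProtocolResult<String> {
    let mut protocol = Protocol::new(ctx);
    protocol.submit_algorithm(&player, &details, &code).await
}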
diff --git a/tig-protocol/src/submit_algorithm.rs b/tig-protocol/src/submit_algorithm.rs
new file mode 100644
index 00000000..33c97cd3
--- /dev/null
+++ b/tig-protocol/src/submit_algorithm.rs
@@ -0,0 +1,95 @@
+use crate::{context::*, error::*};
+use tig_structs::core::*;
+use tig_utils::*;
+
+pub(crate) async fn execute<T: Context>(
+    ctx: &mut T,
+    player: &Player,
+    details: &AlgorithmDetails,
+    code: &String,
+) -> ProtocolResult<String> {
+    verify_algorithm_name_is_unique(ctx, details).await?;
+    verify_submission_fee(ctx, player, details).await?;
+    let algorithm_id = ctx
+        .add_algorithm_to_mempool(details, code)
+        .await
+        .unwrap_or_else(|e| panic!("add_algorithm_to_mempool error: {:?}", e));
+    Ok(algorithm_id)
+}
+
+async fn verify_algorithm_name_is_unique<T: Context>(
+    ctx: &mut T,
+    details: &AlgorithmDetails,
+) -> ProtocolResult<()> {
+    if ctx
+        .get_algorithms(AlgorithmsFilter::Name(details.name.clone()), None, false)
+        .await
+        .unwrap_or_else(|e| panic!("get_algorithms error: {:?}", e))
+        .first()
+        .is_some()
+    {
+        return Err(ProtocolError::DuplicateAlgorithmName {
+            algorithm_name: details.name.clone(),
+        });
+    }
+    Ok(())
+}
+
+async fn verify_submission_fee<T: Context>(
+    ctx: &mut T,
+    player: &Player,
+    details: &AlgorithmDetails,
+) -> ProtocolResult<()> {
+    let block = ctx
+        .get_block(BlockFilter::Latest, false)
+        .await
+        .unwrap_or_else(|e| panic!("get_block error: {:?}", e))
+        .expect("No latest block found");
+
+    if ctx
+        .get_algorithms(
+            AlgorithmsFilter::TxHash(details.tx_hash.clone()),
+            None,
+            false,
+        )
+        .await
+        .unwrap_or_else(|e| panic!("get_algorithms error: {:?}", e))
+        .first()
+        .is_some()
+    {
+        return Err(ProtocolError::DuplicateSubmissionFeeTx {
+            tx_hash: details.tx_hash.clone(),
+        });
+    }
+
+    let transaction = ctx.get_transaction(&details.tx_hash).await.map_err(|_| {
+        ProtocolError::InvalidTransaction {
+            tx_hash: details.tx_hash.clone(),
+        }
+    })?;
+    if transaction.sender != player.id {
+        return Err(ProtocolError::InvalidSubmissionFeeSender {
+            tx_hash: details.tx_hash.clone(),
+            expected_sender: player.id.clone(),
+            actual_sender: transaction.sender.clone(),
+        });
+    }
+    let burn_address = block.config().erc20.burn_address.clone();
+    if transaction.receiver != burn_address {
+        return Err(ProtocolError::InvalidSubmissionFeeReceiver {
+            tx_hash: details.tx_hash.clone(),
+            expected_receiver: burn_address,
+            actual_receiver: transaction.receiver.clone(),
+        });
+    }
+
+    let expected_amount = block.config().algorithm_submissions.submission_fee;
+    if transaction.amount != expected_amount {
+        return Err(ProtocolError::InvalidSubmissionFeeAmount {
+            tx_hash: details.tx_hash.clone(),
+            expected_amount: jsonify(&expected_amount),
+            actual_amount: jsonify(&transaction.amount),
+        });
+    }
+    Ok(())
+}
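`verify_submission_fee` rejects a submission unless the referenced transaction is unused, was sent by the submitting player, pays the configured burn address, and pays exactly the configured fee. The same checks in a standalone sketch with plain types (names here are illustrative, not part of the crate):

    // Illustrative distillation of the fee checks, using plain strings and u128.
    fn fee_tx_is_valid(
        sender: &str, receiver: &str, amount: u128,     // from the transaction
        player_id: &str, burn_address: &str, fee: u128, // from the player and block config
    ) -> bool {
        sender == player_id && receiver == burn_address && amount == fee
    }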
diff --git a/tig-protocol/src/submit_benchmark.rs b/tig-protocol/src/submit_benchmark.rs
new file mode 100644
index 00000000..2e4d6505
--- /dev/null
+++ b/tig-protocol/src/submit_benchmark.rs
@@ -0,0 +1,284 @@
+use crate::{context::*, error::*};
+use std::collections::HashMap;
+use tig_structs::core::*;
+use tig_utils::*;
+
+pub(crate) async fn execute<T: Context>(
+    ctx: &mut T,
+    player: &Player,
+    settings: &BenchmarkSettings,
+    solutions_meta_data: &Vec<SolutionMetaData>,
+    solution_data: &SolutionData,
+) -> ProtocolResult<(String, Result<(), String>)> {
+    verify_player_owns_benchmark(player, settings)?;
+    let block = get_block_by_id(ctx, &settings.block_id).await?;
+    verify_sufficient_lifespan(ctx, &block).await?;
+    let challenge = get_challenge_by_id(ctx, &settings.challenge_id, &block).await?;
+    verify_algorithm(ctx, &settings.algorithm_id, &block).await?;
+    verify_sufficient_solutions(&block, solutions_meta_data)?;
+    verify_benchmark_settings_are_unique(ctx, settings).await?;
+    verify_nonces_are_unique(solutions_meta_data)?;
+    verify_solutions_signatures(solutions_meta_data, &challenge)?;
+    verify_benchmark_difficulty(&settings.difficulty, &challenge)?;
+    let benchmark_id = ctx
+        .add_benchmark_to_mempool(
+            &settings,
+            &BenchmarkDetails {
+                block_started: block.details.height,
+                num_solutions: solutions_meta_data.len() as u32,
+            },
+            solutions_meta_data,
+        )
+        .await
+        .unwrap_or_else(|e| panic!("add_benchmark_to_mempool error: {:?}", e));
+    let mut verified = Ok(());
+    if let Err(e) =
+        verify_solution_is_valid(ctx, settings, solutions_meta_data, solution_data).await
+    {
+        ctx.add_fraud_to_mempool(&benchmark_id, &e.to_string())
+            .await
+            .unwrap_or_else(|e| panic!("add_fraud_to_mempool error: {:?}", e));
+        verified = Err(e.to_string());
+    }
+    Ok((benchmark_id, verified))
+}
+
+fn verify_player_owns_benchmark(
+    player: &Player,
+    settings: &BenchmarkSettings,
+) -> ProtocolResult<()> {
+    if player.id != settings.player_id {
+        return Err(ProtocolError::InvalidSubmittingPlayer {
+            actual_player_id: player.id.clone(),
+            expected_player_id: settings.player_id.clone(),
+        });
+    }
+    Ok(())
+}
+
+async fn verify_sufficient_lifespan<T: Context>(ctx: &mut T, block: &Block) -> ProtocolResult<()> {
+    let latest_block = ctx
+        .get_block(BlockFilter::Latest, false)
+        .await
+        .unwrap_or_else(|e| panic!("get_block error: {:?}", e))
+        .expect("Expecting latest block to exist");
+    let config = block.config();
+    let submission_delay = latest_block.details.height - block.details.height + 1;
+    if submission_delay * (config.benchmark_submissions.submission_delay_multiplier + 1)
+        >= config.benchmark_submissions.lifespan_period
+    {
+        return Err(ProtocolError::InsufficientLifespan);
+    }
+    Ok(())
+}
+
+async fn get_challenge_by_id<T: Context>(
+    ctx: &mut T,
+    challenge_id: &String,
+    block: &Block,
+) -> ProtocolResult<Challenge> {
+    if !block.data().active_challenge_ids.contains(challenge_id) {
+        return Err(ProtocolError::InvalidChallenge {
+            challenge_id: challenge_id.clone(),
+        });
+    }
+    let challenge = ctx
+        .get_challenges(
+            ChallengesFilter::Id(challenge_id.clone()),
+            Some(BlockFilter::Id(block.id.clone())),
+        )
+        .await
+        .unwrap_or_else(|e| panic!("get_challenges error: {:?}", e))
+        .first()
+        .map(|x| x.to_owned())
+        .ok_or_else(|| ProtocolError::InvalidChallenge {
+            challenge_id: challenge_id.clone(),
+        })?;
+    Ok(challenge)
+}
+
+async fn verify_algorithm<T: Context>(
+    ctx: &mut T,
+    algorithm_id: &String,
+    block: &Block,
+) -> ProtocolResult<()> {
+    if !ctx
+        .get_algorithms(AlgorithmsFilter::Id(algorithm_id.clone()), None, false)
+        .await
+        .unwrap_or_else(|e| panic!("get_algorithms error: {:?}", e))
+        .first()
+        .is_some()
+    {
+        return Err(ProtocolError::InvalidAlgorithm {
+            algorithm_id: algorithm_id.clone(),
+        });
+    }
+    if !block.data().active_algorithm_ids.contains(algorithm_id) {
+        return Err(ProtocolError::InvalidAlgorithm {
+            algorithm_id: algorithm_id.clone(),
+        });
+    }
+    Ok(())
+}
+async fn get_block_by_id<T: Context>(ctx: &mut T, block_id: &String) -> ProtocolResult<Block> {
+    ctx.get_block(BlockFilter::Id(block_id.clone()), true)
+        .await
+        .unwrap_or_else(|e| panic!("get_block error: {:?}", e))
+        .ok_or_else(|| ProtocolError::InvalidBlock {
+            block_id: block_id.clone(),
+        })
+}
+
+fn verify_sufficient_solutions(
+    block: &Block,
+    solutions_meta_data: &Vec<SolutionMetaData>,
+) -> ProtocolResult<()> {
+    let min_num_solutions = block.config().benchmark_submissions.min_num_solutions as usize;
+    if solutions_meta_data.len() < min_num_solutions {
+        return Err(ProtocolError::InsufficientSolutions {
+            num_solutions: solutions_meta_data.len(),
+            min_num_solutions,
+        });
+    }
+    Ok(())
+}
+
+async fn verify_benchmark_settings_are_unique<T: Context>(
+    ctx: &mut T,
+    settings: &BenchmarkSettings,
+) -> ProtocolResult<()> {
+    if ctx
+        .get_benchmarks(BenchmarksFilter::Settings(settings.clone()), false)
+        .await
+        .unwrap_or_else(|e| panic!("get_benchmarks error: {:?}", e))
+        .first()
+        .is_some()
+    {
+        return Err(ProtocolError::DuplicateBenchmarkSettings {
+            settings: settings.clone(),
+        });
+    }
+
+    Ok(())
+}
+
+fn verify_nonces_are_unique(solutions_meta_data: &Vec<SolutionMetaData>) -> ProtocolResult<()> {
+    let nonces: HashMap<u32, u32> =
+        solutions_meta_data
+            .iter()
+            .fold(HashMap::new(), |mut acc, s| {
+                *acc.entry(s.nonce).or_insert(0) += 1;
+                acc
+            });
+
+    if let Some((&nonce, _)) = nonces.iter().find(|(_, &count)| count > 1) {
+        return Err(ProtocolError::DuplicateNonce { nonce });
+    }
+
+    Ok(())
+}
+
+fn verify_solutions_signatures(
+    solutions_meta_data: &Vec<SolutionMetaData>,
+    challenge: &Challenge,
+) -> ProtocolResult<()> {
+    let solution_signature_threshold = *challenge.block_data().solution_signature_threshold();
+    if let Some(s) = solutions_meta_data
+        .iter()
+        .find(|&s| s.solution_signature > solution_signature_threshold)
+    {
+        return Err(ProtocolError::InvalidSolutionSignature {
+            nonce: s.nonce,
+            solution_signature: s.solution_signature,
+            threshold: solution_signature_threshold,
+        });
+    }
+
+    Ok(())
+}
+
+fn verify_benchmark_difficulty(difficulty: &Vec<i32>, challenge: &Challenge) -> ProtocolResult<()> {
+    let challenge_data = challenge.block_data();
+
+    let difficulty_parameters = &challenge.details.difficulty_parameters;
+    if difficulty.len() != difficulty_parameters.len()
+        || difficulty
+            .iter()
+            .zip(difficulty_parameters.iter())
+            .any(|(d, p)| *d < p.min_value || *d > p.max_value)
+    {
+        return Err(ProtocolError::InvalidDifficulty {
+            difficulty: difficulty.clone(),
+            difficulty_parameters: difficulty_parameters.clone(),
+        });
+    }
+
+    let (lower_frontier, upper_frontier) = if *challenge_data.scaling_factor() > 1f64 {
+        (
+            challenge_data.base_frontier(),
+            challenge_data.scaled_frontier(),
+        )
+    } else {
+        (
+            challenge_data.scaled_frontier(),
+            challenge_data.base_frontier(),
+        )
+    };
+    match difficulty.within(lower_frontier, upper_frontier) {
+        PointCompareFrontiers::Above(_) => {
+            return Err(ProtocolError::DifficultyAboveHardestFrontier {
+                difficulty: difficulty.clone(),
+            });
+        }
+        PointCompareFrontiers::Below(_) => {
+            return Err(ProtocolError::DifficultyBelowEasiestFrontier {
+                difficulty: difficulty.clone(),
+            });
+        }
+        PointCompareFrontiers::Within => {}
+    }
+
+    Ok(())
+}
+
+async fn verify_solution_is_valid<T: Context>(
+    ctx: &mut T,
+    settings: &BenchmarkSettings,
+    solutions_meta_data: &Vec<SolutionMetaData>,
+    solution_data: &SolutionData,
+) -> ProtocolResult<()> {
+    let solutions_map: HashMap<u32, u32> = solutions_meta_data
+        .iter()
+        .map(|d| (d.nonce, d.solution_signature))
+        .collect();
+
+    if let Some(&expected_signature) = solutions_map.get(&solution_data.nonce) {
+        let signature = solution_data.calc_solution_signature();
+
+        if expected_signature != signature {
+            return Err(ProtocolError::InvalidSignatureFromSolutionData {
+                nonce: solution_data.nonce,
+                expected_signature,
+                actual_signature: signature,
+            });
+        }
+    } else {
+        return Err(ProtocolError::InvalidBenchmarkNonce {
+            nonce: solution_data.nonce,
+        });
+    }
+
+    if ctx
+        .verify_solution(settings, solution_data.nonce, &solution_data.solution)
+        .await
+        .unwrap_or_else(|e| panic!("verify_solution error: {:?}", e))
+        .is_err()
+    {
+        return Err(ProtocolError::InvalidSolution {
+            nonce: solution_data.nonce,
+        });
+    }
+
+    Ok(())
+}
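The duplicate-nonce check above folds the metadata into occurrence counts and fails on any count above one. The same logic in a self-contained form:

    use std::collections::HashMap;

    // Illustrative helper: returns a nonce that occurs more than once, if any.
    fn first_duplicate(nonces: &[u32]) -> Option<u32> {
        let counts: HashMap<u32, u32> = nonces.iter().fold(HashMap::new(), |mut acc, &n| {
            *acc.entry(n).or_insert(0) += 1;
            acc
        });
        counts.into_iter().find(|&(_, count)| count > 1).map(|(nonce, _)| nonce)
    }

    fn main() {
        assert_eq!(first_duplicate(&[1, 2, 3]), None);
        assert_eq!(first_duplicate(&[1, 2, 2]), Some(2));
    }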
diff --git a/tig-protocol/src/submit_proof.rs b/tig-protocol/src/submit_proof.rs
new file mode 100644
index 00000000..e2346d89
--- /dev/null
+++ b/tig-protocol/src/submit_proof.rs
@@ -0,0 +1,158 @@
+use crate::{context::*, error::*};
+use std::collections::{HashMap, HashSet};
+use tig_structs::core::*;
+
+pub(crate) async fn execute<T: Context>(
+    ctx: &mut T,
+    player: &Player,
+    benchmark_id: &String,
+    solutions_data: &Vec<SolutionData>,
+) -> ProtocolResult<Result<(), String>> {
+    verify_no_fraud(ctx, benchmark_id).await?;
+    verify_proof_not_already_submitted(ctx, benchmark_id).await?;
+    let benchmark = get_benchmark_by_id(ctx, benchmark_id).await?;
+    verify_benchmark_ownership(player, &benchmark)?;
+    verify_sufficient_lifespan(ctx, &benchmark).await?;
+    verify_sampled_nonces(&benchmark, &solutions_data)?;
+    ctx.add_proof_to_mempool(benchmark_id, solutions_data)
+        .await
+        .unwrap_or_else(|e| panic!("add_proof_to_mempool error: {:?}", e));
+    if let Err(e) = verify_solutions_are_valid(ctx, &benchmark, &solutions_data).await {
+        ctx.add_fraud_to_mempool(benchmark_id, &e.to_string())
+            .await
+            .unwrap_or_else(|e| panic!("add_fraud_to_mempool error: {:?}", e));
+        return Ok(Err(e.to_string()));
+    }
+    Ok(Ok(()))
+}
+
+async fn get_benchmark_by_id<T: Context>(
+    ctx: &mut T,
+    benchmark_id: &String,
+) -> ProtocolResult<Benchmark> {
+    ctx.get_benchmarks(BenchmarksFilter::Id(benchmark_id.clone()), true)
+        .await
+        .unwrap_or_else(|e| panic!("get_benchmarks error: {:?}", e))
+        .first()
+        .map(|x| x.to_owned())
+        .ok_or_else(|| ProtocolError::InvalidBenchmark {
+            benchmark_id: benchmark_id.to_string(),
+        })
+}
+
+async fn verify_no_fraud<T: Context>(ctx: &mut T, benchmark_id: &String) -> ProtocolResult<()> {
+    if ctx
+        .get_frauds(FraudsFilter::BenchmarkId(benchmark_id.clone()), false)
+        .await
+        .unwrap_or_else(|e| panic!("get_frauds error: {:?}", e))
+        .first()
+        .is_some()
+    {
+        return Err(ProtocolError::FlaggedAsFraud {
+            benchmark_id: benchmark_id.to_string(),
+        });
+    }
+    Ok(())
+}
+
+async fn verify_proof_not_already_submitted<T: Context>(
+    ctx: &mut T,
+    benchmark_id: &String,
+) -> ProtocolResult<()> {
+    if ctx
+        .get_proofs(ProofsFilter::BenchmarkId(benchmark_id.clone()), false)
+        .await
+        .unwrap_or_else(|e| panic!("get_proofs error: {:?}", e))
+        .first()
+        .is_some()
+    {
+        return Err(ProtocolError::DuplicateProof {
+            benchmark_id: benchmark_id.to_string(),
+        });
+    }
+    Ok(())
+}
+
+fn verify_benchmark_ownership(player: &Player, benchmark: &Benchmark) -> ProtocolResult<()> {
+    let expected_player_id = benchmark.settings.player_id.clone();
+    if player.id != expected_player_id {
+        return Err(ProtocolError::InvalidSubmittingPlayer {
+            actual_player_id: player.id.to_string(),
+            expected_player_id,
+        });
+    }
+    Ok(())
+}
+
+async fn verify_sufficient_lifespan<T: Context>(
+    ctx: &mut T,
+    benchmark: &Benchmark,
+) -> ProtocolResult<()> {
+    let block = ctx
+        .get_block(BlockFilter::Latest, false)
+        .await
+        .unwrap_or_else(|e| panic!("get_block error: {:?}", e))
+        .expect("Expecting latest block to exist");
+    let config = block.config();
+    let submission_delay = block.details.height - benchmark.details.block_started + 1;
+    if submission_delay * (config.benchmark_submissions.submission_delay_multiplier + 1)
+        >= config.benchmark_submissions.lifespan_period
+    {
+        return Err(ProtocolError::InsufficientLifespan);
+    }
+    Ok(())
+}
+
+fn verify_sampled_nonces(
+    benchmark: &Benchmark,
+    solutions_data: &Vec<SolutionData>,
+) -> ProtocolResult<()> {
+    let sampled_nonces: HashSet<u32> = benchmark.state().sampled_nonces().iter().cloned().collect();
+    let proof_nonces: HashSet<u32> = solutions_data.iter().map(|d| d.nonce).collect();
+
+    if sampled_nonces != proof_nonces {
+        return Err(ProtocolError::InvalidProofNonces {
+            submitted_nonces: proof_nonces.into_iter().collect(),
+            expected_nonces: sampled_nonces.into_iter().collect(),
+        });
+    }
+    Ok(())
+}
+
+async fn verify_solutions_are_valid<T: Context>(
+    ctx: &mut T,
+    benchmark: &Benchmark,
+    solutions_data: &Vec<SolutionData>,
+) -> ProtocolResult<()> {
+    let solutions_map: HashMap<u32, u32> = benchmark
+        .solutions_meta_data()
+        .iter()
+        .map(|d| (d.nonce, d.solution_signature))
+        .collect();
+
+    for d in solutions_data.iter() {
+        let submitted_signature = solutions_map[&d.nonce];
+        let actual_signature = d.calc_solution_signature();
+
+        if submitted_signature != actual_signature {
+            return Err(ProtocolError::InvalidSignatureFromSolutionData {
+                nonce: d.nonce,
+                expected_signature: submitted_signature,
+                actual_signature,
+            });
+        }
+    }
+
+    for d in solutions_data.iter() {
+        if ctx
+            .verify_solution(&benchmark.settings, d.nonce, &d.solution)
+            .await
+            .unwrap_or_else(|e| panic!("verify_solution error: {:?}", e))
+            .is_err()
+        {
+            return Err(ProtocolError::InvalidSolution { nonce: d.nonce });
+        }
+    }
+
+    Ok(())
+}
diff --git a/tig-protocol/src/verify_proof.rs b/tig-protocol/src/verify_proof.rs
new file mode 100644
index 00000000..85cc9af7
--- /dev/null
+++ b/tig-protocol/src/verify_proof.rs
@@ -0,0 +1,79 @@
+use crate::{context::*, error::*};
+use tig_structs::core::*;
+
+pub(crate) async fn execute<T: Context>(
+    ctx: &mut T,
+    benchmark_id: &String,
+) -> ProtocolResult<Result<(), String>> {
+    let benchmark = get_benchmark_by_id(ctx, benchmark_id).await?;
+    let proof = get_proof_by_benchmark_id(ctx, benchmark_id).await?;
+    let mut verified = Ok(());
+    if let Err(e) = verify_solutions_with_algorithm(ctx, &benchmark, &proof).await {
+        ctx.add_fraud_to_mempool(benchmark_id, &e.to_string())
+            .await
+            .unwrap_or_else(|e| panic!("add_fraud_to_mempool error: {:?}", e));
+        verified = Err(e.to_string());
+    }
+    Ok(verified)
+}
+
+async fn get_benchmark_by_id<T: Context>(
+    ctx: &mut T,
+    benchmark_id: &String,
+) -> ProtocolResult<Benchmark> {
+    Ok(ctx
+        .get_benchmarks(BenchmarksFilter::Id(benchmark_id.clone()), false)
+        .await
+        .unwrap_or_else(|e| panic!("get_benchmarks error: {:?}", e))
+        .first()
+        .map(|x| x.to_owned())
+        .expect(format!("Expecting benchmark {} to exist", benchmark_id).as_str()))
+}
+
+async fn get_proof_by_benchmark_id<T: Context>(
+    ctx: &mut T,
+    benchmark_id: &String,
+) -> ProtocolResult<Proof> {
+    Ok(ctx
+        .get_proofs(ProofsFilter::BenchmarkId(benchmark_id.clone()), true)
+        .await
+        .unwrap_or_else(|e| panic!("get_proofs error: {:?}", e))
+        .first()
+        .map(|x| x.to_owned())
+        .expect(format!("Expecting proof for benchmark {} to exist", benchmark_id).as_str()))
+}
+
+async fn verify_solutions_with_algorithm<T: Context>(
+    ctx: &mut T,
+    benchmark: &Benchmark,
+    proof: &Proof,
+) -> ProtocolResult<()> {
+    let settings = &benchmark.settings;
+    let wasm_vm_config = ctx
+        .get_block(BlockFilter::Id(settings.block_id.clone()), false)
+        .await
+        .unwrap_or_else(|e| panic!("get_block error: {:?}", e))
+        .expect(format!("Expecting block {} to exist", settings.block_id).as_str())
+        .config
+        .unwrap()
+        .wasm_vm;
+
+    for solution_data in proof.solutions_data() {
+        if let Ok(actual_solution_data) = ctx
+            .compute_solution(settings, solution_data.nonce, &wasm_vm_config)
+            .await
+            .unwrap_or_else(|e| panic!("compute_solution error: {:?}", e))
+        {
+            if actual_solution_data == *solution_data {
+                continue;
+            }
+        }
+
+        return Err(ProtocolError::InvalidSolutionData {
+            algorithm_id: settings.algorithm_id.clone(),
+            nonce: solution_data.nonce,
+        });
+    }
+
+    Ok(())
+}
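`verify_sampled_nonces` requires exact set equality, so a proof can neither omit a sampled nonce nor pad extra ones. In isolation:

    use std::collections::HashSet;

    // Illustrative: submitted nonces must equal the sampled nonces as sets.
    fn nonces_match(sampled: &[u32], submitted: &[u32]) -> bool {
        let sampled: HashSet<u32> = sampled.iter().copied().collect();
        let submitted: HashSet<u32> = submitted.iter().copied().collect();
        sampled == submitted
    }

    fn main() {
        assert!(nonces_match(&[3, 1, 2], &[1, 2, 3]));
        assert!(!nonces_match(&[1, 2], &[1, 2, 3]));
    }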
version = "1.0.113" } +tig-utils = { path = "../tig-utils" } + +[lib] +crate-type = ["cdylib", "rlib"] diff --git a/tig-structs/src/api.rs b/tig-structs/src/api.rs new file mode 100644 index 00000000..3334aaf3 --- /dev/null +++ b/tig-structs/src/api.rs @@ -0,0 +1,171 @@ +use crate::{core::*, serializable_struct_with_getters}; +use serde::{Deserialize, Serialize}; + +serializable_struct_with_getters! { + RequestApiKeyReq { + signature: String, + address: String, + } +} + +serializable_struct_with_getters! { + RequestApiKeyResp { + api_key: String, + } +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum PlayerType { + Benchmarker, + Innovator, +} + +impl PlayerType { + pub fn to_string(self) -> String { + match self { + PlayerType::Benchmarker => "benchmarker".to_string(), + PlayerType::Innovator => "innovator".to_string(), + } + } + + pub fn from_string(s: String) -> Result { + match s.as_str() { + "benchmarker" => Ok(PlayerType::Benchmarker), + "innovator" => Ok(PlayerType::Innovator), + _ => Err("Invalid PlayerType".to_string()), + } + } +} + +serializable_struct_with_getters! { + GetPlayersReq { + block_id: Option, + player_type: PlayerType, + } +} + +serializable_struct_with_getters! { + GetPlayersResp { + block_id: String, + block_details: BlockDetails, + players: Vec, + } +} + +serializable_struct_with_getters! { + GetBlockReq { + id: Option, + round: Option, + height: Option, + include_data: bool, + } +} + +serializable_struct_with_getters! { + GetBlockResp { + block: Option, + } +} + +serializable_struct_with_getters! { + GetChallengesReq { + block_id: Option, + } +} + +serializable_struct_with_getters! { + GetChallengesResp { + block_id: String, + block_details: BlockDetails, + challenges: Vec, + } +} + +serializable_struct_with_getters! { + GetAlgorithmsReq { + block_id: Option, + } +} + +serializable_struct_with_getters! { + GetAlgorithmsResp { + block_id: String, + block_details: BlockDetails, + algorithms: Vec, + wasms: Vec, + } +} + +serializable_struct_with_getters! { + GetBenchmarksReq { + block_id: Option, + player_id: String, + } +} + +serializable_struct_with_getters! { + GetBenchmarksResp { + block_id: String, + block_details: BlockDetails, + benchmarks: Vec, + proofs: Vec, + frauds: Vec, + } +} + +serializable_struct_with_getters! { + GetBenchmarkDataReq { + benchmark_id: String, + } +} + +serializable_struct_with_getters! { + GetBenchmarkDataResp { + benchmark: Option, + proof: Option, + fraud: Option, + } +} + +serializable_struct_with_getters! { + SubmitBenchmarkReq { + settings: BenchmarkSettings, + solutions_meta_data: Vec, + solution_data: SolutionData, + } +} + +serializable_struct_with_getters! { + SubmitBenchmarkResp { + benchmark_id: String, + verified: Result<(), String>, + } +} + +serializable_struct_with_getters! { + SubmitProofReq { + benchmark_id: String, + solutions_data: Vec, + } +} + +serializable_struct_with_getters! { + SubmitProofResp { + verified: Result<(), String>, + } +} + +serializable_struct_with_getters! { + SubmitAlgorithmReq { + name: String, + challenge_id: String, + tx_hash: String, + code: String, + } +} + +serializable_struct_with_getters! 
{ + SubmitAlgorithmResp { + algorithm_id: String, + } +} diff --git a/tig-structs/src/config.rs b/tig-structs/src/config.rs new file mode 100644 index 00000000..fc49fe82 --- /dev/null +++ b/tig-structs/src/config.rs @@ -0,0 +1,95 @@ +use crate::serializable_struct_with_getters; +use serde::{Deserialize, Serialize}; +use tig_utils::PreciseNumber; + +serializable_struct_with_getters! { + ProtocolConfig { + erc20: ERC20Config, + benchmark_submissions: BenchmarkSubmissionsConfig, + wasm_vm: WasmVMConfig, + solution_signature: SolutionSignatureConfig, + qualifiers: QualifiersConfig, + difficulty_bounds: DifficultyBoundsConfig, + multi_factor_proof_of_work: MultiFactorProofOfWorkConfig, + rounds: RoundsConfig, + algorithm_submissions: AlgorithmSubmissionsConfig, + rewards: RewardsConfig, + } +} +serializable_struct_with_getters! { + ERC20Config { + rpc_url: String, + chain_id: String, + token_address: String, + burn_address: String, + } +} +serializable_struct_with_getters! { + BenchmarkSubmissionsConfig { + min_num_solutions: u32, + submission_delay_multiplier: u32, + max_samples: usize, + lifespan_period: u32, + } +} +serializable_struct_with_getters! { + WasmVMConfig { + max_memory: u64, + max_fuel: u64, + } +} +serializable_struct_with_getters! { + SolutionSignatureConfig { + max_percent_delta: f64, + equilibrium_rate_multiplier: f64, + percent_error_multiplier: f64, + } +} +serializable_struct_with_getters! { + QualifiersConfig { + cutoff_multiplier: f64, + total_qualifiers_threshold: u32, + } +} +serializable_struct_with_getters! { + DifficultyBoundsConfig { + max_multiplier: f64, + } +} +serializable_struct_with_getters! { + MultiFactorProofOfWorkConfig { + imbalance_multiplier: f64, + } +} +serializable_struct_with_getters! { + RoundsConfig { + blocks_per_round: u32, + } +} +serializable_struct_with_getters! { + AlgorithmSubmissionsConfig { + submission_fee: PreciseNumber, + adoption_threshold: f64, + merge_points_threshold: u32, + push_delay: u32, + } +} +serializable_struct_with_getters! { + RewardsConfig { + distribution: DistributionConfig, + schedule: Vec, + } +} +serializable_struct_with_getters! { + DistributionConfig { + benchmarkers: f64, + optimisations: f64, + breakthroughs: f64, + } +} +serializable_struct_with_getters! { + EmissionsConfig { + block_reward: f64, + round_start: u32, + } +} diff --git a/tig-structs/src/core.rs b/tig-structs/src/core.rs new file mode 100644 index 00000000..7652c320 --- /dev/null +++ b/tig-structs/src/core.rs @@ -0,0 +1,270 @@ +use crate::{config::ProtocolConfig, serializable_struct_with_getters}; +use serde::{Deserialize, Serialize}; +use serde_json::{Map, Value}; +use std::collections::{HashMap, HashSet}; +use tig_utils::{jsonify, u32_from_str}; +pub use tig_utils::{Frontier, Point, PreciseNumber, Transaction}; + +serializable_struct_with_getters! { + Algorithm { + id: String, + details: AlgorithmDetails, + state: Option, + block_data: Option, + code: Option, + } +} +serializable_struct_with_getters! { + Benchmark { + id: String, + settings: BenchmarkSettings, + details: BenchmarkDetails, + state: Option, + solutions_meta_data: Option>, + solution_data: Option, + } +} +serializable_struct_with_getters! { + Block { + id: String, + details: BlockDetails, + data: Option, + config: Option, + } +} +serializable_struct_with_getters! { + Challenge { + id: String, + details: ChallengeDetails, + state: Option, + block_data: Option, + } +} +serializable_struct_with_getters! 
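Since every struct above derives `Serialize`/`Deserialize`, the API payloads are plain JSON. A sketch of building a `SubmitAlgorithmReq` (field values are placeholders):

    fn main() {
        let req = SubmitAlgorithmReq {
            name: "my_algorithm".to_string(), // placeholder values throughout
            challenge_id: "c001".to_string(),
            tx_hash: "0xabc123".to_string(),
            code: "...".to_string(),
        };
        // Serializes to {"name":"my_algorithm","challenge_id":"c001",...}
        println!("{}", serde_json::to_string(&req).unwrap());
    }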
diff --git a/tig-structs/src/config.rs b/tig-structs/src/config.rs
new file mode 100644
index 00000000..fc49fe82
--- /dev/null
+++ b/tig-structs/src/config.rs
@@ -0,0 +1,95 @@
+use crate::serializable_struct_with_getters;
+use serde::{Deserialize, Serialize};
+use tig_utils::PreciseNumber;
+
+serializable_struct_with_getters! {
+    ProtocolConfig {
+        erc20: ERC20Config,
+        benchmark_submissions: BenchmarkSubmissionsConfig,
+        wasm_vm: WasmVMConfig,
+        solution_signature: SolutionSignatureConfig,
+        qualifiers: QualifiersConfig,
+        difficulty_bounds: DifficultyBoundsConfig,
+        multi_factor_proof_of_work: MultiFactorProofOfWorkConfig,
+        rounds: RoundsConfig,
+        algorithm_submissions: AlgorithmSubmissionsConfig,
+        rewards: RewardsConfig,
+    }
+}
+serializable_struct_with_getters! {
+    ERC20Config {
+        rpc_url: String,
+        chain_id: String,
+        token_address: String,
+        burn_address: String,
+    }
+}
+serializable_struct_with_getters! {
+    BenchmarkSubmissionsConfig {
+        min_num_solutions: u32,
+        submission_delay_multiplier: u32,
+        max_samples: usize,
+        lifespan_period: u32,
+    }
+}
+serializable_struct_with_getters! {
+    WasmVMConfig {
+        max_memory: u64,
+        max_fuel: u64,
+    }
+}
+serializable_struct_with_getters! {
+    SolutionSignatureConfig {
+        max_percent_delta: f64,
+        equilibrium_rate_multiplier: f64,
+        percent_error_multiplier: f64,
+    }
+}
+serializable_struct_with_getters! {
+    QualifiersConfig {
+        cutoff_multiplier: f64,
+        total_qualifiers_threshold: u32,
+    }
+}
+serializable_struct_with_getters! {
+    DifficultyBoundsConfig {
+        max_multiplier: f64,
+    }
+}
+serializable_struct_with_getters! {
+    MultiFactorProofOfWorkConfig {
+        imbalance_multiplier: f64,
+    }
+}
+serializable_struct_with_getters! {
+    RoundsConfig {
+        blocks_per_round: u32,
+    }
+}
+serializable_struct_with_getters! {
+    AlgorithmSubmissionsConfig {
+        submission_fee: PreciseNumber,
+        adoption_threshold: f64,
+        merge_points_threshold: u32,
+        push_delay: u32,
+    }
+}
+serializable_struct_with_getters! {
+    RewardsConfig {
+        distribution: DistributionConfig,
+        schedule: Vec<EmissionsConfig>,
+    }
+}
+serializable_struct_with_getters! {
+    DistributionConfig {
+        benchmarkers: f64,
+        optimisations: f64,
+        breakthroughs: f64,
+    }
+}
+serializable_struct_with_getters! {
+    EmissionsConfig {
+        block_reward: f64,
+        round_start: u32,
+    }
+}
diff --git a/tig-structs/src/core.rs b/tig-structs/src/core.rs
new file mode 100644
index 00000000..7652c320
--- /dev/null
+++ b/tig-structs/src/core.rs
@@ -0,0 +1,270 @@
+use crate::{config::ProtocolConfig, serializable_struct_with_getters};
+use serde::{Deserialize, Serialize};
+use serde_json::{Map, Value};
+use std::collections::{HashMap, HashSet};
+use tig_utils::{jsonify, u32_from_str};
+pub use tig_utils::{Frontier, Point, PreciseNumber, Transaction};
+
+serializable_struct_with_getters! {
+    Algorithm {
+        id: String,
+        details: AlgorithmDetails,
+        state: Option<AlgorithmState>,
+        block_data: Option<AlgorithmBlockData>,
+        code: Option<String>,
+    }
+}
+serializable_struct_with_getters! {
+    Benchmark {
+        id: String,
+        settings: BenchmarkSettings,
+        details: BenchmarkDetails,
+        state: Option<BenchmarkState>,
+        solutions_meta_data: Option<Vec<SolutionMetaData>>,
+        solution_data: Option<SolutionData>,
+    }
+}
+serializable_struct_with_getters! {
+    Block {
+        id: String,
+        details: BlockDetails,
+        data: Option<BlockData>,
+        config: Option<ProtocolConfig>,
+    }
+}
+serializable_struct_with_getters! {
+    Challenge {
+        id: String,
+        details: ChallengeDetails,
+        state: Option<ChallengeState>,
+        block_data: Option<ChallengeBlockData>,
+    }
+}
+serializable_struct_with_getters! {
+    Player {
+        id: String,
+        details: PlayerDetails,
+        block_data: Option<PlayerBlockData>,
+    }
+}
+serializable_struct_with_getters! {
+    Proof {
+        benchmark_id: String,
+        state: Option<ProofState>,
+        solutions_data: Option<Vec<SolutionData>>,
+    }
+}
+serializable_struct_with_getters! {
+    Fraud {
+        benchmark_id: String,
+        state: Option<FraudState>,
+        allegation: Option<String>,
+    }
+}
+serializable_struct_with_getters! {
+    Wasm {
+        algorithm_id: String,
+        details: WasmDetails,
+        state: Option<WasmState>,
+        wasm_blob: Option<Vec<u8>>,
+    }
+}
+
+// Algorithm child structs
+serializable_struct_with_getters! {
+    AlgorithmDetails {
+        name: String,
+        player_id: String,
+        challenge_id: String,
+        tx_hash: String,
+    }
+}
+serializable_struct_with_getters! {
+    AlgorithmState {
+        block_confirmed: Option<u32>,
+        round_submitted: Option<u32>,
+        round_pushed: Option<u32>,
+        round_merged: Option<u32>,
+    }
+}
+serializable_struct_with_getters! {
+    AlgorithmBlockData {
+        num_qualifiers_by_player: Option<HashMap<String, u32>>,
+        adoption: Option<PreciseNumber>,
+        merge_points: Option<u32>,
+        reward: Option<PreciseNumber>,
+        round_earnings: Option<PreciseNumber>,
+    }
+}
+
+// Benchmark child structs
+serializable_struct_with_getters! {
+    BenchmarkSettings {
+        player_id: String,
+        block_id: String,
+        challenge_id: String,
+        algorithm_id: String,
+        difficulty: Vec<i32>,
+    }
+}
+impl BenchmarkSettings {
+    pub fn calc_seed(&self, nonce: u32) -> u32 {
+        u32_from_str(jsonify(&self).as_str()) ^ nonce
+    }
+}
+serializable_struct_with_getters! {
+    BenchmarkDetails {
+        block_started: u32,
+        num_solutions: u32,
+    }
+}
+serializable_struct_with_getters! {
+    BenchmarkState {
+        block_confirmed: Option<u32>,
+        sampled_nonces: Option<Vec<u32>>,
+    }
+}
+serializable_struct_with_getters! {
+    SolutionMetaData {
+        nonce: u32,
+        solution_signature: u32,
+    }
+}
+impl From<SolutionData> for SolutionMetaData {
+    fn from(data: SolutionData) -> Self {
+        SolutionMetaData {
+            solution_signature: data.calc_solution_signature(),
+            nonce: data.nonce,
+        }
+    }
+}
+
+// Block child structs
+serializable_struct_with_getters! {
+    BlockDetails {
+        prev_block_id: String,
+        height: u32,
+        round: u32,
+    }
+}
+serializable_struct_with_getters! {
+    BlockData {
+        mempool_challenge_ids: HashSet<String>,
+        mempool_algorithm_ids: HashSet<String>,
+        mempool_benchmark_ids: HashSet<String>,
+        mempool_proof_ids: HashSet<String>,
+        mempool_fraud_ids: HashSet<String>,
+        mempool_wasm_ids: HashSet<String>,
+        active_challenge_ids: HashSet<String>,
+        active_algorithm_ids: HashSet<String>,
+        active_benchmark_ids: HashSet<String>,
+        active_player_ids: HashSet<String>,
+    }
+}
+
+// Challenge child structs
+serializable_struct_with_getters! {
+    ChallengeDetails {
+        name: String,
+        difficulty_parameters: Vec<DifficultyParameter>,
+    }
+}
+impl ChallengeDetails {
+    pub fn min_difficulty(&self) -> Point {
+        self.difficulty_parameters
+            .iter()
+            .map(|p| p.min_value)
+            .collect()
+    }
+    pub fn max_difficulty(&self) -> Point {
+        self.difficulty_parameters
+            .iter()
+            .map(|p| p.max_value)
+            .collect()
+    }
+}
+serializable_struct_with_getters! {
+    ChallengeState {
+        block_confirmed: Option<u32>,
+        round_submitted: Option<u32>,
+        round_active: Option<u32>,
+        round_inactive: Option<u32>,
+    }
+}
+serializable_struct_with_getters! {
+    DifficultyParameter {
+        name: String,
+        min_value: i32,
+        max_value: i32,
+    }
+}
+serializable_struct_with_getters! {
+    ChallengeBlockData {
+        solution_signature_threshold: Option<u32>,
+        num_qualifiers: Option<u32>,
+        qualifier_difficulties: Option<HashSet<Point>>,
+        base_frontier: Option<Frontier>,
+        scaled_frontier: Option<Frontier>,
+        scaling_factor: Option<f64>,
+    }
+}
+
+// Player child structs
+serializable_struct_with_getters! {
+    PlayerDetails {
+        name: String,
+    }
+}
+serializable_struct_with_getters! {
+    PlayerBlockData {
+        num_qualifiers_by_challenge: Option<HashMap<String, u32>>,
+        cutoff: Option<u32>,
+        imbalance: Option<PreciseNumber>,
+        imbalance_penalty: Option<PreciseNumber>,
+        influence: Option<PreciseNumber>,
+        reward: Option<PreciseNumber>,
+        round_earnings: Option<PreciseNumber>,
+    }
+}
+
+// Proof child structs
+serializable_struct_with_getters! {
+    ProofState {
+        block_confirmed: Option<u32>,
+        submission_delay: Option<u32>,
+    }
+}
+pub type Solution = Map<String, Value>;
+serializable_struct_with_getters! {
+    SolutionData {
+        nonce: u32,
+        runtime_signature: u32,
+        fuel_consumed: u64,
+        solution: Solution,
+    }
+}
+impl SolutionData {
+    pub fn calc_solution_signature(&self) -> u32 {
+        u32_from_str(&jsonify(self))
+    }
+}
+
+// Fraud child structs
+serializable_struct_with_getters! {
+    FraudState {
+        block_confirmed: Option<u32>,
+    }
+}
+// Wasm child structs
+serializable_struct_with_getters! {
+    WasmDetails {
+        compile_success: bool,
+        download_url: Option<String>,
+        checksum: Option<String>,
+    }
+}
+serializable_struct_with_getters! {
+    WasmState {
+        block_confirmed: Option<u32>,
+    }
+}
diff --git a/tig-structs/src/lib.rs b/tig-structs/src/lib.rs
new file mode 100644
index 00000000..1e460674
--- /dev/null
+++ b/tig-structs/src/lib.rs
@@ -0,0 +1,42 @@
+pub mod api;
+pub mod config;
+pub mod core;
+
+#[macro_export]
+macro_rules! serializable_struct_with_getters {
+    ( @ $name:ident { } -> ($($fields:tt)*) ($($getters:tt)*) ) => (
+        #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
+        pub struct $name {
+            $($fields)*
+        }
+        impl $name {
+            $($getters)*
+        }
+    );
+    ( @ $name:ident { $param:ident : Option<$type:ty>, $($rest:tt)* } -> ($($fields:tt)*) ($($getters:tt)*) ) => (
+        serializable_struct_with_getters!(@ $name { $($rest)* } -> (
+            $($fields)*
+            #[serde(default)]
+            pub $param : Option<$type>,
+        ) (
+            $($getters)*
+            pub fn $param(&self) -> &$type {
+                self.$param.as_ref().expect(
+                    format!("Expected {}.{} to be Some, but it was None", stringify!($name), stringify!($param)).as_str()
+                )
+            }
+        ));
+    );
+
+    ( @ $name:ident { $param:ident : $type:ty, $($rest:tt)* } -> ($($fields:tt)*) ($($getters:tt)*) ) => (
+        serializable_struct_with_getters!(@ $name { $($rest)* } -> (
+            $($fields)*
+            pub $param : $type,
+        ) (
+            $($getters)*
+        ));
+    );
+    ( $name:ident { $( $rest:tt)* } ) => {
+        serializable_struct_with_getters!(@ $name { $($rest)* } -> () ());
+    };
+}
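The macro above emits a serde-derived struct plus a panicking getter for every `Option` field. Roughly, `serializable_struct_with_getters! { Foo { a: u32, b: Option<String>, } }` (a hypothetical `Foo`) expands to:

    #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
    pub struct Foo {
        pub a: u32,
        #[serde(default)]
        pub b: Option<String>,
    }
    impl Foo {
        // The getter unwraps the Option, panicking with a descriptive message if None.
        pub fn b(&self) -> &String {
            self.b.as_ref().expect("Expected Foo.b to be Some, but it was None")
        }
    }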
diff --git a/tig-utils/Cargo.toml b/tig-utils/Cargo.toml
new file mode 100644
index 00000000..1fa46c83
--- /dev/null
+++ b/tig-utils/Cargo.toml
@@ -0,0 +1,45 @@
+[package]
+name = "tig-utils"
+version = "0.1.0"
+authors.workspace = true
+repository.workspace = true
+edition.workspace = true
+readme.workspace = true
+
+[dependencies]
+anyhow = "1.0.81"
+base64 = "0.22.0"
+flate2 = "1.0.28"
+hex = "0.4.3"
+js-sys = { version = "0.3.68", optional = true }
+md5 = "0.7.0"
+rand = { version = "0.8.5", default-features = false, features = ["std_rng"] }
+reqwest = { version = "0.12.2", optional = true }
+serde = { version = "1.0.196", features = ["derive"] }
+serde_json = { version = "1.0.113", features = ["preserve_order"] }
+uint = "0.9.5"
+wasm-bindgen = { version = "0.2.91", optional = true }
+wasm-bindgen-futures = { version = "0.4.41", optional = true }
+web3 = { version = "0.19.0", optional = true }
+web-sys = { version = "0.3.68", optional = true, features = [
+    'Headers',
+    'Request',
+    'RequestInit',
+    'RequestMode',
+    'Response',
+    'Window',
+] }
+
+[lib]
+crate-type = ["cdylib", "rlib"]
+
+[features]
+default = []
+web3 = ["dep:web3"]
+request = ["dep:reqwest"]
+request-js = [
+    "dep:wasm-bindgen",
+    "dep:js-sys",
+    "dep:wasm-bindgen-futures",
+    "dep:web-sys",
+]
diff --git a/tig-utils/src/eth.rs b/tig-utils/src/eth.rs
new file mode 100644
index 00000000..09a9e0f9
--- /dev/null
+++ b/tig-utils/src/eth.rs
@@ -0,0 +1,53 @@
+use crate::number::PreciseNumber;
+use serde::{Deserialize, Serialize};
+
+#[derive(Serialize, Deserialize, Debug, Clone)]
+pub struct Transaction {
+    pub sender: String,
+    pub receiver: String,
+    pub amount: PreciseNumber,
+}
+
+#[cfg(feature = "web3")]
+mod web3_feature {
+    use crate::json::dejsonify;
+    use anyhow::{anyhow, Result};
+    use hex::{self, ToHex};
+    use web3::{signing::*, types::*};
+
+    pub fn recover_address_from_msg_and_sig(msg: &str, sig: &str) -> Result<String> {
+        let hash_msg = hash_message(msg.as_bytes());
+        let recovery = Recovery::from_raw_signature(
+            hash_msg,
+            hex::decode(sig.trim_start_matches("0x"))
+                .map_err(|e| web3::Error::InvalidResponse(e.to_string()))?,
+        )?;
+        let (signature, recovery_id) = recovery.as_signature().unwrap();
+        let address = recover(hash_msg.as_bytes(), &signature, recovery_id)?;
+        Ok(format!("0x{}", address.encode_hex::<String>()))
+    }
+
+    pub async fn get_transaction(rpc_url: &str, tx_hash: &str) -> Result<super::Transaction> {
+        let transport = web3::transports::Http::new(rpc_url)?;
+        let eth = web3::Web3::new(transport).eth();
+
+        let tx_hash = H256::from_slice(hex::decode(tx_hash.trim_start_matches("0x"))?.as_slice());
+        let receipt = eth
+            .transaction_receipt(tx_hash)
+            .await?
+            .ok_or_else(|| anyhow!("Receipt for transaction {} not found", tx_hash))?;
+        let tx = eth
+            .transaction(TransactionId::Hash(tx_hash))
+            .await?
+            .ok_or_else(|| anyhow!("Transaction {} not found", tx_hash))?;
+
+        Ok(super::Transaction {
+            sender: tx.from.unwrap().to_string(),
+            receiver: receipt.to.unwrap().to_string(),
+            amount: dejsonify(&tx.value.to_string())?,
+        })
+    }
+}
+
+#[cfg(feature = "web3")]
+pub use web3_feature::*;
diff --git a/tig-utils/src/frontiers.rs b/tig-utils/src/frontiers.rs
new file mode 100644
index 00000000..9215d628
--- /dev/null
+++ b/tig-utils/src/frontiers.rs
@@ -0,0 +1,206 @@
+use rand::Rng;
+use std::cmp::min;
+use std::collections::HashSet;
+
+pub type Point = Vec<i32>;
+pub type Frontier = HashSet<Point>;
+
+#[derive(Debug, Clone, PartialEq)]
+pub enum PointCompareFrontiers<P> {
+    Below(P),
+    Within,
+    Above(P),
+}
+
+#[derive(Debug, Clone, PartialEq)]
+pub enum ParetoCompare {
+    ADominatesB,
+    Equal,
+    BDominatesA,
+}
+
+pub trait PointOps {
+    type Point;
+
+    fn pareto_compare(&self, other: &Self) -> ParetoCompare;
+    fn scale(&self, min_point: &Self, max_point: &Self, multiplier: f64) -> Self::Point;
+    fn within(
+        &self,
+        lower_frontier: &Frontier,
+        upper_frontier: &Frontier,
+    ) -> PointCompareFrontiers<Self::Point>;
+}
+pub trait FrontierOps {
+    type Point;
+
+    fn pareto_frontier(&self) -> Frontier;
+    fn extend(&self, min_point: &Self::Point, max_point: &Self::Point) -> Frontier;
+    fn scale(
+        &self,
+        min_point: &Self::Point,
+        max_point: &Self::Point,
+        multiplier: f64,
+    ) -> Frontier;
+    fn sample<T: Rng>(&self, rng: &mut T) -> Self::Point;
+}
+
+impl PointOps for Point {
+    type Point = Point;
+
+    fn pareto_compare(&self, other: &Self) -> ParetoCompare {
+        let mut a_dominate_b = false;
+        let mut b_dominate_a = false;
+        for (a_val, b_val) in self.iter().zip(other) {
+            if a_val < b_val {
+                b_dominate_a = true;
+            } else if a_val > b_val {
+                a_dominate_b = true;
+            }
+        }
+        if a_dominate_b == b_dominate_a {
+            ParetoCompare::Equal
+        } else if a_dominate_b {
+            ParetoCompare::ADominatesB
+        } else {
+            ParetoCompare::BDominatesA
+        }
+    }
+    fn scale(
+        &self,
+        min_point: &Self::Point,
+        max_point: &Self::Point,
+        multiplier: f64,
+    ) -> Self::Point {
+        self.iter()
+            .enumerate()
+            .map(|(i, value)| {
+                // Calculate the offset for the current dimension
+                let offset = ((value - min_point[i] + 1) as f64) * multiplier;
+                // Scale the point and clamp it between min_point and max_point
+                (min_point[i] + offset.ceil() as i32 - 1).clamp(min_point[i], max_point[i])
+            })
+            .collect()
+    }
+    fn within(
+        &self,
+        lower_frontier: &Frontier,
+        upper_frontier: &Frontier,
+    ) -> PointCompareFrontiers<Self::Point> {
+        // Check if the point is not dominated by any point in the lower frontier
+        if let Some(point) = lower_frontier
+            .iter()
+            .find(|lower_point| self.pareto_compare(lower_point) == ParetoCompare::BDominatesA)
+        {
+            return PointCompareFrontiers::Below(point.clone());
+        }
+
+        // Check if the point does not dominate any point in the upper frontier
+        if let Some(point) = upper_frontier
+            .iter()
+            .find(|upper_point| self.pareto_compare(upper_point) == ParetoCompare::ADominatesB)
+        {
+            return PointCompareFrontiers::Above(point.clone());
+        }
+
+        PointCompareFrontiers::Within
+    }
+}
+
+impl FrontierOps for Frontier {
+    type Point = Point;
+
+    fn pareto_frontier(&self) -> Frontier {
+        let mut frontier = self.clone();
+
+        for point in self.iter() {
+            if !frontier.contains(point) {
+                continue;
+            }
+
+            let mut dominated_points = HashSet::new();
+            for other_point in frontier.iter() {
+                match point.pareto_compare(other_point) {
+                    ParetoCompare::ADominatesB => {
+                        dominated_points.insert(other_point.clone());
+                    }
+                    ParetoCompare::BDominatesA => {
+                        dominated_points.insert(point.clone());
+                        break;
+                    }
+                    ParetoCompare::Equal => {}
+                }
+            }
+            frontier = frontier.difference(&dominated_points).cloned().collect();
+        }
+
+        frontier
+    }
+    fn extend(&self, min_point: &Self::Point, max_point: &Self::Point) -> Frontier {
+        let mut frontier = self.clone();
+        (0..min_point.len()).into_iter().for_each(|i| {
+            let mut d = min_point.clone();
+            if let Some(v) = frontier.iter().map(|d| d[i]).max() {
+                d[i] = v;
+            }
+            if !frontier.contains(&d) {
+                d[i] = min(d[i] + 1, max_point[i]);
+                frontier.insert(d);
+            }
+        });
+        frontier
+    }
+    fn scale(
+        &self,
+        min_point: &Self::Point,
+        max_point: &Self::Point,
+        multiplier: f64,
+    ) -> Frontier {
+        self.iter()
+            .map(|point| point.scale(min_point, max_point, multiplier))
+            .collect()
+    }
+    fn sample<R: Rng>(&self, rng: &mut R) -> Self::Point {
+        // FIXME only works for 2 dimensional points
+        // Potential strategy for >2d: triangulate -> sample triangle -> sample point in triangle
+        match self.iter().next() {
+            None => panic!("Frontier is empty"),
+            Some(point) => {
+                if point.len() != 2 {
+                    panic!("Only 2 dimensional points are supported");
+                }
+            }
+        };
+        // randomly pick a dimension
+        let dim = (rng.next_u32() % 2) as usize;
+        let dim2 = (dim + 1) % 2;
+
+        // sort points by that dimension
+        let mut sorted_points: Vec<&Point> = self.iter().collect();
+        sorted_points.sort_by(|a, b| a[dim].cmp(&b[dim]));
+
+        // sample value in that dimension
+        let min_v = sorted_points.first().unwrap()[dim];
+        let max_v = sorted_points.last().unwrap()[dim];
+        let rand_v = rng.gen_range(min_v..=max_v);
+
+        // interpolate value in the other dimension
+        match sorted_points.binary_search_by(|point| point[dim].cmp(&rand_v)) {
+            Ok(idx) => sorted_points[idx].clone(),
+            Err(idx) => {
+                let a = sorted_points[idx - 1];
+                let b = sorted_points[idx];
+                let ratio = (rand_v - a[dim]) as f64 / (b[dim] - a[dim]) as f64;
+                let rand_v2 = (a[dim2] as f64 + ratio * (b[dim2] - a[dim2]) as f64).ceil() as i32;
+                // a is smaller than b in dim, but larger in dim2
+                if rand_v2 == a[dim2] {
+                    a.clone()
+                } else {
+                    (0..2)
+                        .into_iter()
+                        .map(|i| if i == dim { rand_v } else { rand_v2 })
+                        .collect()
+                }
+            }
+        }
+    }
+}
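A quick illustration of the Pareto semantics above: a point dominates another if it is no smaller in every dimension and larger in at least one, while incomparable points compare as `Equal`:

    use tig_utils::{ParetoCompare, Point, PointOps};

    fn main() {
        let a: Point = vec![3, 5];
        assert_eq!(a.pareto_compare(&vec![2, 5]), ParetoCompare::ADominatesB);
        assert_eq!(a.pareto_compare(&vec![5, 1]), ParetoCompare::Equal); // incomparable
    }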
diff --git a/tig-utils/src/hash.rs b/tig-utils/src/hash.rs
new file mode 100644
index 00000000..6d862c22
--- /dev/null
+++ b/tig-utils/src/hash.rs
@@ -0,0 +1,15 @@
+use md5;
+
+pub fn md5_from_str(input: &str) -> String {
+    md5_from_bytes(input.as_bytes())
+}
+
+pub fn md5_from_bytes(input: &[u8]) -> String {
+    format!("{:x}", md5::compute(input))
+}
+
+pub fn u32_from_str(input: &str) -> u32 {
+    let result = md5::compute(input.as_bytes());
+    let bytes = result[12..16].try_into().expect("Should not ever panic..");
+    u32::from_le_bytes(bytes)
+}
diff --git a/tig-utils/src/json.rs b/tig-utils/src/json.rs
new file mode 100644
index 00000000..fcacec46
--- /dev/null
+++ b/tig-utils/src/json.rs
@@ -0,0 +1,60 @@
+use flate2::{read::ZlibDecoder, write::ZlibEncoder, Compression};
+use serde::{de::DeserializeOwned, Deserialize, Serialize};
+use serde_json::{to_string, to_value, Map, Value};
+use std::{
+    io::{Read, Write},
+    str,
+};
+
+pub fn dejsonify<'a, T>(json_str: &'a str) -> serde_json::Result<T>
+where
+    T: Deserialize<'a>,
+{
+    serde_json::from_str::<T>(json_str)
+}
+
+pub fn jsonify<T>(obj: &T) -> String
+where
+    T: Serialize,
+{
+    to_string(&jsonify_internal(
+        &to_value(obj).expect("to_value failed on serializable object"),
+    ))
+    .expect("to_string failed on serializable object")
+}
+
+pub fn jsonify_internal(json_value: &Value) -> Value {
+    match json_value {
+        Value::Object(obj) => {
+            let mut sorted_map = Map::new();
+            let mut keys: Vec<&String> = obj.keys().collect();
+            keys.sort();
+            for key in keys {
+                if let Some(value) = obj.get(key) {
+                    sorted_map.insert(key.clone(), jsonify_internal(value));
+                }
+            }
+            Value::Object(sorted_map)
+        }
+        _ => json_value.clone(),
+    }
+}
+
+pub fn decompress_obj<T>(input: &[u8]) -> anyhow::Result<T>
+where
+    T: DeserializeOwned,
+{
+    let mut decoder = ZlibDecoder::new(input);
+    let mut decompressed = String::new();
+    decoder.read_to_string(&mut decompressed)?;
+    Ok(dejsonify(&decompressed)?)
+}
+
+pub fn compress_obj<T>(input: T) -> Vec<u8>
+where
+    T: Serialize,
+{
+    let mut encoder = ZlibEncoder::new(Vec::new(), Compression::default());
+    encoder.write_all(jsonify(&input).as_bytes()).unwrap();
+    encoder.finish().unwrap()
+}
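`jsonify` sorts object keys recursively, so logically-equal values always serialize to byte-identical strings; that determinism is what makes derived hashes like `u32_from_str(jsonify(..))` stable:

    use serde_json::json;
    use tig_utils::jsonify;

    fn main() {
        let value = json!({"b": 1, "a": {"d": 2, "c": 3}});
        // Keys come out sorted at every nesting level.
        assert_eq!(jsonify(&value), r#"{"a":{"c":3,"d":2},"b":1}"#);
    }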
diff --git a/tig-utils/src/lib.rs b/tig-utils/src/lib.rs
new file mode 100644
index 00000000..3eaea3e4
--- /dev/null
+++ b/tig-utils/src/lib.rs
@@ -0,0 +1,14 @@
+mod eth;
+pub use eth::*;
+mod frontiers;
+pub use frontiers::*;
+mod hash;
+pub use hash::*;
+mod json;
+pub use json::*;
+mod number;
+pub use number::*;
+#[cfg(any(feature = "request", feature = "request-js"))]
+mod request;
+#[cfg(any(feature = "request", feature = "request-js"))]
+pub use request::*;
diff --git a/tig-utils/src/number.rs b/tig-utils/src/number.rs
new file mode 100644
index 00000000..429f7bcf
--- /dev/null
+++ b/tig-utils/src/number.rs
@@ -0,0 +1,355 @@
+use serde::{Deserialize, Deserializer, Serialize, Serializer};
+use std::{
+    cmp::{Eq, Ord, Ordering, PartialEq, PartialOrd},
+    fmt::Display,
+    iter::Sum,
+    ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Sub, SubAssign},
+};
+use uint::construct_uint;
+
+construct_uint! {
+    pub struct U256(4);
+}
+
+impl U256 {
+    pub const fn from_u128(value: u128) -> Self {
+        let mut ret = [0; 4];
+        ret[0] = value as u64;
+        ret[1] = (value >> 64) as u64;
+        U256(ret)
+    }
+}
+
+#[derive(Debug, Copy, Clone)]
+pub struct PreciseNumber(U256);
+
+impl PreciseNumber {
+    const DECIMALS: u32 = 18;
+    const PRECISION: U256 = U256::from_u128(10u128.pow(PreciseNumber::DECIMALS));
+
+    pub fn inner(&self) -> &U256 {
+        &self.0
+    }
+
+    pub fn from<T: Into<U256>>(value: T) -> Self {
+        Self(value.into() * PreciseNumber::PRECISION)
+    }
+
+    pub fn from_f64(value: f64) -> Self {
+        Self(((value * 10f64.powi(PreciseNumber::DECIMALS as i32)) as i128).into())
+    }
+
+    pub(crate) fn sqrt(&self) -> PreciseNumber {
+        let num = self.inner().as_u128() as f64;
+        let div = 10f64.powi(PreciseNumber::DECIMALS as i32);
+        PreciseNumber::from_f64((num / div).sqrt())
+    }
+
+    pub fn from_dec_str(value: &str) -> Result<Self, uint::FromDecStrErr> {
+        Ok(Self(U256::from_dec_str(value)?))
+    }
+}
+
+impl Display for PreciseNumber {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        self.0.fmt(f)
+    }
+}
+
+impl Serialize for PreciseNumber {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        serializer.serialize_str(self.to_string().as_str())
+    }
+}
+
+impl<'de> Deserialize<'de> for PreciseNumber {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        let s = String::deserialize(deserializer)?;
+        Self::from_dec_str(s.as_str()).map_err(|e| serde::de::Error::custom(e))
+    }
+}
+
+impl Add for PreciseNumber {
+    type Output = Self;
+
+    fn add(self, other: Self) -> Self {
+        Self(self.0 + other.0)
+    }
+}
+
+impl<'a> Add<&'a PreciseNumber> for PreciseNumber {
+    type Output = Self;
+
+    fn add(self, other: &'a Self) -> Self {
+        Self(self.0 + other.0)
+    }
+}
+
+impl<'a> Add<PreciseNumber> for &'a PreciseNumber {
+    type Output = PreciseNumber;
+
+    fn add(self, other: PreciseNumber) -> PreciseNumber {
+        PreciseNumber(self.0 + other.0)
+    }
+}
+
+impl<'a, 'b> Add<&'a PreciseNumber> for &'b PreciseNumber {
+    type Output = PreciseNumber;
+
+    fn add(self, other: &'a PreciseNumber) -> PreciseNumber {
+        PreciseNumber(self.0 + other.0)
+    }
+}
+
+impl AddAssign for PreciseNumber {
+    fn add_assign(&mut self, rhs: Self) {
+        self.0 += rhs.0;
+    }
+}
+
+impl<'a> AddAssign<&'a PreciseNumber> for PreciseNumber {
+    fn add_assign(&mut self, rhs: &'a PreciseNumber) {
+        self.0 += rhs.0;
+    }
+}
+
+impl Sub for PreciseNumber {
+    type Output = Self;
+
+    fn sub(self, other: Self) -> Self {
+        Self(self.0 - other.0)
+    }
+}
+
+impl<'a> Sub<&'a PreciseNumber> for PreciseNumber {
+    type Output = Self;
+
+    fn sub(self, other: &'a Self) -> Self {
+        Self(self.0 - other.0)
+    }
+}
+
+impl<'a> Sub<PreciseNumber> for &'a PreciseNumber {
+    type Output = PreciseNumber;
+
+    fn sub(self, other: PreciseNumber) -> PreciseNumber {
+        PreciseNumber(self.0 - other.0)
+    }
+}
+
+impl<'a, 'b> Sub<&'a PreciseNumber> for &'b PreciseNumber {
+    type Output = PreciseNumber;
+
+    fn sub(self, other: &'a PreciseNumber) -> PreciseNumber {
+        PreciseNumber(self.0 - other.0)
+    }
+}
+
+impl SubAssign for PreciseNumber {
+    fn sub_assign(&mut self, other: Self) {
+        self.0 -= other.0;
+    }
+}
+
+impl<'a> SubAssign<&'a PreciseNumber> for PreciseNumber {
+    fn sub_assign(&mut self, other: &'a Self) {
+        self.0 -= other.0;
+    }
+}
+
+impl Mul for PreciseNumber {
+    type Output = Self;
+
+    fn mul(self, rhs: Self) -> Self::Output {
+        Self(self.0 * rhs.0 / Self::PRECISION)
+    }
+}
+
+impl<'a> Mul<&'a PreciseNumber> for PreciseNumber {
+    type Output = Self;
+
+    fn mul(self, rhs: &'a Self) -> Self::Output {
+        Self(self.0 * rhs.0 / Self::PRECISION)
+    }
+}
+
+impl<'a> Mul<PreciseNumber> for &'a PreciseNumber {
+    type Output = PreciseNumber;
+
+    fn mul(self, rhs: PreciseNumber) -> PreciseNumber {
+        PreciseNumber(self.0 * rhs.0 / PreciseNumber::PRECISION)
+    }
+}
+
+impl<'a, 'b> Mul<&'a PreciseNumber> for &'b PreciseNumber {
+    type Output = PreciseNumber;
+
+    fn mul(self, rhs: &'a PreciseNumber) -> PreciseNumber {
+        PreciseNumber(self.0 * rhs.0 / PreciseNumber::PRECISION)
+    }
+}
+
+impl MulAssign for PreciseNumber {
+    fn mul_assign(&mut self, other: Self) {
+        self.0 = self.0 * other.0 / Self::PRECISION;
+    }
+}
+
+impl<'a> MulAssign<&'a PreciseNumber> for PreciseNumber {
+    fn mul_assign(&mut self, other: &'a Self) {
+        self.0 = self.0 * other.0 / Self::PRECISION;
+    }
+}
+
+impl Div for PreciseNumber {
+    type Output = Self;
+
+    fn div(self, rhs: Self) -> Self::Output {
+        Self(self.0 * Self::PRECISION / rhs.0)
+    }
+}
+
+impl<'a> Div<&'a PreciseNumber> for PreciseNumber {
+    type Output = Self;
+
+    fn div(self, rhs: &'a Self) -> Self::Output {
+        Self(self.0 * Self::PRECISION / rhs.0)
+    }
+}
+
+impl<'a> Div<PreciseNumber> for &'a PreciseNumber {
+    type Output = PreciseNumber;
+
+    fn div(self, rhs: PreciseNumber) -> PreciseNumber {
+        PreciseNumber(self.0 * PreciseNumber::PRECISION / rhs.0)
+    }
+}
+
+impl<'a, 'b> Div<&'a PreciseNumber> for &'b PreciseNumber {
+    type Output = PreciseNumber;
+
+    fn div(self, rhs: &'a PreciseNumber) -> PreciseNumber {
+        PreciseNumber(self.0 * PreciseNumber::PRECISION / rhs.0)
+    }
+}
+
+impl DivAssign for PreciseNumber {
+    fn div_assign(&mut self, other: Self) {
+        self.0 = self.0 * Self::PRECISION / other.0;
+    }
+}
+
+impl<'a> DivAssign<&'a PreciseNumber> for PreciseNumber {
+    fn div_assign(&mut self, other: &'a Self) {
+        self.0 = self.0 * Self::PRECISION / other.0;
+    }
+}
+
+impl PartialEq for PreciseNumber {
+    fn eq(&self, other: &Self) -> bool {
+        self.0 == other.0
+    }
+}
+
+impl<'a> PartialEq<&'a PreciseNumber> for PreciseNumber {
+    fn eq(&self, other: &&'a PreciseNumber) -> bool {
+        self.0 == other.0
+    }
+}
+
+impl<'a> PartialEq<PreciseNumber> for &'a PreciseNumber {
+    fn eq(&self, other: &PreciseNumber) -> bool {
+        self.0 == other.0
+    }
+}
+
+impl PartialOrd for PreciseNumber {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        self.0.partial_cmp(&other.0)
+    }
+}
+
+impl<'a> PartialOrd<&'a PreciseNumber> for PreciseNumber {
+    fn partial_cmp(&self, other: &&'a PreciseNumber) -> Option<Ordering> {
+        self.0.partial_cmp(&other.0)
+    }
+}
+impl<'a> PartialOrd<PreciseNumber> for &'a PreciseNumber {
+    fn partial_cmp(&self, other: &PreciseNumber) -> Option<Ordering> {
+        self.0.partial_cmp(&other.0)
+    }
+}
+
+impl Ord for PreciseNumber {
+    fn cmp(&self, other: &Self) -> Ordering {
+        self.0.cmp(&other.0)
+    }
+}
+
+impl Eq for PreciseNumber {}
+
+impl<'a> Sum<&'a PreciseNumber> for PreciseNumber {
+    fn sum<I: Iterator<Item = &'a PreciseNumber>>(iter: I) -> Self {
+        iter.fold(PreciseNumber::from(0), |acc, x| acc + *x)
+    }
+}
+
+impl Sum for PreciseNumber {
+    fn sum<I: Iterator<Item = PreciseNumber>>(iter: I) -> Self {
+        iter.fold(PreciseNumber::from(0), |acc, x| acc + x)
+    }
+}
+
+pub trait PreciseNumberOps {
+    fn l1_normalise(&self) -> Vec<PreciseNumber>;
+    fn arithmetic_mean(&self) -> PreciseNumber;
+    fn variance(&self) -> PreciseNumber;
+    fn standard_deviation(&self) -> PreciseNumber;
+}
+
+impl<T> PreciseNumberOps for T
+where
+    T: AsRef<[PreciseNumber]>,
+{
+    fn l1_normalise(&self) -> Vec<PreciseNumber> {
+        let values = self.as_ref();
+        let max_value = *values.iter().max().unwrap();
+        let zero = PreciseNumber::from(0);
+        if max_value == zero {
+            values.iter().map(|_| zero.clone()).collect()
+        } else {
+            values.iter().map(|&x| x / max_value).collect()
+        }
+    }
+
+    fn arithmetic_mean(&self) -> PreciseNumber {
+        let values = self.as_ref();
+        let sum: PreciseNumber = values.iter().sum();
+        let count = PreciseNumber::from(values.len() as u32);
+        sum / count
+    }
+
+    fn variance(&self) -> PreciseNumber {
+        let values = self.as_ref();
+        let mean = self.arithmetic_mean();
+        let variance_sum: PreciseNumber = values
+            .iter()
+            .map(|&x| {
+                let diff = if x >= mean { x - mean } else { mean - x };
+                diff * diff
+            })
+            .sum::<PreciseNumber>();
+        let count = PreciseNumber::from(values.len() as u32);
+        variance_sum / count
+    }
+    fn standard_deviation(&self) -> PreciseNumber {
+        self.variance().sqrt()
+    }
+}
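`PreciseNumber` is 18-decimal fixed point: `mul` divides the raw product by `PRECISION` and `div` pre-multiplies by it, so the scale is preserved across operations:

    use tig_utils::PreciseNumber;

    fn main() {
        let two = PreciseNumber::from(2);
        let half = PreciseNumber::from_f64(0.5);
        assert_eq!(two * half, PreciseNumber::from(1)); // (2e18 * 0.5e18) / 1e18 = 1e18
        assert_eq!(PreciseNumber::from(1) / two, half); // (1e18 * 1e18) / 2e18 = 0.5e18
    }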
diff --git a/tig-utils/src/request.rs b/tig-utils/src/request.rs
new file mode 100644
index 00000000..c9c0e2e2
--- /dev/null
+++ b/tig-utils/src/request.rs
@@ -0,0 +1,220 @@
+#[cfg(all(feature = "request", feature = "request-js"))]
+compile_error!("features `request` and `request-js` are mutually exclusive");
+
+use anyhow::{anyhow, Result};
+use std::collections::HashMap;
+
+#[cfg(feature = "request-js")]
+mod request {
+    use super::*;
+    use wasm_bindgen::prelude::*;
+    use wasm_bindgen_futures::JsFuture;
+    use web_sys::{Headers, Request, RequestInit, RequestMode, Response};
+
+    #[allow(async_fn_in_trait)]
+    pub trait FromResponse: Sized {
+        async fn from_response(response: Response) -> Result<Self>;
+    }
+
+    async fn check_status(response: Response) -> Result<Response> {
+        let status = response.status();
+        if !(200..=299).contains(&status) {
+            let msg = match response.text() {
+                Ok(promise) => match JsFuture::from(promise).await {
+                    Ok(value) => value.as_string().unwrap_or("".to_string()),
+                    Err(_) => "".to_string(),
+                },
+                Err(_) => "".to_string(),
+            };
+            return Err(anyhow!("Request error (status: {}, body: {})", status, msg));
+        }
+        Ok(response)
+    }
+
+    impl FromResponse for Vec<u8> {
+        async fn from_response(response: Response) -> Result<Self> {
+            let promise = check_status(response).await?.array_buffer().unwrap();
+            let future = JsFuture::from(promise);
+            let buffer = future
+                .await
+                .map_err(|_| anyhow!("Failed to read response body as array buffer"))?;
+            let uint8_array = js_sys::Uint8Array::new(&buffer);
+            Ok(uint8_array.to_vec())
+        }
+    }
+
+    impl FromResponse for String {
+        async fn from_response(response: Response) -> Result<Self> {
+            let promise = check_status(response)
+                .await?
+                .text()
+                .map_err(|_| anyhow!("Failed to read response body as text"))?;
+            JsFuture::from(promise)
+                .await
+                .map_err(|_| anyhow!("Failed to read response body as text"))?
+                .as_string()
+                .ok_or_else(|| anyhow!("Failed to convert JsValue to String"))
+        }
+    }
+
+    async fn call<T>(
+        method: &str,
+        url: &str,
+        body: Option<&JsValue>,
+        headers: Option<Headers>,
+    ) -> Result<T>
+    where
+        T: FromResponse,
+    {
+        let mut opts = RequestInit::new();
+        opts.method(method);
+        opts.mode(RequestMode::Cors);
+
+        if let Some(b) = body {
+            opts.body(Some(b));
+        }
+
+        if let Some(h) = headers {
+            opts.headers(&h);
+        }
+
+        let request = Request::new_with_str_and_init(url, &opts)
+            .map_err(|_| anyhow!("Failed to create request"))?;
+
+        let window = web_sys::window().ok_or_else(|| anyhow!("No global `window` exists"))?;
+        let response_value = JsFuture::from(window.fetch_with_request(&request))
+            .await
+            .map_err(|_| anyhow!("Failed to fetch"))?;
+
+        let response: Response = response_value
+            .dyn_into()
+            .map_err(|_| anyhow!("Failed to cast to Response"))?;
+
+        T::from_response(response).await
+    }
+
+    pub async fn get<T>(url: &str, headers: Option<HashMap<String, String>>) -> Result<T>
+    where
+        T: FromResponse,
+    {
+        let headers = convert_headers(headers)?;
+        call::<T>("GET", url, None, headers).await
+    }
+
+    pub async fn post<T>(url: &str, body: &str, headers: Option<HashMap<String, String>>) -> Result<T>
+    where
+        T: FromResponse,
+    {
+        let headers = convert_headers(headers)?;
+        let body_value = Some(JsValue::from_str(body));
+        call::<T>("POST", url, body_value.as_ref(), headers).await
+    }
+
+    fn convert_headers(headers_option: Option<HashMap<String, String>>) -> Result<Option<Headers>> {
+        headers_option
+            .map(|headers_map| {
+                let headers = Headers::new().map_err(|_| anyhow!("Failed to create Headers"))?;
+                for (key, value) in headers_map {
+                    headers
+                        .set(&key, &value)
+                        .map_err(|_| anyhow!("Failed to set header"))?;
+                }
+                Ok(headers)
+            })
+            .transpose()
+    }
+}
+
+#[cfg(feature = "request")]
+mod request {
+    use super::*;
+    use reqwest::{
+        header::{HeaderMap, HeaderName, HeaderValue},
+        Response,
+    };
+
+    #[allow(async_fn_in_trait)]
+    pub trait FromResponse: Sized {
+        async fn from_response(response: Response) -> Result<Self>;
+    }
+
+    async fn check_status(response: Response) -> Result<Response> {
+        let status = response.status().as_u16();
+        if !(200..=299).contains(&status) {
+            let msg = match response.text().await {
+                Ok(msg) => msg.clone(),
+                Err(_) => "".to_string(),
+            };
+            return Err(anyhow!("Request error (status: {}, body: {})", status, msg));
+        }
+        Ok(response)
+    }
+
+    impl FromResponse for Vec<u8> {
+        async fn from_response(response: Response) -> Result<Self> {
+            Ok(check_status(response).await?.bytes().await?.to_vec())
+        }
+    }
+
+    impl FromResponse for String {
+        async fn from_response(response: Response) -> Result<Self> {
+            Ok(check_status(response).await?.text().await?)
+        }
+    }
+
+    async fn call<T: FromResponse>(
+        method: &str,
+        url: &str,
+        body: Option<String>,
+        headers: Option<HeaderMap>,
+    ) -> Result<T> {
+        let client = reqwest::Client::new();
+        let mut request_builder = client.request(method.parse().unwrap(), url);
+
+        if let Some(b) = body {
+            request_builder = request_builder.body(b);
+        }
+
+        if let Some(h) = headers {
+            request_builder = request_builder.headers(h);
+        }
+
+        let response = request_builder.send().await?;
+        T::from_response(response).await
+    }
+
+    pub async fn get<T: FromResponse>(
+        url: &str,
+        headers: Option<HashMap<String, String>>,
+    ) -> Result<T> {
+        let headers = convert_headers(headers)?;
+        call::<T>("GET", url, None, headers).await
+    }
+
+    pub async fn post<T: FromResponse>(
+        url: &str,
+        body: &str,
+        headers: Option<HashMap<String, String>>,
+    ) -> Result<T> {
+        let headers = convert_headers(headers)?;
+        let body_value = Some(body.to_string());
+        call::<T>("POST", url, body_value, headers).await
+    }
+
+    fn convert_headers(headers_option: Option<HashMap<String, String>>) -> Result<Option<HeaderMap>> {
+        headers_option
+            .map(|headers_map| {
+                let mut headers = HeaderMap::new();
+                for (key, value) in headers_map {
+                    let header_name = HeaderName::from_bytes(key.as_bytes())
+                        .map_err(|_| anyhow!("Invalid header name"))?;
+                    let header_value = HeaderValue::from_str(&value)
+                        .map_err(|_| anyhow!("Invalid header value"))?;
+                    headers.insert(header_name, header_value);
+                }
+                Ok(headers)
+            })
+            .transpose()
+    }
+}
+
+pub use request::*;
diff --git a/tig-wasm/Cargo.toml b/tig-wasm/Cargo.toml
new file mode 100644
index 00000000..1a3115e4
--- /dev/null
+++ b/tig-wasm/Cargo.toml
@@ -0,0 +1,19 @@
+[package]
+name = "tig-wasm"
+version = "0.1.0"
+authors.workspace = true
+repository.workspace = true
+edition.workspace = true
+readme.workspace = true
+
+[lib]
+crate-type = ["cdylib", "staticlib"]
+
+[dependencies]
+tig-algorithms = { path = "../tig-algorithms" }
+tig-challenges = { path = "../tig-challenges" }
+tig-utils = { path = "../tig-utils" }
+wee_alloc = { version = "0.4.5" }
+
+[features]
+entry-point = []
diff --git a/tig-wasm/build.rs b/tig-wasm/build.rs
new file mode 100644
index 00000000..956cc723
--- /dev/null
+++ b/tig-wasm/build.rs
@@ -0,0 +1,24 @@
+use std::env;
+use std::fs;
+use std::path::Path;
+
+fn main() {
+    // Only run the following code if the "entry-point" feature is enabled
+    if env::var("CARGO_FEATURE_ENTRY_POINT").is_ok() {
+        // Read the CHALLENGE and ALGORITHM environment variables
+        let challenge = env::var("CHALLENGE").expect("CHALLENGE environment variable not set");
+        let algorithm = env::var("ALGORITHM").expect("ALGORITHM environment variable not set");
+
+        let entry_point_template = fs::read_to_string("src/entry_point_template.rs")
+            .expect("Failed to read src/entry_point_template.rs");
+        // Generate the code with the substituted values
+        let code = entry_point_template
+            .replace("{CHALLENGE}", challenge.as_str())
+            .replace("{ALGORITHM}", algorithm.as_str());
+
+        // Write the generated code to a file
+        let out_dir = env::var("OUT_DIR").unwrap();
+        let dest_path = Path::new(&out_dir).join("entry_point.rs");
+        fs::write(dest_path, code).expect("Failed to write entry_point.rs");
+    }
+}
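With `CHALLENGE=knapsack` and `ALGORITHM=greedy` (illustrative values; the workflow derives them from the branch name), the template that follows would be emitted into `OUT_DIR/entry_point.rs` with those names substituted, beginning roughly:

    use tig_algorithms::knapsack::greedy;
    use tig_challenges::{*, knapsack::*};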
+#[no_mangle]
+pub fn entry_point(seed: u32, difficulty: Difficulty, ptr: *mut u8, max_length: usize) {
+    let challenge = Challenge::generate_instance(seed, &difficulty).expect("Failed to generate challenge");
+    if let Ok(Some(solution)) = {ALGORITHM}::solve_challenge(&challenge) {
+        if challenge.verify_solution(&solution).is_ok() {
+            let mut buffer = Vec::<u8>::new();
+            let compressed = compress_obj(solution);
+            buffer.extend((compressed.len() as u32).to_be_bytes());
+            buffer.extend(compressed);
+            if buffer.len() > max_length {
+                panic!("Encoded solution exceeds maximum length");
+            }
+
+            for (i, &byte) in buffer.iter().enumerate() {
+                unsafe { *ptr.add(i) = byte };
+            }
+        }
+    }
+}
diff --git a/tig-wasm/src/lib.rs b/tig-wasm/src/lib.rs
new file mode 100644
index 00000000..6972792f
--- /dev/null
+++ b/tig-wasm/src/lib.rs
@@ -0,0 +1,2 @@
+#[cfg(feature = "entry-point")]
+include!(concat!(env!("OUT_DIR"), "/entry_point.rs"));
diff --git a/tig-worker/Cargo.toml b/tig-worker/Cargo.toml
new file mode 100644
index 00000000..8a7de202
--- /dev/null
+++ b/tig-worker/Cargo.toml
@@ -0,0 +1,20 @@
+[package]
+name = "tig-worker"
+version = "0.1.0"
+authors.workspace = true
+repository.workspace = true
+edition.workspace = true
+readme.workspace = true
+
+[lib]
+crate-type = ["cdylib", "rlib"]
+
+[dependencies]
+anyhow = "1.0.81"
+clap = { version = "4.5.4" }
+tig-challenges = { path = "../tig-challenges" }
+tig-structs = { path = "../tig-structs" }
+tig-utils = { path = "../tig-utils" }
+wasmi = { git = "https://github.com/tig-foundation/wasmi.git", branch = "runtime_signature" }
+
+[features]
diff --git a/tig-worker/src/lib.rs b/tig-worker/src/lib.rs
new file mode 100644
index 00000000..85d58e6b
--- /dev/null
+++ b/tig-worker/src/lib.rs
@@ -0,0 +1,2 @@
+mod worker;
+pub use worker::*;
diff --git a/tig-worker/src/main.rs b/tig-worker/src/main.rs
new file mode 100644
index 00000000..96bcb182
--- /dev/null
+++ b/tig-worker/src/main.rs
@@ -0,0 +1,167 @@
+mod worker;
+use anyhow::{anyhow, Result};
+use clap::{arg, Command};
+use std::{fs, path::PathBuf, process::exit};
+use tig_structs::core::{BenchmarkSettings, SolutionData};
+use tig_utils::{dejsonify, jsonify};
+
+fn cli() -> Command {
+    Command::new("rust_cli_app")
+        .about("CLI app to compute or verify solutions")
+        .subcommand_required(true)
+        .arg_required_else_help(true)
+        .subcommand(
+            Command::new("compute_solution")
+                .about("Computes a solution")
+                .arg(
+                    arg!(<SETTINGS> "Path to a settings file")
+                        .value_parser(clap::value_parser!(PathBuf)),
+                )
+                .arg(arg!(<NONCE> "A u32 nonce").value_parser(clap::value_parser!(u32)))
+                .arg(arg!(<WASM> "Path to a wasm file").value_parser(clap::value_parser!(PathBuf)))
+                .arg(
+                    arg!(--endless "Optional flag to compute solutions continuously")
+                        .action(clap::ArgAction::SetTrue),
+                )
+                .arg(
+                    arg!(--fuel [FUEL] "Optional maximum fuel parameter for WASM VM")
+                        .default_value("1000000000")
+                        .value_parser(clap::value_parser!(u64)),
+                )
+                .arg(
+                    arg!(--mem [MEM] "Optional maximum memory parameter for WASM VM")
+                        .default_value("1000000000")
+                        .value_parser(clap::value_parser!(u64)),
+                )
+                .arg(
+                    arg!(--debug "Optional flag to print debug messages")
+                        .action(clap::ArgAction::SetTrue),
+                ),
+        )
+        .subcommand(
+            Command::new("verify_solution")
+                .about("Verifies a solution")
+                .arg(
+                    arg!(<SETTINGS> "Path to a settings file")
+                        .value_parser(clap::value_parser!(PathBuf)),
+                )
+                .arg(
+                    arg!(<SOLUTION> "Path to a solution file")
+                        .value_parser(clap::value_parser!(PathBuf)),
+                )
+                .arg(
+                    arg!(--debug "Optional flag to print debug messages")
+                        .action(clap::ArgAction::SetTrue),
+                ),
+        )
+}
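+
+// Illustrative invocations (file names and the nonce are placeholders):
+//   tig-worker compute_solution settings.json 1234 algorithm.wasm --endless
+//   tig-worker verify_solution settings.json solution.json --debug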
+fn main() {
+    let matches = cli().get_matches();
+
+    let result = match matches.subcommand() {
+        Some(("compute_solution", sub_matches)) => {
+            let settings_path = sub_matches.get_one::<PathBuf>("SETTINGS").unwrap();
+            let nonce = *sub_matches.get_one::<u32>("NONCE").unwrap();
+            let wasm_path = sub_matches.get_one::<PathBuf>("WASM").unwrap();
+            let endless = sub_matches.get_flag("endless");
+            let max_fuel = *sub_matches.get_one::<u64>("fuel").unwrap();
+            let max_memory = *sub_matches.get_one::<u64>("mem").unwrap();
+            let debug = sub_matches.get_flag("debug");
+
+            compute_solution(
+                settings_path,
+                nonce,
+                wasm_path,
+                max_memory,
+                max_fuel,
+                endless,
+                debug,
+            )
+        }
+        Some(("verify_solution", sub_matches)) => {
+            let settings_path = sub_matches.get_one::<PathBuf>("SETTINGS").unwrap();
+            let solution_path = sub_matches.get_one::<PathBuf>("SOLUTION").unwrap();
+            let debug = sub_matches.get_flag("debug");
+
+            verify_solution(settings_path, solution_path, debug)
+        }
+        _ => unreachable!("The CLI should prevent getting here"),
+    };
+    match result {
+        Ok(_) => exit(0),
+        Err(e) => {
+            println!("Error: {}", e);
+            exit(1);
+        }
+    };
+}
+
+fn compute_solution(
+    settings_path: &PathBuf,
+    nonce: u32,
+    wasm_path: &PathBuf,
+    max_memory: u64,
+    max_fuel: u64,
+    endless: bool,
+    debug: bool,
+) -> Result<()> {
+    let settings = dejsonify::<BenchmarkSettings>(
+        &fs::read_to_string(settings_path)
+            .map_err(|e| anyhow!("Failed to read settings file: {}", e))?,
+    )
+    .map_err(|e| anyhow!("Failed to dejsonify settings file: {}", e))?;
+
+    let wasm = fs::read(wasm_path).map_err(|e| anyhow!("Failed to read wasm file: {}", e))?;
+
+    let mut i = 0;
+    loop {
+        let result =
+            worker::compute_solution(&settings, nonce + i, wasm.as_slice(), max_memory, max_fuel)?;
+        match result {
+            Ok(solution_data) => {
+                println!("{}", jsonify(&solution_data));
+            }
+            Err(e) => {
+                if debug {
+                    println!("Nonce {}, no solution: {}", nonce + i, e);
+                }
+            }
+        }
+        i += 1;
+        if !endless {
+            break;
+        }
+    }
+
+    Ok(())
+}
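+
+// Re-checks a solution previously emitted by `compute_solution`: the
+// challenge instance is regenerated from the same settings and nonce, and
+// the stored solution is verified against it.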
+fn verify_solution(settings_path: &PathBuf, solution_path: &PathBuf, debug: bool) -> Result<()> {
+    let settings = dejsonify::<BenchmarkSettings>(
+        &fs::read_to_string(settings_path)
+            .map_err(|e| anyhow!("Failed to read settings file: {}", e))?,
+    )
+    .map_err(|e| anyhow!("Failed to dejsonify settings file: {}", e))?;
+    let solution_data = dejsonify::<SolutionData>(
+        fs::read_to_string(solution_path)
+            .map_err(|e| anyhow!("Failed to read solution file: {}", e))?
+            .as_str(),
+    )
+    .map_err(|e| anyhow!("Failed to dejsonify solution: {}", e))?;
+    let result = worker::verify_solution(&settings, solution_data.nonce, &solution_data.solution)?;
+    if debug {
+        if let Err(e) = result {
+            println!("Solution is invalid: {}", e);
+        } else {
+            println!("Solution is valid");
+        }
+    }
+    Ok(())
+}
diff --git a/tig-worker/src/worker.rs b/tig-worker/src/worker.rs
new file mode 100644
index 00000000..b6f0c795
--- /dev/null
+++ b/tig-worker/src/worker.rs
@@ -0,0 +1,168 @@
+use anyhow::{anyhow, Result};
+use tig_challenges::{knapsack, satisfiability, vehicle_routing, ChallengeTrait};
+use tig_structs::core::*;
+use tig_utils::decompress_obj;
+use wasmi::{Config, Engine, Linker, Module, Store, StoreLimitsBuilder};
+
+const BUFFER_SIZE: usize = u16::MAX as usize;
+
+pub fn compute_solution(
+    settings: &BenchmarkSettings,
+    nonce: u32,
+    wasm: &[u8],
+    max_memory: u64,
+    max_fuel: u64,
+) -> Result<Result<SolutionData>> {
+    if settings.difficulty.len() != 2 {
+        return Err(anyhow!("Unsupported difficulty length"));
+    }
+
+    let mut config = Config::default();
+    config.update_runtime_signature(true);
+    config.consume_fuel(true);
+
+    let limits = StoreLimitsBuilder::new()
+        .memory_size(max_memory as usize)
+        .memories(1)
+        .trap_on_grow_failure(true)
+        .build();
+    // Setup instance of wasm module
+    let engine = Engine::new(&config);
+    let mut store = Store::new(&engine, limits);
+    store.add_fuel(max_fuel).unwrap();
+    let linker = Linker::new(&engine);
+    let module = Module::new(store.engine(), wasm)
+        .map_err(|e| anyhow!("Failed to instantiate module: {}", e))?;
+    let instance = &linker
+        .instantiate(&mut store, &module)
+        .map_err(|e| anyhow!("Failed to instantiate linker: {}", e))?
+        .start(&mut store)
+        .map_err(|e| anyhow!("Failed to start module: {}", e))?;
+
+    // Create memory for entry_point to write solution to
+    let mut buffer = [0u8; BUFFER_SIZE];
+    let memory = instance
+        .get_memory(&store, "memory")
+        .ok_or_else(|| anyhow!("Failed to find memory"))?;
+    memory
+        .write(&mut store, 0, &buffer)
+        .map_err(|e| anyhow!("Failed to write to memory: {}", e))?;
+
+    // Run algorithm
+    let func = instance
+        .get_func(&store, "entry_point")
+        .ok_or_else(|| anyhow!("Failed to find entry_point"))?;
+    let seed = settings.calc_seed(nonce);
+    store.set_runtime_signature(seed as u64);
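+    // The 5-value tuple below mirrors the template's
+    // `entry_point(seed, difficulty, ptr, max_length)` signature: the two
+    // difficulty components are passed as separate i32s, and the output
+    // buffer starts at offset 0 of the module's exported memory.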
+    if let Err(e) = func
+        .typed::<(u32, i32, i32, i32, i32), ()>(&store)
+        .map_err(|e| anyhow!("Failed to instantiate function: {}", e))?
+        .call(
+            &mut store,
+            (
+                seed,
+                settings.difficulty[0],
+                settings.difficulty[1],
+                0,
+                BUFFER_SIZE as i32,
+            ),
+        )
+    {
+        return Ok(Err(anyhow!("Error occurred during execution: {}", e)));
+    }
+    // Get runtime signature
+    let runtime_signature_u64 = store.get_runtime_signature();
+    let runtime_signature = (runtime_signature_u64 as u32) ^ ((runtime_signature_u64 >> 32) as u32);
+    let fuel_consumed = store.fuel_consumed().unwrap();
+    // Read solution from memory
+    memory
+        .read(&store, 0, &mut buffer)
+        .map_err(|e| anyhow!("Failed to read from memory: {}", e))?;
+    let solution_len = u32::from_be_bytes(buffer[0..4].try_into().unwrap()) as usize;
+    if solution_len == 0 {
+        return Ok(Err(anyhow!(
+            "No solution found (runtime_signature: {}, fuel_consumed: {})",
+            runtime_signature,
+            fuel_consumed
+        )));
+    }
+    if solution_len > BUFFER_SIZE - 4 {
+        return Ok(Err(anyhow!(
+            "Solution too large (solution_size: {}, runtime_signature: {}, fuel_consumed: {})",
+            solution_len,
+            runtime_signature,
+            fuel_consumed
+        )));
+    }
+    let solution = decompress_obj(&buffer[4..4 + solution_len])
+        .map_err(|e| anyhow!("Failed to convert buffer to solution: {}", e))?;
+
+    Ok(Ok(SolutionData {
+        nonce,
+        runtime_signature,
+        fuel_consumed,
+        solution,
+    }))
+}
+
+pub fn verify_solution(
+    settings: &BenchmarkSettings,
+    nonce: u32,
+    solution: &Solution,
+) -> Result<Result<()>> {
+    let seed = settings.calc_seed(nonce);
+    match settings.challenge_id.as_str() {
+        "c001" => {
+            let challenge =
+                satisfiability::Challenge::generate_instance_from_vec(seed, &settings.difficulty)
+                    .map_err(|e| {
+                        anyhow!(
+                            "satisfiability::Challenge::generate_instance_from_vec error: {}",
+                            e
+                        )
+                    })?;
+            match satisfiability::Solution::try_from(solution.clone()) {
+                Ok(solution) => Ok(challenge.verify_solution(&solution)),
+                Err(_) => Ok(Err(anyhow!(
+                    "Invalid solution. Cannot convert to satisfiability::Solution"
+                ))),
+            }
+        }
+        "c002" => {
+            let challenge =
+                vehicle_routing::Challenge::generate_instance_from_vec(seed, &settings.difficulty)
+                    .map_err(|e| {
+                        anyhow!(
+                            "vehicle_routing::Challenge::generate_instance_from_vec error: {}",
+                            e
+                        )
+                    })?;
+            match vehicle_routing::Solution::try_from(solution.clone()) {
+                Ok(solution) => Ok(challenge.verify_solution(&solution)),
+                Err(_) => Ok(Err(anyhow!(
+                    "Invalid solution. Cannot convert to vehicle_routing::Solution"
+                ))),
+            }
+        }
+        "c003" => {
+            let challenge =
+                knapsack::Challenge::generate_instance_from_vec(seed, &settings.difficulty)
+                    .map_err(|e| {
+                        anyhow!(
+                            "knapsack::Challenge::generate_instance_from_vec error: {}",
+                            e
+                        )
+                    })?;
+            match knapsack::Solution::try_from(solution.clone()) {
+                Ok(solution) => Ok(challenge.verify_solution(&solution)),
+                Err(_) => Ok(Err(anyhow!(
+                    "Invalid solution. Cannot convert to knapsack::Solution"
+                ))),
+            }
+        }
+        _ => panic!("Unknown challenge"),
+    }
+}