From dc6d4b0277be8814fe3adddaa8661a09dfb1576d Mon Sep 17 00:00:00 2001 From: "@RandyMcMillan" Date: Fri, 2 Feb 2024 17:51:56 -0500 Subject: [PATCH] 1e6f00319db48217445d18fc026cdf8f8b802cc2/2059/828642/742494/03/00039f07 gnostr-relay:a hybrid git+nostr relay (WIP) --- .gnostr/reflog | 44 + ...0f6224f-make-gnostr-proxy-relay-list.patch | 17684 ++++++++++++++++ 2 files changed, 17728 insertions(+) create mode 100644 .gnostr/reflog create mode 100644 .gnostr/snapshots/relay-2061-827873-237371-1706482629-91d3d11-0f6224f-make-gnostr-proxy-relay-list.patch diff --git a/.gnostr/reflog b/.gnostr/reflog new file mode 100644 index 00000000..0908f1da --- /dev/null +++ b/.gnostr/reflog @@ -0,0 +1,44 @@ +wss://{RELAY}/{REPO}/000009fe73d29fa284914633f99a83cfe8978c16/HEAD@{0}:commit:f5cdb3c805462bb1d8a218a8523e0c79fb08e1de/2059/828640/744515/01/00013a97 gnostr-relay:a hybrid git+nostr relay (WIP) +wss://{RELAY}/{REPO}/51ba7b3e30b491c5dec221562d39bfd79aa8df70/HEAD@{1}:commit:Cargo.toml: +wss://{RELAY}/{REPO}/51ba7b3e30b491c5dec221562d39bfd79aa8df70/HEAD@{2}:commit:Cargo.toml: +wss://{RELAY}/{REPO}/00a3bfa31d47292abab47d91f99217fbb7da8f28/HEAD@{3}:commit:Cargo.toml: +wss://{RELAY}/{REPO}/c5a9e36e784912f9c43365e6c774630c6be1a7a0/HEAD@{4}:commit:Merge pull request #5 from gnostr-org/1706482629/91d3d11/0f6224f-make-gnostr-proxy-relay-list +wss://{RELAY}/{REPO}/d957f7b455f09f12842623b3e4d7c327940f9552/HEAD@{5}:commit:Merge pull request #6 from gnostr-org/1706482629/91d3d11/0f6224f-make-gnostr-proxy-relay-list +wss://{RELAY}/{REPO}/0f6224ff3601854cda46339eb1cad0c51c2699fb/HEAD@{6}:commit:Merge pull request #4 from gnostr-org/1706477352/5470f79/91d3d11-make-gnostr-test +wss://{RELAY}/{REPO}/2debededf64f0258b94c0c0085629c1edb06cdec/HEAD@{7}:commit:.gitignore: +wss://{RELAY}/{REPO}/e3797fa595b73683e05615f6ed8519f42e345de4/HEAD@{8}:commit:.gnostr/snapshots/relay-2061-827873-237555-1706482629-91d3d11-0f6224f-make-gnostr-proxy-relay-list.patch +wss://{RELAY}/{REPO}/4bdb30d7298f0fbe3469b45998f854f8f19fcf49/HEAD@{9}:commit:Cargo.lock:v0.0.2 +wss://{RELAY}/{REPO}/4bdb30d7298f0fbe3469b45998f854f8f19fcf49/HEAD@{10}:commit:Cargo.lock:v0.0.2 +wss://{RELAY}/{REPO}/c73d5b148119f629d02eccc69d005b6b8a3cc14c/HEAD@{11}:commit:Cargo.toml:v0.0.2 +wss://{RELAY}/{REPO}/0f6224ff3601854cda46339eb1cad0c51c2699fb/HEAD@{12}:commit:Merge pull request #4 from gnostr-org/1706477352/5470f79/91d3d11-make-gnostr-test +wss://{RELAY}/{REPO}/0f6224ff3601854cda46339eb1cad0c51c2699fb/HEAD@{13}:commit:Merge pull request #4 from gnostr-org/1706477352/5470f79/91d3d11-make-gnostr-test +wss://{RELAY}/{REPO}/0f6224ff3601854cda46339eb1cad0c51c2699fb/HEAD@{14}:commit:Merge pull request #4 from gnostr-org/1706477352/5470f79/91d3d11-make-gnostr-test +wss://{RELAY}/{REPO}/91d3d11a7447c517749b76e220700ca7e6c6fa81/HEAD@{15}:commit:Merge pull request #3 from gnostr-org/1706422086/5ebc1b5/7415bef-home_dir_gnostr_home +wss://{RELAY}/{REPO}/65126be08bd4ff80f10e13f6cb52926d597bc7ef/HEAD@{16}:commit:make:gnostr-test:gnostr-relay-list: port= +wss://{RELAY}/{REPO}/698b69e042dcc90be2c56546e5c38215f8ac7b41/HEAD@{17}:commit:.gitignore +wss://{RELAY}/{REPO}/91d3d11a7447c517749b76e220700ca7e6c6fa81/HEAD@{18}:commit:Merge pull request #3 from gnostr-org/1706422086/5ebc1b5/7415bef-home_dir_gnostr_home +wss://{RELAY}/{REPO}/91d3d11a7447c517749b76e220700ca7e6c6fa81/HEAD@{19}:commit:Merge pull request #3 from gnostr-org/1706422086/5ebc1b5/7415bef-home_dir_gnostr_home +wss://{RELAY}/{REPO}/91d3d11a7447c517749b76e220700ca7e6c6fa81/HEAD@{20}:commit:Merge pull request #3 from 
gnostr-org/1706422086/5ebc1b5/7415bef-home_dir_gnostr_home +wss://{RELAY}/{REPO}/7415bef303d5bef51e11e4977d56d703c8445d34/HEAD@{21}:commit:.github/workflows/gnostr-relay.yml +wss://{RELAY}/{REPO}/6e69eacbac010f3baa535057baa2bad95b2b1247/HEAD@{22}:commit:GNUmakefile:make port= +wss://{RELAY}/{REPO}/1c91dc5c87c865840a164777fc8d3335aa3eca30/HEAD@{23}:commit:src/cli.rs:main.rs +wss://{RELAY}/{REPO}/45bfe5b32135fe5609e960e826bfc80d12ef679e/HEAD@{24}:commit:src/cli.rs:main.rs +wss://{RELAY}/{REPO}/3bc97e02dc2b4fcb6ff5cbe03562fe4c15101a6d/HEAD@{25}:commit:cnofig.toml:revert folder_path +wss://{RELAY}/{REPO}/e10e1d14fba9ca00c7f4e5ff0967bbaaba4af603/HEAD@{26}:commit:config.toml:relay_url:name +wss://{RELAY}/{REPO}/ab5b08f33e8530b7720254ebc81631277a1f17e7/HEAD@{27}:commit:config.toml +wss://{RELAY}/{REPO}/a65049979a01a0facc9be06e7228a251c2a22a86/HEAD@{28}:commit:make:gnostr-test +wss://{RELAY}/{REPO}/c5b52782083e32e571b4d8266f7d35d490fcf8ea/HEAD@{29}:commit:config.toml: +wss://{RELAY}/{REPO}/88d0df2ef2efa511d6c476276761c81957ad8b5d/HEAD@{30}:commit:src/config.rs: +wss://{RELAY}/{REPO}/6180dbfa43fab0b49fe34a69c4e9e3ff961bc70e/HEAD@{31}:commit:src/main.rs:gnostr_home +wss://{RELAY}/{REPO}/efd5ad64e71642041db884817ab92e6cf49718dd/HEAD@{32}:commit:src/main.rs: +wss://{RELAY}/{REPO}/b97f52268aa52f5ce4f5f59334569fe3dec1ce0b/HEAD@{33}:commit:Cargo.toml:Cargo.lock +wss://{RELAY}/{REPO}/7415bef303d5bef51e11e4977d56d703c8445d34/HEAD@{34}:commit:.github/workflows/gnostr-relay.yml +wss://{RELAY}/{REPO}/7415bef303d5bef51e11e4977d56d703c8445d34/HEAD@{35}:commit:.github/workflows/gnostr-relay.yml +wss://{RELAY}/{REPO}/7415bef303d5bef51e11e4977d56d703c8445d34/HEAD@{36}:commit:.github/workflows/gnostr-relay.yml +wss://{RELAY}/{REPO}/b71f516ace1f8a1a4ff6a3ef2c226868216a33bf/HEAD@{37}:commit:.github/workflows/gnostr-relay.yml +wss://{RELAY}/{REPO}/5ebc1b5652a3f41adc9ad9e4243c6dffb97b3c16/HEAD@{38}:commit:Cargo.toml:v0.0.1 +wss://{RELAY}/{REPO}/204be6883e1ea110209418f79b8bb49b7002c96c/HEAD@{39}:commit:GNUmakefile:help:gnostr-test:gnostr-test-proxy +wss://{RELAY}/{REPO}/204be6883e1ea110209418f79b8bb49b7002c96c/HEAD@{40}:commit:GNUmakefile:help:gnostr-test:gnostr-test-proxy +wss://{RELAY}/{REPO}/c0d3a5efd5ceaa3db84246e43d3976f242c5d078/HEAD@{41}:commit:.gitignore:**DS_Store +wss://{RELAY}/{REPO}/03179bc88fe2afc1f1b50155bee699ec04e384d9/HEAD@{42}:commit:make:cargo-i +wss://{RELAY}/{REPO}/81a6168b0c4809c15b2fb4d39c3fce211e1f11ae/HEAD@{43}:commit:Cargo.toml:+[workspace]##exclude from gnostr-workspace diff --git a/.gnostr/snapshots/relay-2061-827873-237371-1706482629-91d3d11-0f6224f-make-gnostr-proxy-relay-list.patch b/.gnostr/snapshots/relay-2061-827873-237371-1706482629-91d3d11-0f6224f-make-gnostr-proxy-relay-list.patch new file mode 100644 index 00000000..e031887d --- /dev/null +++ b/.gnostr/snapshots/relay-2061-827873-237371-1706482629-91d3d11-0f6224f-make-gnostr-proxy-relay-list.patch @@ -0,0 +1,17684 @@ +diff --git a/.build.yml b/.build.yml +new file mode 100644 +index 0000000..83bef50 +--- /dev/null ++++ b/.build.yml +@@ -0,0 +1,20 @@ ++image: fedora/latest ++arch: x86_64 ++artifacts: ++ - nostr-rs-relay/target/release/nostr-rs-relay ++environment: ++ RUST_LOG: debug ++packages: ++ - cargo ++ - sqlite-devel ++ - protobuf-compiler ++sources: ++ - https://git.sr.ht/~gheartsfield/nostr-rs-relay/ ++shell: false ++tasks: ++ - build: | ++ cd nostr-rs-relay ++ cargo build --release ++ - test: | ++ cd nostr-rs-relay ++ cargo test --release +diff --git a/.cargo/config.toml b/.cargo/config.toml +new file mode 100644 +index 
0000000..bff29e6 +--- /dev/null ++++ b/.cargo/config.toml +@@ -0,0 +1,2 @@ ++[build] ++rustflags = ["--cfg", "tokio_unstable"] +diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml +new file mode 100644 +index 0000000..8c80345 +--- /dev/null ++++ b/.github/workflows/ci.yml +@@ -0,0 +1,39 @@ ++name: Test and build ++ ++on: ++ push: ++ branches: ++ - master ++ ++jobs: ++ test_nostr-rs-relay: ++ runs-on: ubuntu-latest ++ steps: ++ - uses: actions/checkout@v3 ++ ++ - name: Update local toolchain ++ run: | ++ sudo apt-get install -y protobuf-compiler ++ rustup update ++ rustup component add clippy ++ rustup install nightly ++ ++ - name: Toolchain info ++ run: | ++ cargo --version --verbose ++ rustc --version ++ cargo clippy --version ++ ++ # - name: Lint ++ # run: | ++ # cargo fmt -- --check ++ # cargo clippy -- -D warnings ++ ++ - name: Test ++ run: | ++ cargo check ++ cargo test --all ++ ++ - name: Build ++ run: | ++ cargo build --release --locked +diff --git a/.github/workflows/gnostr-relay.yml b/.github/workflows/gnostr-relay.yml +new file mode 100755 +index 0000000..c1ce124 +--- /dev/null ++++ b/.github/workflows/gnostr-relay.yml +@@ -0,0 +1,50 @@ ++name: gnostr-relay ++ ++on: ++ schedule: ++ - cron: '0 2 * * *' # run at 2 AM UTC ++ pull_request: ++ branches: ++ - '*' ++ - '*/*' ++ - '**' ++ - 'master' ++ - 'main' ++ push: ++ branches: ++ - '*' ++ - '*/*' ++ - '**' ++ - 'master' ++ - 'main' ++ ++env: ++ GIT_DISCOVERY_ACROSS_FILESYSTEM: 1 ++ ++jobs: ++ build: ++ strategy: ++ fail-fast: false ++ matrix: ++ os: ["ubuntu-latest"] ++ tag: ["latest", "slim-bullseye"] ++ runs-on: ${{ matrix.os }} ++ container: rust:${{ matrix.tag }} ++ ++ steps: ++ - run: apt-get update && apt-get install autoconf build-essential curl cmake git jq libexpat1-dev libcurl4-openssl-dev libssl-dev libtool lsb-release make nodejs npm pkg-config protobuf-compiler python3 python-is-python3 sudo tcl zlib1g-dev -y ++ ## notice: this is a pre checkout step ++ ## notice: additional operations can be done prior to checkout ++ ## - run: apk update && apk add bash cmake git python3 && python3 -m ensurepip ++ - run: printenv ++ - name: checkout@v3 fetch-depth submodules set-safe-dir true ++ uses: actions/checkout@v3 ++ with: ++ fetch-depth: '100' ++ submodules: 'true' ++ set-safe-directory: 'true' ++ ## notice: these are post checkout steps ++ ## - run: apk update && apk add autoconf automake build-base openssl-dev libtool make ++ - run: touch ~/GITHUB_TOKEN.txt ++ - run: git config --global --add safe.directory /__w/gnostr/gnostr || true ++ - run: make cargo-i +diff --git a/.gitignore b/.gitignore +new file mode 100644 +index 0000000..3c5f52f +--- /dev/null ++++ b/.gitignore +@@ -0,0 +1,7 @@ ++**/target/ ++nostr.db ++nostr.db-* ++justfile ++**DS_Store ++.nvmrc ++.gnostr/proxy +diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml +new file mode 100644 +index 0000000..5925d8c +--- /dev/null ++++ b/.pre-commit-config.yaml +@@ -0,0 +1,16 @@ ++# See https://pre-commit.com for more information ++# See https://pre-commit.com/hooks.html for more hooks ++repos: ++- repo: https://github.com/pre-commit/pre-commit-hooks ++ rev: v4.3.0 ++ hooks: ++ - id: trailing-whitespace ++ - id: end-of-file-fixer ++ - id: check-yaml ++ - id: check-added-large-files ++- repo: https://github.com/doublify/pre-commit-rust ++ rev: v1.0 ++ hooks: ++# - id: fmt ++ - id: cargo-check ++ - id: clippy +diff --git a/Cargo.lock b/Cargo.lock +new file mode 100644 +index 0000000..0ee8aa0 +--- /dev/null ++++ b/Cargo.lock +@@ -0,0 +1,3874 @@ ++# 
This file is automatically @generated by Cargo. ++# It is not intended for manual editing. ++version = 3 ++ ++[[package]] ++name = "addr2line" ++version = "0.21.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" ++dependencies = [ ++ "gimli", ++] ++ ++[[package]] ++name = "adler" ++version = "1.0.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" ++ ++[[package]] ++name = "aes" ++version = "0.8.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "ac1f845298e95f983ff1944b728ae08b8cebab80d684f0a832ed0fc74dfa27e2" ++dependencies = [ ++ "cfg-if", ++ "cipher", ++ "cpufeatures", ++] ++ ++[[package]] ++name = "ahash" ++version = "0.4.7" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "739f4a8db6605981345c5654f3a85b056ce52f37a39d34da03f25bf2151ea16e" ++ ++[[package]] ++name = "ahash" ++version = "0.7.6" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" ++dependencies = [ ++ "getrandom", ++ "once_cell", ++ "version_check", ++] ++ ++[[package]] ++name = "ahash" ++version = "0.8.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" ++dependencies = [ ++ "cfg-if", ++ "once_cell", ++ "version_check", ++] ++ ++[[package]] ++name = "aho-corasick" ++version = "1.0.5" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "0c378d78423fdad8089616f827526ee33c19f2fddbd5de1629152c9593ba4783" ++dependencies = [ ++ "memchr", ++] ++ ++[[package]] ++name = "allocator-api2" ++version = "0.2.16" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" ++ ++[[package]] ++name = "android-tzdata" ++version = "0.1.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" ++ ++[[package]] ++name = "android_system_properties" ++version = "0.1.5" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" ++dependencies = [ ++ "libc", ++] ++ ++[[package]] ++name = "anstream" ++version = "0.5.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "b1f58811cfac344940f1a400b6e6231ce35171f614f26439e80f8c1465c5cc0c" ++dependencies = [ ++ "anstyle", ++ "anstyle-parse", ++ "anstyle-query", ++ "anstyle-wincon", ++ "colorchoice", ++ "utf8parse", ++] ++ ++[[package]] ++name = "anstyle" ++version = "1.0.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "15c4c2c83f81532e5845a733998b6971faca23490340a418e9b72a3ec9de12ea" ++ ++[[package]] ++name = "anstyle-parse" ++version = "0.2.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "938874ff5980b03a87c5524b3ae5b59cf99b1d6bc836848df7bc5ada9643c333" ++dependencies = [ ++ "utf8parse", ++] ++ ++[[package]] ++name = "anstyle-query" ++version = "1.0.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" ++dependencies = [ ++ "windows-sys 0.48.0", ++] ++ ++[[package]] ++name = 
"anstyle-wincon" ++version = "2.1.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "58f54d10c6dfa51283a066ceab3ec1ab78d13fae00aa49243a45e4571fb79dfd" ++dependencies = [ ++ "anstyle", ++ "windows-sys 0.48.0", ++] ++ ++[[package]] ++name = "anyhow" ++version = "1.0.75" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" ++ ++[[package]] ++name = "async-channel" ++version = "1.9.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" ++dependencies = [ ++ "concurrent-queue", ++ "event-listener", ++ "futures-core", ++] ++ ++[[package]] ++name = "async-executor" ++version = "1.5.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "6fa3dc5f2a8564f07759c008b9109dc0d39de92a88d5588b8a5036d286383afb" ++dependencies = [ ++ "async-lock", ++ "async-task", ++ "concurrent-queue", ++ "fastrand 1.9.0", ++ "futures-lite", ++ "slab", ++] ++ ++[[package]] ++name = "async-global-executor" ++version = "2.3.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "f1b6f5d7df27bd294849f8eec66ecfc63d11814df7a4f5d74168a2394467b776" ++dependencies = [ ++ "async-channel", ++ "async-executor", ++ "async-io", ++ "async-lock", ++ "blocking", ++ "futures-lite", ++ "once_cell", ++] ++ ++[[package]] ++name = "async-io" ++version = "1.13.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" ++dependencies = [ ++ "async-lock", ++ "autocfg 1.1.0", ++ "cfg-if", ++ "concurrent-queue", ++ "futures-lite", ++ "log", ++ "parking", ++ "polling", ++ "rustix 0.37.23", ++ "slab", ++ "socket2 0.4.9", ++ "waker-fn", ++] ++ ++[[package]] ++name = "async-lock" ++version = "2.8.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" ++dependencies = [ ++ "event-listener", ++] ++ ++[[package]] ++name = "async-std" ++version = "1.12.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" ++dependencies = [ ++ "async-channel", ++ "async-global-executor", ++ "async-io", ++ "async-lock", ++ "crossbeam-utils", ++ "futures-channel", ++ "futures-core", ++ "futures-io", ++ "futures-lite", ++ "gloo-timers", ++ "kv-log-macro", ++ "log", ++ "memchr", ++ "once_cell", ++ "pin-project-lite", ++ "pin-utils", ++ "slab", ++ "wasm-bindgen-futures", ++] ++ ++[[package]] ++name = "async-stream" ++version = "0.3.5" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" ++dependencies = [ ++ "async-stream-impl", ++ "futures-core", ++ "pin-project-lite", ++] ++ ++[[package]] ++name = "async-stream-impl" ++version = "0.3.5" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" ++dependencies = [ ++ "proc-macro2", ++ "quote", ++ "syn 2.0.31", ++] ++ ++[[package]] ++name = "async-task" ++version = "4.4.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "ecc7ab41815b3c653ccd2978ec3255c81349336702dfdf62ee6f7069b12a3aae" ++ ++[[package]] ++name = "async-trait" ++version = "0.1.73" ++source = 
"registry+https://github.com/rust-lang/crates.io-index" ++checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" ++dependencies = [ ++ "proc-macro2", ++ "quote", ++ "syn 2.0.31", ++] ++ ++[[package]] ++name = "atoi" ++version = "1.0.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "d7c57d12312ff59c811c0643f4d80830505833c9ffaebd193d819392b265be8e" ++dependencies = [ ++ "num-traits", ++] ++ ++[[package]] ++name = "atomic-waker" ++version = "1.1.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "1181e1e0d1fce796a03db1ae795d67167da795f9cf4a39c37589e85ef57f26d3" ++ ++[[package]] ++name = "autocfg" ++version = "0.1.8" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "0dde43e75fd43e8a1bf86103336bc699aa8d17ad1be60c76c0bdfd4828e19b78" ++dependencies = [ ++ "autocfg 1.1.0", ++] ++ ++[[package]] ++name = "autocfg" ++version = "1.1.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" ++ ++[[package]] ++name = "axum" ++version = "0.6.20" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" ++dependencies = [ ++ "async-trait", ++ "axum-core", ++ "bitflags 1.3.2", ++ "bytes", ++ "futures-util", ++ "http", ++ "http-body", ++ "hyper", ++ "itoa", ++ "matchit", ++ "memchr", ++ "mime", ++ "percent-encoding", ++ "pin-project-lite", ++ "rustversion", ++ "serde", ++ "sync_wrapper", ++ "tower", ++ "tower-layer", ++ "tower-service", ++] ++ ++[[package]] ++name = "axum-core" ++version = "0.3.4" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" ++dependencies = [ ++ "async-trait", ++ "bytes", ++ "futures-util", ++ "http", ++ "http-body", ++ "mime", ++ "rustversion", ++ "tower-layer", ++ "tower-service", ++] ++ ++[[package]] ++name = "backtrace" ++version = "0.3.69" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" ++dependencies = [ ++ "addr2line", ++ "cc", ++ "cfg-if", ++ "libc", ++ "miniz_oxide", ++ "object", ++ "rustc-demangle", ++] ++ ++[[package]] ++name = "base64" ++version = "0.13.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" ++ ++[[package]] ++name = "base64" ++version = "0.21.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "414dcefbc63d77c526a76b3afcf6fbb9b5e2791c19c3aa2297733208750c6e53" ++ ++[[package]] ++name = "bech32" ++version = "0.9.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "d86b93f97252c47b41663388e6d155714a9d0c398b99f1005cbc5f978b29f445" ++ ++[[package]] ++name = "bitcoin" ++version = "0.29.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "0694ea59225b0c5f3cb405ff3f670e4828358ed26aec49dc352f730f0cb1a8a3" ++dependencies = [ ++ "bech32", ++ "bitcoin_hashes 0.11.0", ++ "secp256k1 0.24.3", ++ "serde", ++] ++ ++[[package]] ++name = "bitcoin_hashes" ++version = "0.10.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "006cc91e1a1d99819bc5b8214be3555c1f0611b169f527a1fdc54ed1f2b745b0" ++dependencies = [ ++ "serde", ++] ++ ++[[package]] ++name = "bitcoin_hashes" 
++version = "0.11.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "90064b8dee6815a6470d60bad07bbbaee885c0e12d04177138fa3291a01b7bc4" ++dependencies = [ ++ "serde", ++] ++ ++[[package]] ++name = "bitflags" ++version = "1.3.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" ++ ++[[package]] ++name = "bitflags" ++version = "2.4.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635" ++ ++[[package]] ++name = "block-buffer" ++version = "0.10.4" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" ++dependencies = [ ++ "generic-array", ++] ++ ++[[package]] ++name = "block-padding" ++version = "0.3.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "a8894febbff9f758034a5b8e12d87918f56dfc64a8e1fe757d65e29041538d93" ++dependencies = [ ++ "generic-array", ++] ++ ++[[package]] ++name = "blocking" ++version = "1.3.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "77231a1c8f801696fc0123ec6150ce92cffb8e164a02afb9c8ddee0e9b65ad65" ++dependencies = [ ++ "async-channel", ++ "async-lock", ++ "async-task", ++ "atomic-waker", ++ "fastrand 1.9.0", ++ "futures-lite", ++ "log", ++] ++ ++[[package]] ++name = "bumpalo" ++version = "3.13.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" ++ ++[[package]] ++name = "byteorder" ++version = "1.4.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" ++ ++[[package]] ++name = "bytes" ++version = "1.4.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" ++ ++[[package]] ++name = "cbc" ++version = "0.1.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "26b52a9543ae338f279b96b0b9fed9c8093744685043739079ce85cd58f289a6" ++dependencies = [ ++ "cipher", ++] ++ ++[[package]] ++name = "cc" ++version = "1.0.83" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" ++dependencies = [ ++ "libc", ++] ++ ++[[package]] ++name = "cfg-if" ++version = "1.0.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" ++ ++[[package]] ++name = "checked_int_cast" ++version = "1.0.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "17cc5e6b5ab06331c33589842070416baa137e8b0eb912b008cfd4a78ada7919" ++ ++[[package]] ++name = "chrono" ++version = "0.4.28" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "95ed24df0632f708f5f6d8082675bef2596f7084dee3dd55f632290bf35bfe0f" ++dependencies = [ ++ "android-tzdata", ++ "iana-time-zone", ++ "js-sys", ++ "num-traits", ++ "time 0.1.43", ++ "wasm-bindgen", ++ "windows-targets 0.48.5", ++] ++ ++[[package]] ++name = "cipher" ++version = "0.4.4" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" ++dependencies = [ ++ 
"crypto-common", ++ "inout", ++] ++ ++[[package]] ++name = "clap" ++version = "4.4.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "6a13b88d2c62ff462f88e4a121f17a82c1af05693a2f192b5c38d14de73c19f6" ++dependencies = [ ++ "clap_builder", ++ "clap_derive", ++] ++ ++[[package]] ++name = "clap_builder" ++version = "4.4.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "2bb9faaa7c2ef94b2743a21f5a29e6f0010dff4caa69ac8e9d6cf8b6fa74da08" ++dependencies = [ ++ "anstream", ++ "anstyle", ++ "clap_lex", ++ "strsim", ++] ++ ++[[package]] ++name = "clap_derive" ++version = "4.4.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "0862016ff20d69b84ef8247369fabf5c008a7417002411897d40ee1f4532b873" ++dependencies = [ ++ "heck", ++ "proc-macro2", ++ "quote", ++ "syn 2.0.31", ++] ++ ++[[package]] ++name = "clap_lex" ++version = "0.5.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "cd7cc57abe963c6d3b9d8be5b06ba7c8957a930305ca90304f24ef040aa6f961" ++ ++[[package]] ++name = "cloudabi" ++version = "0.0.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" ++dependencies = [ ++ "bitflags 1.3.2", ++] ++ ++[[package]] ++name = "colorchoice" ++version = "1.0.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" ++ ++[[package]] ++name = "concurrent-queue" ++version = "2.2.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "62ec6771ecfa0762d24683ee5a32ad78487a3d3afdc0fb8cae19d2c5deb50b7c" ++dependencies = [ ++ "crossbeam-utils", ++] ++ ++[[package]] ++name = "config" ++version = "0.12.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "54ad70579325f1a38ea4c13412b82241c5900700a69785d73e2736bd65a33f86" ++dependencies = [ ++ "async-trait", ++ "json5", ++ "lazy_static", ++ "nom", ++ "pathdiff", ++ "ron", ++ "rust-ini", ++ "serde", ++ "serde_json", ++ "toml", ++ "yaml-rust", ++] ++ ++[[package]] ++name = "console" ++version = "0.15.7" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "c926e00cc70edefdc64d3a5ff31cc65bb97a3460097762bd23afb4d8145fccf8" ++dependencies = [ ++ "encode_unicode", ++ "lazy_static", ++ "libc", ++ "unicode-width", ++ "windows-sys 0.45.0", ++] ++ ++[[package]] ++name = "console-api" ++version = "0.5.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "c2895653b4d9f1538a83970077cb01dfc77a4810524e51a110944688e916b18e" ++dependencies = [ ++ "prost", ++ "prost-types", ++ "tonic 0.9.2", ++ "tracing-core", ++] ++ ++[[package]] ++name = "console-subscriber" ++version = "0.1.10" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "d4cf42660ac07fcebed809cfe561dd8730bcd35b075215e6479c516bcd0d11cb" ++dependencies = [ ++ "console-api", ++ "crossbeam-channel", ++ "crossbeam-utils", ++ "futures", ++ "hdrhistogram", ++ "humantime", ++ "prost-types", ++ "serde", ++ "serde_json", ++ "thread_local", ++ "tokio", ++ "tokio-stream", ++ "tonic 0.9.2", ++ "tracing", ++ "tracing-core", ++ "tracing-subscriber", ++] ++ ++[[package]] ++name = "const_format" ++version = "0.2.31" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "c990efc7a285731f9a4378d81aff2f0e85a2c8781a05ef0f8baa8dac54d0ff48" ++dependencies = [ ++ 
"const_format_proc_macros", ++] ++ ++[[package]] ++name = "const_format_proc_macros" ++version = "0.2.31" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "e026b6ce194a874cb9cf32cd5772d1ef9767cc8fcb5765948d74f37a9d8b2bf6" ++dependencies = [ ++ "proc-macro2", ++ "quote", ++ "unicode-xid", ++] ++ ++[[package]] ++name = "core-foundation" ++version = "0.9.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" ++dependencies = [ ++ "core-foundation-sys", ++ "libc", ++] ++ ++[[package]] ++name = "core-foundation-sys" ++version = "0.8.4" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" ++ ++[[package]] ++name = "cpufeatures" ++version = "0.2.9" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1" ++dependencies = [ ++ "libc", ++] ++ ++[[package]] ++name = "crc" ++version = "3.0.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "86ec7a15cbe22e59248fc7eadb1907dab5ba09372595da4d73dd805ed4417dfe" ++dependencies = [ ++ "crc-catalog", ++] ++ ++[[package]] ++name = "crc-catalog" ++version = "2.2.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "9cace84e55f07e7301bae1c519df89cdad8cc3cd868413d3fdbdeca9ff3db484" ++ ++[[package]] ++name = "crc32fast" ++version = "1.3.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" ++dependencies = [ ++ "cfg-if", ++] ++ ++[[package]] ++name = "crossbeam-channel" ++version = "0.5.8" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" ++dependencies = [ ++ "cfg-if", ++ "crossbeam-utils", ++] ++ ++[[package]] ++name = "crossbeam-queue" ++version = "0.3.8" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" ++dependencies = [ ++ "cfg-if", ++ "crossbeam-utils", ++] ++ ++[[package]] ++name = "crossbeam-utils" ++version = "0.8.16" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" ++dependencies = [ ++ "cfg-if", ++] ++ ++[[package]] ++name = "crypto-common" ++version = "0.1.6" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" ++dependencies = [ ++ "generic-array", ++ "typenum", ++] ++ ++[[package]] ++name = "dashmap" ++version = "5.5.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" ++dependencies = [ ++ "cfg-if", ++ "hashbrown 0.14.0", ++ "lock_api", ++ "once_cell", ++ "parking_lot_core 0.9.8", ++] ++ ++[[package]] ++name = "deranged" ++version = "0.3.8" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "f2696e8a945f658fd14dc3b87242e6b80cd0f36ff04ea560fa39082368847946" ++ ++[[package]] ++name = "digest" ++version = "0.10.7" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" ++dependencies = [ ++ 
"block-buffer", ++ "crypto-common", ++ "subtle", ++] ++ ++[[package]] ++name = "dirs" ++version = "4.0.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "ca3aa72a6f96ea37bbc5aa912f6788242832f75369bdfdadcb0e38423f100059" ++dependencies = [ ++ "dirs-sys 0.3.7", ++] ++ ++[[package]] ++name = "dirs" ++version = "5.0.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" ++dependencies = [ ++ "dirs-sys 0.4.1", ++] ++ ++[[package]] ++name = "dirs-sys" ++version = "0.3.7" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6" ++dependencies = [ ++ "libc", ++ "redox_users", ++ "winapi", ++] ++ ++[[package]] ++name = "dirs-sys" ++version = "0.4.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" ++dependencies = [ ++ "libc", ++ "option-ext", ++ "redox_users", ++ "windows-sys 0.48.0", ++] ++ ++[[package]] ++name = "dlv-list" ++version = "0.2.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "68df3f2b690c1b86e65ef7830956aededf3cb0a16f898f79b9a6f421a7b6211b" ++dependencies = [ ++ "rand 0.8.5", ++] ++ ++[[package]] ++name = "dotenvy" ++version = "0.15.7" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" ++ ++[[package]] ++name = "either" ++version = "1.9.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" ++ ++[[package]] ++name = "encode_unicode" ++version = "0.3.6" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" ++ ++[[package]] ++name = "equivalent" ++version = "1.0.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" ++ ++[[package]] ++name = "errno" ++version = "0.3.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "136526188508e25c6fef639d7927dfb3e0e3084488bf202267829cf7fc23dbdd" ++dependencies = [ ++ "errno-dragonfly", ++ "libc", ++ "windows-sys 0.48.0", ++] ++ ++[[package]] ++name = "errno-dragonfly" ++version = "0.1.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" ++dependencies = [ ++ "cc", ++ "libc", ++] ++ ++[[package]] ++name = "event-listener" ++version = "2.5.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" ++ ++[[package]] ++name = "fallible-iterator" ++version = "0.2.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" ++ ++[[package]] ++name = "fallible-streaming-iterator" ++version = "0.1.9" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" ++ ++[[package]] ++name = "fastrand" ++version = "1.9.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = 
"e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" ++dependencies = [ ++ "instant", ++] ++ ++[[package]] ++name = "fastrand" ++version = "2.0.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" ++ ++[[package]] ++name = "fixedbitset" ++version = "0.4.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" ++ ++[[package]] ++name = "flate2" ++version = "1.0.27" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "c6c98ee8095e9d1dcbf2fcc6d95acccb90d1c81db1e44725c6a984b1dbdfb010" ++dependencies = [ ++ "crc32fast", ++ "miniz_oxide", ++] ++ ++[[package]] ++name = "fnv" ++version = "1.0.7" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" ++ ++[[package]] ++name = "form_urlencoded" ++version = "1.2.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" ++dependencies = [ ++ "percent-encoding", ++] ++ ++[[package]] ++name = "fuchsia-cprng" ++version = "0.1.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" ++ ++[[package]] ++name = "futures" ++version = "0.3.28" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" ++dependencies = [ ++ "futures-channel", ++ "futures-core", ++ "futures-executor", ++ "futures-io", ++ "futures-sink", ++ "futures-task", ++ "futures-util", ++] ++ ++[[package]] ++name = "futures-channel" ++version = "0.3.28" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" ++dependencies = [ ++ "futures-core", ++ "futures-sink", ++] ++ ++[[package]] ++name = "futures-core" ++version = "0.3.28" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" ++ ++[[package]] ++name = "futures-executor" ++version = "0.3.28" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" ++dependencies = [ ++ "futures-core", ++ "futures-task", ++ "futures-util", ++] ++ ++[[package]] ++name = "futures-intrusive" ++version = "0.4.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "a604f7a68fbf8103337523b1fadc8ade7361ee3f112f7c680ad179651616aed5" ++dependencies = [ ++ "futures-core", ++ "lock_api", ++ "parking_lot 0.11.2", ++] ++ ++[[package]] ++name = "futures-io" ++version = "0.3.28" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" ++ ++[[package]] ++name = "futures-lite" ++version = "1.13.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" ++dependencies = [ ++ "fastrand 1.9.0", ++ "futures-core", ++ "futures-io", ++ "memchr", ++ "parking", ++ "pin-project-lite", ++ "waker-fn", ++] ++ ++[[package]] ++name = "futures-macro" ++version = "0.3.28" ++source = 
"registry+https://github.com/rust-lang/crates.io-index" ++checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" ++dependencies = [ ++ "proc-macro2", ++ "quote", ++ "syn 2.0.31", ++] ++ ++[[package]] ++name = "futures-sink" ++version = "0.3.28" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" ++ ++[[package]] ++name = "futures-task" ++version = "0.3.28" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" ++ ++[[package]] ++name = "futures-timer" ++version = "3.0.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" ++ ++[[package]] ++name = "futures-util" ++version = "0.3.28" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" ++dependencies = [ ++ "futures-channel", ++ "futures-core", ++ "futures-io", ++ "futures-macro", ++ "futures-sink", ++ "futures-task", ++ "memchr", ++ "pin-project-lite", ++ "pin-utils", ++ "slab", ++] ++ ++[[package]] ++name = "generic-array" ++version = "0.14.7" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" ++dependencies = [ ++ "typenum", ++ "version_check", ++] ++ ++[[package]] ++name = "getrandom" ++version = "0.2.10" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" ++dependencies = [ ++ "cfg-if", ++ "js-sys", ++ "libc", ++ "wasi 0.11.0+wasi-snapshot-preview1", ++ "wasm-bindgen", ++] ++ ++[[package]] ++name = "gimli" ++version = "0.28.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0" ++ ++[[package]] ++name = "gloo-timers" ++version = "0.2.6" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" ++dependencies = [ ++ "futures-channel", ++ "futures-core", ++ "js-sys", ++ "wasm-bindgen", ++] ++ ++[[package]] ++name = "gnostr-relay" ++version = "0.0.2" ++dependencies = [ ++ "anyhow", ++ "async-std", ++ "async-trait", ++ "bech32", ++ "bitcoin_hashes 0.10.0", ++ "chrono", ++ "clap", ++ "config", ++ "console-subscriber", ++ "const_format", ++ "dirs 5.0.1", ++ "futures", ++ "futures-util", ++ "governor", ++ "hex", ++ "http", ++ "hyper", ++ "hyper-rustls", ++ "indicatif", ++ "lazy_static", ++ "log", ++ "nonzero_ext", ++ "nostr", ++ "parse_duration", ++ "prometheus", ++ "prost", ++ "qrcode", ++ "r2d2", ++ "r2d2_sqlite", ++ "rand 0.8.5", ++ "regex", ++ "rusqlite", ++ "secp256k1 0.21.3", ++ "serde", ++ "serde_json", ++ "sqlx", ++ "thiserror", ++ "tikv-jemallocator", ++ "tokio", ++ "tokio-tungstenite", ++ "tonic 0.8.3", ++ "tonic-build", ++ "tracing", ++ "tracing-appender", ++ "tracing-subscriber", ++ "tungstenite", ++ "url", ++ "uuid", ++] ++ ++[[package]] ++name = "governor" ++version = "0.4.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "19775995ee20209163239355bc3ad2f33f83da35d9ef72dea26e5af753552c87" ++dependencies = [ ++ "dashmap", ++ "futures", ++ "futures-timer", ++ "no-std-compat", ++ "nonzero_ext", ++ "parking_lot 0.12.1", ++ "quanta", ++ "rand 
0.8.5", ++ "smallvec", ++] ++ ++[[package]] ++name = "h2" ++version = "0.3.21" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "91fc23aa11be92976ef4729127f1a74adf36d8436f7816b185d18df956790833" ++dependencies = [ ++ "bytes", ++ "fnv", ++ "futures-core", ++ "futures-sink", ++ "futures-util", ++ "http", ++ "indexmap 1.9.3", ++ "slab", ++ "tokio", ++ "tokio-util", ++ "tracing", ++] ++ ++[[package]] ++name = "hashbrown" ++version = "0.9.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" ++dependencies = [ ++ "ahash 0.4.7", ++] ++ ++[[package]] ++name = "hashbrown" ++version = "0.11.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" ++dependencies = [ ++ "ahash 0.7.6", ++] ++ ++[[package]] ++name = "hashbrown" ++version = "0.12.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" ++ ++[[package]] ++name = "hashbrown" ++version = "0.14.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" ++dependencies = [ ++ "ahash 0.8.3", ++ "allocator-api2", ++] ++ ++[[package]] ++name = "hashlink" ++version = "0.7.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "7249a3129cbc1ffccd74857f81464a323a152173cdb134e0fd81bc803b29facf" ++dependencies = [ ++ "hashbrown 0.11.2", ++] ++ ++[[package]] ++name = "hashlink" ++version = "0.8.4" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" ++dependencies = [ ++ "hashbrown 0.14.0", ++] ++ ++[[package]] ++name = "hdrhistogram" ++version = "7.5.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "7f19b9f54f7c7f55e31401bb647626ce0cf0f67b0004982ce815b3ee72a02aa8" ++dependencies = [ ++ "base64 0.13.1", ++ "byteorder", ++ "flate2", ++ "nom", ++ "num-traits", ++] ++ ++[[package]] ++name = "heck" ++version = "0.4.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" ++dependencies = [ ++ "unicode-segmentation", ++] ++ ++[[package]] ++name = "hermit-abi" ++version = "0.3.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" ++ ++[[package]] ++name = "hex" ++version = "0.4.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" ++ ++[[package]] ++name = "hkdf" ++version = "0.12.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "791a029f6b9fc27657f6f188ec6e5e43f6911f6f878e0dc5501396e09809d437" ++dependencies = [ ++ "hmac", ++] ++ ++[[package]] ++name = "hmac" ++version = "0.12.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" ++dependencies = [ ++ "digest", ++] ++ ++[[package]] ++name = "http" ++version = "0.2.9" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" ++dependencies = [ ++ "bytes", ++ 
"fnv", ++ "itoa", ++] ++ ++[[package]] ++name = "http-body" ++version = "0.4.5" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" ++dependencies = [ ++ "bytes", ++ "http", ++ "pin-project-lite", ++] ++ ++[[package]] ++name = "httparse" ++version = "1.8.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" ++ ++[[package]] ++name = "httpdate" ++version = "1.0.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" ++ ++[[package]] ++name = "humantime" ++version = "2.1.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" ++ ++[[package]] ++name = "hyper" ++version = "0.14.27" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" ++dependencies = [ ++ "bytes", ++ "futures-channel", ++ "futures-core", ++ "futures-util", ++ "h2", ++ "http", ++ "http-body", ++ "httparse", ++ "httpdate", ++ "itoa", ++ "pin-project-lite", ++ "socket2 0.4.9", ++ "tokio", ++ "tower-service", ++ "tracing", ++ "want", ++] ++ ++[[package]] ++name = "hyper-rustls" ++version = "0.24.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "8d78e1e73ec14cf7375674f74d7dde185c8206fd9dea6fb6295e8a98098aaa97" ++dependencies = [ ++ "futures-util", ++ "http", ++ "hyper", ++ "log", ++ "rustls 0.21.7", ++ "rustls-native-certs", ++ "tokio", ++ "tokio-rustls 0.24.1", ++] ++ ++[[package]] ++name = "hyper-timeout" ++version = "0.4.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" ++dependencies = [ ++ "hyper", ++ "pin-project-lite", ++ "tokio", ++ "tokio-io-timeout", ++] ++ ++[[package]] ++name = "iana-time-zone" ++version = "0.1.57" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613" ++dependencies = [ ++ "android_system_properties", ++ "core-foundation-sys", ++ "iana-time-zone-haiku", ++ "js-sys", ++ "wasm-bindgen", ++ "windows", ++] ++ ++[[package]] ++name = "iana-time-zone-haiku" ++version = "0.1.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" ++dependencies = [ ++ "cc", ++] ++ ++[[package]] ++name = "idna" ++version = "0.4.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" ++dependencies = [ ++ "unicode-bidi", ++ "unicode-normalization", ++] ++ ++[[package]] ++name = "indexmap" ++version = "1.9.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" ++dependencies = [ ++ "autocfg 1.1.0", ++ "hashbrown 0.12.3", ++] ++ ++[[package]] ++name = "indexmap" ++version = "2.0.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" ++dependencies = [ ++ "equivalent", ++ "hashbrown 0.14.0", ++] ++ ++[[package]] ++name = "indicatif" ++version = "0.17.6" ++source = 
"registry+https://github.com/rust-lang/crates.io-index" ++checksum = "0b297dc40733f23a0e52728a58fa9489a5b7638a324932de16b41adc3ef80730" ++dependencies = [ ++ "console", ++ "instant", ++ "number_prefix", ++ "portable-atomic", ++ "unicode-width", ++] ++ ++[[package]] ++name = "inout" ++version = "0.1.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" ++dependencies = [ ++ "block-padding", ++ "generic-array", ++] ++ ++[[package]] ++name = "instant" ++version = "0.1.12" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" ++dependencies = [ ++ "cfg-if", ++ "js-sys", ++ "wasm-bindgen", ++ "web-sys", ++] ++ ++[[package]] ++name = "io-lifetimes" ++version = "1.0.11" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" ++dependencies = [ ++ "hermit-abi", ++ "libc", ++ "windows-sys 0.48.0", ++] ++ ++[[package]] ++name = "itertools" ++version = "0.10.5" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" ++dependencies = [ ++ "either", ++] ++ ++[[package]] ++name = "itertools" ++version = "0.11.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" ++dependencies = [ ++ "either", ++] ++ ++[[package]] ++name = "itoa" ++version = "1.0.9" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" ++ ++[[package]] ++name = "js-sys" ++version = "0.3.64" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" ++dependencies = [ ++ "wasm-bindgen", ++] ++ ++[[package]] ++name = "json5" ++version = "0.4.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "96b0db21af676c1ce64250b5f40f3ce2cf27e4e47cb91ed91eb6fe9350b430c1" ++dependencies = [ ++ "pest", ++ "pest_derive", ++ "serde", ++] ++ ++[[package]] ++name = "kv-log-macro" ++version = "1.0.7" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" ++dependencies = [ ++ "log", ++] ++ ++[[package]] ++name = "lazy_static" ++version = "1.4.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" ++ ++[[package]] ++name = "libc" ++version = "0.2.147" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" ++ ++[[package]] ++name = "libsqlite3-sys" ++version = "0.23.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "d2cafc7c74096c336d9d27145f7ebd4f4b6f95ba16aa5a282387267e6925cb58" ++dependencies = [ ++ "cc", ++ "pkg-config", ++ "vcpkg", ++] ++ ++[[package]] ++name = "linked-hash-map" ++version = "0.5.6" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" ++ ++[[package]] ++name = "linux-raw-sys" ++version = "0.3.8" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = 
"ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" ++ ++[[package]] ++name = "linux-raw-sys" ++version = "0.4.5" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "57bcfdad1b858c2db7c38303a6d2ad4dfaf5eb53dfeb0910128b2c26d6158503" ++ ++[[package]] ++name = "lock_api" ++version = "0.4.10" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" ++dependencies = [ ++ "autocfg 1.1.0", ++ "scopeguard", ++] ++ ++[[package]] ++name = "log" ++version = "0.4.20" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" ++dependencies = [ ++ "value-bag", ++] ++ ++[[package]] ++name = "mach" ++version = "0.3.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa" ++dependencies = [ ++ "libc", ++] ++ ++[[package]] ++name = "matchers" ++version = "0.1.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" ++dependencies = [ ++ "regex-automata 0.1.10", ++] ++ ++[[package]] ++name = "matchit" ++version = "0.7.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "ed1202b2a6f884ae56f04cff409ab315c5ce26b5e58d7412e484f01fd52f52ef" ++ ++[[package]] ++name = "md-5" ++version = "0.10.5" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "6365506850d44bff6e2fbcb5176cf63650e48bd45ef2fe2665ae1570e0f4b9ca" ++dependencies = [ ++ "digest", ++] ++ ++[[package]] ++name = "memchr" ++version = "2.6.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "8f232d6ef707e1956a43342693d2a31e72989554d58299d7a88738cc95b0d35c" ++ ++[[package]] ++name = "mime" ++version = "0.3.17" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" ++ ++[[package]] ++name = "minimal-lexical" ++version = "0.2.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" ++ ++[[package]] ++name = "miniz_oxide" ++version = "0.7.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" ++dependencies = [ ++ "adler", ++] ++ ++[[package]] ++name = "mio" ++version = "0.8.8" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" ++dependencies = [ ++ "libc", ++ "wasi 0.11.0+wasi-snapshot-preview1", ++ "windows-sys 0.48.0", ++] ++ ++[[package]] ++name = "multimap" ++version = "0.8.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" ++ ++[[package]] ++name = "no-std-compat" ++version = "0.4.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "b93853da6d84c2e3c7d730d6473e8817692dd89be387eb01b94d7f108ecb5b8c" ++ ++[[package]] ++name = "nom" ++version = "7.1.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" ++dependencies = [ ++ "memchr", ++ "minimal-lexical", ++] ++ ++[[package]] 
++name = "nonzero_ext" ++version = "0.3.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "38bf9645c8b145698bb0b18a4637dcacbc421ea49bef2317e4fd8065a387cf21" ++ ++[[package]] ++name = "nostr" ++version = "0.18.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "35c0446103768cddfb2bc1b87a52e98c35227b82711c2b3ce7098f8d85d9b0ee" ++dependencies = [ ++ "aes", ++ "base64 0.21.3", ++ "bitcoin", ++ "cbc", ++ "getrandom", ++ "instant", ++ "log", ++ "once_cell", ++ "regex", ++ "serde", ++ "serde_json", ++ "thiserror", ++ "url", ++] ++ ++[[package]] ++name = "nu-ansi-term" ++version = "0.46.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" ++dependencies = [ ++ "overload", ++ "winapi", ++] ++ ++[[package]] ++name = "num" ++version = "0.2.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "b8536030f9fea7127f841b45bb6243b27255787fb4eb83958aa1ef9d2fdc0c36" ++dependencies = [ ++ "num-bigint", ++ "num-complex", ++ "num-integer", ++ "num-iter", ++ "num-rational", ++ "num-traits", ++] ++ ++[[package]] ++name = "num-bigint" ++version = "0.2.6" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" ++dependencies = [ ++ "autocfg 1.1.0", ++ "num-integer", ++ "num-traits", ++] ++ ++[[package]] ++name = "num-complex" ++version = "0.2.4" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "b6b19411a9719e753aff12e5187b74d60d3dc449ec3f4dc21e3989c3f554bc95" ++dependencies = [ ++ "autocfg 1.1.0", ++ "num-traits", ++] ++ ++[[package]] ++name = "num-integer" ++version = "0.1.45" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" ++dependencies = [ ++ "autocfg 1.1.0", ++ "num-traits", ++] ++ ++[[package]] ++name = "num-iter" ++version = "0.1.43" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" ++dependencies = [ ++ "autocfg 1.1.0", ++ "num-integer", ++ "num-traits", ++] ++ ++[[package]] ++name = "num-rational" ++version = "0.2.4" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" ++dependencies = [ ++ "autocfg 1.1.0", ++ "num-bigint", ++ "num-integer", ++ "num-traits", ++] ++ ++[[package]] ++name = "num-traits" ++version = "0.2.16" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" ++dependencies = [ ++ "autocfg 1.1.0", ++] ++ ++[[package]] ++name = "num_cpus" ++version = "1.16.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" ++dependencies = [ ++ "hermit-abi", ++ "libc", ++] ++ ++[[package]] ++name = "number_prefix" ++version = "0.4.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" ++ ++[[package]] ++name = "object" ++version = "0.32.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" ++dependencies = [ ++ "memchr", ++] ++ ++[[package]] 
++name = "once_cell" ++version = "1.18.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" ++ ++[[package]] ++name = "openssl-probe" ++version = "0.1.5" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" ++ ++[[package]] ++name = "option-ext" ++version = "0.2.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" ++ ++[[package]] ++name = "ordered-multimap" ++version = "0.3.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "1c672c7ad9ec066e428c00eb917124a06f08db19e2584de982cc34b1f4c12485" ++dependencies = [ ++ "dlv-list", ++ "hashbrown 0.9.1", ++] ++ ++[[package]] ++name = "overload" ++version = "0.1.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" ++ ++[[package]] ++name = "parking" ++version = "2.1.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "14f2252c834a40ed9bb5422029649578e63aa341ac401f74e719dd1afda8394e" ++ ++[[package]] ++name = "parking_lot" ++version = "0.11.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" ++dependencies = [ ++ "instant", ++ "lock_api", ++ "parking_lot_core 0.8.6", ++] ++ ++[[package]] ++name = "parking_lot" ++version = "0.12.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" ++dependencies = [ ++ "lock_api", ++ "parking_lot_core 0.9.8", ++] ++ ++[[package]] ++name = "parking_lot_core" ++version = "0.8.6" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" ++dependencies = [ ++ "cfg-if", ++ "instant", ++ "libc", ++ "redox_syscall 0.2.16", ++ "smallvec", ++ "winapi", ++] ++ ++[[package]] ++name = "parking_lot_core" ++version = "0.9.8" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" ++dependencies = [ ++ "cfg-if", ++ "libc", ++ "redox_syscall 0.3.5", ++ "smallvec", ++ "windows-targets 0.48.5", ++] ++ ++[[package]] ++name = "parse_duration" ++version = "2.1.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "7037e5e93e0172a5a96874380bf73bc6ecef022e26fa25f2be26864d6b3ba95d" ++dependencies = [ ++ "lazy_static", ++ "num", ++ "regex", ++] ++ ++[[package]] ++name = "paste" ++version = "1.0.14" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" ++ ++[[package]] ++name = "pathdiff" ++version = "0.2.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "8835116a5c179084a830efb3adc117ab007512b535bc1a21c991d3b32a6b44dd" ++ ++[[package]] ++name = "percent-encoding" ++version = "2.3.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" ++ ++[[package]] ++name = "pest" ++version = "2.7.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = 
"d7a4d085fd991ac8d5b05a147b437791b4260b76326baf0fc60cf7c9c27ecd33" ++dependencies = [ ++ "memchr", ++ "thiserror", ++ "ucd-trie", ++] ++ ++[[package]] ++name = "pest_derive" ++version = "2.7.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "a2bee7be22ce7918f641a33f08e3f43388c7656772244e2bbb2477f44cc9021a" ++dependencies = [ ++ "pest", ++ "pest_generator", ++] ++ ++[[package]] ++name = "pest_generator" ++version = "2.7.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "d1511785c5e98d79a05e8a6bc34b4ac2168a0e3e92161862030ad84daa223141" ++dependencies = [ ++ "pest", ++ "pest_meta", ++ "proc-macro2", ++ "quote", ++ "syn 2.0.31", ++] ++ ++[[package]] ++name = "pest_meta" ++version = "2.7.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "b42f0394d3123e33353ca5e1e89092e533d2cc490389f2bd6131c43c634ebc5f" ++dependencies = [ ++ "once_cell", ++ "pest", ++ "sha2", ++] ++ ++[[package]] ++name = "petgraph" ++version = "0.6.4" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" ++dependencies = [ ++ "fixedbitset", ++ "indexmap 2.0.0", ++] ++ ++[[package]] ++name = "pin-project" ++version = "1.1.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" ++dependencies = [ ++ "pin-project-internal", ++] ++ ++[[package]] ++name = "pin-project-internal" ++version = "1.1.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" ++dependencies = [ ++ "proc-macro2", ++ "quote", ++ "syn 2.0.31", ++] ++ ++[[package]] ++name = "pin-project-lite" ++version = "0.2.13" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" ++ ++[[package]] ++name = "pin-utils" ++version = "0.1.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" ++ ++[[package]] ++name = "pkg-config" ++version = "0.3.27" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" ++ ++[[package]] ++name = "polling" ++version = "2.8.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" ++dependencies = [ ++ "autocfg 1.1.0", ++ "bitflags 1.3.2", ++ "cfg-if", ++ "concurrent-queue", ++ "libc", ++ "log", ++ "pin-project-lite", ++ "windows-sys 0.48.0", ++] ++ ++[[package]] ++name = "portable-atomic" ++version = "1.4.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "31114a898e107c51bb1609ffaf55a0e011cf6a4d7f1170d0015a165082c0338b" ++ ++[[package]] ++name = "ppv-lite86" ++version = "0.2.17" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" ++ ++[[package]] ++name = "prettyplease" ++version = "0.1.25" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" ++dependencies = [ ++ "proc-macro2", ++ "syn 1.0.109", ++] ++ ++[[package]] ++name = "proc-macro2" ++version = "1.0.66" ++source = 
"registry+https://github.com/rust-lang/crates.io-index" ++checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" ++dependencies = [ ++ "unicode-ident", ++] ++ ++[[package]] ++name = "prometheus" ++version = "0.13.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "449811d15fbdf5ceb5c1144416066429cf82316e2ec8ce0c1f6f8a02e7bbcf8c" ++dependencies = [ ++ "cfg-if", ++ "fnv", ++ "lazy_static", ++ "memchr", ++ "parking_lot 0.12.1", ++ "protobuf", ++ "thiserror", ++] ++ ++[[package]] ++name = "prost" ++version = "0.11.9" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" ++dependencies = [ ++ "bytes", ++ "prost-derive", ++] ++ ++[[package]] ++name = "prost-build" ++version = "0.11.9" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" ++dependencies = [ ++ "bytes", ++ "heck", ++ "itertools 0.10.5", ++ "lazy_static", ++ "log", ++ "multimap", ++ "petgraph", ++ "prettyplease", ++ "prost", ++ "prost-types", ++ "regex", ++ "syn 1.0.109", ++ "tempfile", ++ "which", ++] ++ ++[[package]] ++name = "prost-derive" ++version = "0.11.9" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" ++dependencies = [ ++ "anyhow", ++ "itertools 0.10.5", ++ "proc-macro2", ++ "quote", ++ "syn 1.0.109", ++] ++ ++[[package]] ++name = "prost-types" ++version = "0.11.9" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "213622a1460818959ac1181aaeb2dc9c7f63df720db7d788b3e24eacd1983e13" ++dependencies = [ ++ "prost", ++] ++ ++[[package]] ++name = "protobuf" ++version = "2.28.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" ++ ++[[package]] ++name = "qrcode" ++version = "0.12.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "16d2f1455f3630c6e5107b4f2b94e74d76dea80736de0981fd27644216cff57f" ++dependencies = [ ++ "checked_int_cast", ++] ++ ++[[package]] ++name = "quanta" ++version = "0.9.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "20afe714292d5e879d8b12740aa223c6a88f118af41870e8b6196e39a02238a8" ++dependencies = [ ++ "crossbeam-utils", ++ "libc", ++ "mach", ++ "once_cell", ++ "raw-cpuid", ++ "wasi 0.10.2+wasi-snapshot-preview1", ++ "web-sys", ++ "winapi", ++] ++ ++[[package]] ++name = "quote" ++version = "1.0.33" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" ++dependencies = [ ++ "proc-macro2", ++] ++ ++[[package]] ++name = "r2d2" ++version = "0.8.10" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "51de85fb3fb6524929c8a2eb85e6b6d363de4e8c48f9e2c2eac4944abc181c93" ++dependencies = [ ++ "log", ++ "parking_lot 0.12.1", ++ "scheduled-thread-pool", ++] ++ ++[[package]] ++name = "r2d2_sqlite" ++version = "0.19.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "54ca3c9468a76fc2ad724c486a59682fc362efeac7b18d1c012958bc19f34800" ++dependencies = [ ++ "r2d2", ++ "rusqlite", ++] ++ ++[[package]] ++name = "rand" ++version = "0.6.5" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = 
"6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca" ++dependencies = [ ++ "autocfg 0.1.8", ++ "libc", ++ "rand_chacha 0.1.1", ++ "rand_core 0.4.2", ++ "rand_hc", ++ "rand_isaac", ++ "rand_jitter", ++ "rand_os", ++ "rand_pcg", ++ "rand_xorshift", ++ "winapi", ++] ++ ++[[package]] ++name = "rand" ++version = "0.8.5" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" ++dependencies = [ ++ "libc", ++ "rand_chacha 0.3.1", ++ "rand_core 0.6.4", ++] ++ ++[[package]] ++name = "rand_chacha" ++version = "0.1.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef" ++dependencies = [ ++ "autocfg 0.1.8", ++ "rand_core 0.3.1", ++] ++ ++[[package]] ++name = "rand_chacha" ++version = "0.3.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" ++dependencies = [ ++ "ppv-lite86", ++ "rand_core 0.6.4", ++] ++ ++[[package]] ++name = "rand_core" ++version = "0.3.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" ++dependencies = [ ++ "rand_core 0.4.2", ++] ++ ++[[package]] ++name = "rand_core" ++version = "0.4.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" ++ ++[[package]] ++name = "rand_core" ++version = "0.6.4" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" ++dependencies = [ ++ "getrandom", ++] ++ ++[[package]] ++name = "rand_hc" ++version = "0.1.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4" ++dependencies = [ ++ "rand_core 0.3.1", ++] ++ ++[[package]] ++name = "rand_isaac" ++version = "0.1.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08" ++dependencies = [ ++ "rand_core 0.3.1", ++] ++ ++[[package]] ++name = "rand_jitter" ++version = "0.1.4" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b" ++dependencies = [ ++ "libc", ++ "rand_core 0.4.2", ++ "winapi", ++] ++ ++[[package]] ++name = "rand_os" ++version = "0.1.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" ++dependencies = [ ++ "cloudabi", ++ "fuchsia-cprng", ++ "libc", ++ "rand_core 0.4.2", ++ "rdrand", ++ "winapi", ++] ++ ++[[package]] ++name = "rand_pcg" ++version = "0.1.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44" ++dependencies = [ ++ "autocfg 0.1.8", ++ "rand_core 0.4.2", ++] ++ ++[[package]] ++name = "rand_xorshift" ++version = "0.1.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c" ++dependencies = [ ++ "rand_core 0.3.1", ++] ++ ++[[package]] ++name = "raw-cpuid" ++version = "10.7.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = 
"6c297679cb867470fa8c9f67dbba74a78d78e3e98d7cf2b08d6d71540f797332" ++dependencies = [ ++ "bitflags 1.3.2", ++] ++ ++[[package]] ++name = "rdrand" ++version = "0.4.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" ++dependencies = [ ++ "rand_core 0.3.1", ++] ++ ++[[package]] ++name = "redox_syscall" ++version = "0.2.16" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" ++dependencies = [ ++ "bitflags 1.3.2", ++] ++ ++[[package]] ++name = "redox_syscall" ++version = "0.3.5" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" ++dependencies = [ ++ "bitflags 1.3.2", ++] ++ ++[[package]] ++name = "redox_users" ++version = "0.4.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" ++dependencies = [ ++ "getrandom", ++ "redox_syscall 0.2.16", ++ "thiserror", ++] ++ ++[[package]] ++name = "regex" ++version = "1.9.5" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "697061221ea1b4a94a624f67d0ae2bfe4e22b8a17b6a192afb11046542cc8c47" ++dependencies = [ ++ "aho-corasick", ++ "memchr", ++ "regex-automata 0.3.8", ++ "regex-syntax 0.7.5", ++] ++ ++[[package]] ++name = "regex-automata" ++version = "0.1.10" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" ++dependencies = [ ++ "regex-syntax 0.6.29", ++] ++ ++[[package]] ++name = "regex-automata" ++version = "0.3.8" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "c2f401f4955220693b56f8ec66ee9c78abffd8d1c4f23dc41a23839eb88f0795" ++dependencies = [ ++ "aho-corasick", ++ "memchr", ++ "regex-syntax 0.7.5", ++] ++ ++[[package]] ++name = "regex-syntax" ++version = "0.6.29" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" ++ ++[[package]] ++name = "regex-syntax" ++version = "0.7.5" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" ++ ++[[package]] ++name = "ring" ++version = "0.16.20" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" ++dependencies = [ ++ "cc", ++ "libc", ++ "once_cell", ++ "spin", ++ "untrusted", ++ "web-sys", ++ "winapi", ++] ++ ++[[package]] ++name = "ron" ++version = "0.7.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "88073939a61e5b7680558e6be56b419e208420c2adb92be54921fa6b72283f1a" ++dependencies = [ ++ "base64 0.13.1", ++ "bitflags 1.3.2", ++ "serde", ++] ++ ++[[package]] ++name = "rusqlite" ++version = "0.26.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "4ba4d3462c8b2e4d7f4fcfcf2b296dc6b65404fbbc7b63daa37fd485c149daf7" ++dependencies = [ ++ "bitflags 1.3.2", ++ "fallible-iterator", ++ "fallible-streaming-iterator", ++ "hashlink 0.7.0", ++ "libsqlite3-sys", ++ "memchr", ++ "smallvec", ++] ++ ++[[package]] ++name = "rust-ini" ++version = "0.17.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = 
"63471c4aa97a1cf8332a5f97709a79a4234698de6a1f5087faf66f2dae810e22" ++dependencies = [ ++ "cfg-if", ++ "ordered-multimap", ++] ++ ++[[package]] ++name = "rustc-demangle" ++version = "0.1.23" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" ++ ++[[package]] ++name = "rustix" ++version = "0.37.23" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "4d69718bf81c6127a49dc64e44a742e8bb9213c0ff8869a22c308f84c1d4ab06" ++dependencies = [ ++ "bitflags 1.3.2", ++ "errno", ++ "io-lifetimes", ++ "libc", ++ "linux-raw-sys 0.3.8", ++ "windows-sys 0.48.0", ++] ++ ++[[package]] ++name = "rustix" ++version = "0.38.11" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "c0c3dde1fc030af041adc40e79c0e7fbcf431dd24870053d187d7c66e4b87453" ++dependencies = [ ++ "bitflags 2.4.0", ++ "errno", ++ "libc", ++ "linux-raw-sys 0.4.5", ++ "windows-sys 0.48.0", ++] ++ ++[[package]] ++name = "rustls" ++version = "0.20.9" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "1b80e3dec595989ea8510028f30c408a4630db12c9cbb8de34203b89d6577e99" ++dependencies = [ ++ "log", ++ "ring", ++ "sct", ++ "webpki", ++] ++ ++[[package]] ++name = "rustls" ++version = "0.21.7" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" ++dependencies = [ ++ "log", ++ "ring", ++ "rustls-webpki", ++ "sct", ++] ++ ++[[package]] ++name = "rustls-native-certs" ++version = "0.6.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" ++dependencies = [ ++ "openssl-probe", ++ "rustls-pemfile", ++ "schannel", ++ "security-framework", ++] ++ ++[[package]] ++name = "rustls-pemfile" ++version = "1.0.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" ++dependencies = [ ++ "base64 0.21.3", ++] ++ ++[[package]] ++name = "rustls-webpki" ++version = "0.101.4" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "7d93931baf2d282fff8d3a532bbfd7653f734643161b87e3e01e59a04439bf0d" ++dependencies = [ ++ "ring", ++ "untrusted", ++] ++ ++[[package]] ++name = "rustversion" ++version = "1.0.14" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" ++ ++[[package]] ++name = "ryu" ++version = "1.0.15" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" ++ ++[[package]] ++name = "schannel" ++version = "0.1.22" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" ++dependencies = [ ++ "windows-sys 0.48.0", ++] ++ ++[[package]] ++name = "scheduled-thread-pool" ++version = "0.2.7" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19" ++dependencies = [ ++ "parking_lot 0.12.1", ++] ++ ++[[package]] ++name = "scopeguard" ++version = "1.2.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" ++ ++[[package]] ++name = "sct" ++version 
= "0.7.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" ++dependencies = [ ++ "ring", ++ "untrusted", ++] ++ ++[[package]] ++name = "secp256k1" ++version = "0.21.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "9c42e6f1735c5f00f51e43e28d6634141f2bcad10931b2609ddd74a86d751260" ++dependencies = [ ++ "bitcoin_hashes 0.10.0", ++ "rand 0.6.5", ++ "secp256k1-sys 0.4.2", ++ "serde", ++] ++ ++[[package]] ++name = "secp256k1" ++version = "0.24.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "6b1629c9c557ef9b293568b338dddfc8208c98a18c59d722a9d53f859d9c9b62" ++dependencies = [ ++ "bitcoin_hashes 0.11.0", ++ "rand 0.8.5", ++ "secp256k1-sys 0.6.1", ++ "serde", ++] ++ ++[[package]] ++name = "secp256k1-sys" ++version = "0.4.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "957da2573cde917463ece3570eab4a0b3f19de6f1646cde62e6fd3868f566036" ++dependencies = [ ++ "cc", ++] ++ ++[[package]] ++name = "secp256k1-sys" ++version = "0.6.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "83080e2c2fc1006e625be82e5d1eb6a43b7fd9578b617fcc55814daf286bba4b" ++dependencies = [ ++ "cc", ++] ++ ++[[package]] ++name = "security-framework" ++version = "2.9.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" ++dependencies = [ ++ "bitflags 1.3.2", ++ "core-foundation", ++ "core-foundation-sys", ++ "libc", ++ "security-framework-sys", ++] ++ ++[[package]] ++name = "security-framework-sys" ++version = "2.9.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" ++dependencies = [ ++ "core-foundation-sys", ++ "libc", ++] ++ ++[[package]] ++name = "serde" ++version = "1.0.188" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e" ++dependencies = [ ++ "serde_derive", ++] ++ ++[[package]] ++name = "serde_derive" ++version = "1.0.188" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" ++dependencies = [ ++ "proc-macro2", ++ "quote", ++ "syn 2.0.31", ++] ++ ++[[package]] ++name = "serde_json" ++version = "1.0.105" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "693151e1ac27563d6dbcec9dee9fbd5da8539b20fa14ad3752b2e6d363ace360" ++dependencies = [ ++ "indexmap 2.0.0", ++ "itoa", ++ "ryu", ++ "serde", ++] ++ ++[[package]] ++name = "sha-1" ++version = "0.10.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c" ++dependencies = [ ++ "cfg-if", ++ "cpufeatures", ++ "digest", ++] ++ ++[[package]] ++name = "sha1" ++version = "0.10.5" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" ++dependencies = [ ++ "cfg-if", ++ "cpufeatures", ++ "digest", ++] ++ ++[[package]] ++name = "sha2" ++version = "0.10.7" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" ++dependencies = [ ++ "cfg-if", ++ "cpufeatures", ++ "digest", ++] ++ 
++[[package]] ++name = "sharded-slab" ++version = "0.1.4" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" ++dependencies = [ ++ "lazy_static", ++] ++ ++[[package]] ++name = "signal-hook-registry" ++version = "1.4.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" ++dependencies = [ ++ "libc", ++] ++ ++[[package]] ++name = "slab" ++version = "0.4.9" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" ++dependencies = [ ++ "autocfg 1.1.0", ++] ++ ++[[package]] ++name = "smallvec" ++version = "1.11.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9" ++ ++[[package]] ++name = "socket2" ++version = "0.4.9" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" ++dependencies = [ ++ "libc", ++ "winapi", ++] ++ ++[[package]] ++name = "socket2" ++version = "0.5.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "2538b18701741680e0322a2302176d3253a35388e2e62f172f64f4f16605f877" ++dependencies = [ ++ "libc", ++ "windows-sys 0.48.0", ++] ++ ++[[package]] ++name = "spin" ++version = "0.5.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" ++ ++[[package]] ++name = "sqlformat" ++version = "0.2.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "6b7b278788e7be4d0d29c0f39497a0eef3fba6bbc8e70d8bf7fde46edeaa9e85" ++dependencies = [ ++ "itertools 0.11.0", ++ "nom", ++ "unicode_categories", ++] ++ ++[[package]] ++name = "sqlx" ++version = "0.6.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "f8de3b03a925878ed54a954f621e64bf55a3c1bd29652d0d1a17830405350188" ++dependencies = [ ++ "sqlx-core", ++ "sqlx-macros", ++] ++ ++[[package]] ++name = "sqlx-core" ++version = "0.6.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "fa8241483a83a3f33aa5fff7e7d9def398ff9990b2752b6c6112b83c6d246029" ++dependencies = [ ++ "ahash 0.7.6", ++ "atoi", ++ "base64 0.13.1", ++ "bitflags 1.3.2", ++ "byteorder", ++ "bytes", ++ "chrono", ++ "crc", ++ "crossbeam-queue", ++ "dirs 4.0.0", ++ "dotenvy", ++ "either", ++ "event-listener", ++ "futures-channel", ++ "futures-core", ++ "futures-intrusive", ++ "futures-util", ++ "hashlink 0.8.4", ++ "hex", ++ "hkdf", ++ "hmac", ++ "indexmap 1.9.3", ++ "itoa", ++ "libc", ++ "log", ++ "md-5", ++ "memchr", ++ "once_cell", ++ "paste", ++ "percent-encoding", ++ "rand 0.8.5", ++ "rustls 0.20.9", ++ "rustls-pemfile", ++ "serde", ++ "serde_json", ++ "sha1", ++ "sha2", ++ "smallvec", ++ "sqlformat", ++ "sqlx-rt", ++ "stringprep", ++ "thiserror", ++ "tokio-stream", ++ "url", ++ "webpki-roots", ++ "whoami", ++] ++ ++[[package]] ++name = "sqlx-macros" ++version = "0.6.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "9966e64ae989e7e575b19d7265cb79d7fc3cbbdf179835cb0d716f294c2049c9" ++dependencies = [ ++ "dotenvy", ++ "either", ++ "heck", ++ "once_cell", ++ "proc-macro2", ++ "quote", ++ "sha2", ++ "sqlx-core", ++ "sqlx-rt", ++ "syn 1.0.109", ++ "url", ++] ++ ++[[package]] ++name 
= "sqlx-rt" ++version = "0.6.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "804d3f245f894e61b1e6263c84b23ca675d96753b5abfd5cc8597d86806e8024" ++dependencies = [ ++ "once_cell", ++ "tokio", ++ "tokio-rustls 0.23.4", ++] ++ ++[[package]] ++name = "stringprep" ++version = "0.1.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "db3737bde7edce97102e0e2b15365bf7a20bfdb5f60f4f9e8d7004258a51a8da" ++dependencies = [ ++ "unicode-bidi", ++ "unicode-normalization", ++] ++ ++[[package]] ++name = "strsim" ++version = "0.10.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" ++ ++[[package]] ++name = "subtle" ++version = "2.5.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" ++ ++[[package]] ++name = "syn" ++version = "1.0.109" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" ++dependencies = [ ++ "proc-macro2", ++ "quote", ++ "unicode-ident", ++] ++ ++[[package]] ++name = "syn" ++version = "2.0.31" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "718fa2415bcb8d8bd775917a1bf12a7931b6dfa890753378538118181e0cb398" ++dependencies = [ ++ "proc-macro2", ++ "quote", ++ "unicode-ident", ++] ++ ++[[package]] ++name = "sync_wrapper" ++version = "0.1.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" ++ ++[[package]] ++name = "tempfile" ++version = "3.8.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" ++dependencies = [ ++ "cfg-if", ++ "fastrand 2.0.0", ++ "redox_syscall 0.3.5", ++ "rustix 0.38.11", ++ "windows-sys 0.48.0", ++] ++ ++[[package]] ++name = "thiserror" ++version = "1.0.48" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "9d6d7a740b8a666a7e828dd00da9c0dc290dff53154ea77ac109281de90589b7" ++dependencies = [ ++ "thiserror-impl", ++] ++ ++[[package]] ++name = "thiserror-impl" ++version = "1.0.48" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" ++dependencies = [ ++ "proc-macro2", ++ "quote", ++ "syn 2.0.31", ++] ++ ++[[package]] ++name = "thread_local" ++version = "1.1.7" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" ++dependencies = [ ++ "cfg-if", ++ "once_cell", ++] ++ ++[[package]] ++name = "tikv-jemalloc-sys" ++version = "0.5.4+5.3.0-patched" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "9402443cb8fd499b6f327e40565234ff34dbda27460c5b47db0db77443dd85d1" ++dependencies = [ ++ "cc", ++ "libc", ++] ++ ++[[package]] ++name = "tikv-jemallocator" ++version = "0.5.4" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "965fe0c26be5c56c94e38ba547249074803efd52adfb66de62107d95aab3eaca" ++dependencies = [ ++ "libc", ++ "tikv-jemalloc-sys", ++] ++ ++[[package]] ++name = "time" ++version = "0.1.43" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = 
"ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" ++dependencies = [ ++ "libc", ++ "winapi", ++] ++ ++[[package]] ++name = "time" ++version = "0.3.28" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "17f6bb557fd245c28e6411aa56b6403c689ad95061f50e4be16c274e70a17e48" ++dependencies = [ ++ "deranged", ++ "itoa", ++ "serde", ++ "time-core", ++ "time-macros", ++] ++ ++[[package]] ++name = "time-core" ++version = "0.1.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" ++ ++[[package]] ++name = "time-macros" ++version = "0.2.14" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "1a942f44339478ef67935ab2bbaec2fb0322496cf3cbe84b261e06ac3814c572" ++dependencies = [ ++ "time-core", ++] ++ ++[[package]] ++name = "tinyvec" ++version = "1.6.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" ++dependencies = [ ++ "tinyvec_macros", ++] ++ ++[[package]] ++name = "tinyvec_macros" ++version = "0.1.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" ++ ++[[package]] ++name = "tokio" ++version = "1.32.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "17ed6077ed6cd6c74735e21f37eb16dc3935f96878b1fe961074089cc80893f9" ++dependencies = [ ++ "backtrace", ++ "bytes", ++ "libc", ++ "mio", ++ "num_cpus", ++ "parking_lot 0.12.1", ++ "pin-project-lite", ++ "signal-hook-registry", ++ "socket2 0.5.3", ++ "tokio-macros", ++ "tracing", ++ "windows-sys 0.48.0", ++] ++ ++[[package]] ++name = "tokio-io-timeout" ++version = "1.2.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" ++dependencies = [ ++ "pin-project-lite", ++ "tokio", ++] ++ ++[[package]] ++name = "tokio-macros" ++version = "2.1.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" ++dependencies = [ ++ "proc-macro2", ++ "quote", ++ "syn 2.0.31", ++] ++ ++[[package]] ++name = "tokio-rustls" ++version = "0.23.4" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" ++dependencies = [ ++ "rustls 0.20.9", ++ "tokio", ++ "webpki", ++] ++ ++[[package]] ++name = "tokio-rustls" ++version = "0.24.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" ++dependencies = [ ++ "rustls 0.21.7", ++ "tokio", ++] ++ ++[[package]] ++name = "tokio-stream" ++version = "0.1.14" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" ++dependencies = [ ++ "futures-core", ++ "pin-project-lite", ++ "tokio", ++] ++ ++[[package]] ++name = "tokio-tungstenite" ++version = "0.17.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "f714dd15bead90401d77e04243611caec13726c2408afd5b31901dfcdcb3b181" ++dependencies = [ ++ "futures-util", ++ "log", ++ "tokio", ++ "tungstenite", ++] ++ ++[[package]] ++name = "tokio-util" ++version = "0.7.8" ++source = 
"registry+https://github.com/rust-lang/crates.io-index" ++checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" ++dependencies = [ ++ "bytes", ++ "futures-core", ++ "futures-sink", ++ "pin-project-lite", ++ "tokio", ++ "tracing", ++] ++ ++[[package]] ++name = "toml" ++version = "0.5.11" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" ++dependencies = [ ++ "serde", ++] ++ ++[[package]] ++name = "tonic" ++version = "0.8.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "8f219fad3b929bef19b1f86fbc0358d35daed8f2cac972037ac0dc10bbb8d5fb" ++dependencies = [ ++ "async-stream", ++ "async-trait", ++ "axum", ++ "base64 0.13.1", ++ "bytes", ++ "futures-core", ++ "futures-util", ++ "h2", ++ "http", ++ "http-body", ++ "hyper", ++ "hyper-timeout", ++ "percent-encoding", ++ "pin-project", ++ "prost", ++ "prost-derive", ++ "tokio", ++ "tokio-stream", ++ "tokio-util", ++ "tower", ++ "tower-layer", ++ "tower-service", ++ "tracing", ++ "tracing-futures", ++] ++ ++[[package]] ++name = "tonic" ++version = "0.9.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "3082666a3a6433f7f511c7192923fa1fe07c69332d3c6a2e6bb040b569199d5a" ++dependencies = [ ++ "async-trait", ++ "axum", ++ "base64 0.21.3", ++ "bytes", ++ "futures-core", ++ "futures-util", ++ "h2", ++ "http", ++ "http-body", ++ "hyper", ++ "hyper-timeout", ++ "percent-encoding", ++ "pin-project", ++ "prost", ++ "tokio", ++ "tokio-stream", ++ "tower", ++ "tower-layer", ++ "tower-service", ++ "tracing", ++] ++ ++[[package]] ++name = "tonic-build" ++version = "0.8.4" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "5bf5e9b9c0f7e0a7c027dcfaba7b2c60816c7049171f679d99ee2ff65d0de8c4" ++dependencies = [ ++ "prettyplease", ++ "proc-macro2", ++ "prost-build", ++ "quote", ++ "syn 1.0.109", ++] ++ ++[[package]] ++name = "tower" ++version = "0.4.13" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" ++dependencies = [ ++ "futures-core", ++ "futures-util", ++ "indexmap 1.9.3", ++ "pin-project", ++ "pin-project-lite", ++ "rand 0.8.5", ++ "slab", ++ "tokio", ++ "tokio-util", ++ "tower-layer", ++ "tower-service", ++ "tracing", ++] ++ ++[[package]] ++name = "tower-layer" ++version = "0.3.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" ++ ++[[package]] ++name = "tower-service" ++version = "0.3.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" ++ ++[[package]] ++name = "tracing" ++version = "0.1.37" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" ++dependencies = [ ++ "cfg-if", ++ "pin-project-lite", ++ "tracing-attributes", ++ "tracing-core", ++] ++ ++[[package]] ++name = "tracing-appender" ++version = "0.2.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "09d48f71a791638519505cefafe162606f706c25592e4bde4d97600c0195312e" ++dependencies = [ ++ "crossbeam-channel", ++ "time 0.3.28", ++ "tracing-subscriber", ++] ++ ++[[package]] ++name = "tracing-attributes" ++version = "0.1.26" ++source = 
"registry+https://github.com/rust-lang/crates.io-index" ++checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" ++dependencies = [ ++ "proc-macro2", ++ "quote", ++ "syn 2.0.31", ++] ++ ++[[package]] ++name = "tracing-core" ++version = "0.1.31" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" ++dependencies = [ ++ "once_cell", ++ "valuable", ++] ++ ++[[package]] ++name = "tracing-futures" ++version = "0.2.5" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" ++dependencies = [ ++ "pin-project", ++ "tracing", ++] ++ ++[[package]] ++name = "tracing-log" ++version = "0.1.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" ++dependencies = [ ++ "lazy_static", ++ "log", ++ "tracing-core", ++] ++ ++[[package]] ++name = "tracing-subscriber" ++version = "0.3.17" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" ++dependencies = [ ++ "matchers", ++ "nu-ansi-term", ++ "once_cell", ++ "regex", ++ "sharded-slab", ++ "smallvec", ++ "thread_local", ++ "tracing", ++ "tracing-core", ++ "tracing-log", ++] ++ ++[[package]] ++name = "try-lock" ++version = "0.2.4" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" ++ ++[[package]] ++name = "tungstenite" ++version = "0.17.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "e27992fd6a8c29ee7eef28fc78349aa244134e10ad447ce3b9f0ac0ed0fa4ce0" ++dependencies = [ ++ "base64 0.13.1", ++ "byteorder", ++ "bytes", ++ "http", ++ "httparse", ++ "log", ++ "rand 0.8.5", ++ "sha-1", ++ "thiserror", ++ "url", ++ "utf-8", ++] ++ ++[[package]] ++name = "typenum" ++version = "1.16.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" ++ ++[[package]] ++name = "ucd-trie" ++version = "0.1.6" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" ++ ++[[package]] ++name = "unicode-bidi" ++version = "0.3.13" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" ++ ++[[package]] ++name = "unicode-ident" ++version = "1.0.11" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c" ++ ++[[package]] ++name = "unicode-normalization" ++version = "0.1.22" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" ++dependencies = [ ++ "tinyvec", ++] ++ ++[[package]] ++name = "unicode-segmentation" ++version = "1.10.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" ++ ++[[package]] ++name = "unicode-width" ++version = "0.1.10" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" ++ ++[[package]] ++name = "unicode-xid" 
++version = "0.2.4" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" ++ ++[[package]] ++name = "unicode_categories" ++version = "0.1.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e" ++ ++[[package]] ++name = "untrusted" ++version = "0.7.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" ++ ++[[package]] ++name = "url" ++version = "2.4.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5" ++dependencies = [ ++ "form_urlencoded", ++ "idna", ++ "percent-encoding", ++ "serde", ++] ++ ++[[package]] ++name = "utf-8" ++version = "0.7.6" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" ++ ++[[package]] ++name = "utf8parse" ++version = "0.2.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" ++ ++[[package]] ++name = "uuid" ++version = "1.4.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "79daa5ed5740825c40b389c5e50312b9c86df53fccd33f281df655642b43869d" ++dependencies = [ ++ "getrandom", ++] ++ ++[[package]] ++name = "valuable" ++version = "0.1.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" ++ ++[[package]] ++name = "value-bag" ++version = "1.4.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "d92ccd67fb88503048c01b59152a04effd0782d035a83a6d256ce6085f08f4a3" ++ ++[[package]] ++name = "vcpkg" ++version = "0.2.15" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" ++ ++[[package]] ++name = "version_check" ++version = "0.9.4" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" ++ ++[[package]] ++name = "waker-fn" ++version = "1.1.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" ++ ++[[package]] ++name = "want" ++version = "0.3.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" ++dependencies = [ ++ "try-lock", ++] ++ ++[[package]] ++name = "wasi" ++version = "0.10.2+wasi-snapshot-preview1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" ++ ++[[package]] ++name = "wasi" ++version = "0.11.0+wasi-snapshot-preview1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" ++ ++[[package]] ++name = "wasm-bindgen" ++version = "0.2.87" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" ++dependencies = [ ++ "cfg-if", ++ "wasm-bindgen-macro", ++] ++ ++[[package]] ++name = "wasm-bindgen-backend" 
++version = "0.2.87" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" ++dependencies = [ ++ "bumpalo", ++ "log", ++ "once_cell", ++ "proc-macro2", ++ "quote", ++ "syn 2.0.31", ++ "wasm-bindgen-shared", ++] ++ ++[[package]] ++name = "wasm-bindgen-futures" ++version = "0.4.37" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" ++dependencies = [ ++ "cfg-if", ++ "js-sys", ++ "wasm-bindgen", ++ "web-sys", ++] ++ ++[[package]] ++name = "wasm-bindgen-macro" ++version = "0.2.87" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" ++dependencies = [ ++ "quote", ++ "wasm-bindgen-macro-support", ++] ++ ++[[package]] ++name = "wasm-bindgen-macro-support" ++version = "0.2.87" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" ++dependencies = [ ++ "proc-macro2", ++ "quote", ++ "syn 2.0.31", ++ "wasm-bindgen-backend", ++ "wasm-bindgen-shared", ++] ++ ++[[package]] ++name = "wasm-bindgen-shared" ++version = "0.2.87" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" ++ ++[[package]] ++name = "web-sys" ++version = "0.3.64" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" ++dependencies = [ ++ "js-sys", ++ "wasm-bindgen", ++] ++ ++[[package]] ++name = "webpki" ++version = "0.22.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "f0e74f82d49d545ad128049b7e88f6576df2da6b02e9ce565c6f533be576957e" ++dependencies = [ ++ "ring", ++ "untrusted", ++] ++ ++[[package]] ++name = "webpki-roots" ++version = "0.22.6" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "b6c71e40d7d2c34a5106301fb632274ca37242cd0c9d3e64dbece371a40a2d87" ++dependencies = [ ++ "webpki", ++] ++ ++[[package]] ++name = "which" ++version = "4.4.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "8ad25fe5717e59ada8ea33511bbbf7420b11031730a24c65e82428766c307006" ++dependencies = [ ++ "dirs 5.0.1", ++ "either", ++ "once_cell", ++ "rustix 0.38.11", ++] ++ ++[[package]] ++name = "whoami" ++version = "1.4.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "22fc3756b8a9133049b26c7f61ab35416c130e8c09b660f5b3958b446f52cc50" ++dependencies = [ ++ "wasm-bindgen", ++ "web-sys", ++] ++ ++[[package]] ++name = "winapi" ++version = "0.3.9" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" ++dependencies = [ ++ "winapi-i686-pc-windows-gnu", ++ "winapi-x86_64-pc-windows-gnu", ++] ++ ++[[package]] ++name = "winapi-i686-pc-windows-gnu" ++version = "0.4.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" ++ ++[[package]] ++name = "winapi-x86_64-pc-windows-gnu" ++version = "0.4.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" ++ ++[[package]] ++name = "windows" ++version = "0.48.0" 
++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" ++dependencies = [ ++ "windows-targets 0.48.5", ++] ++ ++[[package]] ++name = "windows-sys" ++version = "0.45.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" ++dependencies = [ ++ "windows-targets 0.42.2", ++] ++ ++[[package]] ++name = "windows-sys" ++version = "0.48.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" ++dependencies = [ ++ "windows-targets 0.48.5", ++] ++ ++[[package]] ++name = "windows-targets" ++version = "0.42.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" ++dependencies = [ ++ "windows_aarch64_gnullvm 0.42.2", ++ "windows_aarch64_msvc 0.42.2", ++ "windows_i686_gnu 0.42.2", ++ "windows_i686_msvc 0.42.2", ++ "windows_x86_64_gnu 0.42.2", ++ "windows_x86_64_gnullvm 0.42.2", ++ "windows_x86_64_msvc 0.42.2", ++] ++ ++[[package]] ++name = "windows-targets" ++version = "0.48.5" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" ++dependencies = [ ++ "windows_aarch64_gnullvm 0.48.5", ++ "windows_aarch64_msvc 0.48.5", ++ "windows_i686_gnu 0.48.5", ++ "windows_i686_msvc 0.48.5", ++ "windows_x86_64_gnu 0.48.5", ++ "windows_x86_64_gnullvm 0.48.5", ++ "windows_x86_64_msvc 0.48.5", ++] ++ ++[[package]] ++name = "windows_aarch64_gnullvm" ++version = "0.42.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" ++ ++[[package]] ++name = "windows_aarch64_gnullvm" ++version = "0.48.5" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" ++ ++[[package]] ++name = "windows_aarch64_msvc" ++version = "0.42.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" ++ ++[[package]] ++name = "windows_aarch64_msvc" ++version = "0.48.5" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" ++ ++[[package]] ++name = "windows_i686_gnu" ++version = "0.42.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" ++ ++[[package]] ++name = "windows_i686_gnu" ++version = "0.48.5" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" ++ ++[[package]] ++name = "windows_i686_msvc" ++version = "0.42.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" ++ ++[[package]] ++name = "windows_i686_msvc" ++version = "0.48.5" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" ++ ++[[package]] ++name = "windows_x86_64_gnu" ++version = "0.42.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = 
"8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" ++ ++[[package]] ++name = "windows_x86_64_gnu" ++version = "0.48.5" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" ++ ++[[package]] ++name = "windows_x86_64_gnullvm" ++version = "0.42.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" ++ ++[[package]] ++name = "windows_x86_64_gnullvm" ++version = "0.48.5" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" ++ ++[[package]] ++name = "windows_x86_64_msvc" ++version = "0.42.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" ++ ++[[package]] ++name = "windows_x86_64_msvc" ++version = "0.48.5" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" ++ ++[[package]] ++name = "yaml-rust" ++version = "0.4.5" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" ++dependencies = [ ++ "linked-hash-map", ++] +diff --git a/Cargo.toml b/Cargo.toml +new file mode 100644 +index 0000000..dd1368b +--- /dev/null ++++ b/Cargo.toml +@@ -0,0 +1,68 @@ ++[workspace]##exclude from gnostr-workspace ++[package] ++name = "gnostr-relay" ++version = "0.0.2" ++edition = "2021" ++authors = ["Greg Heartsfield ", "gnostr "] ++description = "gnostr:git+nostr protocol relay" ++readme = "README.md" ++homepage = "https://gnostr.org" ++repository = "git@github.com:gnostr-org/gnostr-relay.git" ++license = "MIT" ++keywords = ["nostr", "server", "gnostr", "git"] ++categories = ["network-programming", "web-programming", "git", "gnostr"] ++ ++[dependencies] ++clap = { version = "4.0.32", features = ["env", "default", "derive"]} ++tracing = "0.1.37" ++tracing-appender = "0.2.2" ++tracing-subscriber = "0.3.16" ++tokio = { version = "1", features = ["full", "tracing", "signal"] } ++prost = "0.11" ++tonic = "0.8.3" ++console-subscriber = "0.1.8" ++futures = "0.3" ++futures-util = "0.3" ++tokio-tungstenite = "0.17" ++tungstenite = "0.17" ++thiserror = "1" ++uuid = { version = "1.1.2", features = ["v4"] } ++config = { version = "0.12", features = ["toml"] } ++bitcoin_hashes = { version = "0.10", features = ["serde"] } ++secp256k1 = {version = "0.21", features = ["rand", "rand-std", "serde", "bitcoin_hashes"] } ++serde = { version = "1.0", features = ["derive"] } ++serde_json = {version = "1.0", features = ["preserve_order"]} ++hex = "0.4" ++rusqlite = { version = "0.26", features = ["limits","bundled","modern_sqlite", "trace"]} ++r2d2 = "0.8" ++r2d2_sqlite = "0.19" ++lazy_static = "1.4" ++governor = "0.4" ++nonzero_ext = "0.3" ++hyper = { version="0.14", features=["client", "server","http1","http2","tcp"] } ++hyper-rustls = { version = "0.24" } ++http = { version = "0.2" } ++parse_duration = "2" ++rand = "0.8" ++const_format = "0.2.28" ++regex = "1" ++async-trait = "0.1.60" ++async-std = "1.12.0" ++sqlx = { version ="0.6.2", features=["runtime-tokio-rustls", "postgres", "chrono"]} ++chrono = "0.4.23" ++prometheus = "0.13.3" ++indicatif = "0.17.3" ++bech32 = "0.9.1" ++url = "2.3.1" ++qrcode = { version = "0.12.0", default-features = false, features = 
["svg"] } ++nostr = { version = "0.18.0", default-features = false, features = ["base", "nip04", "nip19"] } ++dirs = "5.0.1" ++[target.'cfg(not(target_env = "msvc"))'.dependencies] ++tikv-jemallocator = "0.5" ++log = "0.4" ++ ++[dev-dependencies] ++anyhow = "1" ++ ++[build-dependencies] ++tonic-build = { version="0.8.3", features = ["prost"] } +diff --git a/Dockerfile b/Dockerfile +new file mode 100644 +index 0000000..0b0083e +--- /dev/null ++++ b/Dockerfile +@@ -0,0 +1,52 @@ ++FROM docker.io/library/rust:1-bookworm as builder ++RUN apt-get update \ ++ && apt-get install -y cmake protobuf-compiler \ ++ && rm -rf /var/lib/apt/lists/* ++RUN USER=root cargo install cargo-auditable ++RUN USER=root cargo new --bin nostr-rs-relay ++WORKDIR ./nostr-rs-relay ++COPY ./Cargo.toml ./Cargo.toml ++COPY ./Cargo.lock ./Cargo.lock ++# build dependencies only (caching) ++RUN cargo auditable build --release --locked ++# get rid of starter project code ++RUN rm src/*.rs ++ ++# copy project source code ++COPY ./src ./src ++COPY ./proto ./proto ++COPY ./build.rs ./build.rs ++ ++# build auditable release using locked deps ++RUN rm ./target/release/deps/nostr*relay* ++RUN cargo auditable build --release --locked ++ ++FROM docker.io/library/debian:bookworm-slim ++ ++ARG APP=/usr/src/app ++ARG APP_DATA=/usr/src/app/db ++RUN apt-get update \ ++ && apt-get install -y ca-certificates tzdata sqlite3 libc6 \ ++ && rm -rf /var/lib/apt/lists/* ++ ++EXPOSE 8080 ++ ++ENV TZ=Etc/UTC \ ++ APP_USER=appuser ++ ++RUN groupadd $APP_USER \ ++ && useradd -g $APP_USER $APP_USER \ ++ && mkdir -p ${APP} \ ++ && mkdir -p ${APP_DATA} ++ ++COPY --from=builder /nostr-rs-relay/target/release/nostr-rs-relay ${APP}/nostr-rs-relay ++ ++RUN chown -R $APP_USER:$APP_USER ${APP} ++ ++USER $APP_USER ++WORKDIR ${APP} ++ ++ENV RUST_LOG=info,nostr_rs_relay=info ++ENV APP_DATA=${APP_DATA} ++ ++CMD ./nostr-rs-relay --db ${APP_DATA} +diff --git a/GNUmakefile b/GNUmakefile +new file mode 100644 +index 0000000..e9e278d +--- /dev/null ++++ b/GNUmakefile +@@ -0,0 +1,54 @@ ++##make gnostr-test port= ++ifneq ($(port),) ++PORT :=$(port) ++else ++PORT :=8080 ++endif ++export PORT ++ ++.PHONY:- help ++-: ++ @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?##/ {printf "\033[36m%-15s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) ++ @echo ++more:## more help ++ @sed -n 's/^##//p' ${MAKEFILE_LIST} | column -t -s ':' | sed -e 's/^/ /' ++ #$(MAKE) -f Makefile help ++ ++-include Makefile ++ ++gnostr-test:## gnostr-test ++ @$(shell which gnostr-relay) -p $(PORT) 2>/tmp/gnostr-relay || true & ++ @echo $(shell which gnostr-relay) ++ @echo $(shell which gnostr) ++ @echo $(shell which gnostr-weeble) ++ @echo $(shell which gnostr-post-event) ++ gnostr \ ++ --sec $(shell gnostr-sha256 $(shell gnostr-weeble)) \ ++ -t gnostr \ ++ --tag weeble $(shell gnostr-weeble) \ ++ --tag wobble $(shell gnostr-wobble) \ ++ --tag blockheight $(shell gnostr-blockheight) \ ++ --content "$(shell gnostr-weeble)" \ ++ | gnostr-post-event ws://0.0.0.0:$(PORT) ++ ++ ++gnostr-relay-list:## gnostr-relay-list ++ @echo $(shell which gnostr) ++ @echo $(shell which gnostr-weeble) ++ @echo $(shell which gnostr-post-event) ++ gnostr \ ++ --sec $(shell gnostr-sha256 $(shell gnostr-weeble)) \ ++ -t gnostr \ ++ --tag weeble $(shell gnostr-weeble) \ ++ --tag wobble $(shell gnostr-wobble) \ ++ --tag blockheight $(shell gnostr-blockheight) \ ++ --content "" \ ++ | gnostr-post-event ws://0.0.0.0:$(PORT) ++ ++gnostr-test-proxy:## gnsotr-test-proxy ++ @echo $(shell which gnostr) ++ @echo $(shell which gnostr-weeble) ++ 
@echo $(shell which gnostr-post-event) ++ gnostr --sec $(shell gnostr-sha256 $(shell gnostr-weeble)) | gnostr-post-event ws://0.0.0.0:$(PORT) ++ ++-include cargo.mk +diff --git a/LICENSE b/LICENSE +new file mode 100644 +index 0000000..4ef47c1 +--- /dev/null ++++ b/LICENSE +@@ -0,0 +1,21 @@ ++The MIT License (MIT) ++ ++Copyright (c) 2021 Greg Heartsfield ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ++AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN ++THE SOFTWARE. +diff --git a/README.md b/README.md +new file mode 100644 +index 0000000..5fbd233 +--- /dev/null ++++ b/README.md +@@ -0,0 +1,167 @@ ++# [nostr-rs-relay](https://git.sr.ht/~gheartsfield/nostr-rs-relay) ++ ++This is a [nostr](https://github.com/nostr-protocol/nostr) relay, ++written in Rust. It currently supports the entire relay protocol, and ++persists data with SQLite. There is experimental support for ++Postgresql. ++ ++The project master repository is available on ++[sourcehut](https://sr.ht/~gheartsfield/nostr-rs-relay/), and is ++mirrored on [GitHub](https://github.com/scsibug/nostr-rs-relay). ++ ++[![builds.sr.ht status](https://builds.sr.ht/~gheartsfield/nostr-rs-relay/commits/master.svg)](https://builds.sr.ht/~gheartsfield/nostr-rs-relay/commits/master?) ++ ++![Github CI](https://github.com/schlunsen/nostr-rs-relay/actions/workflows/ci.yml/badge.svg) ++ ++ ++## Features ++ ++[NIPs](https://github.com/nostr-protocol/nips) with a relay-specific implementation are listed here. 
++ ++- [x] NIP-01: [Basic protocol flow description](https://github.com/nostr-protocol/nips/blob/master/01.md) ++ * Core event model ++ * Hide old metadata events ++ * Id/Author prefix search ++- [x] NIP-02: [Contact List and Petnames](https://github.com/nostr-protocol/nips/blob/master/02.md) ++- [ ] NIP-03: [OpenTimestamps Attestations for Events](https://github.com/nostr-protocol/nips/blob/master/03.md) ++- [x] NIP-05: [Mapping Nostr keys to DNS-based internet identifiers](https://github.com/nostr-protocol/nips/blob/master/05.md) ++- [x] NIP-09: [Event Deletion](https://github.com/nostr-protocol/nips/blob/master/09.md) ++- [x] NIP-11: [Relay Information Document](https://github.com/nostr-protocol/nips/blob/master/11.md) ++- [x] NIP-12: [Generic Tag Queries](https://github.com/nostr-protocol/nips/blob/master/12.md) ++- [x] NIP-15: [End of Stored Events Notice](https://github.com/nostr-protocol/nips/blob/master/15.md) ++- [x] NIP-16: [Event Treatment](https://github.com/nostr-protocol/nips/blob/master/16.md) ++- [x] NIP-20: [Command Results](https://github.com/nostr-protocol/nips/blob/master/20.md) ++- [x] NIP-22: [Event `created_at` limits](https://github.com/nostr-protocol/nips/blob/master/22.md) (_future-dated events only_) ++- [ ] NIP-26: [Event Delegation](https://github.com/nostr-protocol/nips/blob/master/26.md) (_implemented, but currently disabled_) ++- [x] NIP-28: [Public Chat](https://github.com/nostr-protocol/nips/blob/master/28.md) ++- [x] NIP-33: [Parameterized Replaceable Events](https://github.com/nostr-protocol/nips/blob/master/33.md) ++- [x] NIP-40: [Expiration Timestamp](https://github.com/nostr-protocol/nips/blob/master/40.md) ++- [x] NIP-42: [Authentication of clients to relays](https://github.com/nostr-protocol/nips/blob/master/42.md) ++ ++## Quick Start ++ ++The provided `Dockerfile` will compile and build the server ++application. Use a bind mount to store the SQLite database outside of ++the container image, and map the container's 8080 port to a host port ++(7000 in the example below). ++ ++The examples below start a rootless podman container, mapping a local ++data directory and config file. ++ ++```console ++$ podman build -t nostr-rs-relay . ++ ++$ mkdir data ++ ++$ podman unshare chown 100:100 data ++ ++$ podman run -it --rm -p 7000:8080 \ ++ --user=100:100 \ ++ -v $(pwd)/data:/usr/src/app/db:Z \ ++ -v $(pwd)/config.toml:/usr/src/app/config.toml:ro,Z \ ++ --name nostr-relay nostr-rs-relay:latest ++ ++Nov 19 15:31:15.013 INFO nostr_rs_relay: Starting up from main ++Nov 19 15:31:15.017 INFO nostr_rs_relay::server: listening on: 0.0.0.0:8080 ++Nov 19 15:31:15.019 INFO nostr_rs_relay::server: db writer created ++Nov 19 15:31:15.019 INFO nostr_rs_relay::server: control message listener started ++Nov 19 15:31:15.019 INFO nostr_rs_relay::db: Built a connection pool "event writer" (min=1, max=4) ++Nov 19 15:31:15.019 INFO nostr_rs_relay::db: opened database "/usr/src/app/db/nostr.db" for writing ++Nov 19 15:31:15.019 INFO nostr_rs_relay::schema: DB version = 0 ++Nov 19 15:31:15.054 INFO nostr_rs_relay::schema: database pragma/schema initialized to v7, and ready ++Nov 19 15:31:15.054 INFO nostr_rs_relay::schema: All migration scripts completed successfully. Welcome to v7. ++Nov 19 15:31:15.521 INFO nostr_rs_relay::db: Built a connection pool "client query" (min=4, max=128) ++``` ++ ++Use a `nostr` client such as ++[`noscl`](https://github.com/fiatjaf/noscl) to publish and query ++events. ++ ++```console ++$ noscl publish "hello world" ++Sent to 'ws://localhost:8090'. 
++Seen it on 'ws://localhost:8090'. ++$ noscl home ++Text Note [81cf...2652] from 296a...9b92 5 seconds ago ++ hello world ++``` ++ ++A pre-built container is also available on DockerHub: ++https://hub.docker.com/r/scsibug/nostr-rs-relay ++ ++## Build and Run (without Docker) ++ ++Building `nostr-rs-relay` requires an installation of Cargo & Rust: https://www.rust-lang.org/tools/install ++ ++The following OS packages will be helpful; on Debian/Ubuntu: ++```console ++$ sudo apt-get install build-essential cmake protobuf-compiler pkg-config libssl-dev ++``` ++ ++Clone this repository, and then build a release version of the relay: ++ ++```console ++$ git clone -q https://git.sr.ht/\~gheartsfield/nostr-rs-relay ++$ cd nostr-rs-relay ++$ cargo build -q -r ++``` ++ ++The relay executable is now located in ++`target/release/nostr-rs-relay`. In order to run it with logging ++enabled, execute it with the `RUST_LOG` variable set: ++ ++```console ++$ RUST_LOG=warn,nostr_rs_relay=info ./target/release/nostr-rs-relay ++Dec 26 10:31:56.455 INFO nostr_rs_relay: Starting up from main ++Dec 26 10:31:56.464 INFO nostr_rs_relay::server: listening on: 0.0.0.0:8080 ++Dec 26 10:31:56.466 INFO nostr_rs_relay::server: db writer created ++Dec 26 10:31:56.466 INFO nostr_rs_relay::db: Built a connection pool "event writer" (min=1, max=2) ++Dec 26 10:31:56.466 INFO nostr_rs_relay::db: opened database "./nostr.db" for writing ++Dec 26 10:31:56.466 INFO nostr_rs_relay::schema: DB version = 11 ++Dec 26 10:31:56.467 INFO nostr_rs_relay::db: Built a connection pool "maintenance writer" (min=1, max=2) ++Dec 26 10:31:56.467 INFO nostr_rs_relay::server: control message listener started ++Dec 26 10:31:56.468 INFO nostr_rs_relay::db: Built a connection pool "client query" (min=4, max=8) ++``` ++ ++You now have a running relay, on port `8080`. Use a `nostr` client or ++`websocat` to connect and send/query for events. ++ ++## Configuration ++ ++The sample [`config.toml`](config.toml) file demonstrates the ++configuration available to the relay. This file is optional, but may ++be mounted into a docker container like so: ++ ++```console ++$ docker run -it -p 7000:8080 \ ++ --mount src=$(pwd)/config.toml,target=/usr/src/app/config.toml,type=bind \ ++ --mount src=$(pwd)/data,target=/usr/src/app/db,type=bind \ ++ nostr-rs-relay ++``` ++ ++Options include rate-limiting, event size limits, and network address ++settings. ++ ++## Reverse Proxy Configuration ++ ++For examples of putting the relay behind a reverse proxy (for TLS ++termination, load balancing, and other features), see [Reverse ++Proxy](docs/reverse-proxy.md). ++ ++## Dev Channel ++ ++For development discussions, please feel free to use the [sourcehut ++mailing list](https://lists.sr.ht/~gheartsfield/nostr-rs-relay-devel). ++Or, drop by the [Nostr Telegram Channel](https://t.me/nostr_protocol). ++ ++To chat about `nostr-rs-relay` on `nostr` itself; visit our channel on [anigma](https://anigma.io/) or another client that supports [NIP-28](https://github.com/nostr-protocol/nips/blob/master/28.md) chats: ++ * `2ad246a094fee48c6e455dd13d759d5f41b5a233120f5719d81ebc1935075194` ++ ++License ++--- ++This project is MIT licensed. 
++
++External Documentation and Links
++---
++
++* [BlockChainCaffe's Nostr Relay Setup Guide](https://github.com/BlockChainCaffe/Nostr-Relay-Setup-Guide)
+diff --git a/build.rs b/build.rs
+new file mode 100644
+index 0000000..2dc88f9
+--- /dev/null
++++ b/build.rs
+@@ -0,0 +1,7 @@
++fn main() -> Result<(), Box<dyn std::error::Error>> {
++    tonic_build::configure()
++        .build_server(false)
++        .protoc_arg("--experimental_allow_proto3_optional")
++        .compile(&["proto/nauthz.proto"], &["proto"])?;
++    Ok(())
++}
+diff --git a/cargo.mk b/cargo.mk
+new file mode 100755
+index 0000000..d2e1869
+--- /dev/null
++++ b/cargo.mk
+@@ -0,0 +1,35 @@
++##
++##make cargo-*
++cargo-help:### cargo-help
++	@awk 'BEGIN {FS = ":.*?### "} /^[a-zA-Z_-]+:.*?### / {printf "\033[36m%-15s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
++cargo-b:cargo-build### cargo b
++cargo-build:### cargo build
++## make cargo-build q=true
++	@. $(HOME)/.cargo/env
++	@RUST_BACKTRACE=all cargo b $(QUIET)
++cargo-i:cargo-install
++cargo-install:### cargo install --path .
++#@. $(HOME)/.cargo/env
++	@cargo install --force --path $(PWD)
++	#@cargo install --locked --path $(PWD)
++cargo-br:cargo-build-release### cargo-br
++## make cargo-br q=true
++cargo-build-release:### cargo-build-release
++## make cargo-build-release q=true
++	@. $(HOME)/.cargo/env
++	@cargo b --release $(QUIET)
++cargo-check:### cargo-check
++	@. $(HOME)/.cargo/env
++	@cargo c
++cargo-bench:### cargo-bench
++	@. $(HOME)/.cargo/env
++	@cargo bench
++cargo-test:### cargo-test
++	@. $(HOME)/.cargo/env
++	@cargo test
++cargo-report:### cargo-report
++	@. $(HOME)/.cargo/env
++	cargo report future-incompatibilities --id 1
++
++# vim: set noexpandtab:
++# vim: set setfiletype make
+diff --git a/config.toml b/config.toml
+new file mode 100644
+index 0000000..d5dbabf
+--- /dev/null
++++ b/config.toml
+@@ -0,0 +1,234 @@
++# Nostr-rs-relay configuration
++
++[info]
++# The advertised URL for the Nostr websocket.
++relay_url = "wss://relay.gnostr.org/"
++
++# Relay information for clients. Put your unique server name here.
++name = "relay.gnostr.org"
++
++# Description
++description = "gnostr: a git+nostr utility"
++
++# Administrative contact pubkey
++#pubkey = "0c2d168a4ae8ca58c9f1ab237b5df682599c6c7ab74307ea8b05684b60405d41"
++
++# Administrative contact URI
++#contact = "mailto:contact@example.com"
++
++# Favicon location. Relative to the current directory. Assumes an
++# ICO format.
++#favicon = "favicon.ico"
++
++# URL of Relay's icon.
++#relay_icon = "https://example.test/img.png"
++
++[diagnostics]
++# Enable tokio tracing (for use with tokio-console)
++#tracing = false
++
++[database]
++# Database engine (sqlite/postgres). Defaults to sqlite.
++# Support for postgres is currently experimental.
++#engine = "sqlite"
++
++# Directory for SQLite files. Defaults to the current directory. Can
++# also be specified (and overridden) with the "--db dirname" command
++# line option.
++#data_directory = "."
++
++# Use an in-memory database instead of 'nostr.db'.
++# Requires sqlite engine.
++# Caution; this will not survive a process restart!
++#in_memory = false
++
++# Database connection pool settings for subscribers:
++
++# Minimum number of SQLite reader connections
++#min_conn = 0
++
++# Maximum number of SQLite reader connections. Recommend setting this
++# to approx the number of cores.
++#max_conn = 8
++
++# Database connection string. Required for postgres; not used for
++# sqlite.
++#connection = "postgresql://postgres:nostr@localhost:7500/nostr" ++ ++# Optional database connection string for writing. Use this for ++# postgres clusters where you want to separate reads and writes to ++# different nodes. Ignore for single-database instances. ++#connection_write = "postgresql://postgres:nostr@localhost:7500/nostr" ++ ++[logging] ++# Directory to store log files. Log files roll over daily. ++folder_path = "./log" ++file_prefix = "gnostr-relay" ++ ++[grpc] ++# gRPC interfaces for externalized decisions and other extensions to ++# functionality. ++# ++# Events can be authorized through an external service, by providing ++# the URL below. In the event the server is not accessible, events ++# will be permitted. The protobuf3 schema used is available in ++# `proto/nauthz.proto`. ++# event_admission_server = "http://[::1]:50051" ++ ++[network] ++# Bind to this network address ++address = "0.0.0.0" ++ ++# Listen on this port ++port = 8080 ++ ++# If present, read this HTTP header for logging client IP addresses. ++# Examples for common proxies, cloudflare: ++#remote_ip_header = "x-forwarded-for" ++#remote_ip_header = "cf-connecting-ip" ++ ++# Websocket ping interval in seconds, defaults to 5 minutes ++#ping_interval = 300 ++ ++[options] ++# Reject events that have timestamps greater than this many seconds in ++# the future. Recommended to reject anything greater than 30 minutes ++# from the current time, but the default is to allow any date. ++reject_future_seconds = 1800 ++ ++[limits] ++# Limit events created per second, averaged over one minute. Must be ++# an integer. If not set (or set to 0), there is no limit. Note: ++# this is for the server as a whole, not per-connection. ++# ++# Limiting event creation is highly recommended if your relay is ++# public! ++# ++#messages_per_sec = 5 ++ ++# Limit client subscriptions created, averaged over one minute. Must ++# be an integer. If not set (or set to 0), defaults to unlimited. ++# Strongly recommended to set this to a low value such as 10 to ensure ++# fair service. ++#subscriptions_per_min = 0 ++ ++# UNIMPLEMENTED... ++# Limit how many concurrent database connections a client can have. ++# This prevents a single client from starting too many expensive ++# database queries. Must be an integer. If not set (or set to 0), ++# defaults to unlimited (subject to subscription limits). ++#db_conns_per_client = 0 ++ ++# Limit blocking threads used for database connections. Defaults to 16. ++#max_blocking_threads = 16 ++ ++# Limit the maximum size of an EVENT message. Defaults to 128 KB. ++# Set to 0 for unlimited. ++#max_event_bytes = 131072 ++ ++# Maximum WebSocket message in bytes. Defaults to 128 KB. ++#max_ws_message_bytes = 131072 ++ ++# Maximum WebSocket frame size in bytes. Defaults to 128 KB. ++#max_ws_frame_bytes = 131072 ++ ++# Broadcast buffer size, in number of events. This prevents slow ++# readers from consuming memory. ++#broadcast_buffer = 16384 ++ ++# Event persistence buffer size, in number of events. This provides ++# backpressure to senders if writes are slow. ++#event_persist_buffer = 4096 ++ ++# Event kind blacklist. Events with these kinds will be discarded. ++#event_kind_blacklist = [ ++# 70202, ++#] ++ ++# Event kind allowlist. Events other than these kinds will be discarded. ++#event_kind_allowlist = [ ++# 0, 1, 2, 3, 7, 40, 41, 42, 43, 44, 30023, ++#] ++ ++[authorization] ++# Pubkey addresses in this array are whitelisted for event publishing. 
++# Only valid events by these authors will be accepted, if the variable
++# is set.
++#pubkey_whitelist = [
++#  "35d26e4690cbe1a898af61cc3515661eb5fa763b57bd0b42e45099c8b32fd50f",
++#  "887645fef0ce0c3c1218d2f5d8e6132a19304cdc57cd20281d082f38cfea0072",
++#]
++# Enable NIP-42 authentication
++#nip42_auth = false
++# Send DMs (kind 4 and 44) and gift wraps (kind 1059) only to their authenticated recipients
++#nip42_dms = false
++
++[verified_users]
++# NIP-05 verification of users. Can be "enabled" to require NIP-05
++# metadata for event authors, "passive" to perform validation but
++# never block publishing, or "disabled" to do nothing.
++#mode = "disabled"
++
++# Domain names that will be prevented from publishing events.
++#domain_blacklist = ["wellorder.net"]
++
++# Domain names that are allowed to publish events. If defined, only
++# events from NIP-05-verified authors at these domains are persisted.
++#domain_whitelist = ["example.com"]
++
++# Consider a pubkey "verified" if we have a successful validation
++# from the NIP-05 domain within this amount of time. Note, if the
++# domain provides a successful response that omits the account,
++# verification is immediately revoked.
++#verify_expiration = "1 week"
++
++# How long to wait between verification attempts for a specific author.
++#verify_update_frequency = "24 hours"
++
++# How many consecutive failed checks before we give up on verifying
++# this author.
++#max_consecutive_failures = 20
++
++[pay_to_relay]
++# Enable pay to relay
++#enabled = false
++
++# The cost to be admitted to relay
++#admission_cost = 4200
++
++# The cost in sats per post
++#cost_per_event = 0
++
++# Url of lnbits api
++#node_url = ""
++
++# LNBits api secret
++#api_secret = ""
++
++# Nostr direct message on signup
++#direct_message=true
++
++# Terms of service
++#terms_message = """
++#This service (and supporting services) are provided "as is", without warranty of any kind, express or implied.
++# ++#By using this service, you agree: ++#* Not to engage in spam or abuse the relay service ++#* Not to disseminate illegal content ++#* That requests to delete content cannot be guaranteed ++#* To use the service in compliance with all applicable laws ++#* To grant necessary rights to your content for unlimited time ++#* To be of legal age and have capacity to use this service ++#* That the service may be terminated at any time without notice ++#* That the content you publish may be removed at any time without notice ++#* To have your IP address collected to detect abuse or misuse ++#* To cooperate with the relay to combat abuse or misuse ++#* You may be exposed to content that you might find triggering or distasteful ++#* The relay operator is not liable for content produced by users of the relay ++#""" ++ ++# Whether or not new sign ups should be allowed ++#sign_ups = false ++ ++# optional if `direct_message=false` ++#secret_key = "" +diff --git a/contrib/nostr-rs-relay.service b/contrib/nostr-rs-relay.service +new file mode 100644 +index 0000000..601108b +--- /dev/null ++++ b/contrib/nostr-rs-relay.service +@@ -0,0 +1,14 @@ ++[Unit] ++Description=nostr-rs-relay ++ ++[Service] ++User=REPLACE_WITH_YOUR_USERNAME ++WorkingDirectory=/var/lib/nostr-rs-relay ++Environment=RUST_LOG=warn,nostr_rs_relay=info ++ExecStart=/usr/bin/nostr-rs-relay --config /etc/nostr-rs-relay/config.toml ++TimeoutStopSec=10 ++Restart=on-failure ++RestartSec=5 ++ ++[Install] ++WantedBy=multi-user.target +diff --git a/docs/database-maintenance.md b/docs/database-maintenance.md +new file mode 100644 +index 0000000..f68cc90 +--- /dev/null ++++ b/docs/database-maintenance.md +@@ -0,0 +1,129 @@ ++# Database Maintenance ++ ++`nostr-rs-relay` uses the SQLite embedded database to minimize ++dependencies and overall footprint of running a relay. If traffic is ++light, the relay should just run with very little need for ++intervention. For heavily trafficked relays, there are a number of ++steps that the operator may need to take to maintain performance and ++limit disk usage. ++ ++This maintenance guide is current as of version `0.8.2`. Future ++versions may incorporate and automate some of these steps. ++ ++## Backing Up the Database ++ ++To prevent data loss, the database should be backed up regularly. The ++recommended method is to use the `sqlite3` command to perform an ++"Online Backup". This can be done while the relay is running, queries ++can still run and events will be persisted during the backup. ++ ++The following commands will perform a backup of the database to a ++dated file, and then compress to minimize size: ++ ++```console ++BACKUP_FILE=/var/backups/nostr/`date +%Y%m%d_%H%M`.db ++sqlite3 -readonly /apps/nostr-relay/nostr.db ".backup $BACKUP_FILE" ++sqlite3 $BACKUP_FILE "vacuum;" ++bzip2 -9 $BACKUP_FILE ++``` ++ ++Nostr events are very compressible. Expect a compression ratio on the ++order of 4:1, resulting in a 75% space saving. ++ ++## Vacuuming the Database ++ ++As the database is updated, it can become fragmented. Performing a ++full `vacuum` will rebuild the entire database file, and can reduce ++space. Running this may reduce the size of the database file, ++especially if a large amount of data was updated or deleted. ++ ++```console ++vacuum; ++``` ++ ++## Clearing Hidden Events ++ ++When events are deleted, the event is not actually removed from the ++database. Instead, a flag `HIDDEN` is set to true for the event, ++which excludes it from search results. 
High volume replacements from
++profile or other replaceable events are deleted, not hidden, in the
++current version of the relay.
++
++In the current version, removing hidden events should not result in
++significant space savings, but it can still be used if there is no
++desire to hold on to events that can never be re-broadcast.
++
++```console
++PRAGMA foreign_keys = ON;
++delete from event where HIDDEN=true;
++```
++
++## Manually Removing Events
++
++For a variety of reasons, an operator may wish to remove some events
++from the database. The only way of achieving this today is with
++manually-run SQL commands.
++
++It is recommended to have a good backup prior to manually running SQL
++commands!
++
++In all cases, it is mandatory to enable foreign keys, and this must be
++done for every connection. Otherwise, you will likely orphan rows in
++the `tag` table.
++
++### Deleting Specific Event
++
++```console
++PRAGMA foreign_keys = ON;
++delete from event where event_hash=x'00000000000c1271675dc86e3e1dd1336827bccabb90dc4c9d3b4465efefe00e';
++```
++
++### Querying and Deleting All Events for Pubkey
++
++```console
++PRAGMA foreign_keys = ON;
++
++select lower(hex(author)) as author, count(*) as c from event group by author order by c asc;
++
++delete from event where author=x'000000000002c7831d9c5a99f183afc2813a6f69a16edda7f6fc0ed8110566e6';
++```
++
++### Querying and Deleting All Events of a Kind
++
++```console
++PRAGMA foreign_keys = ON;
++
++select printf('%7d', kind), count(*) as c from event group by kind order by c;
++
++delete from event where kind=70202;
++```
++
++### Deleting Old Events
++
++In this scenario, we wish to delete any event that has been stored by
++our relay for more than 1 month. Crucially, this is based on when the
++event was stored, not when the event says it was created. If an event
++has a `created` field of 2 years ago, but was first sent to our relay
++yesterday, it would not be deleted in this scenario. Keep in mind, we
++do not track anything for re-broadcast events that we already have, so
++this is not a very effective way of implementing a "least recently
++seen" policy.
++
++```console
++PRAGMA foreign_keys = ON;
++
++DELETE FROM event WHERE first_seen < CAST(strftime('%s', date('now', '-30 day')) AS INT);
++```
++
++### Delete Profile Events with No Recent Events
++
++Many users create profiles, post a "hello world" event, and then never
++appear again (likely using an ephemeral keypair that was lost in the
++browser cache). We can find these accounts and remove them after some
++time.
++
++```console
++PRAGMA foreign_keys = ON;
++TODO!
++```
+diff --git a/docs/grpc-extensions.md b/docs/grpc-extensions.md
+new file mode 100644
+index 0000000..1f44feb
+--- /dev/null
++++ b/docs/grpc-extensions.md
+@@ -0,0 +1,79 @@
++# gRPC Extensions Design Document
++
++The relay will be extensible through gRPC endpoints, definable in the
++main configuration file. These will allow external programs to host
++logic for deciding things such as: should this event be persisted,
++should this connection be allowed, and should this subscription
++request be registered. The primary goal is to allow for
++relay-operator-specific functionality that lets operators serve
++smaller communities and reduce spam and abuse.
++
++This will likely evolve substantially; the first goal is to get a
++basic one-way service that lets an externalized program decide on
++event persistence. This does not represent the final state of gRPC
++extensibility in `nostr-rs-relay`.
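++
++As a rough, self-contained sketch of that one-way flow (assumed names
++only; the authoritative interface is whatever `proto/nauthz.proto`
++defines, and the fail-open behavior is specified under Design Overview
++below), a relay-side admission check might look like this, with the
++actual RPC stubbed out:
++
++```rust
++// Hypothetical sketch only: an externalized "event admission" decision.
++struct Event {
++    id: String,
++}
++
++enum Decision {
++    Permit,
++    Deny(String),
++}
++
++// Stand-in for the gRPC call to the external authorization server.
++fn event_admit(_server_url: &str, _event: &Event) -> Result<Decision, String> {
++    // A real implementation would issue a tonic RPC here.
++    Ok(Decision::Permit)
++}
++
++fn admit(server_url: Option<&str>, event: &Event) -> Decision {
++    match server_url {
++        // No admission server configured: the extension is disabled.
++        None => Decision::Permit,
++        Some(url) => match event_admit(url, event) {
++            Ok(decision) => decision,
++            // Fail open: an unreachable or erroring server never blocks events.
++            Err(_) => Decision::Permit,
++        },
++    }
++}
++
++fn main() {
++    let event = Event { id: "abc123".into() };
++    match admit(Some("http://[::1]:50051"), &event) {
++        Decision::Permit => println!("event {} accepted", event.id),
++        Decision::Deny(msg) => println!("event {} blocked: {}", event.id, msg),
++    }
++}
++```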
++
++## Considerations
++
++Write event latency must not be significantly affected. However, the
++primary reason we are implementing this is spam/abuse protection, so
++we are willing to tolerate some increase in latency if that protects
++us against outages!
++
++The interface should provide enough information to make simple
++decisions, without burdening the relay with extra queries. The
++decision endpoint will be mostly responsible for maintaining state and
++gathering additional details.
++
++## Design Overview
++
++A gRPC server may be defined in the `config.toml` file. If it exists,
++the relay will attempt to connect to it and send a message for each
++`EVENT` command submitted by clients. If a successful response is
++returned indicating the event is permitted, the relay continues
++processing the event as normal. All existing whitelist, blacklist,
++and `NIP-05` validation checks are still performed and MAY still
++result in the event being rejected. If a successful response is
++returned indicating the decision is anything other than permit, then
++the relay MUST reject the event, and return a command result to the
++user (using `NIP-20`) indicating the event was blocked (optionally
++providing a message).
++
++In the event there is an error in the gRPC interface, event processing
++proceeds as if gRPC was disabled (fail open). This allows gRPC
++servers to be deployed with minimal chance of causing a full relay
++outage.
++
++## Design Details
++
++Currently one procedure call is supported, `EventAdmit`, in the
++`Authorization` service. It accepts the following data in order to
++support authorization decisions:
++
++- The event itself
++- The client IP that submitted the event
++- The client's HTTP origin header, if one exists
++- The client's HTTP user agent header, if one exists
++- The public key of the client, if `NIP-42` authentication was
++  performed (not supported in the relay yet!)
++- The `NIP-05` associated with the event's public key, if it is known
++  to the relay
++
++A server providing authorization decisions will return the following:
++
++- A decision to permit or deny the event
++- An optional message that explains why the event was denied, to be
++  transmitted to the client
++
++## Security Issues
++
++There is little attempt to secure this interface, since it is intended
++for use by processes running on the same host. It is recommended to
++ensure that the gRPC server providing the API is not exposed to the
++public Internet. Authorization server implementations should have
++their own security reviews performed.
++
++A slow gRPC server could cause availability issues for event
++processing, since this is performed on a single thread. Avoid any
++expensive or long-running processes that could result from submitted
++events, since any client can initiate a gRPC call to the service.
+diff --git a/docs/pay-to-relay.md b/docs/pay-to-relay.md
+new file mode 100644
+index 0000000..12723dc
+--- /dev/null
++++ b/docs/pay-to-relay.md
+@@ -0,0 +1,84 @@
++# Pay to Relay Design Document
++
++The relay will use payment as a form of spam prevention. In order to post to the relay, a user must pay a set rate. There is also the option to require a payment for each note posted to the relay. There is no cost to read from the relay.
++
++## Configuration
++
++Currently, [LNBits](https://github.com/lnbits/lnbits) is implemented as the payment processor.
++LNBits exposes a simple API for creating invoices. To use this API, create a wallet, find "API info" on the right-hand side, and add the invoice/read key to this relay's config file.
++
++The configuration below will need to be added to `config.toml`:
++```
++[pay_to_relay]
++# Enable pay to relay
++enabled = true
++# The cost to be admitted to relay
++admission_cost = 1000
++# The cost in sats per post
++cost_per_event = 0
++# Url of lnbits api
++node_url = "https://<lnbits-host>:5001/api/v1/payments"
++# LNBits api secret
++api_secret = "<wallet invoice/read key>"
++# Terms of service
++terms_message = """This service ....
++"""
++# Whether or not new sign ups should be allowed
++sign_ups = true
++secret_key = ""
++```
++
++The LNBits instance must be served over HTTPS with a signed certificate; a self-signed certificate will not work.
++
++## Design Overview
++
++### Concepts
++
++All authors are initially not admitted to write to the relay. There are two ways to gain write access to the relay. The first is by attempting to post to the relay: upon receiving an event from an author that is not admitted, the relay will send a direct message including the terms of service of the relay and a lightning invoice for the admission cost. Once this invoice is paid, the author can write to the relay. For this method to work, the author must be reading from the relay. An author can also pay and accept the terms of service via a webpage `https://<relay>/join`.
++
++## Design Details
++
++Authors are stored in a dedicated table. This tracks:
++
++* `pubkey`
++* `is_admitted` whether or not the admission invoice has been paid and the terms of service accepted
++* `balance` the current balance in sats of the author, used if there is a cost per post
++* `tos_accepted_at` the timestamp of when the author accepted the tos
++
++Invoice information is stored in a dedicated table. This tracks:
++* `payment_hash` the payment hash of the lightning invoice
++* `pubkey` of the author the invoice is issued to
++* `invoice` bolt11 invoice
++* `amount` in sats
++* `status` (Paid/Unpaid/Expired)
++* `description`
++* `created_at` timestamp of creation
++* `confirmed_at` timestamp of payment
++
++### Event Handling
++
++If "pay to relay" is enabled, all incoming events are evaluated to determine whether the author is on the relay's whitelist or whether they have paid the admission fee and accepted the terms. If "pay per note" is enabled, there is an additional check to ensure that the author has enough balance, which is then reduced by the cost per note. If the author is on the whitelist, this balance check is not necessary.
++
++### Integration
++
++We have an existing database writer thread, which receives events and
++attempts to persist them to disk. Once validated and persisted, these
++events are broadcast to all subscribers.
++
++When "pay to relay" is enabled, the writer must check whether the author is admitted to post. If the author is not admitted, the event is forwarded to the payment module, where an invoice is generated, persisted, and broadcast as a direct message to the author.
++
++### Threat Scenarios
++
++Some of these mitigations are fully implemented; others are documented
++simply to demonstrate that a mitigation is possible.
++
++### Sign up Spamming
++
++*Threat*: An attacker generates a large number of new pubkeys publishing to the relay, causing a new invoice to be created for each new pubkey.
++ ++*Mitigation*: Rate limit number of new sign ups ++ ++### Admitted Author Spamming ++ ++*Threat*: An attacker gains write access by paying the admission fee, and then floods the relay with a large number of spam events. ++ ++*Mitigation*: The attacker's admission can be revoked and their admission fee will not be refunded. Enabling "cost per event" and increasing the admission cost can also discourage this type of behavior. +diff --git a/docs/reverse-proxy.md b/docs/reverse-proxy.md +new file mode 100644 +index 0000000..27f77b0 +--- /dev/null ++++ b/docs/reverse-proxy.md +@@ -0,0 +1,199 @@ ++# Reverse Proxy Setup Guide ++ ++It is recommended to run `nostr-rs-relay` behind a reverse proxy such ++as `haproxy`, `nginx` or `traefik` to provide TLS termination. Simple examples ++for `haproxy`, `nginx` and `traefik` configurations are documented here. ++ ++## Minimal HAProxy Configuration ++ ++Assumptions: ++ ++* HAProxy version is `2.4.10` or greater (older versions not tested). ++* Hostname for the relay is `relay.example.com`. ++* Your relay should be available over wss://relay.example.com ++* Your (NIP-11) relay info page should be available on https://relay.example.com ++* SSL certificate is located in `/etc/certs/example.com.pem`. ++* Relay is running on port 8080. ++* Limit connections to 400 concurrent. ++* HSTS (HTTP Strict Transport Security) is desired. ++* Only TLS 1.2 or greater is allowed. ++ ++``` ++global ++ ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256 ++ ssl-default-bind-options prefer-client-ciphers no-sslv3 no-tlsv10 no-tlsv11 no-tls-tickets ++ ++frontend fe_prod ++ mode http ++ bind :443 ssl crt /etc/certs/example.com.pem alpn h2,http/1.1 ++ bind :80 ++ http-request set-header X-Forwarded-Proto https if { ssl_fc } ++ redirect scheme https code 301 if !{ ssl_fc } ++ acl host_relay hdr(host) -i -m beg relay.example.com ++ use_backend relay if host_relay ++ # HSTS (1 year) ++ http-response set-header Strict-Transport-Security max-age=31536000 ++ ++backend relay ++ mode http ++ timeout connect 5s ++ timeout client 50s ++ timeout server 50s ++ timeout tunnel 1h ++ timeout client-fin 30s ++ option tcp-check ++ default-server maxconn 400 check inter 20s fastinter 1s ++ server relay 127.0.0.1:8080 ++``` ++ ++### HAProxy Notes ++ ++You may experience WebSocket connection problems with Firefox if ++HTTP/2 is enabled, for older versions of HAProxy (2.3.x). Either ++disable HTTP/2 (`h2`), or upgrade HAProxy. ++ ++## Bare-bones Nginx Configuration ++ ++Assumptions: ++ ++* `Nginx` version is `1.18.0` (other versions not tested). ++* Hostname for the relay is `relay.example.com`. ++* SSL certificate and key are located at `/etc/letsencrypt/live/relay.example.com/`. ++* Relay is running on port `8080`. 
++ ++``` ++http { ++ server { ++ listen 443 ssl; ++ server_name relay.example.com; ++ ssl_certificate /etc/letsencrypt/live/relay.example.com/fullchain.pem; ++ ssl_certificate_key /etc/letsencrypt/live/relay.example.com/privkey.pem; ++ ssl_protocols TLSv1.3 TLSv1.2; ++ ssl_prefer_server_ciphers on; ++ ssl_ecdh_curve secp521r1:secp384r1; ++ ssl_ciphers EECDH+AESGCM:EECDH+AES256; ++ ++ # Optional Diffie-Helmann parameters ++ # Generate with openssl dhparam -out /etc/ssl/certs/dhparam.pem 4096 ++ #ssl_dhparam /etc/ssl/certs/dhparam.pem; ++ ++ ssl_session_cache shared:TLS:2m; ++ ssl_buffer_size 4k; ++ ++ # OCSP stapling ++ ssl_stapling on; ++ ssl_stapling_verify on; ++ resolver 1.1.1.1 1.0.0.1 [2606:4700:4700::1111] [2606:4700:4700::1001]; # Cloudflare ++ ++ # Set HSTS to 365 days ++ add_header Strict-Transport-Security 'max-age=31536000; includeSubDomains; preload' always; ++ keepalive_timeout 70; ++ ++ location / { ++ proxy_pass http://localhost:8080; ++ proxy_http_version 1.1; ++ proxy_read_timeout 1d; ++ proxy_send_timeout 1d; ++ proxy_set_header Upgrade $http_upgrade; ++ proxy_set_header Connection "Upgrade"; ++ proxy_set_header Host $host; ++ } ++ } ++} ++``` ++ ++### Nginx Notes ++ ++The above configuration was tested on `nginx` `1.18.0` on `Ubuntu` `20.04` and `22.04` ++ ++For help installing `nginx` on `Ubuntu`, see [this guide](https://www.digitalocean.com/community/tutorials/how-to-install-nginx-on-ubuntu-20-04). ++ ++For guidance on using `letsencrypt` to obtain a cert on `Ubuntu`, including an `nginx` plugin, see [this post](https://www.digitalocean.com/community/tutorials/how-to-secure-nginx-with-let-s-encrypt-on-ubuntu-20-04). ++ ++ ++## Example Traefik Configuration ++ ++Assumptions: ++ ++* `Traefik` version is `2.9` (other versions not tested). ++* `Traefik` is used for provisioning of Let's Encrypt certificates. ++* `Traefik` is running in `Docker`, using `docker compose` and labels for the static configuration. An equivalent setup using a Traefik config file is possible too (but not covered here). ++* Strict Transport Security is enabled. ++* Hostname for the relay is `relay.example.com`, email address for ACME certificates provider is `name@example.com`. ++* ipv6 is enabled, a viable private ipv6 subnet is specified in the example below. ++* Relay is running on port `8080`. 
++
++```
++version: '3'
++
++networks:
++  nostr:
++    enable_ipv6: true
++    ipam:
++      config:
++        - subnet: fd00:db8:a::/64
++          gateway: fd00:db8:a::1
++
++services:
++  traefik:
++    image: traefik:v2.9
++    networks:
++      nostr:
++    command:
++      - "--log.level=ERROR"
++      # letsencrypt configuration
++      - "--certificatesResolvers.http.acme.email=name@example.com"
++      - "--certificatesResolvers.http.acme.storage=/certs/acme.json"
++      - "--certificatesResolvers.http.acme.httpChallenge.entryPoint=http"
++      # define entrypoints
++      - "--entryPoints.http.address=:80"
++      - "--entryPoints.http.http.redirections.entryPoint.to=https"
++      - "--entryPoints.http.http.redirections.entryPoint.scheme=https"
++      - "--entryPoints.https.address=:443"
++      - "--entryPoints.https.forwardedHeaders.insecure=true"
++      - "--entryPoints.https.proxyProtocol.insecure=true"
++      # docker provider (get configuration from container labels)
++      - "--providers.docker.endpoint=unix:///var/run/docker.sock"
++      - "--providers.docker.exposedByDefault=false"
++      - "--providers.file.directory=/config"
++      - "--providers.file.watch=true"
++    ports:
++      - "80:80"
++      - "443:443"
++    volumes:
++      - "/var/run/docker.sock:/var/run/docker.sock:ro"
++      - "$(pwd)/traefik/certs:/certs"
++      - "$(pwd)/traefik/config:/config"
++    logging:
++      driver: "local"
++    restart: always
++
++  # example nostr config. only labels: section is relevant for Traefik config
++  nostr:
++    image: nostr-rs-relay:latest
++    container_name: nostr-relay
++    networks:
++      nostr:
++    restart: always
++    user: 100:100
++    volumes:
++      - '$(pwd)/nostr/data:/usr/src/app/db:Z'
++      - '$(pwd)/nostr/config/config.toml:/usr/src/app/config.toml:ro,Z'
++    labels:
++      - "traefik.enable=true"
++      - "traefik.http.routers.nostr.entrypoints=https"
++      - "traefik.http.routers.nostr.rule=Host(`relay.example.com`)"
++      - "traefik.http.routers.nostr.tls.certresolver=http"
++      - "traefik.http.routers.nostr.service=nostr"
++      - "traefik.http.services.nostr.loadbalancer.server.port=8080"
++      - "traefik.http.services.nostr.loadbalancer.passHostHeader=true"
++      - "traefik.http.middlewares.nostr.headers.sslredirect=true"
++      - "traefik.http.middlewares.nostr.headers.stsincludesubdomains=true"
++      - "traefik.http.middlewares.nostr.headers.stspreload=true"
++      - "traefik.http.middlewares.nostr.headers.stsseconds=63072000"
++      - "traefik.http.routers.nostr.middlewares=nostr"
++```
++
++### Traefik Notes
++
++Traefik will take care of the provisioning and renewal of certificates. In case of an ipv4-only relay, simply delete the `enable_ipv6:` and `ipam:` entries in the `networks:` section of the docker-compose file.
+diff --git a/docs/run-as-linux-system-process.md b/docs/run-as-linux-system-process.md
+new file mode 100644
+index 0000000..6d9cf27
+--- /dev/null
++++ b/docs/run-as-linux-system-process.md
+@@ -0,0 +1,39 @@
++# Run as a linux system process
++
++Docker makes it easy to spin environments up and down, but it's also possible to run `nostr-rs-relay` as a systemd linux process.
++This guide assumes you're on a Linux machine and that Rust is already installed.
++
++## Instructions
++
++### Build nostr-rs-relay from source
++Start by building the application from source. Here is how to do that:
++1. `git clone https://github.com/scsibug/nostr-rs-relay.git`
++2. `cd nostr-rs-relay`
++3. `cargo build --release`
++
++### Place the files where they belong
++We want to place the nostr-rs-relay binary and the config.toml file where they belong.
++While still in the root level of the nostr-rs-relay folder you cloned in the last step, run the following commands:
++1. `sudo cp target/release/nostr-rs-relay /usr/local/bin/`
++2. `sudo mkdir /etc/nostr-rs-relay`
++3. `sudo cp config.toml /etc/nostr-rs-relay`
++
++### Create the Systemd service file
++We need to create a new Systemd service file. These files are placed in the `/etc/systemd/system/` folder, where you will find many other services running.
++
++1. `sudo vim /etc/systemd/system/nostr-rs-relay.service`
++2. Paste in the contents of [this service file](../contrib/nostr-rs-relay.service). Remember to replace the `User` value with your own username.
++3. Save the file and exit your text editor
++
++
++### Run the service
++To get the service running, we need to reload the systemd daemon and start the service.
++
++1. `sudo systemctl daemon-reload`
++2. `sudo systemctl start nostr-rs-relay.service`
++3. `sudo systemctl status nostr-rs-relay.service`
++
++
++### Tips
++
++#### Logs
++The application will write logs to the journal. To read them, execute `sudo journalctl -f -u nostr-rs-relay`
+diff --git a/docs/user-verification-nip05.md b/docs/user-verification-nip05.md
+new file mode 100644
+index 0000000..005e52a
+--- /dev/null
++++ b/docs/user-verification-nip05.md
+@@ -0,0 +1,248 @@
++# Author Verification Design Document
++
++The relay will use NIP-05 DNS-based author verification to limit which
++authors can publish events to a relay. This document describes how
++this feature will operate.
++
++## Considerations
++
++DNS-based author verification is designed to be deployed in relays that
++want to prevent spam, so there should be strong protections to prevent
++unauthorized authors from persisting data. This includes data needed to
++verify new authors.
++
++There should be protections in place to ensure the relay cannot be
++used to spam or flood other webservers. Additionally, there should be
++protections against server-side request forgery (SSRF).
++
++## Design Overview
++
++### Concepts
++
++All authors are initially "unverified". Unverified authors that submit
++appropriate `NIP-05` metadata events become "candidates" for
++verification. A candidate author becomes verified when the relay
++inspects a kind `0` metadata event for the author with a `nip05` field,
++and follows the procedure in `NIP-05` to successfully associate the
++author with an internet identifier.
++
++The `NIP-05` procedure verifies an author for a fixed period of time,
++configurable by the relay operator. If this "verification expiration
++time" (`verify_expiration`) is exceeded without being refreshed, the
++author is once again unverified.
++
++Verified authors have their status regularly and automatically updated
++through scheduled polling to their verified domain; this process is
++"re-verification". It is performed based on the configuration setting
++`verify_update_frequency`, which defines how long the relay waits
++between verification attempts (whether the result was success or
++failure).
++
++Authors may change their verification data (the internet identifier from
++`NIP-05`) with a new metadata event, which then requires
++re-verification. Their old verification remains valid until
++expiration.
++
++Performing candidate author verification is a best-effort activity and
++may be significantly rate-limited to prevent relays being used to
++attack other hosts. Candidate verification (untrusted authors) should
++never impact re-verification (trusted authors).
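++
++For concreteness, the association step itself is a small JSON lookup.
++A minimal sketch (assuming the response body of
++`https://<domain>/.well-known/nostr.json?name=<local-part>` has already
++been fetched; the rate limiting and whitelist/blacklist rules described
++in this document are omitted):
++
++```rust
++use serde_json::Value;
++
++/// Sketch: does a .well-known/nostr.json body map `name` to `pubkey_hex`?
++fn nip05_matches(body: &str, name: &str, pubkey_hex: &str) -> bool {
++    match serde_json::from_str::<Value>(body) {
++        // Per NIP-05, the body is {"names": {"<local-part>": "<hex pubkey>"}}.
++        Ok(v) => v["names"][name].as_str() == Some(pubkey_hex),
++        Err(_) => false,
++    }
++}
++
++fn main() {
++    let body = r#"{"names":{"alice":"b0635d6a9851d3aed0cd6c495b282167acf761729078d975fc341b22650b07b9"}}"#;
++    assert!(nip05_matches(body, "alice",
++        "b0635d6a9851d3aed0cd6c495b282167acf761729078d975fc341b22650b07b9"));
++    println!("alice is associated with that pubkey");
++}
++```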
++
++## Operating Modes
++
++The relay may operate in one of three modes. "Disabled" performs no
++validation activities, and will never permit or deny events based on
++an author's NIP-05 metadata. "Passive" performs NIP-05 validation,
++but does not permit or deny events based on the validity or presence
++of NIP-05 metadata. "Enabled" will require current and valid NIP-05
++metadata for any events to be persisted. "Enabled" mode will
++additionally consider domain whitelist/blacklist configuration data to
++restrict which authors' events are persisted.
++
++## Design Details
++
++### Data Storage
++
++Verification is stored in a dedicated table. This tracks:
++
++* `nip05` identifier
++* most recent verification timestamp
++* most recent verification failure timestamp
++* reference to the metadata event (used for tracking `created_at` and
++  `pubkey`)
++
++### Event Handling
++
++All events are first validated to ensure the signature is valid.
++
++Incoming events of kind _other_ than metadata (kind `0`) submitted by
++clients will be evaluated as follows.
++
++* If the event's author has a current verification, the event is
++  persisted as normal.
++* If the event's author has either no verification, or the
++  verification is expired, the event is rejected.
++
++If the event is a metadata event, we handle it differently.
++
++We first determine the verification status of the event's pubkey.
++
++* If the event author is unverified, AND the event contains a `nip05`
++  key, we consider this a verification candidate.
++* If the event author is unverified, AND the event does not contain a
++  `nip05` key, this is not a candidate, and the event is dropped.
++
++* If the event author is verified, AND the event contains a `nip05`
++  key that is identical to the currently stored value, no special
++  action is needed.
++* If the event author is verified, AND the event contains a different
++  `nip05` than was previously verified, with a more recent timestamp,
++  we need to re-verify.
++* If the event author is verified, AND the event is missing a `nip05`
++  key, and the event timestamp is more recent than what was verified,
++  we do nothing. The current verification will be allowed to expire.
++
++### Candidate Verification
++
++When a candidate verification is requested, a rate limit will be
++utilized. If the rate limit is exceeded, new candidate verification
++requests will be dropped. In practice, this is implemented by a
++size-limited channel that drops events that exceed a threshold.
++
++Candidates are never persisted in the database.
++
++### Re-Verification
++
++Re-verification is straightforward when there has been no change to
++the `nip05` key. A new request to the `nip05` domain is performed,
++and if successful, the verification timestamp is updated to the
++current time. If the request fails due to a timeout or server error,
++the failure timestamp is updated instead.
++
++When the `nip05` key has changed and this event is more recent, we
++will create a new verification record, and delete all other records
++for the same name.
++
++Regarding creating new records vs. updating: We never update the event
++reference or `nip05` identifier in a verification record. Every update
++resets either the last-failure or the last-success timestamp.
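++
++A minimal sketch of this bookkeeping (field names are assumed here, not
++the relay's actual schema; see "Data Storage" above):
++
++```rust
++use std::time::{SystemTime, UNIX_EPOCH};
++
++/// One verification record (assumed names).
++struct Verification {
++    nip05: String,
++    verified_at: Option<u64>, // most recent successful check (unix seconds)
++    failed_at: Option<u64>,   // most recent failed check (unix seconds)
++}
++
++fn now() -> u64 {
++    SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs()
++}
++
++/// A re-verification attempt only ever touches one of the two timestamps.
++fn record_attempt(v: &mut Verification, success: bool) {
++    if success {
++        v.verified_at = Some(now());
++    } else {
++        v.failed_at = Some(now());
++    }
++}
++
++/// An author stays verified while the last success is within the
++/// configured `verify_expiration` window.
++fn is_current(v: &Verification, verify_expiration_secs: u64) -> bool {
++    matches!(v.verified_at, Some(t) if now().saturating_sub(t) <= verify_expiration_secs)
++}
++
++fn main() {
++    let mut v = Verification { nip05: "alice@example.com".into(), verified_at: None, failed_at: None };
++    record_attempt(&mut v, true);
++    assert!(is_current(&v, 7 * 24 * 3600)); // a "1 week" expiration
++    println!("{} currently verified (last failure: {:?})", v.nip05, v.failed_at);
++}
++```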
++
++### Determining Verification Status
++
++In determining if an event is from a verified author, the following
++procedure should be used:
++
++Join the verification table with the event table, to provide
++verification data alongside the event `created_at` and `pubkey`
++metadata. Find the most recent verification record for the author,
++based on the `created_at` time.
++
++Reject the record if the success timestamp is not within our
++configured expiration time.
++
++Reject records with disallowed domains, based on any whitelists or
++blacklists in effect.
++
++If a result remains, the author is treated as verified.
++
++This does give a time window for authors transitioning their verified
++status between domains. There may be a period of time in which there
++are multiple valid rows in the verification table for a given author.
++
++### Cleaning Up Inactive Verifications
++
++After an author's verification has expired, we will continue to check
++for it to become valid again. After a configurable number of attempts,
++we should simply forget it, and reclaim the space.
++
++### Addition of Domain Whitelist/Blacklist
++
++A set of whitelisted or blacklisted domains may be provided. If both
++are provided, only the whitelist is used. In this context, domains
++are either "allowed" (present on a whitelist and NOT present on a
++blacklist), or "denied" (NOT present on a whitelist and present on a
++blacklist).
++
++The processes outlined so far are modified in the presence of these
++options:
++
++* Only authors with allowed domains can become candidates for
++  verification.
++* Verification status queries additionally filter out any denied
++  domains.
++* Re-verification processes only proceed with allowed domains.
++
++### Integration
++
++We have an existing database writer thread, which receives events and
++attempts to persist them to disk. Once validated and persisted, these
++events are broadcast to all subscribers.
++
++When verification is enabled, the writer must check to ensure a valid,
++unexpired verification record exists for the author. All metadata
++events (regardless of verification status) are forwarded to a verifier
++module. If the verifier determines a new verification record is
++needed, it is also responsible for persisting and broadcasting the
++event, just as the database writer would have done.
++
++## Threat Scenarios
++
++Some of these mitigations are fully implemented; others are documented
++simply to demonstrate that a mitigation is possible.
++
++### Domain Spamming
++
++*Threat*: An author with a high volume of events creates a metadata event
++with a bogus domain, causing the relay to generate significant
++unwanted traffic to a target.
++
++*Mitigation*: Rate limiting for all candidate verification will limit
++external requests to a reasonable amount. Currently, this is a simple
++delay that slows down the HTTP task.
++
++### Denial of Service for Legitimate Authors
++
++*Threat*: An author with a high volume of events creates a metadata event
++with a domain that is invalid for them, _but which is used by other
++legitimate authors_. This triggers rate-limiting against the legitimate
++domain, and blocks authors from updating their own metadata.
++
++*Mitigation*: Rate limiting should only apply to candidates, so any
++existing verified authors have priority for re-verification. New
++authors will be affected, as we cannot distinguish between the threat
++and a legitimate author.
_(Unimplemented)_ ++ ++### Denial of Service by Consuming Storage ++ ++*Threat*: A author creates a high volume of random metadata events with ++unique domains, in order to cause us to store large amounts of data ++for to-be-verified authors. ++ ++*Mitigation*: No data is stored for candidate authors. This makes it ++harder for new authors to become verified, but is effective at ++preventing this attack. ++ ++### Metadata Replay for Verified Author ++ ++*Threat*: Attacker replays out-of-date metadata event for a author, to ++cause a verification to fail. ++ ++*Mitigation*: New metadata events have their signed timestamp compared ++against the signed timestamp of the event that has most recently ++verified them. If the metadata event is older, it is discarded. ++ ++### Server-Side Request Forgery via Metadata ++ ++*Threat*: Attacker includes malicious data in the `nip05` event, which ++is used to generate HTTP requests against potentially internal ++resources. Either leaking data, or invoking webservices beyond their ++own privileges. ++ ++*Mitigation*: Consider detecting and dropping when the `nip05` field ++is an IP address. Allow the relay operator to utilize the `blacklist` ++or `whitelist` to constrain hosts that will be contacted. Most ++importantly, the verification process is hardcoded to only make ++requests to a known url path ++(`.well-known/nostr.json?name=`). The `` ++component is restricted to a basic ASCII subset (preventing additional ++URL components). +diff --git a/examples/nauthz/Cargo.lock b/examples/nauthz/Cargo.lock +new file mode 100644 +index 0000000..cc93a59 +--- /dev/null ++++ b/examples/nauthz/Cargo.lock +@@ -0,0 +1,1010 @@ ++# This file is automatically @generated by Cargo. ++# It is not intended for manual editing. ++version = 3 ++ ++[[package]] ++name = "anyhow" ++version = "1.0.69" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800" ++ ++[[package]] ++name = "async-stream" ++version = "0.3.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "dad5c83079eae9969be7fadefe640a1c566901f05ff91ab221de4b6f68d9507e" ++dependencies = [ ++ "async-stream-impl", ++ "futures-core", ++] ++ ++[[package]] ++name = "async-stream-impl" ++version = "0.3.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "10f203db73a71dfa2fb6dd22763990fa26f3d2625a6da2da900d23b87d26be27" ++dependencies = [ ++ "proc-macro2", ++ "quote", ++ "syn", ++] ++ ++[[package]] ++name = "async-trait" ++version = "0.1.64" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "1cd7fce9ba8c3c042128ce72d8b2ddbf3a05747efb67ea0313c635e10bda47a2" ++dependencies = [ ++ "proc-macro2", ++ "quote", ++ "syn", ++] ++ ++[[package]] ++name = "autocfg" ++version = "1.1.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" ++ ++[[package]] ++name = "axum" ++version = "0.6.4" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "e5694b64066a2459918d8074c2ce0d5a88f409431994c2356617c8ae0c4721fc" ++dependencies = [ ++ "async-trait", ++ "axum-core", ++ "bitflags", ++ "bytes", ++ "futures-util", ++ "http", ++ "http-body", ++ "hyper", ++ "itoa", ++ "matchit", ++ "memchr", ++ "mime", ++ "percent-encoding", ++ "pin-project-lite", ++ "rustversion", ++ "serde", ++ "sync_wrapper", ++ "tower", ++ "tower-http", ++ "tower-layer", ++ 
"tower-service", ++] ++ ++[[package]] ++name = "axum-core" ++version = "0.3.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "1cae3e661676ffbacb30f1a824089a8c9150e71017f7e1e38f2aa32009188d34" ++dependencies = [ ++ "async-trait", ++ "bytes", ++ "futures-util", ++ "http", ++ "http-body", ++ "mime", ++ "rustversion", ++ "tower-layer", ++ "tower-service", ++] ++ ++[[package]] ++name = "base64" ++version = "0.13.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" ++ ++[[package]] ++name = "bitflags" ++version = "1.3.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" ++ ++[[package]] ++name = "bytes" ++version = "1.4.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" ++ ++[[package]] ++name = "cfg-if" ++version = "1.0.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" ++ ++[[package]] ++name = "either" ++version = "1.8.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" ++ ++[[package]] ++name = "fastrand" ++version = "1.8.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" ++dependencies = [ ++ "instant", ++] ++ ++[[package]] ++name = "fixedbitset" ++version = "0.4.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" ++ ++[[package]] ++name = "fnv" ++version = "1.0.7" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" ++ ++[[package]] ++name = "futures-channel" ++version = "0.3.26" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "2e5317663a9089767a1ec00a487df42e0ca174b61b4483213ac24448e4664df5" ++dependencies = [ ++ "futures-core", ++] ++ ++[[package]] ++name = "futures-core" ++version = "0.3.26" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "ec90ff4d0fe1f57d600049061dc6bb68ed03c7d2fbd697274c41805dcb3f8608" ++ ++[[package]] ++name = "futures-sink" ++version = "0.3.26" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "f310820bb3e8cfd46c80db4d7fb8353e15dfff853a127158425f31e0be6c8364" ++ ++[[package]] ++name = "futures-task" ++version = "0.3.26" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "dcf79a1bf610b10f42aea489289c5a2c478a786509693b80cd39c44ccd936366" ++ ++[[package]] ++name = "futures-util" ++version = "0.3.26" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "9c1d6de3acfef38d2be4b1f543f553131788603495be83da675e180c8d6b7bd1" ++dependencies = [ ++ "futures-core", ++ "futures-task", ++ "pin-project-lite", ++ "pin-utils", ++] ++ ++[[package]] ++name = "getrandom" ++version = "0.2.8" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" ++dependencies = [ ++ "cfg-if", ++ "libc", ++ "wasi", ++] ++ ++[[package]] ++name = "h2" ++version = 
"0.3.15" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "5f9f29bc9dda355256b2916cf526ab02ce0aeaaaf2bad60d65ef3f12f11dd0f4" ++dependencies = [ ++ "bytes", ++ "fnv", ++ "futures-core", ++ "futures-sink", ++ "futures-util", ++ "http", ++ "indexmap", ++ "slab", ++ "tokio", ++ "tokio-util", ++ "tracing", ++] ++ ++[[package]] ++name = "hashbrown" ++version = "0.12.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" ++ ++[[package]] ++name = "heck" ++version = "0.4.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" ++ ++[[package]] ++name = "hermit-abi" ++version = "0.2.6" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7" ++dependencies = [ ++ "libc", ++] ++ ++[[package]] ++name = "http" ++version = "0.2.8" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" ++dependencies = [ ++ "bytes", ++ "fnv", ++ "itoa", ++] ++ ++[[package]] ++name = "http-body" ++version = "0.4.5" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" ++dependencies = [ ++ "bytes", ++ "http", ++ "pin-project-lite", ++] ++ ++[[package]] ++name = "http-range-header" ++version = "0.3.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" ++ ++[[package]] ++name = "httparse" ++version = "1.8.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" ++ ++[[package]] ++name = "httpdate" ++version = "1.0.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" ++ ++[[package]] ++name = "hyper" ++version = "0.14.24" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "5e011372fa0b68db8350aa7a248930ecc7839bf46d8485577d69f117a75f164c" ++dependencies = [ ++ "bytes", ++ "futures-channel", ++ "futures-core", ++ "futures-util", ++ "h2", ++ "http", ++ "http-body", ++ "httparse", ++ "httpdate", ++ "itoa", ++ "pin-project-lite", ++ "socket2", ++ "tokio", ++ "tower-service", ++ "tracing", ++ "want", ++] ++ ++[[package]] ++name = "hyper-timeout" ++version = "0.4.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" ++dependencies = [ ++ "hyper", ++ "pin-project-lite", ++ "tokio", ++ "tokio-io-timeout", ++] ++ ++[[package]] ++name = "indexmap" ++version = "1.9.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" ++dependencies = [ ++ "autocfg", ++ "hashbrown", ++] ++ ++[[package]] ++name = "instant" ++version = "0.1.12" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" ++dependencies = [ ++ "cfg-if", ++] ++ ++[[package]] ++name = "itertools" ++version = "0.10.5" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = 
"b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" ++dependencies = [ ++ "either", ++] ++ ++[[package]] ++name = "itoa" ++version = "1.0.5" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440" ++ ++[[package]] ++name = "lazy_static" ++version = "1.4.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" ++ ++[[package]] ++name = "libc" ++version = "0.2.139" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79" ++ ++[[package]] ++name = "log" ++version = "0.4.17" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" ++dependencies = [ ++ "cfg-if", ++] ++ ++[[package]] ++name = "matchit" ++version = "0.7.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "b87248edafb776e59e6ee64a79086f65890d3510f2c656c000bf2a7e8a0aea40" ++ ++[[package]] ++name = "memchr" ++version = "2.5.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" ++ ++[[package]] ++name = "mime" ++version = "0.3.16" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" ++ ++[[package]] ++name = "mio" ++version = "0.8.5" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de" ++dependencies = [ ++ "libc", ++ "log", ++ "wasi", ++ "windows-sys", ++] ++ ++[[package]] ++name = "multimap" ++version = "0.8.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" ++ ++[[package]] ++name = "nauthz-server" ++version = "0.1.0" ++dependencies = [ ++ "prost", ++ "tokio", ++ "tonic", ++ "tonic-build", ++] ++ ++[[package]] ++name = "num_cpus" ++version = "1.15.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b" ++dependencies = [ ++ "hermit-abi", ++ "libc", ++] ++ ++[[package]] ++name = "once_cell" ++version = "1.17.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "6f61fba1741ea2b3d6a1e3178721804bb716a68a6aeba1149b5d52e3d464ea66" ++ ++[[package]] ++name = "percent-encoding" ++version = "2.2.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" ++ ++[[package]] ++name = "petgraph" ++version = "0.6.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "4dd7d28ee937e54fe3080c91faa1c3a46c06de6252988a7f4592ba2310ef22a4" ++dependencies = [ ++ "fixedbitset", ++ "indexmap", ++] ++ ++[[package]] ++name = "pin-project" ++version = "1.0.12" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" ++dependencies = [ ++ "pin-project-internal", ++] ++ ++[[package]] ++name = "pin-project-internal" ++version = "1.0.12" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = 
"069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" ++dependencies = [ ++ "proc-macro2", ++ "quote", ++ "syn", ++] ++ ++[[package]] ++name = "pin-project-lite" ++version = "0.2.9" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" ++ ++[[package]] ++name = "pin-utils" ++version = "0.1.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" ++ ++[[package]] ++name = "ppv-lite86" ++version = "0.2.17" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" ++ ++[[package]] ++name = "prettyplease" ++version = "0.1.23" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "e97e3215779627f01ee256d2fad52f3d95e8e1c11e9fc6fd08f7cd455d5d5c78" ++dependencies = [ ++ "proc-macro2", ++ "syn", ++] ++ ++[[package]] ++name = "proc-macro2" ++version = "1.0.51" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "5d727cae5b39d21da60fa540906919ad737832fe0b1c165da3a34d6548c849d6" ++dependencies = [ ++ "unicode-ident", ++] ++ ++[[package]] ++name = "prost" ++version = "0.11.6" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "21dc42e00223fc37204bd4aa177e69420c604ca4a183209a8f9de30c6d934698" ++dependencies = [ ++ "bytes", ++ "prost-derive", ++] ++ ++[[package]] ++name = "prost-build" ++version = "0.11.6" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "a3f8ad728fb08fe212df3c05169e940fbb6d9d16a877ddde14644a983ba2012e" ++dependencies = [ ++ "bytes", ++ "heck", ++ "itertools", ++ "lazy_static", ++ "log", ++ "multimap", ++ "petgraph", ++ "prettyplease", ++ "prost", ++ "prost-types", ++ "regex", ++ "syn", ++ "tempfile", ++ "which", ++] ++ ++[[package]] ++name = "prost-derive" ++version = "0.11.6" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "8bda8c0881ea9f722eb9629376db3d0b903b462477c1aafcb0566610ac28ac5d" ++dependencies = [ ++ "anyhow", ++ "itertools", ++ "proc-macro2", ++ "quote", ++ "syn", ++] ++ ++[[package]] ++name = "prost-types" ++version = "0.11.6" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "a5e0526209433e96d83d750dd81a99118edbc55739e7e61a46764fd2ad537788" ++dependencies = [ ++ "bytes", ++ "prost", ++] ++ ++[[package]] ++name = "quote" ++version = "1.0.23" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b" ++dependencies = [ ++ "proc-macro2", ++] ++ ++[[package]] ++name = "rand" ++version = "0.8.5" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" ++dependencies = [ ++ "libc", ++ "rand_chacha", ++ "rand_core", ++] ++ ++[[package]] ++name = "rand_chacha" ++version = "0.3.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" ++dependencies = [ ++ "ppv-lite86", ++ "rand_core", ++] ++ ++[[package]] ++name = "rand_core" ++version = "0.6.4" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" ++dependencies = [ ++ "getrandom", ++] ++ ++[[package]] ++name = 
"redox_syscall" ++version = "0.2.16" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" ++dependencies = [ ++ "bitflags", ++] ++ ++[[package]] ++name = "regex" ++version = "1.7.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733" ++dependencies = [ ++ "regex-syntax", ++] ++ ++[[package]] ++name = "regex-syntax" ++version = "0.6.28" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" ++ ++[[package]] ++name = "remove_dir_all" ++version = "0.5.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" ++dependencies = [ ++ "winapi", ++] ++ ++[[package]] ++name = "rustversion" ++version = "1.0.11" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "5583e89e108996506031660fe09baa5011b9dd0341b89029313006d1fb508d70" ++ ++[[package]] ++name = "serde" ++version = "1.0.152" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb" ++ ++[[package]] ++name = "slab" ++version = "0.4.7" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" ++dependencies = [ ++ "autocfg", ++] ++ ++[[package]] ++name = "socket2" ++version = "0.4.7" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" ++dependencies = [ ++ "libc", ++ "winapi", ++] ++ ++[[package]] ++name = "syn" ++version = "1.0.107" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5" ++dependencies = [ ++ "proc-macro2", ++ "quote", ++ "unicode-ident", ++] ++ ++[[package]] ++name = "sync_wrapper" ++version = "0.1.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" ++ ++[[package]] ++name = "tempfile" ++version = "3.3.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" ++dependencies = [ ++ "cfg-if", ++ "fastrand", ++ "libc", ++ "redox_syscall", ++ "remove_dir_all", ++ "winapi", ++] ++ ++[[package]] ++name = "tokio" ++version = "1.25.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "c8e00990ebabbe4c14c08aca901caed183ecd5c09562a12c824bb53d3c3fd3af" ++dependencies = [ ++ "autocfg", ++ "bytes", ++ "libc", ++ "memchr", ++ "mio", ++ "num_cpus", ++ "pin-project-lite", ++ "socket2", ++ "tokio-macros", ++ "windows-sys", ++] ++ ++[[package]] ++name = "tokio-io-timeout" ++version = "1.2.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" ++dependencies = [ ++ "pin-project-lite", ++ "tokio", ++] ++ ++[[package]] ++name = "tokio-macros" ++version = "1.8.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" ++dependencies = [ ++ "proc-macro2", ++ "quote", ++ "syn", ++] ++ ++[[package]] 
++name = "tokio-stream" ++version = "0.1.11" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "d660770404473ccd7bc9f8b28494a811bc18542b915c0855c51e8f419d5223ce" ++dependencies = [ ++ "futures-core", ++ "pin-project-lite", ++ "tokio", ++] ++ ++[[package]] ++name = "tokio-util" ++version = "0.7.4" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740" ++dependencies = [ ++ "bytes", ++ "futures-core", ++ "futures-sink", ++ "pin-project-lite", ++ "tokio", ++ "tracing", ++] ++ ++[[package]] ++name = "tonic" ++version = "0.8.3" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "8f219fad3b929bef19b1f86fbc0358d35daed8f2cac972037ac0dc10bbb8d5fb" ++dependencies = [ ++ "async-stream", ++ "async-trait", ++ "axum", ++ "base64", ++ "bytes", ++ "futures-core", ++ "futures-util", ++ "h2", ++ "http", ++ "http-body", ++ "hyper", ++ "hyper-timeout", ++ "percent-encoding", ++ "pin-project", ++ "prost", ++ "prost-derive", ++ "tokio", ++ "tokio-stream", ++ "tokio-util", ++ "tower", ++ "tower-layer", ++ "tower-service", ++ "tracing", ++ "tracing-futures", ++] ++ ++[[package]] ++name = "tonic-build" ++version = "0.8.4" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "5bf5e9b9c0f7e0a7c027dcfaba7b2c60816c7049171f679d99ee2ff65d0de8c4" ++dependencies = [ ++ "prettyplease", ++ "proc-macro2", ++ "prost-build", ++ "quote", ++ "syn", ++] ++ ++[[package]] ++name = "tower" ++version = "0.4.13" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" ++dependencies = [ ++ "futures-core", ++ "futures-util", ++ "indexmap", ++ "pin-project", ++ "pin-project-lite", ++ "rand", ++ "slab", ++ "tokio", ++ "tokio-util", ++ "tower-layer", ++ "tower-service", ++ "tracing", ++] ++ ++[[package]] ++name = "tower-http" ++version = "0.3.5" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "f873044bf02dd1e8239e9c1293ea39dad76dc594ec16185d0a1bf31d8dc8d858" ++dependencies = [ ++ "bitflags", ++ "bytes", ++ "futures-core", ++ "futures-util", ++ "http", ++ "http-body", ++ "http-range-header", ++ "pin-project-lite", ++ "tower", ++ "tower-layer", ++ "tower-service", ++] ++ ++[[package]] ++name = "tower-layer" ++version = "0.3.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" ++ ++[[package]] ++name = "tower-service" ++version = "0.3.2" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" ++ ++[[package]] ++name = "tracing" ++version = "0.1.37" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" ++dependencies = [ ++ "cfg-if", ++ "log", ++ "pin-project-lite", ++ "tracing-attributes", ++ "tracing-core", ++] ++ ++[[package]] ++name = "tracing-attributes" ++version = "0.1.23" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" ++dependencies = [ ++ "proc-macro2", ++ "quote", ++ "syn", ++] ++ ++[[package]] ++name = "tracing-core" ++version = "0.1.30" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = 
"24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" ++dependencies = [ ++ "once_cell", ++] ++ ++[[package]] ++name = "tracing-futures" ++version = "0.2.5" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" ++dependencies = [ ++ "pin-project", ++ "tracing", ++] ++ ++[[package]] ++name = "try-lock" ++version = "0.2.4" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" ++ ++[[package]] ++name = "unicode-ident" ++version = "1.0.6" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc" ++ ++[[package]] ++name = "want" ++version = "0.3.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" ++dependencies = [ ++ "log", ++ "try-lock", ++] ++ ++[[package]] ++name = "wasi" ++version = "0.11.0+wasi-snapshot-preview1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" ++ ++[[package]] ++name = "which" ++version = "4.4.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269" ++dependencies = [ ++ "either", ++ "libc", ++ "once_cell", ++] ++ ++[[package]] ++name = "winapi" ++version = "0.3.9" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" ++dependencies = [ ++ "winapi-i686-pc-windows-gnu", ++ "winapi-x86_64-pc-windows-gnu", ++] ++ ++[[package]] ++name = "winapi-i686-pc-windows-gnu" ++version = "0.4.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" ++ ++[[package]] ++name = "winapi-x86_64-pc-windows-gnu" ++version = "0.4.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" ++ ++[[package]] ++name = "windows-sys" ++version = "0.42.0" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" ++dependencies = [ ++ "windows_aarch64_gnullvm", ++ "windows_aarch64_msvc", ++ "windows_i686_gnu", ++ "windows_i686_msvc", ++ "windows_x86_64_gnu", ++ "windows_x86_64_gnullvm", ++ "windows_x86_64_msvc", ++] ++ ++[[package]] ++name = "windows_aarch64_gnullvm" ++version = "0.42.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608" ++ ++[[package]] ++name = "windows_aarch64_msvc" ++version = "0.42.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7" ++ ++[[package]] ++name = "windows_i686_gnu" ++version = "0.42.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640" ++ ++[[package]] ++name = "windows_i686_msvc" ++version = "0.42.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605" ++ 
++[[package]] ++name = "windows_x86_64_gnu" ++version = "0.42.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45" ++ ++[[package]] ++name = "windows_x86_64_gnullvm" ++version = "0.42.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463" ++ ++[[package]] ++name = "windows_x86_64_msvc" ++version = "0.42.1" ++source = "registry+https://github.com/rust-lang/crates.io-index" ++checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd" +diff --git a/examples/nauthz/Cargo.toml b/examples/nauthz/Cargo.toml +new file mode 100644 +index 0000000..70b6c32 +--- /dev/null ++++ b/examples/nauthz/Cargo.toml +@@ -0,0 +1,13 @@ ++[package] ++name = "nauthz-server" ++version = "0.1.0" ++edition = "2021" ++ ++[dependencies] ++# Common dependencies ++tokio = { version = "1.0", features = ["rt-multi-thread", "macros"] } ++prost = "0.11" ++tonic = "0.8.3" ++ ++[build-dependencies] ++tonic-build = { version="0.8.3", features = ["prost"] } +diff --git a/examples/nauthz/build.rs b/examples/nauthz/build.rs +new file mode 100644 +index 0000000..ada482b +--- /dev/null ++++ b/examples/nauthz/build.rs +@@ -0,0 +1,7 @@ ++fn main() -> Result<(), Box<dyn std::error::Error>> { ++ tonic_build::configure() ++ .build_server(true) ++ .protoc_arg("--experimental_allow_proto3_optional") ++ .compile(&["../../proto/nauthz.proto"], &["../../proto"])?; ++ Ok(()) ++} +diff --git a/examples/nauthz/src/main.rs b/examples/nauthz/src/main.rs +new file mode 100644 +index 0000000..4778181 +--- /dev/null ++++ b/examples/nauthz/src/main.rs +@@ -0,0 +1,60 @@ ++use tonic::{transport::Server, Request, Response, Status}; ++ ++use nauthz_grpc::authorization_server::{Authorization, AuthorizationServer}; ++use nauthz_grpc::{Decision, EventReply, EventRequest}; ++ ++pub mod nauthz_grpc { ++ tonic::include_proto!("nauthz"); ++} ++ ++#[derive(Default)] ++pub struct EventAuthz { ++ allowed_kinds: Vec<u64>, ++} ++ ++#[tonic::async_trait] ++impl Authorization for EventAuthz { ++ async fn event_admit( ++ &self, ++ request: Request<EventRequest>, ++ ) -> Result<Response<EventReply>, Status> { ++ let reply; ++ let req = request.into_inner(); ++ let event = req.event.unwrap(); ++ let content_prefix: String = event.content.chars().take(40).collect(); ++ println!("recvd event, [kind={}, origin={:?}, nip05_domain={:?}, tag_count={}, content_sample={:?}]", ++ event.kind, req.origin, req.nip05.map(|x| x.domain), event.tags.len(), content_prefix); ++ // Permit any event with a whitelisted kind ++ if self.allowed_kinds.contains(&event.kind) { ++ println!("This looks fine! (kind={})", event.kind); ++ reply = nauthz_grpc::EventReply { ++ decision: Decision::Permit as i32, ++ message: None, ++ }; ++ } else { ++ println!("Blocked! 
(kind={})", event.kind); ++ reply = nauthz_grpc::EventReply { ++ decision: Decision::Deny as i32, ++ message: Some(format!("kind {} not permitted", event.kind)), ++ }; ++ } ++ Ok(Response::new(reply)) ++ } ++} ++ ++#[tokio::main] ++async fn main() -> Result<(), Box<dyn std::error::Error>> { ++ let addr = "[::1]:50051".parse().unwrap(); ++ ++ // A simple authorization engine that allows kinds 0-3 ++ let checker = EventAuthz { ++ allowed_kinds: vec![0, 1, 2, 3], ++ }; ++ println!("EventAuthz Server listening on {}", addr); ++ // Start serving ++ Server::builder() ++ .add_service(AuthorizationServer::new(checker)) ++ .serve(addr) ++ .await?; ++ Ok(()) ++} +diff --git a/proto/nauthz.proto b/proto/nauthz.proto +new file mode 100644 +index 0000000..829b273 +--- /dev/null ++++ b/proto/nauthz.proto +@@ -0,0 +1,60 @@ ++syntax = "proto3"; ++ ++// Nostr Authorization Services ++package nauthz; ++ ++// Authorization for actions against a relay ++service Authorization { ++ // Determine if an event should be admitted to the relay ++ rpc EventAdmit(EventRequest) returns (EventReply) {} ++} ++ ++message Event { ++ bytes id = 1; // 32-byte SHA256 hash of serialized event ++ bytes pubkey = 2; // 32-byte public key of event creator ++ fixed64 created_at = 3; // UNIX timestamp provided by event creator ++ uint64 kind = 4; // event kind ++ string content = 5; // arbitrary event contents ++ repeated TagEntry tags = 6; // event tag array ++ bytes sig = 7; // 64-byte signature of the event id ++ // Individual values for a single tag ++ message TagEntry { ++ repeated string values = 1; ++ } ++} ++ ++// Event data and metadata for authorization decisions ++message EventRequest { ++ Event event = ++ 1; // the event to be admitted for further relay processing ++ optional string ip_addr = ++ 2; // IP address of the client that submitted the event ++ optional string origin = ++ 3; // HTTP origin header from the client, if one exists ++ optional string user_agent = ++ 4; // HTTP user-agent header from the client, if one exists ++ optional bytes auth_pubkey = ++ 5; // the public key associated with a NIP-42 AUTH'd session, if ++ // authentication occurred ++ optional Nip05Name nip05 = ++ 6; // NIP-05 address associated with the event pubkey, if it is ++ // known and has been validated by the relay ++ // A NIP-05 verification record ++ message Nip05Name { ++ string local = 1; ++ string domain = 2; ++ } ++} ++ ++// A permit or deny decision ++enum Decision { ++ DECISION_UNSPECIFIED = 0; ++ DECISION_PERMIT = 1; // Admit this event for further processing ++ DECISION_DENY = 2; // Deny persisting or propagating this event ++} ++ ++// Response to an event authorization request ++message EventReply { ++ Decision decision = 1; // decision to enforce ++ optional string message = 2; // informative message for the client ++} +diff --git a/rustfmt.toml b/rustfmt.toml +new file mode 100644 +index 0000000..01f4d94 +--- /dev/null ++++ b/rustfmt.toml +@@ -0,0 +1,4 @@ ++edition = "2021" ++#max_width = 140 ++#chain_width = 100 ++#fn_call_width = 100 +diff --git a/src/bin/bulkloader.rs b/src/bin/bulkloader.rs +new file mode 100644 +index 0000000..78ed335 +--- /dev/null ++++ b/src/bin/bulkloader.rs +@@ -0,0 +1,181 @@ ++use gnostr_relay::config; ++use gnostr_relay::error::{Error, Result}; ++use gnostr_relay::event::{single_char_tagname, Event}; ++use gnostr_relay::repo::sqlite::{build_pool, PooledConnection}; ++use gnostr_relay::repo::sqlite_migration::{curr_db_version, DB_VERSION}; ++use gnostr_relay::utils::is_lower_hex; ++use rusqlite::params; ++use 
rusqlite::{OpenFlags, Transaction}; ++use std::io; ++use std::path::Path; ++use std::sync::mpsc; ++use std::thread; ++use tracing::info; ++ ++/// Bulk load JSONL data from STDIN to the database specified in config.toml (or ./nostr.db as a default). ++/// The database must already exist; this will not create a new one. ++/// Tested against schema v13. ++ ++pub fn main() -> Result<()> { ++ let _trace_sub = tracing_subscriber::fmt::try_init(); ++ println!("Nostr-rs-relay Bulk Loader"); ++ // load settings; the database directory must already exist. ++ let settings = config::Settings::new(&None)?; ++ if !Path::new(&settings.database.data_directory).is_dir() { ++ info!("Database directory does not exist"); ++ return Err(Error::DatabaseDirError); ++ } ++ // Get a database pool ++ let pool = build_pool( ++ "bulk-loader", ++ &settings, ++ OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE, ++ 1, ++ 4, ++ false, ++ ); ++ { ++ // check for database schema version ++ let mut conn: PooledConnection = pool.get()?; ++ let version = curr_db_version(&mut conn)?; ++ info!("current version is: {:?}", version); ++ // ensure the schema version is current. ++ if version != DB_VERSION { ++ info!("version is not current, exiting"); ++ panic!("cannot write to schema other than v{DB_VERSION}"); ++ } ++ } ++ // this channel will contain parsed events ready to be inserted ++ let (event_tx, event_rx) = mpsc::sync_channel(100_000); ++ // Thread for reading events ++ let _stdin_reader_handler = thread::spawn(move || { ++ let stdin = io::stdin(); ++ for readline in stdin.lines() { ++ if let Ok(line) = readline { ++ // try to parse a nostr event ++ let eres: Result<Event> = serde_json::from_str(&line); ++ if let Ok(mut e) = eres { ++ if let Ok(()) = e.validate() { ++ e.build_index(); ++ //debug!("Event: {:?}", e); ++ event_tx.send(Some(e)).ok(); ++ } else { ++ info!("could not validate event"); ++ } ++ } else { ++ info!("error reading event: {:?}", eres); ++ } ++ } else { ++ // error reading ++ info!("error reading: {:?}", readline); ++ } ++ } ++ info!("finished parsing events"); ++ event_tx.send(None).ok(); ++ let ok: Result<()> = Ok(()); ++ ok ++ }); ++ let mut conn: PooledConnection = pool.get()?; ++ let mut events_read = 0; ++ let event_batch_size = 50_000; ++ let mut new_events = 0; ++ let mut has_more_events = true; ++ while has_more_events { ++ // begin a transaction ++ let tx = conn.transaction()?; ++ // read in batch_size events and commit ++ for _ in 0..event_batch_size { ++ match event_rx.recv() { ++ Ok(Some(e)) => { ++ events_read += 1; ++ // ignore ephemeral events ++ if !(e.kind >= 20000 && e.kind < 30000) { ++ match write_event(&tx, e) { ++ Ok(c) => { ++ new_events += c; ++ } ++ Err(e) => { ++ info!("error inserting event: {:?}", e); ++ } ++ } ++ } ++ } ++ Ok(None) => { ++ // signal that the sender will never produce more ++ // events ++ has_more_events = false; ++ break; ++ } ++ Err(_) => { ++ info!("sender is closed"); ++ // sender is done; stop the batch loop so the ++ // outer while can also terminate ++ has_more_events = false; ++ break; ++ } ++ } ++ } ++ info!("committed {} events...", new_events); ++ tx.commit()?; ++ conn.execute_batch("pragma wal_checkpoint(truncate)")?; ++ } ++ info!("processed {} events", events_read); ++ info!("stored {} new events", new_events); ++ info!("finished reading input"); ++ Ok(()) ++} ++ ++/// Write an event and update the tag table. ++/// Assumes the event has its index built. 
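++/// Returns the number of event rows inserted: 0 for a duplicate (the INSERT OR IGNORE below), 1 otherwise. ++/// Tag values that are even-length lowercase hex are stored as BLOBs so they can be restored losslessly; ++/// all other tag values are stored as text. For replaceable kinds, all but the newest event for the ++/// author/kind pair are deleted.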
++fn write_event(tx: &Transaction, e: Event) -> Result<usize> { ++ let id_blob = hex::decode(&e.id).ok(); ++ let pubkey_blob: Option<Vec<u8>> = hex::decode(&e.pubkey).ok(); ++ let delegator_blob: Option<Vec<u8>> = e.delegated_by.as_ref().and_then(|d| hex::decode(d).ok()); ++ let event_str = serde_json::to_string(&e).ok(); ++ // ignore if the event hash is a duplicate. ++ let ins_count = tx.execute( ++ "INSERT OR IGNORE INTO event (event_hash, created_at, kind, author, delegated_by, content, first_seen, hidden) VALUES (?1, ?2, ?3, ?4, ?5, ?6, strftime('%s','now'), FALSE);", ++ params![id_blob, e.created_at, e.kind, pubkey_blob, delegator_blob, event_str] ++ )?; ++ if ins_count == 0 { ++ return Ok(0); ++ } ++ // we want to capture the event_id that had the tag, the tag name, and the tag hex value. ++ let event_id = tx.last_insert_rowid(); ++ // look at each event, and each tag, creating new tag entries if appropriate. ++ for t in e.tags.iter().filter(|x| x.len() > 1) { ++ let tagname = t.get(0).unwrap(); ++ let tagnamechar_opt = single_char_tagname(tagname); ++ if tagnamechar_opt.is_none() { ++ continue; ++ } ++ // safe because len was > 1 ++ let tagval = t.get(1).unwrap(); ++ // insert as BLOB if we can restore it losslessly. ++ // this means it needs to be even length and lowercase. ++ if (tagval.len() % 2 == 0) && is_lower_hex(tagval) { ++ tx.execute( ++ "INSERT INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3);", ++ params![event_id, tagname, hex::decode(tagval).ok()], ++ )?; ++ } else { ++ // otherwise, insert as text ++ tx.execute( ++ "INSERT INTO tag (event_id, name, value) VALUES (?1, ?2, ?3);", ++ params![event_id, tagname, &tagval], ++ )?; ++ } ++ } ++ if e.is_replaceable() { ++ //let query = "SELECT id FROM event WHERE kind=? AND author=? ORDER BY created_at DESC LIMIT 1;"; ++ //let count: usize = tx.query_row(query, params![e.kind, pubkey_blob], |row| row.get(0))?; ++ //info!("found {} rows that /would/ be preserved", count); ++ match tx.execute( ++ "DELETE FROM event WHERE kind=? and author=? and id NOT IN (SELECT id FROM event WHERE kind=? AND author=? ORDER BY created_at DESC LIMIT 1);", ++ params![e.kind, pubkey_blob, e.kind, pubkey_blob], ++ ) { ++ Ok(_) => {}, ++ Err(x) => {info!("error deleting replaceable event: {:?}",x);} ++ } ++ } ++ Ok(ins_count) ++} +diff --git a/src/cli.rs b/src/cli.rs +new file mode 100644 +index 0000000..910016c +--- /dev/null ++++ b/src/cli.rs +@@ -0,0 +1,27 @@ ++use clap::Parser; ++ ++#[derive(Parser)] ++#[command(about = "gnostr: git+nostr rust relay", author = env!("CARGO_PKG_AUTHORS"), version = env!("CARGO_PKG_VERSION"))] ++pub struct CLIArgs { ++ #[arg( ++ short, ++ long, ++ help = "Use the <directory> as the location of the database", ++ required = false ++ )] ++ pub db: Option<String>, ++ #[arg( ++ short, ++ long, ++ help = "Use the <port> as the listening port", ++ required = false ++ )] ++ pub port: Option<u16>, ++ #[arg( ++ short, ++ long, ++ help = "Use the <file name> as the location of the config file", ++ required = false ++ )] ++ pub config: Option<String>, ++} +diff --git a/src/close.rs b/src/close.rs +new file mode 100644 +index 0000000..388c192 +--- /dev/null ++++ b/src/close.rs +@@ -0,0 +1,32 @@ ++//! Subscription close request parsing ++//! ++//! Representation and parsing of `CLOSE` messages sent from clients. ++use crate::error::{Error, Result}; ++use serde::{Deserialize, Serialize}; ++ ++/// Close command in network format ++#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] ++pub struct CloseCmd { ++ /// Protocol command, expected to always be "CLOSE". 
++ cmd: String, ++ /// The subscription identifier being closed. ++ id: String, ++} ++ ++/// Identifier of the subscription to be closed. ++#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] ++pub struct Close { ++ /// The subscription identifier being closed. ++ pub id: String, ++} ++ ++impl From<CloseCmd> for Result<Close> { ++ fn from(cc: CloseCmd) -> Result<Close> { ++ // ensure command is correct ++ if cc.cmd == "CLOSE" { ++ Ok(Close { id: cc.id }) ++ } else { ++ Err(Error::CommandUnknownError) ++ } ++ } ++} +diff --git a/src/config.rs b/src/config.rs +new file mode 100644 +index 0000000..f2e8e13 +--- /dev/null ++++ b/src/config.rs +@@ -0,0 +1,352 @@ ++//! Configuration file and settings management ++use crate::payment::Processor; ++use config::{Config, ConfigError, File}; ++use serde::{Deserialize, Serialize}; ++use std::time::Duration; ++ ++#[derive(Debug, Serialize, Deserialize, Clone)] ++#[allow(unused)] ++pub struct Info { ++ pub relay_url: Option<String>, ++ pub name: Option<String>, ++ pub description: Option<String>, ++ pub pubkey: Option<String>, ++ pub contact: Option<String>, ++ pub favicon: Option<String>, ++ pub relay_icon: Option<String>, ++} ++ ++#[derive(Debug, Clone, Serialize, Deserialize)] ++#[allow(unused)] ++pub struct Database { ++ pub data_directory: String, ++ pub engine: String, ++ pub in_memory: bool, ++ pub min_conn: u32, ++ pub max_conn: u32, ++ pub connection: String, ++ pub connection_write: Option<String>, ++} ++ ++#[derive(Debug, Clone, Serialize, Deserialize)] ++#[allow(unused)] ++pub struct Grpc { ++ pub event_admission_server: Option<String>, ++} ++ ++#[derive(Debug, Clone, Serialize, Deserialize)] ++#[allow(unused)] ++pub struct Network { ++ pub port: u16, ++ pub address: String, ++ pub remote_ip_header: Option<String>, // retrieve client IP from this HTTP header if present ++ pub ping_interval_seconds: u32, ++} ++ ++#[derive(Debug, Clone, Serialize, Deserialize)] ++#[allow(unused)] ++pub struct Options { ++ pub reject_future_seconds: Option<usize>, // if defined, reject any events with a timestamp more than X seconds in the future ++} ++ ++#[derive(Debug, Clone, Serialize, Deserialize)] ++#[allow(unused)] ++pub struct Retention { ++ // TODO: implement ++ pub max_events: Option<usize>, // max events ++ pub max_bytes: Option<usize>, // max size ++ pub persist_days: Option<usize>, // oldest message ++ pub whitelist_addresses: Option<Vec<String>>, // whitelisted addresses (never delete) ++} ++ ++#[derive(Debug, Clone, Serialize, Deserialize)] ++#[allow(unused)] ++pub struct Limits { ++ pub messages_per_sec: Option<u32>, // Artificially slow down event writing to limit disk consumption (averaged over 1 minute) ++ pub subscriptions_per_min: Option<u32>, // Artificially slow down request (db query) creation to prevent abuse (averaged over 1 minute) ++ pub db_conns_per_client: Option<u32>, // How many concurrent database queries (not subscriptions) may a client have? 
++ pub max_blocking_threads: usize, ++ pub max_event_bytes: Option, // Maximum size of an EVENT message ++ pub max_ws_message_bytes: Option, ++ pub max_ws_frame_bytes: Option, ++ pub broadcast_buffer: usize, // events to buffer for subscribers (prevents slow readers from consuming memory) ++ pub event_persist_buffer: usize, // events to buffer for database commits (block senders if database writes are too slow) ++ pub event_kind_blacklist: Option>, ++ pub event_kind_allowlist: Option>, ++} ++ ++#[derive(Debug, Clone, Serialize, Deserialize)] ++#[allow(unused)] ++pub struct Authorization { ++ pub pubkey_whitelist: Option>, // If present, only allow these pubkeys to publish events ++ pub nip42_auth: bool, // if true enables NIP-42 authentication ++ pub nip42_dms: bool, // if true send DMs only to their authenticated recipients ++} ++ ++#[derive(Debug, Clone, Serialize, Deserialize)] ++#[allow(unused)] ++pub struct PayToRelay { ++ pub enabled: bool, ++ pub admission_cost: u64, // Cost to have pubkey whitelisted ++ pub cost_per_event: u64, // Cost author to pay per event ++ pub node_url: String, ++ pub api_secret: String, ++ pub terms_message: String, ++ pub sign_ups: bool, // allow new users to sign up to relay ++ pub direct_message: bool, // Send direct message to user with invoice and terms ++ pub secret_key: Option, ++ pub processor: Processor, ++} ++ ++#[derive(Debug, Clone, Serialize, Deserialize)] ++#[allow(unused)] ++pub struct Diagnostics { ++ pub tracing: bool, // enables tokio console-subscriber ++} ++ ++#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone, Copy)] ++#[serde(rename_all = "lowercase")] ++pub enum VerifiedUsersMode { ++ Enabled, ++ Passive, ++ Disabled, ++} ++ ++#[derive(Debug, Clone, Serialize, Deserialize)] ++#[allow(unused)] ++pub struct VerifiedUsers { ++ pub mode: VerifiedUsersMode, // Mode of operation: "enabled" (enforce) or "passive" (check only). If none, this is simply disabled. 
++ pub domain_whitelist: Option>, // If present, only allow verified users from these domains can publish events ++ pub domain_blacklist: Option>, // If present, allow all verified users from any domain except these ++ pub verify_expiration: Option, // how long a verification is cached for before no longer being used ++ pub verify_update_frequency: Option, // how often to attempt to update verification ++ pub verify_expiration_duration: Option, // internal result of parsing verify_expiration ++ pub verify_update_frequency_duration: Option, // internal result of parsing verify_update_frequency ++ pub max_consecutive_failures: usize, // maximum number of verification failures in a row, before ceasing future checks ++} ++ ++impl VerifiedUsers { ++ pub fn init(&mut self) { ++ self.verify_expiration_duration = self.verify_expiration_duration(); ++ self.verify_update_frequency_duration = self.verify_update_duration(); ++ } ++ ++ #[must_use] ++ pub fn is_enabled(&self) -> bool { ++ self.mode == VerifiedUsersMode::Enabled ++ } ++ ++ #[must_use] ++ pub fn is_active(&self) -> bool { ++ self.mode == VerifiedUsersMode::Enabled || self.mode == VerifiedUsersMode::Passive ++ } ++ ++ #[must_use] ++ pub fn is_passive(&self) -> bool { ++ self.mode == VerifiedUsersMode::Passive ++ } ++ ++ #[must_use] ++ pub fn verify_expiration_duration(&self) -> Option { ++ self.verify_expiration ++ .as_ref() ++ .and_then(|x| parse_duration::parse(x).ok()) ++ } ++ ++ #[must_use] ++ pub fn verify_update_duration(&self) -> Option { ++ self.verify_update_frequency ++ .as_ref() ++ .and_then(|x| parse_duration::parse(x).ok()) ++ } ++ ++ #[must_use] ++ pub fn is_valid(&self) -> bool { ++ self.verify_expiration_duration().is_some() && self.verify_update_duration().is_some() ++ } ++} ++ ++#[derive(Debug, Clone, Serialize, Deserialize)] ++#[allow(unused)] ++pub struct Logging { ++ pub folder_path: Option, ++ pub file_prefix: Option, ++} ++ ++#[derive(Debug, Clone, Serialize, Deserialize)] ++#[allow(unused)] ++pub struct Settings { ++ pub info: Info, ++ pub diagnostics: Diagnostics, ++ pub database: Database, ++ pub grpc: Grpc, ++ pub network: Network, ++ pub limits: Limits, ++ pub authorization: Authorization, ++ pub pay_to_relay: PayToRelay, ++ pub verified_users: VerifiedUsers, ++ pub retention: Retention, ++ pub options: Options, ++ pub logging: Logging, ++} ++ ++impl Settings { ++ pub fn new(config_file_name: &Option) -> Result { ++ let default_settings = Self::default(); ++ // attempt to construct settings with file ++ let from_file = Self::new_from_default(&default_settings, config_file_name); ++ match from_file { ++ Err(e) => { ++ // pass up the parse error if the config file was specified, ++ // otherwise use the default config (with a warning). ++ if config_file_name.is_some() { ++ Err(e) ++ } else { ++ eprintln!("Error reading config file ({:?})", e); ++ eprintln!("WARNING: Default configuration settings will be used"); ++ Ok(default_settings) ++ } ++ } ++ ok => ok, ++ } ++ } ++ ++ fn new_from_default( ++ default: &Settings, ++ config_file_name: &Option, ++ ) -> Result { ++ let default_config_file_name = "config.toml".to_string(); ++ let config: &String = match config_file_name { ++ Some(value) => value, ++ None => &default_config_file_name, ++ }; ++ let builder = Config::builder(); ++ let config: Config = builder ++ // use defaults ++ .add_source(Config::try_from(default)?) 
++ // override with file contents ++ .add_source(File::with_name(config)) ++ .build()?; ++ let mut settings: Settings = config.try_deserialize()?; ++ // ensure connection pool size is logical ++ assert!( ++ settings.database.min_conn <= settings.database.max_conn, ++ "Database min_conn setting ({}) cannot exceed max_conn ({})", ++ settings.database.min_conn, ++ settings.database.max_conn ++ ); ++ // ensure durations parse ++ assert!( ++ settings.verified_users.is_valid(), ++ "VerifiedUsers time settings could not be parsed" ++ ); ++ // initialize durations for verified users ++ settings.verified_users.init(); ++ ++ // Validate pay to relay settings ++ if settings.pay_to_relay.enabled { ++ assert_ne!(settings.pay_to_relay.api_secret, ""); ++ // Should check that url is valid ++ assert_ne!(settings.pay_to_relay.node_url, ""); ++ assert_ne!(settings.pay_to_relay.terms_message, ""); ++ ++ if settings.pay_to_relay.direct_message { ++ assert_ne!( ++ settings.pay_to_relay.secret_key, ++ Some("".to_string()) ++ ); ++ assert!(settings.pay_to_relay.secret_key.is_some()); ++ } ++ } ++ ++ Ok(settings) ++ } ++} ++ ++impl Default for Settings { ++ fn default() -> Self { ++ Settings { ++ info: Info { ++ relay_url: None, ++ name: Some("a gnostr relay".to_owned()), ++ description: None, ++ pubkey: None, ++ contact: None, ++ favicon: None, ++ relay_icon: None, ++ }, ++ diagnostics: Diagnostics { tracing: false }, ++ database: Database { ++ data_directory: ".".to_owned(), ++ engine: "sqlite".to_owned(), ++ in_memory: false, ++ min_conn: 4, ++ max_conn: 8, ++ connection: "".to_owned(), ++ connection_write: None, ++ }, ++ grpc: Grpc { ++ event_admission_server: None, ++ }, ++ network: Network { ++ port: 8080, ++ ping_interval_seconds: 300, ++ address: "0.0.0.0".to_owned(), ++ remote_ip_header: None, ++ }, ++ limits: Limits { ++ messages_per_sec: None, ++ subscriptions_per_min: None, ++ db_conns_per_client: None, ++ max_blocking_threads: 16, ++ max_event_bytes: Some(2 << 17), // 128K ++ max_ws_message_bytes: Some(2 << 17), // 128K ++ max_ws_frame_bytes: Some(2 << 17), // 128K ++ broadcast_buffer: 16384, ++ event_persist_buffer: 4096, ++ event_kind_blacklist: None, ++ event_kind_allowlist: None, ++ }, ++ authorization: Authorization { ++ pubkey_whitelist: None, // Allow any address to publish ++ nip42_auth: false, // Disable NIP-42 authentication ++ nip42_dms: false, // Send DMs to everybody ++ }, ++ pay_to_relay: PayToRelay { ++ enabled: false, ++ admission_cost: 4200, ++ cost_per_event: 0, ++ terms_message: "".to_string(), ++ node_url: "".to_string(), ++ api_secret: "".to_string(), ++ sign_ups: false, ++ direct_message: true, ++ secret_key: None, ++ processor: Processor::LNBits, ++ }, ++ verified_users: VerifiedUsers { ++ mode: VerifiedUsersMode::Disabled, ++ domain_whitelist: None, ++ domain_blacklist: None, ++ verify_expiration: Some("1 week".to_owned()), ++ verify_update_frequency: Some("1 day".to_owned()), ++ verify_expiration_duration: None, ++ verify_update_frequency_duration: None, ++ max_consecutive_failures: 20, ++ }, ++ retention: Retention { ++ max_events: None, // max events ++ max_bytes: None, // max size ++ persist_days: None, // oldest message ++ whitelist_addresses: None, // whitelisted addresses (never delete) ++ }, ++ options: Options { ++ reject_future_seconds: None, // Reject events in the future if defined ++ }, ++ logging: Logging { ++ folder_path: None, ++ file_prefix: None, ++ }, ++ } ++ } ++} +diff --git a/src/conn.rs b/src/conn.rs +new file mode 100644 +index 0000000..2640d53 +--- 
/dev/null ++++ b/src/conn.rs +@@ -0,0 +1,229 @@ ++//! Client connection state ++use std::collections::HashMap; ++ ++use tracing::{debug, trace}; ++use uuid::Uuid; ++ ++use crate::close::Close; ++use crate::conn::Nip42AuthState::{AuthPubkey, Challenge, NoAuth}; ++use crate::error::Error; ++use crate::error::Result; ++use crate::event::Event; ++use crate::subscription::Subscription; ++use crate::utils::{host_str, unix_time}; ++ ++/// A subscription identifier has a maximum length ++const MAX_SUBSCRIPTION_ID_LEN: usize = 256; ++ ++/// NIP-42 authentication state ++pub enum Nip42AuthState { ++ /// The client is not authenticated yet ++ NoAuth, ++ /// The AUTH challenge sent ++ Challenge(String), ++ /// The client is authenticated ++ AuthPubkey(String), ++} ++ ++/// State for a client connection ++pub struct ClientConn { ++ /// Client IP (either from socket, or configured proxy header ++ client_ip_addr: String, ++ /// Unique client identifier generated at connection time ++ client_id: Uuid, ++ /// The current set of active client subscriptions ++ subscriptions: HashMap, ++ /// Per-connection maximum concurrent subscriptions ++ max_subs: usize, ++ /// NIP-42 AUTH ++ auth: Nip42AuthState, ++} ++ ++impl Default for ClientConn { ++ fn default() -> Self { ++ Self::new("unknown".to_owned()) ++ } ++} ++ ++impl ClientConn { ++ /// Create a new, empty connection state. ++ #[must_use] ++ pub fn new(client_ip_addr: String) -> Self { ++ let client_id = Uuid::new_v4(); ++ ClientConn { ++ client_ip_addr, ++ client_id, ++ subscriptions: HashMap::new(), ++ max_subs: 32, ++ auth: NoAuth, ++ } ++ } ++ ++ #[must_use] ++ pub fn subscriptions(&self) -> &HashMap { ++ &self.subscriptions ++ } ++ ++ /// Check if the given subscription already exists ++ #[must_use] ++ pub fn has_subscription(&self, sub: &Subscription) -> bool { ++ self.subscriptions.values().any(|x| x == sub) ++ } ++ ++ /// Get a short prefix of the client's unique identifier, suitable ++ /// for logging. ++ #[must_use] ++ pub fn get_client_prefix(&self) -> String { ++ self.client_id.to_string().chars().take(8).collect() ++ } ++ ++ #[must_use] ++ pub fn ip(&self) -> &str { ++ &self.client_ip_addr ++ } ++ ++ #[must_use] ++ pub fn auth_pubkey(&self) -> Option<&String> { ++ match &self.auth { ++ AuthPubkey(pubkey) => Some(pubkey), ++ _ => None, ++ } ++ } ++ ++ #[must_use] ++ pub fn auth_challenge(&self) -> Option<&String> { ++ match &self.auth { ++ Challenge(pubkey) => Some(pubkey), ++ _ => None, ++ } ++ } ++ ++ /// Add a new subscription for this connection. ++ /// # Errors ++ /// ++ /// Will return `Err` if the client has too many subscriptions, or ++ /// if the provided name is excessively long. ++ pub fn subscribe(&mut self, s: Subscription) -> Result<()> { ++ let k = s.get_id(); ++ let sub_id_len = k.len(); ++ // prevent arbitrarily long subscription identifiers from ++ // being used. ++ if sub_id_len > MAX_SUBSCRIPTION_ID_LEN { ++ debug!( ++ "ignoring sub request with excessive length: ({})", ++ sub_id_len ++ ); ++ return Err(Error::SubIdMaxLengthError); ++ } ++ // check if an existing subscription exists, and replace if so ++ if self.subscriptions.contains_key(&k) { ++ self.subscriptions.remove(&k); ++ self.subscriptions.insert(k, s.clone()); ++ trace!( ++ "replaced existing subscription (cid: {}, sub: {:?})", ++ self.get_client_prefix(), ++ s.get_id() ++ ); ++ return Ok(()); ++ } ++ ++ // check if there is room for another subscription. 
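++        // (the cap is max_subs, fixed at 32 in ClientConn::new above; exceeding it ++        // returns Error::SubMaxExceededError instead of evicting an existing subscription)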
++ if self.subscriptions.len() >= self.max_subs { ++ return Err(Error::SubMaxExceededError); ++ } ++ // add subscription ++ self.subscriptions.insert(k, s); ++ trace!( ++ "registered new subscription, currently have {} active subs (cid: {})", ++ self.subscriptions.len(), ++ self.get_client_prefix(), ++ ); ++ Ok(()) ++ } ++ ++ /// Remove the subscription for this connection. ++ pub fn unsubscribe(&mut self, c: &Close) { ++ // TODO: return notice if subscription did not exist. ++ self.subscriptions.remove(&c.id); ++ trace!( ++ "removed subscription, currently have {} active subs (cid: {})", ++ self.subscriptions.len(), ++ self.get_client_prefix(), ++ ); ++ } ++ ++ pub fn generate_auth_challenge(&mut self) { ++ self.auth = Challenge(Uuid::new_v4().to_string()); ++ } ++ ++ pub fn authenticate(&mut self, event: &Event, relay_url: &str) -> Result<()> { ++ match &self.auth { ++ Challenge(_) => (), ++ AuthPubkey(_) => { ++ // already authenticated ++ return Ok(()); ++ } ++ NoAuth => { ++ // unexpected AUTH request ++ return Err(Error::AuthFailure); ++ } ++ } ++ match event.validate() { ++ Ok(_) => { ++ if event.kind != 22242 { ++ return Err(Error::AuthFailure); ++ } ++ ++ let curr_time = unix_time(); ++ let past_cutoff = curr_time - 600; // 10 minutes ++ let future_cutoff = curr_time + 600; // 10 minutes ++ if event.created_at < past_cutoff || event.created_at > future_cutoff { ++ return Err(Error::AuthFailure); ++ } ++ ++ let mut challenge: Option<&str> = None; ++ let mut relay: Option<&str> = None; ++ ++ for tag in &event.tags { ++ if tag.len() == 2 && tag.get(0) == Some(&"challenge".into()) { ++ challenge = tag.get(1).map(|x| x.as_str()); ++ } ++ if tag.len() == 2 && tag.get(0) == Some(&"relay".into()) { ++ relay = tag.get(1).map(|x| x.as_str()); ++ } ++ } ++ ++ match (challenge, &self.auth) { ++ (Some(received_challenge), Challenge(sent_challenge)) => { ++ if received_challenge != sent_challenge { ++ return Err(Error::AuthFailure); ++ } ++ } ++ (_, _) => { ++ return Err(Error::AuthFailure); ++ } ++ } ++ ++ match (relay.and_then(host_str), host_str(relay_url)) { ++ (Some(received_relay), Some(our_relay)) => { ++ if received_relay != our_relay { ++ return Err(Error::AuthFailure); ++ } ++ } ++ (_, _) => { ++ return Err(Error::AuthFailure); ++ } ++ } ++ ++ self.auth = AuthPubkey(event.pubkey.clone()); ++ trace!( ++ "authenticated pubkey {} (cid: {})", ++ event.pubkey.chars().take(8).collect::(), ++ self.get_client_prefix() ++ ); ++ Ok(()) ++ } ++ Err(_) => Err(Error::AuthFailure), ++ } ++ } ++} +diff --git a/src/db.rs b/src/db.rs +new file mode 100644 +index 0000000..b7db56a +--- /dev/null ++++ b/src/db.rs +@@ -0,0 +1,478 @@ ++//! 
Event persistence and querying ++use crate::config::Settings; ++use crate::error::{Error, Result}; ++use crate::event::Event; ++use crate::nauthz; ++use crate::notice::Notice; ++use crate::payment::PaymentMessage; ++use crate::repo::postgres::{PostgresPool, PostgresRepo}; ++use crate::repo::sqlite::SqliteRepo; ++use crate::repo::NostrRepo; ++use crate::server::NostrMetrics; ++use governor::clock::Clock; ++use governor::{Quota, RateLimiter}; ++use log::LevelFilter; ++use nostr::key::FromPkStr; ++use nostr::key::Keys; ++use r2d2; ++use sqlx::pool::PoolOptions; ++use sqlx::postgres::PgConnectOptions; ++use sqlx::ConnectOptions; ++use std::sync::Arc; ++use std::thread; ++use std::time::{Duration, Instant}; ++use tracing::{debug, info, trace, warn}; ++ ++pub type SqlitePool = r2d2::Pool; ++pub type PooledConnection = r2d2::PooledConnection; ++ ++/// Events submitted from a client, with a return channel for notices ++pub struct SubmittedEvent { ++ pub event: Event, ++ pub notice_tx: tokio::sync::mpsc::Sender, ++ pub source_ip: String, ++ pub origin: Option, ++ pub user_agent: Option, ++ pub auth_pubkey: Option>, ++} ++ ++/// Database file ++pub const DB_FILE: &str = "nostr.db"; ++ ++/// Build repo ++/// # Panics ++/// ++/// Will panic if the pool could not be created. ++pub async fn build_repo(settings: &Settings, metrics: NostrMetrics) -> Arc { ++ match settings.database.engine.as_str() { ++ "sqlite" => Arc::new(build_sqlite_pool(settings, metrics).await), ++ "postgres" => Arc::new(build_postgres_pool(settings, metrics).await), ++ _ => panic!("Unknown database engine"), ++ } ++} ++ ++async fn build_sqlite_pool(settings: &Settings, metrics: NostrMetrics) -> SqliteRepo { ++ let repo = SqliteRepo::new(settings, metrics); ++ repo.start().await.ok(); ++ repo.migrate_up().await.ok(); ++ repo ++} ++ ++async fn build_postgres_pool(settings: &Settings, metrics: NostrMetrics) -> PostgresRepo { ++ let mut options: PgConnectOptions = settings.database.connection.as_str().parse().unwrap(); ++ options.log_statements(LevelFilter::Debug); ++ options.log_slow_statements(LevelFilter::Warn, Duration::from_secs(60)); ++ ++ let pool: PostgresPool = PoolOptions::new() ++ .max_connections(settings.database.max_conn) ++ .min_connections(settings.database.min_conn) ++ .idle_timeout(Duration::from_secs(60)) ++ .connect_with(options) ++ .await ++ .unwrap(); ++ ++ let write_pool: PostgresPool = match &settings.database.connection_write { ++ Some(cfg_write) => { ++ let mut options_write: PgConnectOptions = cfg_write.as_str().parse().unwrap(); ++ options_write.log_statements(LevelFilter::Debug); ++ options_write.log_slow_statements(LevelFilter::Warn, Duration::from_secs(60)); ++ ++ PoolOptions::new() ++ .max_connections(settings.database.max_conn) ++ .min_connections(settings.database.min_conn) ++ .idle_timeout(Duration::from_secs(60)) ++ .connect_with(options_write) ++ .await ++ .unwrap() ++ } ++ None => pool.clone(), ++ }; ++ ++ let repo = PostgresRepo::new(pool, write_pool, metrics); ++ ++ // Panic on migration failure ++ let version = repo.migrate_up().await.unwrap(); ++ info!("Postgres migration completed, at v{}", version); ++ // startup scheduled tasks ++ repo.start().await.ok(); ++ repo ++} ++ ++/// Spawn a database writer that persists events to the `SQLite` store. 
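++/// Each submitted event passes the checks below before it is written: kind blacklist and ++/// allowlist, pubkey whitelist (or pay-to-relay admission and balance), NIP-05 verification, ++/// and an optional external gRPC authorization service.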
++pub async fn db_writer( ++ repo: Arc<dyn NostrRepo>, ++ settings: Settings, ++ mut event_rx: tokio::sync::mpsc::Receiver<SubmittedEvent>, ++ bcast_tx: tokio::sync::broadcast::Sender<Event>, ++ metadata_tx: tokio::sync::broadcast::Sender<Event>, ++ payment_tx: tokio::sync::broadcast::Sender<PaymentMessage>, ++ mut shutdown: tokio::sync::broadcast::Receiver<()>, ++) -> Result<()> { ++ // are we performing NIP-05 checking? ++ let nip05_active = settings.verified_users.is_active(); ++ // are we requiring NIP-05 user verification? ++ let nip05_enabled = settings.verified_users.is_enabled(); ++ ++ let pay_to_relay_enabled = settings.pay_to_relay.enabled; ++ let cost_per_event = settings.pay_to_relay.cost_per_event; ++ debug!("Pay to relay: {}", pay_to_relay_enabled); ++ ++ //upgrade_db(&mut pool.get()?)?; ++ ++ // Make a copy of the whitelist ++ let whitelist = &settings.authorization.pubkey_whitelist.clone(); ++ ++ // get rate limit settings ++ let rps_setting = settings.limits.messages_per_sec; ++ let mut most_recent_rate_limit = Instant::now(); ++ let mut lim_opt = None; ++ let clock = governor::clock::QuantaClock::default(); ++ if let Some(rps) = rps_setting { ++ if rps > 0 { ++ info!("Enabling rate limits for event creation ({}/sec)", rps); ++ let quota = core::num::NonZeroU32::new(rps * 60).unwrap(); ++ lim_opt = Some(RateLimiter::direct(Quota::per_minute(quota))); ++ } ++ } ++ // create a client if GRPC is enabled. ++ // Check with externalized event admitter service, if one is defined. ++ let mut grpc_client = if let Some(svr) = settings.grpc.event_admission_server { ++ Some(nauthz::EventAuthzService::connect(&svr).await) ++ } else { ++ None ++ }; ++ ++ //let gprc_client = settings.grpc.event_admission_server.map(|s| { ++ // event_admitter_connect(&s); ++ // }); ++ ++ loop { ++ if shutdown.try_recv().is_ok() { ++ info!("shutting down database writer"); ++ break; ++ } ++ // call blocking read on channel ++ let next_event = event_rx.recv().await; ++ // if the channel has closed, we will never get work ++ if next_event.is_none() { ++ break; ++ } ++ // track if an event write occurred; this is used to ++ // update the rate limiter ++ let mut event_write = false; ++ let subm_event = next_event.unwrap(); ++ let event = subm_event.event; ++ let notice_tx = subm_event.notice_tx; ++ ++ // Check that event kind isn't blacklisted ++ let kinds_blacklist = &settings.limits.event_kind_blacklist.clone(); ++ if let Some(event_kind_blacklist) = kinds_blacklist { ++ if event_kind_blacklist.contains(&event.kind) { ++ debug!( ++ "rejecting event: {}, blacklisted kind: {}", ++ &event.get_event_id_prefix(), ++ &event.kind ++ ); ++ notice_tx ++ .try_send(Notice::blocked(event.id, "event kind is blocked by relay")) ++ .ok(); ++ continue; ++ } ++ } ++ ++ // Check that the event kind is in the allowlist, if one is defined ++ let kinds_allowlist = &settings.limits.event_kind_allowlist.clone(); ++ if let Some(event_kind_allowlist) = kinds_allowlist { ++ if !event_kind_allowlist.contains(&event.kind) { ++ debug!( ++ "rejecting event: {}, kind not in allowlist: {}", ++ &event.get_event_id_prefix(), ++ &event.kind ++ ); ++ notice_tx ++ .try_send(Notice::blocked(event.id, "event kind is blocked by relay")) ++ .ok(); ++ continue; ++ } ++ } ++ ++ // Set to None until the balance is fetched from the db ++ // Will stay None if the user is whitelisted and does not have to pay to post ++ // When pay to relay is enabled the whitelist is not a list of who can post ++ // It is a list of who can post for free ++ let mut user_balance: Option<u64> = None; ++ if !pay_to_relay_enabled { ++ // check if this event is authorized. 
++            if let Some(allowed_addrs) = whitelist {
++                // TODO: incorporate delegated pubkeys
++                // if the event address is not in allowed_addrs.
++                if !allowed_addrs.contains(&event.pubkey) {
++                    debug!(
++                        "rejecting event: {}, unauthorized author",
++                        event.get_event_id_prefix()
++                    );
++                    notice_tx
++                        .try_send(Notice::blocked(
++                            event.id,
++                            "pubkey is not allowed to publish to this relay",
++                        ))
++                        .ok();
++                    continue;
++                }
++            }
++        } else {
++            // If the user is on the whitelist there is no need to check if the user is admitted or has a balance to post
++            if whitelist.is_none()
++                || (whitelist.is_some() && !whitelist.as_ref().unwrap().contains(&event.pubkey))
++            {
++                let key = Keys::from_pk_str(&event.pubkey).unwrap();
++                match repo.get_account_balance(&key).await {
++                    Ok((user_admitted, balance)) => {
++                        // Checks to make sure user is admitted
++                        if !user_admitted {
++                            debug!("user: {}, is not admitted", &event.pubkey);
++
++                            // If the user is in the DB but not admitted,
++                            // send a message to the payment thread to check if an outstanding invoice has been paid
++                            payment_tx
++                                .send(PaymentMessage::CheckAccount(event.pubkey))
++                                .ok();
++                            notice_tx
++                                .try_send(Notice::blocked(event.id, "User is not admitted"))
++                                .ok();
++                            continue;
++                        }
++
++                        // Checks that user has enough balance to post
++                        // TODO: this should send an invoice to user to top up
++                        if balance < cost_per_event {
++                            debug!("user: {}, does not have a balance", &event.pubkey,);
++                            notice_tx
++                                .try_send(Notice::blocked(event.id, "Insufficient balance"))
++                                .ok();
++                            continue;
++                        }
++                        user_balance = Some(balance);
++                        debug!("User balance: {:?}", user_balance);
++                    }
++                    Err(
++                        Error::SqlError(rusqlite::Error::QueryReturnedNoRows)
++                        | Error::SqlxError(sqlx::Error::RowNotFound),
++                    ) => {
++                        // User does not exist
++                        info!("Unregistered user");
++                        if settings.pay_to_relay.sign_ups {
++                            payment_tx
++                                .send(PaymentMessage::NewAccount(event.pubkey))
++                                .ok();
++                        }
++                        let msg = "Pubkey not registered";
++                        notice_tx.try_send(Notice::error(event.id, msg)).ok();
++                        continue;
++                    }
++                    Err(err) => {
++                        warn!("Error checking admission status: {:?}", err);
++                        let msg = "relay experienced an error checking your admission status";
++                        notice_tx.try_send(Notice::error(event.id, msg)).ok();
++                        // Other error
++                        continue;
++                    }
++                }
++            }
++        }
++
++        // send any metadata events to the NIP-05 verifier
++        if nip05_active && event.is_kind_metadata() {
++            // we are sending this prior to even deciding if we
++            // persist it. this allows the nip05 module to
++            // inspect it, update if necessary, or persist a new
++            // event and broadcast it itself.
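++            // NOTE (editor addition): illustrative sketch. A kind-0 metadata
++            // event carries a JSON string in `content`; the NIP-05 verifier looks
++            // for a "nip05" key, e.g. (hypothetical values):
++            //
++            //   {"name":"alice","about":"...","nip05":"alice@example.com"}
++            //
++            // `Event::get_nip05_addr` in src/event.rs parses exactly this shape.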
++            metadata_tx.send(event.clone()).ok();
++        }
++
++        // get a validation result for use in verification and GRPC
++        let validation = if nip05_active {
++            Some(repo.get_latest_user_verification(&event.pubkey).await)
++        } else {
++            None
++        };
++
++        // check for NIP-05 verification
++        if nip05_enabled && validation.is_some() {
++            match validation.as_ref().unwrap() {
++                Ok(uv) => {
++                    if uv.is_valid(&settings.verified_users) {
++                        info!(
++                            "new event from verified author ({:?},{:?})",
++                            uv.name.to_string(),
++                            event.get_author_prefix()
++                        );
++                    } else {
++                        info!(
++                            "rejecting event, author ({:?} / {:?}) verification invalid (expired/wrong domain)",
++                            uv.name.to_string(),
++                            event.get_author_prefix()
++                        );
++                        notice_tx
++                            .try_send(Notice::blocked(
++                                event.id,
++                                "NIP-05 verification is no longer valid (expired/wrong domain)",
++                            ))
++                            .ok();
++                        continue;
++                    }
++                }
++                Err(
++                    Error::SqlError(rusqlite::Error::QueryReturnedNoRows)
++                    | Error::SqlxError(sqlx::Error::RowNotFound),
++                ) => {
++                    debug!(
++                        "no verification records found for pubkey: {:?}",
++                        event.get_author_prefix()
++                    );
++                    notice_tx
++                        .try_send(Notice::blocked(
++                            event.id,
++                            "NIP-05 verification needed to publish events",
++                        ))
++                        .ok();
++                    continue;
++                }
++                Err(e) => {
++                    warn!("checking nip05 verification status failed: {:?}", e);
++                    continue;
++                }
++            }
++        }
++
++        // nip05 address
++        let nip05_address: Option<crate::nip05::Nip05Name> =
++            validation.and_then(|x| x.ok().map(|y| y.name));
++
++        // GRPC check
++        if let Some(ref mut c) = grpc_client {
++            trace!("checking if grpc permits");
++            let grpc_start = Instant::now();
++            let decision_res = c
++                .admit_event(
++                    &event,
++                    &subm_event.source_ip,
++                    subm_event.origin,
++                    subm_event.user_agent,
++                    nip05_address,
++                    subm_event.auth_pubkey,
++                )
++                .await;
++            match decision_res {
++                Ok(decision) => {
++                    if !decision.permitted() {
++                        // GRPC returned a decision to reject this event
++                        info!(
++                            "GRPC rejected event: {:?} (kind: {}) from: {:?} in: {:?} (IP: {:?})",
++                            event.get_event_id_prefix(),
++                            event.kind,
++                            event.get_author_prefix(),
++                            grpc_start.elapsed(),
++                            subm_event.source_ip
++                        );
++                        notice_tx
++                            .try_send(Notice::blocked(
++                                event.id,
++                                &decision.message().unwrap_or_default(),
++                            ))
++                            .ok();
++                        continue;
++                    }
++                }
++                Err(e) => {
++                    warn!("GRPC server error: {:?}", e);
++                }
++            }
++        }
++
++        // TODO: cache recent list of authors to remove a DB call.
++        let start = Instant::now();
++        if event.is_ephemeral() {
++            bcast_tx.send(event.clone()).ok();
++            debug!(
++                "published ephemeral event: {:?} from: {:?} in: {:?}",
++                event.get_event_id_prefix(),
++                event.get_author_prefix(),
++                start.elapsed()
++            );
++            event_write = true;
++        } else {
++            match repo.write_event(&event).await {
++                Ok(updated) => {
++                    if updated == 0 {
++                        trace!("ignoring duplicate or deleted event");
++                        notice_tx.try_send(Notice::duplicate(event.id)).ok();
++                    } else {
++                        info!(
++                            "persisted event: {:?} (kind: {}) from: {:?} in: {:?} (IP: {:?})",
++                            event.get_event_id_prefix(),
++                            event.kind,
++                            event.get_author_prefix(),
++                            start.elapsed(),
++                            subm_event.source_ip,
++                        );
++                        event_write = true;
++                        // send this out to all clients
++                        bcast_tx.send(event.clone()).ok();
++                        notice_tx.try_send(Notice::saved(event.id)).ok();
++                    }
++                }
++                Err(err) => {
++                    warn!("event insert failed: {:?}", err);
++                    let msg = "relay experienced an error trying to publish the latest event";
++                    notice_tx.try_send(Notice::error(event.id, msg)).ok();
++                }
++            }
++        }
++
++        // use rate limit, if defined, and if an event was actually written.
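++        // NOTE (editor addition): hedged summary of the limiter built above.
++        // The quota was constructed as Quota::per_minute(rps * 60), i.e. an
++        // average of `messages_per_sec` events per second with roughly a
++        // minute's worth of burst capacity; rps = 2 yields 120 cells/minute.
++        // `lim.check()` below spends one cell per write and returns Err with
++        // a wait time once the quota is exhausted.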
++        if event_write {
++            // If pay to relay is disabled or the cost per event is 0,
++            // there is no need to update the user balance
++            if pay_to_relay_enabled && cost_per_event > 0 {
++                // If the user balance is Some, the user was not on the whitelist,
++                // so their balance should be reduced by the cost per event
++                if let Some(_balance) = user_balance {
++                    let pubkey = Keys::from_pk_str(&event.pubkey)?;
++                    repo.update_account_balance(&pubkey, false, cost_per_event)
++                        .await?;
++                }
++            }
++            if let Some(ref lim) = lim_opt {
++                if let Err(n) = lim.check() {
++                    let wait_for = n.wait_time_from(clock.now());
++                    // check if we have recently logged rate
++                    // limits, but print out a message only once
++                    // every 10 seconds.
++                    if most_recent_rate_limit.elapsed().as_secs() > 10 {
++                        warn!(
++                            "rate limit reached for event creation (sleep for {:?}) (suppressing future messages for 10 seconds)",
++                            wait_for
++                        );
++                        // reset last rate limit message
++                        most_recent_rate_limit = Instant::now();
++                    }
++                    // block event writes, allowing them to queue up
++                    thread::sleep(wait_for);
++                    continue;
++                }
++            }
++        }
++    }
++    info!("database connection closed");
++    Ok(())
++}
++
++/// Serialized event associated with a specific subscription request.
++#[derive(PartialEq, Eq, Debug, Clone)]
++pub struct QueryResult {
++    /// Subscription identifier
++    pub sub_id: String,
++    /// Serialized event
++    pub event: String,
++}
+diff --git a/src/delegation.rs b/src/delegation.rs
+new file mode 100644
+index 0000000..cf7bd4e
+--- /dev/null
++++ b/src/delegation.rs
+@@ -0,0 +1,406 @@
++//! Delegation condition parsing and validation
++use crate::error::Error;
++use crate::error::Result;
++use crate::event::Event;
++use bitcoin_hashes::{sha256, Hash};
++use lazy_static::lazy_static;
++use regex::Regex;
++use secp256k1::{schnorr, Secp256k1, VerifyOnly, XOnlyPublicKey};
++use serde::{Deserialize, Serialize};
++use std::str::FromStr;
++use tracing::{debug, info};
++
++// This handles everything related to delegation, in particular the
++// condition/rune parsing and logic.
++
++// Conditions are poorly specified, so we will implement the minimum
++// necessary for now.
++
++// fields MUST be either "kind" or "created_at".
++// operators supported are ">", "<", "=", "!".
++// no operations on 'content' are supported.
++
++// this allows constraints for:
++// valid date ranges (valid from X->Y dates).
++// specific kinds (publish kind=1,5)
++// kind ranges (publish ephemeral events, kind>19999&kind<30001)
++
++// for more complex scenarios (allow delegatee to publish ephemeral
++// AND replacement events), it may be necessary to generate and use
++// different condition strings, since we do not support grouping or
++// "OR" logic.
++
++lazy_static! {
++    /// Secp256k1 verification instance.
++    pub static ref SECP: Secp256k1<VerifyOnly> = Secp256k1::verification_only();
++}
++
++#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
++pub enum Field {
++    Kind,
++    CreatedAt,
++}
++
++impl FromStr for Field {
++    type Err = Error;
++    fn from_str(value: &str) -> Result<Self, Self::Err> {
++        if value == "kind" {
++            Ok(Field::Kind)
++        } else if value == "created_at" {
++            Ok(Field::CreatedAt)
++        } else {
++            Err(Error::DelegationParseError)
++        }
++    }
++}
++
++#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
++pub enum Operator {
++    LessThan,
++    GreaterThan,
++    Equals,
++    NotEquals,
++}
++impl FromStr for Operator {
++    type Err = Error;
++    fn from_str(value: &str) -> Result<Self, Self::Err> {
++        if value == "<" {
++            Ok(Operator::LessThan)
++        } else if value == ">" {
++            Ok(Operator::GreaterThan)
++        } else if value == "=" {
++            Ok(Operator::Equals)
++        } else if value == "!" {
++            Ok(Operator::NotEquals)
++        } else {
++            Err(Error::DelegationParseError)
++        }
++    }
++}
++
++#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
++pub struct ConditionQuery {
++    pub conditions: Vec<Condition>,
++}
++
++impl ConditionQuery {
++    #[must_use]
++    pub fn allows_event(&self, event: &Event) -> bool {
++        // check each condition, to ensure that the event complies
++        // with the restriction.
++        for c in &self.conditions {
++            if !c.allows_event(event) {
++                // any failing condition invalidates the delegation
++                // on this event
++                return false;
++            }
++        }
++        // delegation was permitted unconditionally, or all conditions
++        // were true
++        true
++    }
++}
++
++// Verify that the delegator approved the delegation; return a ConditionQuery if so.
++#[must_use]
++pub fn validate_delegation(
++    delegator: &str,
++    delegatee: &str,
++    cond_query: &str,
++    sigstr: &str,
++) -> Option<ConditionQuery> {
++    // form the token
++    let tok = format!("nostr:delegation:{delegatee}:{cond_query}");
++    // form SHA256 hash
++    let digest: sha256::Hash = sha256::Hash::hash(tok.as_bytes());
++    let sig = schnorr::Signature::from_str(sigstr).unwrap();
++    if let Ok(msg) = secp256k1::Message::from_slice(digest.as_ref()) {
++        if let Ok(pubkey) = XOnlyPublicKey::from_str(delegator) {
++            let verify = SECP.verify_schnorr(&sig, &msg, &pubkey);
++            if verify.is_ok() {
++                // return the parsed condition query
++                cond_query.parse::<ConditionQuery>().ok()
++            } else {
++                debug!("client sent a delegation signature that did not validate");
++                None
++            }
++        } else {
++            debug!("client sent malformed delegation pubkey");
++            None
++        }
++    } else {
++        info!("error converting delegation digest to secp256k1 message");
++        None
++    }
++}
++
++/// Parsed delegation condition
++/// see <https://github.com/nostr-protocol/nips/blob/master/26.md>
++/// An example complex condition would be: `kind=1,2,3&created_at<1665265999`
++#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
++pub struct Condition {
++    pub field: Field,
++    pub operator: Operator,
++    pub values: Vec<u64>,
++}
++
++impl Condition {
++    /// Check if this condition allows the given event to be delegated
++    #[must_use]
++    pub fn allows_event(&self, event: &Event) -> bool {
++        // determine what the right-hand side of the operator is
++        let resolved_field = match &self.field {
++            Field::Kind => event.kind,
++            Field::CreatedAt => event.created_at,
++        };
++        match &self.operator {
++            Operator::LessThan => {
++                // the less-than operator is only valid for single values.
++                if self.values.len() == 1 {
++                    if let Some(v) = self.values.first() {
++                        return resolved_field < *v;
++                    }
++                }
++            }
++            Operator::GreaterThan => {
++                // the greater-than operator is only valid for single values.
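++                // NOTE (editor addition): worked example. For the module-header
++                // query "kind>19999&kind<30001", an event with kind = 20001
++                // satisfies both conditions (20001 > 19999 and 20001 < 30001),
++                // while kind = 30001 fails the second and the delegation is refused.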
++ if self.values.len() == 1 { ++ if let Some(v) = self.values.first() { ++ return resolved_field > *v; ++ } ++ } ++ } ++ Operator::Equals => { ++ // equals is interpreted as "must be equal to at least one provided value" ++ return self.values.iter().any(|&x| resolved_field == x); ++ } ++ Operator::NotEquals => { ++ // not-equals is interpreted as "must not be equal to any provided value" ++ // this is the one case where an empty list of values could be allowed; even though it is a pointless restriction. ++ return self.values.iter().all(|&x| resolved_field != x); ++ } ++ } ++ false ++ } ++} ++ ++fn str_to_condition(cs: &str) -> Option { ++ // a condition is a string (alphanum+underscore), an operator (<>=!), and values (num+comma) ++ lazy_static! { ++ static ref RE: Regex = Regex::new("([[:word:]]+)([<>=!]+)([,[[:digit:]]]*)").unwrap(); ++ } ++ // match against the regex ++ let caps = RE.captures(cs)?; ++ let field = caps.get(1)?.as_str().parse::().ok()?; ++ let operator = caps.get(2)?.as_str().parse::().ok()?; ++ // values are just comma separated numbers, but all must be parsed ++ let rawvals = caps.get(3)?.as_str(); ++ let values = rawvals ++ .split_terminator(',') ++ .map(|n| n.parse::().ok()) ++ .collect::>>()?; ++ // convert field string into Field ++ Some(Condition { ++ field, ++ operator, ++ values, ++ }) ++} ++ ++/// Parse a condition query from a string slice ++impl FromStr for ConditionQuery { ++ type Err = Error; ++ fn from_str(value: &str) -> Result { ++ // split the string with '&' ++ let mut conditions = vec![]; ++ let condstrs = value.split_terminator('&'); ++ // parse each individual condition ++ for c in condstrs { ++ conditions.push(str_to_condition(c).ok_or(Error::DelegationParseError)?); ++ } ++ Ok(ConditionQuery { conditions }) ++ } ++} ++ ++#[cfg(test)] ++mod tests { ++ use super::*; ++ ++ // parse condition strings ++ #[test] ++ fn parse_empty() -> Result<()> { ++ // given an empty condition query, produce an empty vector ++ let empty_cq = ConditionQuery { conditions: vec![] }; ++ let parsed = "".parse::()?; ++ assert_eq!(parsed, empty_cq); ++ Ok(()) ++ } ++ ++ // parse field 'kind' ++ #[test] ++ fn test_kind_field_parse() -> Result<()> { ++ let field = "kind".parse::()?; ++ assert_eq!(field, Field::Kind); ++ Ok(()) ++ } ++ // parse field 'created_at' ++ #[test] ++ fn test_created_at_field_parse() -> Result<()> { ++ let field = "created_at".parse::()?; ++ assert_eq!(field, Field::CreatedAt); ++ Ok(()) ++ } ++ // parse unknown field ++ #[test] ++ fn unknown_field_parse() { ++ let field = "unk".parse::(); ++ assert!(field.is_err()); ++ } ++ ++ // parse a full conditional query with an empty array ++ #[test] ++ fn parse_kind_equals_empty() -> Result<()> { ++ // given an empty condition query, produce an empty vector ++ let kind_cq = ConditionQuery { ++ conditions: vec![Condition { ++ field: Field::Kind, ++ operator: Operator::Equals, ++ values: vec![], ++ }], ++ }; ++ let parsed = "kind=".parse::()?; ++ assert_eq!(parsed, kind_cq); ++ Ok(()) ++ } ++ // parse a full conditional query with a single value ++ #[test] ++ fn parse_kind_equals_singleval() -> Result<()> { ++ // given an empty condition query, produce an empty vector ++ let kind_cq = ConditionQuery { ++ conditions: vec![Condition { ++ field: Field::Kind, ++ operator: Operator::Equals, ++ values: vec![1], ++ }], ++ }; ++ let parsed = "kind=1".parse::()?; ++ assert_eq!(parsed, kind_cq); ++ Ok(()) ++ } ++ // parse a full conditional query with multiple values ++ #[test] ++ fn parse_kind_equals_multival() -> 
Result<()> { ++ // given an empty condition query, produce an empty vector ++ let kind_cq = ConditionQuery { ++ conditions: vec![Condition { ++ field: Field::Kind, ++ operator: Operator::Equals, ++ values: vec![1, 2, 4], ++ }], ++ }; ++ let parsed = "kind=1,2,4".parse::()?; ++ assert_eq!(parsed, kind_cq); ++ Ok(()) ++ } ++ // parse multiple conditions ++ #[test] ++ fn parse_multi_conditions() -> Result<()> { ++ // given an empty condition query, produce an empty vector ++ let cq = ConditionQuery { ++ conditions: vec![ ++ Condition { ++ field: Field::Kind, ++ operator: Operator::GreaterThan, ++ values: vec![10000], ++ }, ++ Condition { ++ field: Field::Kind, ++ operator: Operator::LessThan, ++ values: vec![20000], ++ }, ++ Condition { ++ field: Field::Kind, ++ operator: Operator::NotEquals, ++ values: vec![10001], ++ }, ++ Condition { ++ field: Field::CreatedAt, ++ operator: Operator::LessThan, ++ values: vec![1_665_867_123], ++ }, ++ ], ++ }; ++ let parsed = ++ "kind>10000&kind<20000&kind!10001&created_at<1665867123".parse::()?; ++ assert_eq!(parsed, cq); ++ Ok(()) ++ } ++ // Check for condition logic on event w/ empty values ++ #[test] ++ fn condition_with_empty_values() { ++ let mut c = Condition { ++ field: Field::Kind, ++ operator: Operator::GreaterThan, ++ values: vec![], ++ }; ++ let e = Event::simple_event(); ++ assert!(!c.allows_event(&e)); ++ c.operator = Operator::LessThan; ++ assert!(!c.allows_event(&e)); ++ c.operator = Operator::Equals; ++ assert!(!c.allows_event(&e)); ++ // Not Equals applied to an empty list *is* allowed ++ // (pointless, but logically valid). ++ c.operator = Operator::NotEquals; ++ assert!(c.allows_event(&e)); ++ } ++ ++ // Check for condition logic on event w/ single value ++ #[test] ++ fn condition_kind_gt_event_single() { ++ let c = Condition { ++ field: Field::Kind, ++ operator: Operator::GreaterThan, ++ values: vec![10], ++ }; ++ let mut e = Event::simple_event(); ++ // kind is not greater than 10, not allowed ++ e.kind = 1; ++ assert!(!c.allows_event(&e)); ++ // kind is greater than 10, allowed ++ e.kind = 100; ++ assert!(c.allows_event(&e)); ++ // kind is 10, not allowed ++ e.kind = 10; ++ assert!(!c.allows_event(&e)); ++ } ++ // Check for condition logic on event w/ multi values ++ #[test] ++ fn condition_with_multi_values() { ++ let mut c = Condition { ++ field: Field::Kind, ++ operator: Operator::Equals, ++ values: vec![0, 10, 20], ++ }; ++ let mut e = Event::simple_event(); ++ // Allow if event kind is in list for Equals ++ e.kind = 10; ++ assert!(c.allows_event(&e)); ++ // Deny if event kind is not in list for Equals ++ e.kind = 11; ++ assert!(!c.allows_event(&e)); ++ // Deny if event kind is in list for NotEquals ++ e.kind = 10; ++ c.operator = Operator::NotEquals; ++ assert!(!c.allows_event(&e)); ++ // Allow if event kind is not in list for NotEquals ++ e.kind = 99; ++ c.operator = Operator::NotEquals; ++ assert!(c.allows_event(&e)); ++ // Always deny if GreaterThan/LessThan for a list ++ c.operator = Operator::LessThan; ++ assert!(!c.allows_event(&e)); ++ c.operator = Operator::GreaterThan; ++ assert!(!c.allows_event(&e)); ++ } ++} +diff --git a/src/error.rs b/src/error.rs +new file mode 100644 +index 0000000..ecfa97f +--- /dev/null ++++ b/src/error.rs +@@ -0,0 +1,192 @@ ++//! 
Error handling ++use std::result; ++use thiserror::Error; ++use tungstenite::error::Error as WsError; ++ ++/// Simple `Result` type for errors in this module ++pub type Result = result::Result; ++ ++/// Custom error type for Nostr ++#[derive(Error, Debug)] ++pub enum Error { ++ #[error("Protocol parse error")] ++ ProtoParseError, ++ #[error("Connection error")] ++ ConnError, ++ #[error("Client write error")] ++ ConnWriteError, ++ #[error("EVENT parse failed")] ++ EventParseFailed, ++ #[error("CLOSE message parse failed")] ++ CloseParseFailed, ++ #[error("Event invalid signature")] ++ EventInvalidSignature, ++ #[error("Event invalid id")] ++ EventInvalidId, ++ #[error("Event malformed pubkey")] ++ EventMalformedPubkey, ++ #[error("Event could not canonicalize")] ++ EventCouldNotCanonicalize, ++ #[error("Event too large")] ++ EventMaxLengthError(usize), ++ #[error("Subscription identifier max length exceeded")] ++ SubIdMaxLengthError, ++ #[error("Maximum concurrent subscription count reached")] ++ SubMaxExceededError, ++ // this should be used if the JSON is invalid ++ #[error("JSON parsing failed")] ++ JsonParseFailed(serde_json::Error), ++ #[error("WebSocket proto error")] ++ WebsocketError(WsError), ++ #[error("Command unknown")] ++ CommandUnknownError, ++ #[error("SQL error")] ++ SqlError(rusqlite::Error), ++ #[error("Config error")] ++ ConfigError(config::ConfigError), ++ #[error("Data directory does not exist")] ++ DatabaseDirError, ++ #[error("Database Connection Pool Error")] ++ DatabasePoolError(r2d2::Error), ++ #[error("SQL error")] ++ SqlxError(sqlx::Error), ++ #[error("Database Connection Pool Error")] ++ SqlxDatabasePoolError(sqlx::Error), ++ #[error("Custom Error : {0}")] ++ CustomError(String), ++ #[error("Task join error")] ++ JoinError, ++ #[error("Hyper Client error")] ++ HyperError(hyper::Error), ++ #[error("Hex encoding error")] ++ HexError(hex::FromHexError), ++ #[error("Delegation parse error")] ++ DelegationParseError, ++ #[error("Channel closed error")] ++ ChannelClosed, ++ #[error("Authz error")] ++ AuthzError, ++ #[error("Tonic GRPC error")] ++ TonicError(tonic::Status), ++ #[error("Invalid AUTH message")] ++ AuthFailure, ++ #[error("I/O Error")] ++ IoError(std::io::Error), ++ #[error("Event builder error")] ++ EventError(nostr::event::builder::Error), ++ #[error("Nostr key error")] ++ NostrKeyError(nostr::key::Error), ++ #[error("Payment hash mismatch")] ++ PaymentHash, ++ #[error("Error parsing url")] ++ URLParseError(url::ParseError), ++ #[error("HTTP error")] ++ HTTPError(http::Error), ++ #[error("Unknown/Undocumented")] ++ UnknownError, ++} ++ ++//impl From> for Error { ++// fn from(e: Box) -> Self { ++// Error::CustomError("error".to_owned()) ++// } ++//} ++ ++impl From for Error { ++ fn from(h: hex::FromHexError) -> Self { ++ Error::HexError(h) ++ } ++} ++ ++impl From for Error { ++ fn from(h: hyper::Error) -> Self { ++ Error::HyperError(h) ++ } ++} ++ ++impl From for Error { ++ fn from(d: r2d2::Error) -> Self { ++ Error::DatabasePoolError(d) ++ } ++} ++ ++impl From for Error { ++ /// Wrap SQL error ++ fn from(_j: tokio::task::JoinError) -> Self { ++ Error::JoinError ++ } ++} ++ ++impl From for Error { ++ /// Wrap SQL error ++ fn from(r: rusqlite::Error) -> Self { ++ Error::SqlError(r) ++ } ++} ++ ++impl From for Error { ++ fn from(d: sqlx::Error) -> Self { ++ Error::SqlxDatabasePoolError(d) ++ } ++} ++ ++impl From for Error { ++ /// Wrap JSON error ++ fn from(r: serde_json::Error) -> Self { ++ Error::JsonParseFailed(r) ++ } ++} ++ ++impl From for Error { ++ 
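++    // NOTE (editor addition): hedged. These From impls are what let the rest
++    // of the crate use the `?` operator on foreign error types; e.g. in
++    // nauthz.rs, `hex::decode(&event.id)?` compiles because of the
++    // hex::FromHexError conversion above.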
/// Wrap Websocket error ++ fn from(r: WsError) -> Self { ++ Error::WebsocketError(r) ++ } ++} ++ ++impl From for Error { ++ /// Wrap Config error ++ fn from(r: config::ConfigError) -> Self { ++ Error::ConfigError(r) ++ } ++} ++ ++impl From for Error { ++ /// Wrap Config error ++ fn from(r: tonic::Status) -> Self { ++ Error::TonicError(r) ++ } ++} ++ ++impl From for Error { ++ fn from(r: std::io::Error) -> Self { ++ Error::IoError(r) ++ } ++} ++impl From for Error { ++ /// Wrap event builder error ++ fn from(r: nostr::event::builder::Error) -> Self { ++ Error::EventError(r) ++ } ++} ++ ++impl From for Error { ++ /// Wrap nostr key error ++ fn from(r: nostr::key::Error) -> Self { ++ Error::NostrKeyError(r) ++ } ++} ++ ++impl From for Error { ++ /// Wrap nostr key error ++ fn from(r: url::ParseError) -> Self { ++ Error::URLParseError(r) ++ } ++} ++ ++impl From for Error { ++ /// Wrap nostr key error ++ fn from(r: http::Error) -> Self { ++ Error::HTTPError(r) ++ } ++} +diff --git a/src/event.rs b/src/event.rs +new file mode 100644 +index 0000000..5fb3542 +--- /dev/null ++++ b/src/event.rs +@@ -0,0 +1,794 @@ ++//! Event parsing and validation ++use crate::delegation::validate_delegation; ++use crate::error::Error::{ ++ CommandUnknownError, EventCouldNotCanonicalize, EventInvalidId, EventInvalidSignature, ++ EventMalformedPubkey, ++}; ++use crate::error::Result; ++use crate::event::EventWrapper::WrappedAuth; ++use crate::event::EventWrapper::WrappedEvent; ++use crate::nip05; ++use crate::utils::unix_time; ++use bitcoin_hashes::{sha256, Hash}; ++use lazy_static::lazy_static; ++use secp256k1::{schnorr, Secp256k1, VerifyOnly, XOnlyPublicKey}; ++use serde::{Deserialize, Deserializer, Serialize}; ++use serde_json::value::Value; ++use serde_json::Number; ++use std::collections::HashMap; ++use std::collections::HashSet; ++use std::str::FromStr; ++use tracing::{debug, info}; ++ ++lazy_static! { ++ /// Secp256k1 verification instance. ++ pub static ref SECP: Secp256k1 = Secp256k1::verification_only(); ++} ++ ++/// Event command in network format. ++#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] ++pub struct EventCmd { ++ cmd: String, // expecting static "EVENT" ++ event: Event, ++} ++ ++impl EventCmd { ++ #[must_use] ++ pub fn event_id(&self) -> &str { ++ &self.event.id ++ } ++} ++ ++/// Parsed nostr event. ++#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] ++pub struct Event { ++ pub id: String, ++ pub pubkey: String, ++ #[serde(skip)] ++ pub delegated_by: Option, ++ pub created_at: u64, ++ pub kind: u64, ++ #[serde(deserialize_with = "tag_from_string")] ++ // NOTE: array-of-arrays may need to be more general than a string container ++ pub tags: Vec>, ++ pub content: String, ++ pub sig: String, ++ // Optimization for tag search, built on demand. ++ #[serde(skip)] ++ pub tagidx: Option>>, ++} ++ ++/// Simple tag type for array of array of strings. ++type Tag = Vec>; ++ ++/// Deserializer that ensures we always have a [`Tag`]. ++fn tag_from_string<'de, D>(deserializer: D) -> Result ++where ++ D: Deserializer<'de>, ++{ ++ let opt = Option::deserialize(deserializer)?; ++ Ok(opt.unwrap_or_default()) ++} ++ ++/// Attempt to form a single-char tag name. ++#[must_use] ++pub fn single_char_tagname(tagname: &str) -> Option { ++ // We return the tag character if and only if the tagname consists ++ // of a single char. 
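++    // NOTE (editor addition): worked example. single_char_tagname("e") is
++    // Some('e'); single_char_tagname("expiration") and single_char_tagname("")
++    // are both None, so only one-letter tag names ("e", "p", ...) get indexed.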
++ let mut tagnamechars = tagname.chars(); ++ let firstchar = tagnamechars.next(); ++ match firstchar { ++ Some(_) => { ++ // check second char ++ if tagnamechars.next().is_none() { ++ firstchar ++ } else { ++ None ++ } ++ } ++ None => None, ++ } ++} ++ ++pub enum EventWrapper { ++ WrappedEvent(Event), ++ WrappedAuth(Event), ++} ++ ++/// Convert network event to parsed/validated event. ++impl From for Result { ++ fn from(ec: EventCmd) -> Result { ++ // ensure command is correct ++ if ec.cmd == "EVENT" { ++ ec.event.validate().map(|_| { ++ let mut e = ec.event; ++ e.build_index(); ++ e.update_delegation(); ++ WrappedEvent(e) ++ }) ++ } else if ec.cmd == "AUTH" { ++ // we don't want to validate the event here, because NIP-42 can be disabled ++ // it will be validated later during the authentication process ++ Ok(WrappedAuth(ec.event)) ++ } else { ++ Err(CommandUnknownError) ++ } ++ } ++} ++ ++impl Event { ++ #[cfg(test)] ++ #[must_use] ++ pub fn simple_event() -> Event { ++ Event { ++ id: "0".to_owned(), ++ pubkey: "0".to_owned(), ++ delegated_by: None, ++ created_at: 0, ++ kind: 0, ++ tags: vec![], ++ content: "".to_owned(), ++ sig: "0".to_owned(), ++ tagidx: None, ++ } ++ } ++ ++ #[must_use] ++ pub fn is_kind_metadata(&self) -> bool { ++ self.kind == 0 ++ } ++ ++ /// Should this event be persisted? ++ #[must_use] ++ pub fn is_ephemeral(&self) -> bool { ++ self.kind >= 20000 && self.kind < 30000 ++ } ++ ++ /// Is this event currently expired? ++ pub fn is_expired(&self) -> bool { ++ if let Some(exp) = self.expiration() { ++ exp <= unix_time() ++ } else { ++ false ++ } ++ } ++ ++ /// Determine the time at which this event should expire ++ pub fn expiration(&self) -> Option { ++ let default = "".to_string(); ++ let dvals: Vec<&String> = self ++ .tags ++ .iter() ++ .filter(|x| !x.is_empty()) ++ .filter(|x| x.get(0).unwrap() == "expiration") ++ .map(|x| x.get(1).unwrap_or(&default)) ++ .take(1) ++ .collect(); ++ let val_first = dvals.get(0); ++ val_first.and_then(|t| t.parse::().ok()) ++ } ++ ++ /// Should this event be replaced with newer timestamps from same author? ++ #[must_use] ++ pub fn is_replaceable(&self) -> bool { ++ self.kind == 0 ++ || self.kind == 3 ++ || self.kind == 41 ++ || (self.kind >= 10000 && self.kind < 20000) ++ } ++ ++ /// Should this event be replaced with newer timestamps from same author, for distinct `d` tag values? ++ #[must_use] ++ pub fn is_param_replaceable(&self) -> bool { ++ self.kind >= 30000 && self.kind < 40000 ++ } ++ ++ /// Should this event be replaced with newer timestamps from same author, for distinct `d` tag values? 
++    #[must_use]
++    pub fn distinct_param(&self) -> Option<String> {
++        if self.is_param_replaceable() {
++            let default = "".to_string();
++            let dvals: Vec<&String> = self
++                .tags
++                .iter()
++                .filter(|x| !x.is_empty())
++                .filter(|x| x.get(0).unwrap() == "d")
++                .map(|x| x.get(1).unwrap_or(&default))
++                .take(1)
++                .collect();
++            let dval_first = dvals.get(0);
++            match dval_first {
++                Some(_) => dval_first.map(|x| x.to_string()),
++                None => Some(default),
++            }
++        } else {
++            None
++        }
++    }
++
++    /// Pull a NIP-05 Name out of the event, if one exists
++    #[must_use]
++    pub fn get_nip05_addr(&self) -> Option<nip05::Nip05Name> {
++        if self.is_kind_metadata() {
++            // very quick check if we should attempt to parse this json
++            if self.content.contains("\"nip05\"") {
++                // Parse into JSON
++                let md_parsed: Value = serde_json::from_str(&self.content).ok()?;
++                let md_map = md_parsed.as_object()?;
++                let nip05_str = md_map.get("nip05")?.as_str()?;
++                return nip05::Nip05Name::try_from(nip05_str).ok();
++            }
++        }
++        None
++    }
++
++    // is this event delegated (properly)?
++    // does the signature match, and are conditions valid?
++    // if so, return an alternate author for the event
++    #[must_use]
++    pub fn delegated_author(&self) -> Option<String> {
++        // is there a delegation tag?
++        let delegation_tag: Vec<String> = self
++            .tags
++            .iter()
++            .filter(|x| x.len() == 4)
++            .filter(|x| x.get(0).unwrap() == "delegation")
++            .take(1)
++            .next()?
++            .clone(); // get first tag
++
++        //let delegation_tag = self.tag_values_by_name("delegation");
++        // delegation tags should have exactly 3 elements after the name (pubkey, condition, sig)
++        // the event is signed by the delegatee
++        let delegatee = &self.pubkey;
++        // the delegation tag references the claimed delegator
++        let delegator: &str = delegation_tag.get(1)?;
++        let querystr: &str = delegation_tag.get(2)?;
++        let sig: &str = delegation_tag.get(3)?;
++
++        // attempt to get a condition query; this requires the delegation to have a valid signature.
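++        // NOTE (editor addition): illustrative sketch with hypothetical values.
++        // A NIP-26 delegation tag has the 4-element shape filtered for above:
++        //
++        //   ["delegation", "<delegator pubkey hex>", "kind=1&created_at<1665867123", "<schnorr sig hex>"]
++        //
++        // and the token signed by the delegator is
++        // "nostr:delegation:<delegatee>:<conditions>", as rebuilt in
++        // validate_delegation.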
++        if let Some(cond_query) = validate_delegation(delegator, delegatee, querystr, sig) {
++            // The signature was valid, now we ensure the delegation
++            // condition is valid for this event:
++            if cond_query.allows_event(self) {
++                // since this is allowed, we will provide the delegatee
++                Some(delegator.into())
++            } else {
++                debug!("an event failed to satisfy delegation conditions");
++                None
++            }
++        } else {
++            debug!("event had an invalid delegation signature");
++            None
++        }
++    }
++
++    /// Update delegation status
++    pub fn update_delegation(&mut self) {
++        self.delegated_by = self.delegated_author();
++    }
++    /// Build an event tag index
++    pub fn build_index(&mut self) {
++        // if there are no tags; just leave the index as None
++        if self.tags.is_empty() {
++            return;
++        }
++        // otherwise, build an index
++        let mut idx: HashMap<char, HashSet<String>> = HashMap::new();
++        // iterate over tags that have at least 2 elements
++        for t in self.tags.iter().filter(|x| x.len() > 1) {
++            let tagname = t.get(0).unwrap();
++            let tagnamechar_opt = single_char_tagname(tagname);
++            if tagnamechar_opt.is_none() {
++                continue;
++            }
++            let tagnamechar = tagnamechar_opt.unwrap();
++            let tagval = t.get(1).unwrap();
++            // ensure a vector exists for this tag
++            idx.entry(tagnamechar).or_insert_with(HashSet::new);
++            // get the tag vec and insert entry
++            let idx_tag_vec = idx.get_mut(&tagnamechar).expect("could not get tag vector");
++            idx_tag_vec.insert(tagval.clone());
++        }
++        // save the tag structure
++        self.tagidx = Some(idx);
++    }
++
++    /// Create a short event identifier, suitable for logging.
++    #[must_use]
++    pub fn get_event_id_prefix(&self) -> String {
++        self.id.chars().take(8).collect()
++    }
++    #[must_use]
++    pub fn get_author_prefix(&self) -> String {
++        self.pubkey.chars().take(8).collect()
++    }
++
++    /// Retrieve tag initial values across all tags matching the name
++    #[must_use]
++    pub fn tag_values_by_name(&self, tag_name: &str) -> Vec<String> {
++        self.tags
++            .iter()
++            .filter(|x| x.len() > 1)
++            .filter(|x| x.get(0).unwrap() == tag_name)
++            .map(|x| x.get(1).unwrap().clone())
++            .collect()
++    }
++
++    #[must_use]
++    pub fn is_valid_timestamp(&self, reject_future_seconds: Option<usize>) -> bool {
++        if let Some(allowable_future) = reject_future_seconds {
++            let curr_time = unix_time();
++            // calculate difference, plus how far future we allow
++            if curr_time + (allowable_future as u64) < self.created_at {
++                let delta = self.created_at - curr_time;
++                debug!(
++                    "event is too far in the future ({} seconds), rejecting",
++                    delta
++                );
++                return false;
++            }
++        }
++        true
++    }
++
++    /// Check if this event has a valid signature.
++    pub fn validate(&self) -> Result<()> {
++        // TODO: return a Result with a reason for invalid events
++        // validation is performed by:
++        // * parsing JSON string into event fields
++        // * create an array:
++        // ** [0, pubkey-hex-string, created-at-num, kind-num, tags-array-of-arrays, content-string]
++        // * serialize with no spaces/newlines
++        let c_opt = self.to_canonical();
++        if c_opt.is_none() {
++            debug!("could not canonicalize");
++            return Err(EventCouldNotCanonicalize);
++        }
++        let c = c_opt.unwrap();
++        // * compute the sha256sum.
++        let digest: sha256::Hash = sha256::Hash::hash(c.as_bytes());
++        let hex_digest = format!("{digest:x}");
++        // * ensure the id matches the computed sha256sum.
++        if self.id != hex_digest {
++            debug!("event id does not match digest");
++            return Err(EventInvalidId);
++        }
++        // * validate the message digest (sig) using the pubkey & computed sha256 message hash.
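++        // NOTE (editor addition): worked example drawn from the tests below.
++        // The canonical form is the JSON array
++        //   [0,"<pubkey>",<created_at>,<kind>,<tags>,"<content>"]
++        // e.g. [0,"012345",501234,1,[],"this is a test"]; its sha256 must equal
++        // the event `id`, and `sig` must be a valid schnorr signature over it.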
++ let sig = schnorr::Signature::from_str(&self.sig).map_err(|_| EventInvalidSignature)?; ++ if let Ok(msg) = secp256k1::Message::from_slice(digest.as_ref()) { ++ if let Ok(pubkey) = XOnlyPublicKey::from_str(&self.pubkey) { ++ SECP.verify_schnorr(&sig, &msg, &pubkey) ++ .map_err(|_| EventInvalidSignature) ++ } else { ++ debug!("client sent malformed pubkey"); ++ Err(EventMalformedPubkey) ++ } ++ } else { ++ info!("error converting digest to secp256k1 message"); ++ Err(EventInvalidSignature) ++ } ++ } ++ ++ /// Convert event to canonical representation for signing. ++ pub fn to_canonical(&self) -> Option { ++ // create a JsonValue for each event element ++ let mut c: Vec = vec![]; ++ // id must be set to 0 ++ let id = Number::from(0_u64); ++ c.push(serde_json::Value::Number(id)); ++ // public key ++ c.push(Value::String(self.pubkey.clone())); ++ // creation time ++ let created_at = Number::from(self.created_at); ++ c.push(serde_json::Value::Number(created_at)); ++ // kind ++ let kind = Number::from(self.kind); ++ c.push(serde_json::Value::Number(kind)); ++ // tags ++ c.push(self.tags_to_canonical()); ++ // content ++ c.push(Value::String(self.content.clone())); ++ serde_json::to_string(&Value::Array(c)).ok() ++ } ++ ++ /// Convert tags to a canonical form for signing. ++ fn tags_to_canonical(&self) -> Value { ++ let mut tags = Vec::::new(); ++ // iterate over self tags, ++ for t in &self.tags { ++ // each tag is a vec of strings ++ let mut a = Vec::::new(); ++ for v in t.iter() { ++ a.push(serde_json::Value::String(v.clone())); ++ } ++ tags.push(serde_json::Value::Array(a)); ++ } ++ serde_json::Value::Array(tags) ++ } ++ ++ /// Determine if the given tag and value set intersect with tags in this event. ++ #[must_use] ++ pub fn generic_tag_val_intersect(&self, tagname: char, check: &HashSet) -> bool { ++ match &self.tagidx { ++ // check if this is indexable tagname ++ Some(idx) => match idx.get(&tagname) { ++ Some(valset) => { ++ let common = valset.intersection(check); ++ common.count() > 0 ++ } ++ None => false, ++ }, ++ None => false, ++ } ++ } ++} ++ ++impl From for Event { ++ fn from(nostr_event: nostr::Event) -> Self { ++ Event { ++ id: nostr_event.id.to_hex(), ++ pubkey: nostr_event.pubkey.to_string(), ++ created_at: nostr_event.created_at.as_u64(), ++ kind: nostr_event.kind.as_u64(), ++ tags: nostr_event.tags.iter().map(|x| x.as_vec()).collect(), ++ content: nostr_event.content, ++ sig: nostr_event.sig.to_string(), ++ delegated_by: None, ++ tagidx: None, ++ } ++ } ++} ++ ++#[cfg(test)] ++mod tests { ++ use super::*; ++ ++ #[test] ++ fn event_creation() { ++ // create an event ++ let event = Event::simple_event(); ++ assert_eq!(event.id, "0"); ++ } ++ ++ #[test] ++ fn event_serialize() -> Result<()> { ++ // serialize an event to JSON string ++ let event = Event::simple_event(); ++ let j = serde_json::to_string(&event)?; ++ assert_eq!(j, "{\"id\":\"0\",\"pubkey\":\"0\",\"created_at\":0,\"kind\":0,\"tags\":[],\"content\":\"\",\"sig\":\"0\"}"); ++ Ok(()) ++ } ++ ++ #[test] ++ fn empty_event_tag_match() { ++ let event = Event::simple_event(); ++ assert!(!event ++ .generic_tag_val_intersect('e', &HashSet::from(["foo".to_owned(), "bar".to_owned()]))); ++ } ++ ++ #[test] ++ fn single_event_tag_match() { ++ let mut event = Event::simple_event(); ++ event.tags = vec![vec!["e".to_owned(), "foo".to_owned()]]; ++ event.build_index(); ++ assert!( ++ event.generic_tag_val_intersect( ++ 'e', ++ &HashSet::from(["foo".to_owned(), "bar".to_owned()]) ++ ) ++ ); ++ } ++ ++ #[test] ++ fn 
event_tags_serialize() -> Result<()> { ++ // serialize an event with tags to JSON string ++ let mut event = Event::simple_event(); ++ event.tags = vec![ ++ vec![ ++ "e".to_owned(), ++ "xxxx".to_owned(), ++ "wss://example.com".to_owned(), ++ ], ++ vec![ ++ "p".to_owned(), ++ "yyyyy".to_owned(), ++ "wss://example.com:3033".to_owned(), ++ ], ++ ]; ++ let j = serde_json::to_string(&event)?; ++ assert_eq!(j, "{\"id\":\"0\",\"pubkey\":\"0\",\"created_at\":0,\"kind\":0,\"tags\":[[\"e\",\"xxxx\",\"wss://example.com\"],[\"p\",\"yyyyy\",\"wss://example.com:3033\"]],\"content\":\"\",\"sig\":\"0\"}"); ++ Ok(()) ++ } ++ ++ #[test] ++ fn event_deserialize() -> Result<()> { ++ let raw_json = r#"{"id":"1384757da583e6129ce831c3d7afc775a33a090578f888dd0d010328ad047d0c","pubkey":"bbbd9711d357df4f4e498841fd796535c95c8e751fa35355008a911c41265fca","created_at":1612650459,"kind":1,"tags":null,"content":"hello world","sig":"59d0cc47ab566e81f72fe5f430bcfb9b3c688cb0093d1e6daa49201c00d28ecc3651468b7938642869ed98c0f1b262998e49a05a6ed056c0d92b193f4e93bc21"}"#; ++ let e: Event = serde_json::from_str(raw_json)?; ++ assert_eq!(e.kind, 1); ++ assert_eq!(e.tags.len(), 0); ++ Ok(()) ++ } ++ ++ #[test] ++ fn event_canonical() { ++ let e = Event { ++ id: "999".to_owned(), ++ pubkey: "012345".to_owned(), ++ delegated_by: None, ++ created_at: 501_234, ++ kind: 1, ++ tags: vec![], ++ content: "this is a test".to_owned(), ++ sig: "abcde".to_owned(), ++ tagidx: None, ++ }; ++ let c = e.to_canonical(); ++ let expected = Some(r#"[0,"012345",501234,1,[],"this is a test"]"#.to_owned()); ++ assert_eq!(c, expected); ++ } ++ ++ #[test] ++ fn event_tag_select() { ++ let e = Event { ++ id: "999".to_owned(), ++ pubkey: "012345".to_owned(), ++ delegated_by: None, ++ created_at: 501_234, ++ kind: 1, ++ tags: vec![ ++ vec!["j".to_owned(), "abc".to_owned()], ++ vec!["e".to_owned(), "foo".to_owned()], ++ vec!["e".to_owned(), "bar".to_owned()], ++ vec!["e".to_owned(), "baz".to_owned()], ++ vec![ ++ "p".to_owned(), ++ "aaaa".to_owned(), ++ "ws://example.com".to_owned(), ++ ], ++ ], ++ content: "this is a test".to_owned(), ++ sig: "abcde".to_owned(), ++ tagidx: None, ++ }; ++ let v = e.tag_values_by_name("e"); ++ assert_eq!(v, vec!["foo", "bar", "baz"]); ++ } ++ ++ #[test] ++ fn event_no_tag_select() { ++ let e = Event { ++ id: "999".to_owned(), ++ pubkey: "012345".to_owned(), ++ delegated_by: None, ++ created_at: 501_234, ++ kind: 1, ++ tags: vec![ ++ vec!["j".to_owned(), "abc".to_owned()], ++ vec!["e".to_owned(), "foo".to_owned()], ++ vec!["e".to_owned(), "baz".to_owned()], ++ vec![ ++ "p".to_owned(), ++ "aaaa".to_owned(), ++ "ws://example.com".to_owned(), ++ ], ++ ], ++ content: "this is a test".to_owned(), ++ sig: "abcde".to_owned(), ++ tagidx: None, ++ }; ++ let v = e.tag_values_by_name("x"); ++ // asking for tags that don't exist just returns zero-length vector ++ assert_eq!(v.len(), 0); ++ } ++ ++ #[test] ++ fn event_canonical_with_tags() { ++ let e = Event { ++ id: "999".to_owned(), ++ pubkey: "012345".to_owned(), ++ delegated_by: None, ++ created_at: 501_234, ++ kind: 1, ++ tags: vec![ ++ vec!["#e".to_owned(), "aoeu".to_owned()], ++ vec![ ++ "#p".to_owned(), ++ "aaaa".to_owned(), ++ "ws://example.com".to_owned(), ++ ], ++ ], ++ content: "this is a test".to_owned(), ++ sig: "abcde".to_owned(), ++ tagidx: None, ++ }; ++ let c = e.to_canonical(); ++ let expected_json = r###"[0,"012345",501234,1,[["#e","aoeu"],["#p","aaaa","ws://example.com"]],"this is a test"]"###; ++ let expected = Some(expected_json.to_owned()); ++ assert_eq!(c, expected); 
++ } ++ ++ #[test] ++ fn ephemeral_event() { ++ let mut event = Event::simple_event(); ++ event.kind = 20000; ++ assert!(event.is_ephemeral()); ++ event.kind = 29999; ++ assert!(event.is_ephemeral()); ++ event.kind = 30000; ++ assert!(!event.is_ephemeral()); ++ event.kind = 19999; ++ assert!(!event.is_ephemeral()); ++ } ++ ++ #[test] ++ fn replaceable_event() { ++ let mut event = Event::simple_event(); ++ event.kind = 0; ++ assert!(event.is_replaceable()); ++ event.kind = 3; ++ assert!(event.is_replaceable()); ++ event.kind = 10000; ++ assert!(event.is_replaceable()); ++ event.kind = 19999; ++ assert!(event.is_replaceable()); ++ event.kind = 20000; ++ assert!(!event.is_replaceable()); ++ } ++ ++ #[test] ++ fn param_replaceable_event() { ++ let mut event = Event::simple_event(); ++ event.kind = 30000; ++ assert!(event.is_param_replaceable()); ++ event.kind = 39999; ++ assert!(event.is_param_replaceable()); ++ event.kind = 29999; ++ assert!(!event.is_param_replaceable()); ++ event.kind = 40000; ++ assert!(!event.is_param_replaceable()); ++ } ++ ++ #[test] ++ fn param_replaceable_value_case_1() { ++ // NIP case #1: "tags":[["d",""]] ++ let mut event = Event::simple_event(); ++ event.kind = 30000; ++ event.tags = vec![vec!["d".to_owned(), "".to_owned()]]; ++ assert_eq!(event.distinct_param(), Some("".to_string())); ++ } ++ ++ #[test] ++ fn param_replaceable_value_case_2() { ++ // NIP case #2: "tags":[]: implicit d tag with empty value ++ let mut event = Event::simple_event(); ++ event.kind = 30000; ++ assert_eq!(event.distinct_param(), Some("".to_string())); ++ } ++ ++ #[test] ++ fn param_replaceable_value_case_3() { ++ // NIP case #3: "tags":[["d"]]: implicit empty value "" ++ let mut event = Event::simple_event(); ++ event.kind = 30000; ++ event.tags = vec![vec!["d".to_owned()]]; ++ assert_eq!(event.distinct_param(), Some("".to_string())); ++ } ++ ++ #[test] ++ fn param_replaceable_value_case_4() { ++ // NIP case #4: "tags":[["d",""],["d","not empty"]]: only first d tag is considered ++ let mut event = Event::simple_event(); ++ event.kind = 30000; ++ event.tags = vec![ ++ vec!["d".to_owned(), "".to_string()], ++ vec!["d".to_owned(), "not empty".to_string()], ++ ]; ++ assert_eq!(event.distinct_param(), Some("".to_string())); ++ } ++ ++ #[test] ++ fn param_replaceable_value_case_4b() { ++ // Variation of #4 with ++ // NIP case #4: "tags":[["d","not empty"],["d",""]]: only first d tag is considered ++ let mut event = Event::simple_event(); ++ event.kind = 30000; ++ event.tags = vec![ ++ vec!["d".to_owned(), "not empty".to_string()], ++ vec!["d".to_owned(), "".to_string()], ++ ]; ++ assert_eq!(event.distinct_param(), Some("not empty".to_string())); ++ } ++ ++ #[test] ++ fn param_replaceable_value_case_5() { ++ // NIP case #5: "tags":[["d"],["d","some value"]]: only first d tag is considered ++ let mut event = Event::simple_event(); ++ event.kind = 30000; ++ event.tags = vec![ ++ vec!["d".to_owned()], ++ vec!["d".to_owned(), "second value".to_string()], ++ vec!["d".to_owned(), "third value".to_string()], ++ ]; ++ assert_eq!(event.distinct_param(), Some("".to_string())); ++ } ++ ++ #[test] ++ fn param_replaceable_value_case_6() { ++ // NIP case #6: "tags":[["e"]]: same as no tags ++ let mut event = Event::simple_event(); ++ event.kind = 30000; ++ event.tags = vec![vec!["e".to_owned()]]; ++ assert_eq!(event.distinct_param(), Some("".to_string())); ++ } ++ ++ #[test] ++ fn expiring_event_none() { ++ // regular events do not expire ++ let mut event = Event::simple_event(); ++ event.kind = 7; ++ 
event.tags = vec![vec!["test".to_string(), "foo".to_string()]]; ++ assert_eq!(event.expiration(), None); ++ } ++ ++ #[test] ++ fn expiring_event_empty() { ++ // regular events do not expire ++ let mut event = Event::simple_event(); ++ event.kind = 7; ++ event.tags = vec![vec!["expiration".to_string()]]; ++ assert_eq!(event.expiration(), None); ++ } ++ ++ #[test] ++ fn expiring_event_future() { ++ // a normal expiring event ++ let exp: u64 = 1676264138; ++ let mut event = Event::simple_event(); ++ event.kind = 1; ++ event.tags = vec![vec!["expiration".to_string(), exp.to_string()]]; ++ assert_eq!(event.expiration(), Some(exp)); ++ } ++ ++ #[test] ++ fn expiring_event_negative() { ++ // expiration set to a negative value (invalid) ++ let exp: i64 = -90; ++ let mut event = Event::simple_event(); ++ event.kind = 1; ++ event.tags = vec![vec!["expiration".to_string(), exp.to_string()]]; ++ assert_eq!(event.expiration(), None); ++ } ++ ++ #[test] ++ fn expiring_event_zero() { ++ // a normal expiring event set to zero ++ let exp: i64 = 0; ++ let mut event = Event::simple_event(); ++ event.kind = 1; ++ event.tags = vec![vec!["expiration".to_string(), exp.to_string()]]; ++ assert_eq!(event.expiration(), Some(0)); ++ } ++ ++ #[test] ++ fn expiring_event_fraction() { ++ // expiration is fractional (invalid) ++ let exp: f64 = 23.334; ++ let mut event = Event::simple_event(); ++ event.kind = 1; ++ event.tags = vec![vec!["expiration".to_string(), exp.to_string()]]; ++ assert_eq!(event.expiration(), None); ++ } ++ ++ #[test] ++ fn expiring_event_multiple() { ++ // multiple values, we just take the first ++ let mut event = Event::simple_event(); ++ event.kind = 1; ++ event.tags = vec![ ++ vec!["expiration".to_string(), (10).to_string()], ++ vec!["expiration".to_string(), (20).to_string()], ++ ]; ++ assert_eq!(event.expiration(), Some(10)); ++ } ++} +diff --git a/src/hexrange.rs b/src/hexrange.rs +new file mode 100644 +index 0000000..a9e3e83 +--- /dev/null ++++ b/src/hexrange.rs +@@ -0,0 +1,159 @@ ++//! Utilities for searching hexadecimal ++use crate::utils::is_hex; ++use hex; ++ ++/// Types of hexadecimal queries. ++#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone)] ++pub enum HexSearch { ++ // when no range is needed, exact 32-byte ++ Exact(Vec), ++ // lower (inclusive) and upper range (exclusive) ++ Range(Vec, Vec), ++ // lower bound only, upper bound is MAX inclusive ++ LowerOnly(Vec), ++} ++ ++/// Check if a string contains only f chars ++fn is_all_fs(s: &str) -> bool { ++ s.chars().all(|x| x == 'f' || x == 'F') ++} ++ ++/// Find the next hex sequence greater than the argument. ++#[must_use] ++pub fn hex_range(s: &str) -> Option { ++ let mut hash_base = s.to_owned(); ++ if !is_hex(&hash_base) || hash_base.len() > 64 { ++ return None; ++ } ++ if hash_base.len() == 64 { ++ return Some(HexSearch::Exact(hex::decode(&hash_base).ok()?)); ++ } ++ // if s is odd, add a zero ++ let mut odd = hash_base.len() % 2 != 0; ++ if odd { ++ // extend the string to make it even ++ hash_base.push('0'); ++ } ++ let base = hex::decode(hash_base).ok()?; ++ // check for all ff's ++ if is_all_fs(s) { ++ // there is no higher bound, we only want to search for blobs greater than this. ++ return Some(HexSearch::LowerOnly(base)); ++ } ++ ++ // return a range ++ let mut upper = base.clone(); ++ let mut byte_len = upper.len(); ++ ++ // for odd strings, we made them longer, but we want to increment the upper char (+16). ++ // we know we can do this without overflowing because we explicitly set the bottom half to 0's. 
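++    // NOTE (editor addition): worked example matching the tests below.
++    // hex_range("abc") pads to lower bound "abc0" and, because the input was
++    // odd-length, bumps the high nibble of the last byte (+0x10) to get the
++    // exclusive upper bound "abd0"; hex_range("abf") carries into "ac00".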
++ while byte_len > 0 { ++ byte_len -= 1; ++ // check if byte can be incremented, or if we need to carry. ++ let b = upper[byte_len]; ++ if b == u8::MAX { ++ // reset and carry ++ upper[byte_len] = 0; ++ } else if odd { ++ // check if first char in this byte is NOT 'f' ++ if b < 240 { ++ // bump up the first character in this byte ++ upper[byte_len] = b + 16; ++ // increment done, stop iterating through the vec ++ break; ++ } ++ // if it is 'f', reset the byte to 0 and do a carry ++ // reset and carry ++ upper[byte_len] = 0; ++ // done with odd logic, so don't repeat this ++ odd = false; ++ } else { ++ // bump up the first character in this byte ++ upper[byte_len] = b + 1; ++ // increment done, stop iterating ++ break; ++ } ++ } ++ Some(HexSearch::Range(base, upper)) ++} ++ ++#[cfg(test)] ++mod tests { ++ use super::*; ++ use crate::error::Result; ++ ++ #[test] ++ fn hex_range_exact() -> Result<()> { ++ let hex = "abcdef00abcdef00abcdef00abcdef00abcdef00abcdef00abcdef00abcdef00"; ++ let r = hex_range(hex); ++ assert_eq!( ++ r, ++ Some(HexSearch::Exact(hex::decode(hex).expect("invalid hex"))) ++ ); ++ Ok(()) ++ } ++ #[test] ++ fn hex_full_range() -> Result<()> { ++ let hex = "aaaa"; ++ let hex_upper = "aaab"; ++ let r = hex_range(hex); ++ assert_eq!( ++ r, ++ Some(HexSearch::Range( ++ hex::decode(hex).expect("invalid hex"), ++ hex::decode(hex_upper).expect("invalid hex") ++ )) ++ ); ++ Ok(()) ++ } ++ ++ #[test] ++ fn hex_full_range_odd() -> Result<()> { ++ let r = hex_range("abc"); ++ assert_eq!( ++ r, ++ Some(HexSearch::Range( ++ hex::decode("abc0").expect("invalid hex"), ++ hex::decode("abd0").expect("invalid hex") ++ )) ++ ); ++ Ok(()) ++ } ++ ++ #[test] ++ fn hex_full_range_odd_end_f() -> Result<()> { ++ let r = hex_range("abf"); ++ assert_eq!( ++ r, ++ Some(HexSearch::Range( ++ hex::decode("abf0").expect("invalid hex"), ++ hex::decode("ac00").expect("invalid hex") ++ )) ++ ); ++ Ok(()) ++ } ++ ++ #[test] ++ fn hex_no_upper() -> Result<()> { ++ let r = hex_range("ffff"); ++ assert_eq!( ++ r, ++ Some(HexSearch::LowerOnly( ++ hex::decode("ffff").expect("invalid hex") ++ )) ++ ); ++ Ok(()) ++ } ++ ++ #[test] ++ fn hex_no_upper_odd() -> Result<()> { ++ let r = hex_range("fff"); ++ assert_eq!( ++ r, ++ Some(HexSearch::LowerOnly( ++ hex::decode("fff0").expect("invalid hex") ++ )) ++ ); ++ Ok(()) ++ } ++} +diff --git a/src/info.rs b/src/info.rs +new file mode 100644 +index 0000000..2801415 +--- /dev/null ++++ b/src/info.rs +@@ -0,0 +1,132 @@ ++//! 
Relay metadata using NIP-11 ++/// Relay Info ++use crate::config::Settings; ++use serde::{Deserialize, Serialize}; ++ ++pub const CARGO_PKG_VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION"); ++pub const UNIT: &str = "sats"; ++ ++/// Limitations of the relay as specified in NIP-111 ++/// (This nip isn't finalized so may change) ++#[derive(Debug, Serialize, Deserialize)] ++#[allow(unused)] ++pub struct Limitation { ++ #[serde(skip_serializing_if = "Option::is_none")] ++ payment_required: Option, ++} ++ ++#[derive(Serialize, Deserialize, Debug)] ++#[allow(unused)] ++pub struct Fees { ++ #[serde(skip_serializing_if = "Option::is_none")] ++ admission: Option>, ++ #[serde(skip_serializing_if = "Option::is_none")] ++ publication: Option>, ++} ++ ++#[derive(Serialize, Deserialize, Debug)] ++#[allow(unused)] ++pub struct Fee { ++ amount: u64, ++ unit: String, ++} ++ ++#[derive(Debug, Serialize, Deserialize)] ++#[allow(unused)] ++pub struct RelayInfo { ++ #[serde(skip_serializing_if = "Option::is_none")] ++ pub id: Option, ++ #[serde(skip_serializing_if = "Option::is_none")] ++ pub name: Option, ++ #[serde(skip_serializing_if = "Option::is_none")] ++ pub description: Option, ++ #[serde(skip_serializing_if = "Option::is_none")] ++ pub pubkey: Option, ++ #[serde(skip_serializing_if = "Option::is_none")] ++ pub contact: Option, ++ #[serde(skip_serializing_if = "Option::is_none")] ++ pub icon: Option, ++ #[serde(skip_serializing_if = "Option::is_none")] ++ pub supported_nips: Option>, ++ #[serde(skip_serializing_if = "Option::is_none")] ++ pub software: Option, ++ #[serde(skip_serializing_if = "Option::is_none")] ++ pub version: Option, ++ #[serde(skip_serializing_if = "Option::is_none")] ++ pub limitation: Option, ++ #[serde(skip_serializing_if = "Option::is_none")] ++ pub payment_url: Option, ++ #[serde(skip_serializing_if = "Option::is_none")] ++ pub fees: Option, ++} ++ ++/// Convert an Info configuration into public Relay Info ++impl From for RelayInfo { ++ fn from(c: Settings) -> Self { ++ let mut supported_nips = vec![1, 2, 9, 11, 12, 15, 16, 20, 22, 33, 40]; ++ ++ if c.authorization.nip42_auth { ++ supported_nips.push(42); ++ supported_nips.sort(); ++ } ++ ++ let i = c.info; ++ let p = c.pay_to_relay; ++ ++ let limitations = Limitation { ++ payment_required: Some(p.enabled), ++ }; ++ ++ let (payment_url, fees) = if p.enabled { ++ let admission_fee = if p.admission_cost > 0 { ++ Some(vec![Fee { ++ amount: p.admission_cost, ++ unit: UNIT.to_string(), ++ }]) ++ } else { ++ None ++ }; ++ ++ let post_fee = if p.cost_per_event > 0 { ++ Some(vec![Fee { ++ amount: p.cost_per_event, ++ unit: UNIT.to_string(), ++ }]) ++ } else { ++ None ++ }; ++ ++ let fees = Fees { ++ admission: admission_fee, ++ publication: post_fee, ++ }; ++ ++ let payment_url = if p.enabled && i.relay_url.is_some() { ++ Some(format!( ++ "{}join", ++ i.relay_url.clone().unwrap().replace("ws", "http") ++ )) ++ } else { ++ None ++ }; ++ (payment_url, Some(fees)) ++ } else { ++ (None, None) ++ }; ++ ++ RelayInfo { ++ id: i.relay_url, ++ name: i.name, ++ description: i.description, ++ pubkey: i.pubkey, ++ contact: i.contact, ++ supported_nips: Some(supported_nips), ++ software: Some("https://git.sr.ht/~gheartsfield/nostr-rs-relay".to_owned()), ++ version: CARGO_PKG_VERSION.map(std::borrow::ToOwned::to_owned), ++ limitation: Some(limitations), ++ payment_url, ++ fees, ++ icon: i.relay_icon, ++ } ++ } ++} +diff --git a/src/lib.rs b/src/lib.rs +new file mode 100644 +index 0000000..a6f6268 +--- /dev/null ++++ b/src/lib.rs +@@ 
-0,0 +1,19 @@ ++pub mod cli; ++pub mod close; ++pub mod config; ++pub mod conn; ++pub mod db; ++pub mod delegation; ++pub mod error; ++pub mod event; ++pub mod hexrange; ++pub mod info; ++pub mod nauthz; ++pub mod nip05; ++pub mod notice; ++pub mod repo; ++pub mod subscription; ++pub mod utils; ++// Public API for creating relays programmatically ++pub mod payment; ++pub mod server; +diff --git a/src/main.rs b/src/main.rs +new file mode 100644 +index 0000000..313793e +--- /dev/null ++++ b/src/main.rs +@@ -0,0 +1,147 @@ ++//! Server process ++use clap::Parser; ++use console_subscriber::ConsoleLayer; ++use gnostr_relay::cli::CLIArgs; ++use gnostr_relay::config; ++use gnostr_relay::server::start_server; ++use std::fs; ++use std::path::Path; ++use std::process; ++use std::sync::mpsc as syncmpsc; ++use std::sync::mpsc::{Receiver as MpscReceiver, Sender as MpscSender}; ++use std::thread; ++#[cfg(not(target_env = "msvc"))] ++use tikv_jemallocator::Jemalloc; ++use tracing::info; ++use tracing_appender::non_blocking::WorkerGuard; ++use tracing_subscriber::EnvFilter; ++ ++use dirs::home_dir; ++ ++#[cfg(not(target_env = "msvc"))] ++#[global_allocator] ++static GLOBAL: Jemalloc = Jemalloc; ++ ++/// Start running a Nostr relay server. ++fn main() { ++ ++ let home = home_dir(); ++ let gnostr_home: String = home.expect("REASON").display().to_string(); ++ //println!("{:?}", gnostr_home); ++ let _ = fs::create_dir_all(gnostr_home + "/.gnostr/relay"); ++ ++ let args = CLIArgs::parse(); ++ ++ // get config file name from args ++ let config_file_arg = args.config; ++ ++ // Ensure the config file is readable if it was explicitly set ++ if let Some(config_path) = config_file_arg.as_ref() { ++ let path = Path::new(&config_path); ++ if !path.exists() { ++ eprintln!("Config file not found: {}", &config_path); ++ process::exit(1); ++ } ++ if !path.is_file() { ++ eprintln!("Invalid config file path: {}", &config_path); ++ process::exit(1); ++ } ++ if let Err(err) = fs::metadata(path) { ++ eprintln!("Error while accessing file metadata: {}", err); ++ process::exit(1); ++ } ++ if let Err(err) = fs::File::open(path) { ++ eprintln!("Config file is not readable: {}", err); ++ process::exit(1); ++ } ++ } ++ ++ let mut _log_guard: Option = None; ++ ++ // configure settings from the config file (defaults to config.toml) ++ // replace default settings with those read from the config file ++ let mut settings = config::Settings::new(&config_file_arg).unwrap_or_else(|e| { ++ eprintln!("Error reading config file ({:?})", e); ++ process::exit(1); ++ }); ++ ++ let config_port_arg = args.port; ++ if let Some(config_port) = config_port_arg { ++ settings.network.port = config_port; ++ } ++ #[cfg(debug_assertions)] ++ println!("config_port_arg={:?}", config_port_arg); ++ ++ ++ // setup tracing ++ if settings.diagnostics.tracing { ++ // enable tracing with tokio-console ++ ConsoleLayer::builder().with_default_env().init(); ++ } else { ++ // standard logging ++ if let Some(path) = &settings.logging.folder_path { ++ // write logs to a folder ++ let prefix = match &settings.logging.file_prefix { ++ Some(p) => p.as_str(), ++ None => "relay", ++ }; ++ let file_appender = tracing_appender::rolling::daily(path, prefix); ++ let (non_blocking, guard) = tracing_appender::non_blocking(file_appender); ++ let filter = EnvFilter::from_default_env(); ++ // assign to a variable that is not dropped till the program ends ++ _log_guard = Some(guard); ++ ++ tracing_subscriber::fmt() ++ .with_env_filter(filter) ++ .with_writer(non_blocking) ++ .try_init() 
++ .unwrap(); ++ } else { ++ // write to stdout ++ tracing_subscriber::fmt::try_init().unwrap(); ++ } ++ } ++ info!("Starting up from main"); ++ ++ let home = home_dir(); ++ let gnostr_home: String = home.expect("REASON").display().to_string(); ++ #[cfg(debug_assertions)] ++ println!("{:?}", gnostr_home); ++ ++ let _ = fs::create_dir_all(gnostr_home + "/.gnostr/relay"); ++ ++ // get database directory from args ++ let db_dir_arg = args.db; ++ ++ // update with database location from args, if provided ++ if let Some(db_dir) = db_dir_arg { ++ settings.database.data_directory = db_dir; ++ } ++ else { ++ let home = home_dir(); ++ let gnostr_home: String = home.expect("REASON").display().to_string(); ++ ++ #[cfg(debug_assertions)] ++ println!("gnostr_home={:?}", gnostr_home); ++ ++ let _db_home = fs::create_dir_all(gnostr_home.clone() + "/.gnostr/relay"); ++ ++ #[cfg(debug_assertions)] ++ println!("_db_home={:?}", _db_home); ++ ++ settings.database.data_directory = gnostr_home.clone() + "/.gnostr/relay"; ++ ++ } ++ // we should have a 'control plane' channel to monitor and bump ++ // the server. this will let us do stuff like clear the database, ++ // shutdown, etc.; for now all this does is initiate shutdown if ++ // `()` is sent. This will change in the future, this is just a ++ // stopgap to shutdown the relay when it is used as a library. ++ let (_, ctrl_rx): (MpscSender<()>, MpscReceiver<()>) = syncmpsc::channel(); ++ // run this in a new thread ++ let handle = thread::spawn(move || { ++ let _svr = start_server(&settings, ctrl_rx); ++ }); ++ // block on nostr thread to finish. ++ handle.join().unwrap(); ++} +diff --git a/src/nauthz.rs b/src/nauthz.rs +new file mode 100644 +index 0000000..2413a2b +--- /dev/null ++++ b/src/nauthz.rs +@@ -0,0 +1,111 @@ ++use crate::error::{Error, Result}; ++use crate::{event::Event, nip05::Nip05Name}; ++use nauthz_grpc::authorization_client::AuthorizationClient; ++use nauthz_grpc::event::TagEntry; ++use nauthz_grpc::{Decision, Event as GrpcEvent, EventReply, EventRequest}; ++use tracing::{info, warn}; ++ ++pub mod nauthz_grpc { ++ tonic::include_proto!("nauthz"); ++} ++ ++// A decision for the DB to act upon ++pub trait AuthzDecision: Send + Sync { ++ fn permitted(&self) -> bool; ++ fn message(&self) -> Option; ++} ++ ++impl AuthzDecision for EventReply { ++ fn permitted(&self) -> bool { ++ self.decision == Decision::Permit as i32 ++ } ++ fn message(&self) -> Option { ++ self.message.clone() ++ } ++} ++ ++// A connection to an event admission GRPC server ++pub struct EventAuthzService { ++ server_addr: String, ++ conn: Option>, ++} ++ ++// conversion of Nip05Names into GRPC type ++impl std::convert::From for nauthz_grpc::event_request::Nip05Name { ++ fn from(value: Nip05Name) -> Self { ++ nauthz_grpc::event_request::Nip05Name { ++ local: value.local.clone(), ++ domain: value.domain, ++ } ++ } ++} ++ ++// conversion of event tags into gprc struct ++fn tags_to_protobuf(tags: &[Vec]) -> Vec { ++ tags.iter() ++ .map(|x| TagEntry { values: x.clone() }) ++ .collect() ++} ++ ++impl EventAuthzService { ++ pub async fn connect(server_addr: &str) -> EventAuthzService { ++ let mut eas = EventAuthzService { ++ server_addr: server_addr.to_string(), ++ conn: None, ++ }; ++ eas.ready_connection().await; ++ eas ++ } ++ ++ pub async fn ready_connection(&mut self) { ++ if self.conn.is_none() { ++ let client = AuthorizationClient::connect(self.server_addr.to_string()).await; ++ if let Err(ref msg) = client { ++ warn!("could not connect to nostr authz GRPC server: {:?}", msg); 
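// A minimal sketch (not part of this patch) of the control-plane channel set
// up at the end of main() above: when the relay is embedded as a library,
// keeping the Sender half lets the host initiate shutdown. The use of
// Settings::default() and the 5-second timer are assumptions for
// illustration only.
//
//   let settings = gnostr_relay::config::Settings::default();
//   let (ctrl_tx, ctrl_rx) = std::sync::mpsc::channel::<()>();
//   let handle = std::thread::spawn(move || {
//       let _ = gnostr_relay::server::start_server(&settings, ctrl_rx);
//   });
//   std::thread::sleep(std::time::Duration::from_secs(5));
//   ctrl_tx.send(()).ok(); // sending `()` initiates shutdown
//   handle.join().unwrap();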
++            } else {
++                info!("connected to nostr authorization GRPC server");
++            }
++            self.conn = client.ok();
++        }
++    }
++
++    pub async fn admit_event(
++        &mut self,
++        event: &Event,
++        ip: &str,
++        origin: Option<String>,
++        user_agent: Option<String>,
++        nip05: Option<Nip05Name>,
++        auth_pubkey: Option<Vec<u8>>,
++    ) -> Result<Box<dyn AuthzDecision>> {
++        self.ready_connection().await;
++        let id_blob = hex::decode(&event.id)?;
++        let pubkey_blob = hex::decode(&event.pubkey)?;
++        let sig_blob = hex::decode(&event.sig)?;
++        if let Some(ref mut c) = self.conn {
++            let gevent = GrpcEvent {
++                id: id_blob,
++                pubkey: pubkey_blob,
++                sig: sig_blob,
++                created_at: event.created_at,
++                kind: event.kind,
++                content: event.content.clone(),
++                tags: tags_to_protobuf(&event.tags),
++            };
++            let svr_res = c
++                .event_admit(EventRequest {
++                    event: Some(gevent),
++                    ip_addr: Some(ip.to_string()),
++                    origin,
++                    user_agent,
++                    auth_pubkey,
++                    nip05: nip05.map(nauthz_grpc::event_request::Nip05Name::from),
++                })
++                .await?;
++            let reply = svr_res.into_inner();
++            Ok(Box::new(reply))
++        } else {
++            Err(Error::AuthzError)
++        }
++    }
++}
+diff --git a/src/nip05.rs b/src/nip05.rs
+new file mode 100644
+index 0000000..7fa60da
+--- /dev/null
++++ b/src/nip05.rs
+@@ -0,0 +1,612 @@
++//! User verification using NIP-05 names
++//!
++//! NIP-05 defines a mechanism for authors to associate an internet
++//! address with their public key, in metadata events. This module
++//! consumes a stream of metadata events, and keeps a database table
++//! updated with the current NIP-05 verification status.
++use crate::config::VerifiedUsers;
++use crate::error::{Error, Result};
++use crate::event::Event;
++use crate::repo::NostrRepo;
++use hyper::body::HttpBody;
++use hyper::client::connect::HttpConnector;
++use hyper::Client;
++use hyper_rustls::HttpsConnector;
++use std::sync::Arc;
++use std::time::Duration;
++use std::time::Instant;
++use std::time::SystemTime;
++use tokio::time::Interval;
++use tracing::{debug, info, warn};
++
++/// NIP-05 verifier state
++pub struct Verifier {
++    /// Repository for saving/retrieving events and records
++    repo: Arc<dyn NostrRepo>,
++    /// Metadata events for us to inspect
++    metadata_rx: tokio::sync::broadcast::Receiver<Event>,
++    /// Newly validated events get written and then broadcast on this channel to subscribers
++    event_tx: tokio::sync::broadcast::Sender<Event>,
++    /// Settings
++    settings: crate::config::Settings,
++    /// HTTP client
++    client: hyper::Client<HttpsConnector<HttpConnector>, hyper::Body>,
++    /// After all accounts are updated, wait this long before checking again.
++    wait_after_finish: Duration,
++    /// Minimum amount of time between HTTP queries
++    http_wait_duration: Duration,
++    /// Interval for updating verification records
++    reverify_interval: Interval,
++}
++
++/// A NIP-05 identifier is a local part and domain.
++#[derive(PartialEq, Eq, Debug, Clone)]
++pub struct Nip05Name {
++    pub local: String,
++    pub domain: String,
++}
++
++impl Nip05Name {
++    /// Does this name represent the entire domain?
++    #[must_use]
++    pub fn is_domain_only(&self) -> bool {
++        self.local == "_"
++    }
++
++    /// Determine the URL to query for verification
++    fn to_url(&self) -> Option<http::Uri> {
++        format!(
++            "https://{}/.well-known/nostr.json?name={}",
++            self.domain, self.local
++        )
++        .parse::<http::Uri>()
++        .ok()
++    }
++}
++
++// Parsing Nip05Names from strings
++impl std::convert::TryFrom<&str> for Nip05Name {
++    type Error = Error;
++    fn try_from(inet: &str) -> Result<Self> {
++        // break full name at the @ boundary.
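// A hedged sketch of the AuthzDecision contract defined in nauthz.rs above:
// any permit/deny value carrying an optional message satisfies it, which is
// useful in tests or when running without the GRPC server. `StaticDecision`
// is a hypothetical helper, not part of this patch.
use crate::nauthz::AuthzDecision;

struct StaticDecision {
    permit: bool,
    note: Option<String>,
}

impl AuthzDecision for StaticDecision {
    // mirror the GRPC EventReply semantics: permit/deny plus optional text
    fn permitted(&self) -> bool {
        self.permit
    }
    fn message(&self) -> Option<String> {
        self.note.clone()
    }
}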
++ let components: Vec<&str> = inet.split('@').collect(); ++ if components.len() == 2 { ++ // check if local name is valid ++ let local = components[0]; ++ let domain = components[1]; ++ if local ++ .chars() ++ .all(|x| x.is_alphanumeric() || x == '_' || x == '-' || x == '.') ++ { ++ if domain ++ .chars() ++ .all(|x| x.is_alphanumeric() || x == '-' || x == '.') ++ { ++ Ok(Nip05Name { ++ local: local.to_owned(), ++ domain: domain.to_owned(), ++ }) ++ } else { ++ Err(Error::CustomError( ++ "invalid character in domain part".to_owned(), ++ )) ++ } ++ } else { ++ Err(Error::CustomError( ++ "invalid character in local part".to_owned(), ++ )) ++ } ++ } else { ++ Err(Error::CustomError("too many/few components".to_owned())) ++ } ++ } ++} ++ ++impl std::fmt::Display for Nip05Name { ++ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { ++ write!(f, "{}@{}", self.local, self.domain) ++ } ++} ++ ++/// Check if the specified username and address are present and match in this response body ++fn body_contains_user(username: &str, address: &str, bytes: &hyper::body::Bytes) -> Result { ++ // convert the body into json ++ let body: serde_json::Value = serde_json::from_slice(bytes)?; ++ // ensure we have a names object. ++ let names_map = body ++ .as_object() ++ .and_then(|x| x.get("names")) ++ .and_then(serde_json::Value::as_object) ++ .ok_or_else(|| Error::CustomError("not a map".to_owned()))?; ++ // get the pubkey for the requested user ++ let check_name = names_map.get(username).and_then(serde_json::Value::as_str); ++ // ensure the address is a match ++ Ok(check_name.map_or(false, |x| x == address)) ++} ++ ++impl Verifier { ++ pub fn new( ++ repo: Arc, ++ metadata_rx: tokio::sync::broadcast::Receiver, ++ event_tx: tokio::sync::broadcast::Sender, ++ settings: crate::config::Settings, ++ ) -> Result { ++ info!("creating NIP-05 verifier"); ++ // setup hyper client ++ let https = hyper_rustls::HttpsConnectorBuilder::new() ++ .with_native_roots() ++ .https_or_http() ++ .enable_http1() ++ .build(); ++ ++ let client = Client::builder().build::<_, hyper::Body>(https); ++ ++ // After all accounts have been re-verified, don't check again ++ // for this long. ++ let wait_after_finish = Duration::from_secs(60 * 10); ++ // when we have an active queue of accounts to validate, we ++ // will wait this duration between HTTP requests. ++ let http_wait_duration = Duration::from_secs(1); ++ // setup initial interval for re-verification. If we find ++ // there is no work to be done, it will be reset to a longer ++ // duration. ++ let reverify_interval = tokio::time::interval(http_wait_duration); ++ Ok(Verifier { ++ repo, ++ metadata_rx, ++ event_tx, ++ settings, ++ client, ++ wait_after_finish, ++ http_wait_duration, ++ reverify_interval, ++ }) ++ } ++ ++ /// Perform web verification against a NIP-05 name and address. ++ pub async fn get_web_verification( ++ &mut self, ++ nip: &Nip05Name, ++ pubkey: &str, ++ ) -> UserWebVerificationStatus { ++ self.get_web_verification_res(nip, pubkey) ++ .await ++ .unwrap_or(UserWebVerificationStatus::Unknown) ++ } ++ ++ /// Perform web verification against an `Event` (must be metadata). 
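// Sketch of the .well-known/nostr.json document that body_contains_user()
// above expects; the name and pubkey values are placeholders.
//
//   {
//     "names": {
//       "bob": "<64-char lowercase hex pubkey>"
//     }
//   }
//
// body_contains_user("bob", "<same hex pubkey>", &bytes) then returns
// Ok(true); a missing "names" map is an error, and a mismatched or absent
// name yields Ok(false).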
++ pub async fn get_web_verification_from_event( ++ &mut self, ++ e: &Event, ++ ) -> UserWebVerificationStatus { ++ let nip_parse = e.get_nip05_addr(); ++ if let Some(nip) = nip_parse { ++ self.get_web_verification_res(&nip, &e.pubkey) ++ .await ++ .unwrap_or(UserWebVerificationStatus::Unknown) ++ } else { ++ UserWebVerificationStatus::Unknown ++ } ++ } ++ ++ /// Perform web verification, with a `Result` return. ++ async fn get_web_verification_res( ++ &mut self, ++ nip: &Nip05Name, ++ pubkey: &str, ++ ) -> Result { ++ // determine if this domain should be checked ++ if !is_domain_allowed( ++ &nip.domain, ++ &self.settings.verified_users.domain_whitelist, ++ &self.settings.verified_users.domain_blacklist, ++ ) { ++ return Ok(UserWebVerificationStatus::DomainNotAllowed); ++ } ++ let url = nip ++ .to_url() ++ .ok_or_else(|| Error::CustomError("invalid NIP-05 URL".to_owned()))?; ++ let req = hyper::Request::builder() ++ .method(hyper::Method::GET) ++ .uri(url) ++ .header("Accept", "application/json") ++ .header( ++ "User-Agent", ++ format!( ++ "nostr-rs-relay/{} NIP-05 Verifier", ++ crate::info::CARGO_PKG_VERSION.unwrap() ++ ), ++ ) ++ .body(hyper::Body::empty()) ++ .expect("request builder"); ++ ++ let response_fut = self.client.request(req); ++ ++ if let Ok(response_res) = tokio::time::timeout(Duration::from_secs(5), response_fut).await { ++ // limit size of verification document to 1MB. ++ const MAX_ALLOWED_RESPONSE_SIZE: u64 = 1024 * 1024; ++ let response = response_res?; ++ // determine content length from response ++ let response_content_length = match response.body().size_hint().upper() { ++ Some(v) => v, ++ None => MAX_ALLOWED_RESPONSE_SIZE + 1, // reject missing content length ++ }; ++ // TODO: test how hyper handles the client providing an inaccurate content-length. ++ if response_content_length <= MAX_ALLOWED_RESPONSE_SIZE { ++ let (parts, body) = response.into_parts(); ++ // TODO: consider redirects ++ if parts.status == http::StatusCode::OK { ++ // parse body, determine if the username / key / address is present ++ let body_bytes = hyper::body::to_bytes(body).await?; ++ let body_matches = body_contains_user(&nip.local, pubkey, &body_bytes)?; ++ if body_matches { ++ return Ok(UserWebVerificationStatus::Verified); ++ } ++ // successful response, parsed as a nip-05 ++ // document, but this name/pubkey was not ++ // present. ++ return Ok(UserWebVerificationStatus::Unverified); ++ } ++ } else { ++ info!( ++ "content length missing or exceeded limits for account: {:?}", ++ nip.to_string() ++ ); ++ } ++ } else { ++ info!("timeout verifying account {:?}", nip); ++ return Ok(UserWebVerificationStatus::Unknown); ++ } ++ Ok(UserWebVerificationStatus::Unknown) ++ } ++ ++ /// Perform NIP-05 verifier tasks. ++ pub async fn run(&mut self) { ++ // use this to schedule periodic re-validation tasks ++ // run a loop, restarting on failure ++ loop { ++ let res = self.run_internal().await; ++ match res { ++ Err(Error::ChannelClosed) => { ++ // channel was closed, we are shutting down ++ return; ++ } ++ Err(e) => { ++ info!("error in verifier: {:?}", e); ++ } ++ _ => {} ++ } ++ } ++ } ++ ++ /// Internal select loop for performing verification ++ async fn run_internal(&mut self) -> Result<()> { ++ tokio::select! 
{ ++ m = self.metadata_rx.recv() => { ++ match m { ++ Ok(e) => { ++ if let Some(naddr) = e.get_nip05_addr() { ++ info!("got metadata event for ({:?},{:?})", naddr.to_string() ,e.get_author_prefix()); ++ // Process a new author, checking if they are verified: ++ let check_verified = self.repo.get_latest_user_verification(&e.pubkey).await; ++ // ensure the event we got is more recent than the one we have, otherwise we can ignore it. ++ if let Ok(last_check) = check_verified { ++ if e.created_at <= last_check.event_created { ++ // this metadata is from the same author as an existing verification. ++ // it is older than what we have, so we can ignore it. ++ debug!("received older metadata event for author {:?}", e.get_author_prefix()); ++ return Ok(()); ++ } ++ } ++ // old, or no existing record for this user. In either case, we just create a new one. ++ let start = Instant::now(); ++ let v = self.get_web_verification_from_event(&e).await; ++ info!( ++ "checked name {:?}, result: {:?}, in: {:?}", ++ naddr.to_string(), ++ v, ++ start.elapsed() ++ ); ++ // sleep to limit how frequently we make HTTP requests for new metadata events. This should limit us to 4 req/sec. ++ tokio::time::sleep(Duration::from_millis(250)).await; ++ // if this user was verified, we need to write the ++ // record, persist the event, and broadcast. ++ if let UserWebVerificationStatus::Verified = v { ++ self.create_new_verified_user(&naddr.to_string(), &e).await?; ++ } ++ } ++ }, ++ Err(tokio::sync::broadcast::error::RecvError::Lagged(c)) => { ++ warn!("incoming metadata events overwhelmed buffer, {} events dropped",c); ++ } ++ Err(tokio::sync::broadcast::error::RecvError::Closed) => { ++ info!("metadata broadcast channel closed"); ++ return Err(Error::ChannelClosed); ++ } ++ } ++ }, ++ _ = self.reverify_interval.tick() => { ++ // check and see if there is an old account that needs ++ // to be reverified ++ self.do_reverify().await?; ++ }, ++ } ++ Ok(()) ++ } ++ ++ /// Reverify the oldest user verification record. ++ async fn do_reverify(&mut self) -> Result<()> { ++ let reverify_setting = self ++ .settings ++ .verified_users ++ .verify_update_frequency_duration; ++ let max_failures = self.settings.verified_users.max_consecutive_failures; ++ // get from settings, but default to 6hrs between re-checking an account ++ let reverify_dur = reverify_setting.unwrap_or_else(|| Duration::from_secs(60 * 60 * 6)); ++ // find all verification records that have success or failure OLDER than the reverify_dur. ++ let now = SystemTime::now(); ++ let earliest = now - reverify_dur; ++ let earliest_epoch = earliest ++ .duration_since(SystemTime::UNIX_EPOCH) ++ .map(|x| x.as_secs()) ++ .unwrap_or(0); ++ let vr = self.repo.get_oldest_user_verification(earliest_epoch).await; ++ match vr { ++ Ok(ref v) => { ++ let new_status = self.get_web_verification(&v.name, &v.address).await; ++ match new_status { ++ UserWebVerificationStatus::Verified => { ++ // freshly verified account, update the ++ // timestamp. ++ self.repo.update_verification_timestamp(v.rowid).await?; ++ info!("verification updated for {}", v.to_string()); ++ } ++ UserWebVerificationStatus::DomainNotAllowed ++ | UserWebVerificationStatus::Unknown => { ++ // server may be offline, or temporarily ++ // blocked by the config file. Note the ++ // failure so we can process something ++ // else. ++ ++ // have we had enough failures to give up? 
++ if v.failure_count >= max_failures as u64 { ++ info!( ++ "giving up on verifying {:?} after {} failures", ++ v.name, v.failure_count ++ ); ++ self.repo.delete_verification(v.rowid).await?; ++ } else { ++ // record normal failure, incrementing failure count ++ info!("verification failed for {}", v.to_string()); ++ self.repo.fail_verification(v.rowid).await?; ++ } ++ } ++ UserWebVerificationStatus::Unverified => { ++ // domain has removed the verification, drop ++ // the record on our side. ++ info!("verification rescinded for {}", v.to_string()); ++ self.repo.delete_verification(v.rowid).await?; ++ } ++ } ++ } ++ Err( ++ Error::SqlError(rusqlite::Error::QueryReturnedNoRows) ++ | Error::SqlxError(sqlx::Error::RowNotFound), ++ ) => { ++ // No users need verification. Reset the interval to ++ // the next verification attempt. ++ let start = tokio::time::Instant::now() + self.wait_after_finish; ++ self.reverify_interval = tokio::time::interval_at(start, self.http_wait_duration); ++ } ++ Err(ref e) => { ++ warn!( ++ "Error when checking for NIP-05 verification records: {:?}", ++ e ++ ); ++ } ++ } ++ Ok(()) ++ } ++ ++ /// Persist an event, create a verification record, and broadcast. ++ // TODO: have more event-writing logic handled in the db module. ++ // Right now, these events avoid the rate limit. That is ++ // acceptable since as soon as the user is registered, this path ++ // is no longer used. ++ // TODO: refactor these into spawn_blocking ++ // calls to get them off the async executors. ++ async fn create_new_verified_user(&mut self, name: &str, event: &Event) -> Result<()> { ++ let start = Instant::now(); ++ // we should only do this if we are enabled. if we are ++ // disabled/passive, the event has already been persisted. ++ let should_write_event = self.settings.verified_users.is_enabled(); ++ if should_write_event { ++ match self.repo.write_event(event).await { ++ Ok(updated) => { ++ if updated != 0 { ++ info!( ++ "persisted event (new verified pubkey): {:?} in {:?}", ++ event.get_event_id_prefix(), ++ start.elapsed() ++ ); ++ self.event_tx.send(event.clone()).ok(); ++ } ++ } ++ Err(err) => { ++ warn!("event insert failed: {:?}", err); ++ if let Error::SqlError(r) = err { ++ warn!("because: : {:?}", r); ++ } ++ } ++ } ++ } ++ // write the verification record ++ self.repo ++ .create_verification_record(&event.id, name) ++ .await?; ++ Ok(()) ++ } ++} ++ ++/// Result of checking user's verification status against DNS/HTTP. ++#[derive(PartialEq, Eq, Debug, Clone)] ++pub enum UserWebVerificationStatus { ++ Verified, // user is verified, as of now. ++ DomainNotAllowed, // domain blacklist or whitelist denied us from attempting a verification ++ Unknown, // user's status could not be determined (timeout, server error) ++ Unverified, // user's status is not verified (successful check, name / addr do not match) ++} ++ ++/// A NIP-05 verification record. ++#[derive(PartialEq, Eq, Debug, Clone)] ++// Basic information for a verification event. Gives us all we need to assert a NIP-05 address is good. ++pub struct VerificationRecord { ++ pub rowid: u64, // database row for this verification event ++ pub name: Nip05Name, // address being verified ++ pub address: String, // pubkey ++ pub event: String, // event ID hash providing the verification ++ pub event_created: u64, // when the metadata event was published ++ pub last_success: Option, // the most recent time a verification was provided. None if verification under this name has never succeeded. 
++ pub last_failure: Option, // the most recent time verification was attempted, but could not be completed. ++ pub failure_count: u64, // how many consecutive failures have been observed. ++} ++ ++/// Check with settings to determine if a given domain is allowed to ++/// publish. ++#[must_use] ++pub fn is_domain_allowed( ++ domain: &str, ++ whitelist: &Option>, ++ blacklist: &Option>, ++) -> bool { ++ // if there is a whitelist, domain must be present in it. ++ if let Some(wl) = whitelist { ++ // workaround for Vec contains not accepting &str ++ return wl.iter().any(|x| x == domain); ++ } ++ // otherwise, check that user is not in the blacklist ++ if let Some(bl) = blacklist { ++ return !bl.iter().any(|x| x == domain); ++ } ++ true ++} ++ ++impl VerificationRecord { ++ /// Check if the record is recent enough to be considered valid, ++ /// and the domain is allowed. ++ #[must_use] ++ pub fn is_valid(&self, verified_users_settings: &VerifiedUsers) -> bool { ++ //let settings = SETTINGS.read().unwrap(); ++ // how long a verification record is good for ++ let nip05_expiration = &verified_users_settings.verify_expiration_duration; ++ if let Some(e) = nip05_expiration { ++ if !self.is_current(e) { ++ return false; ++ } ++ } ++ // check domains ++ is_domain_allowed( ++ &self.name.domain, ++ &verified_users_settings.domain_whitelist, ++ &verified_users_settings.domain_blacklist, ++ ) ++ } ++ ++ /// Check if this record has been validated since the given ++ /// duration. ++ fn is_current(&self, d: &Duration) -> bool { ++ match self.last_success { ++ Some(s) => { ++ // current time - duration ++ let now = SystemTime::now(); ++ let cutoff = now - *d; ++ let cutoff_epoch = cutoff ++ .duration_since(SystemTime::UNIX_EPOCH) ++ .map(|x| x.as_secs()) ++ .unwrap_or(0); ++ s > cutoff_epoch ++ } ++ None => false, ++ } ++ } ++} ++ ++impl std::fmt::Display for VerificationRecord { ++ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { ++ write!( ++ f, ++ "({:?},{:?})", ++ self.name.to_string(), ++ self.address.chars().take(8).collect::() ++ ) ++ } ++} ++ ++#[cfg(test)] ++mod tests { ++ use super::*; ++ ++ #[test] ++ fn local_from_inet() { ++ let addr = "bob@example.com"; ++ let parsed = Nip05Name::try_from(addr); ++ assert!(parsed.is_ok()); ++ let v = parsed.unwrap(); ++ assert_eq!(v.local, "bob"); ++ assert_eq!(v.domain, "example.com"); ++ } ++ ++ #[test] ++ fn not_enough_sep() { ++ let addr = "bob_example.com"; ++ let parsed = Nip05Name::try_from(addr); ++ assert!(parsed.is_err()); ++ } ++ ++ #[test] ++ fn too_many_sep() { ++ let addr = "foo@bob@example.com"; ++ let parsed = Nip05Name::try_from(addr); ++ assert!(parsed.is_err()); ++ } ++ ++ #[test] ++ fn invalid_local_name() { ++ // non-permitted ascii chars ++ assert!(Nip05Name::try_from("foo!@example.com").is_err()); ++ assert!(Nip05Name::try_from("foo @example.com").is_err()); ++ assert!(Nip05Name::try_from(" foo@example.com").is_err()); ++ assert!(Nip05Name::try_from("f oo@example.com").is_err()); ++ assert!(Nip05Name::try_from("foo<@example.com").is_err()); ++ // unicode dash ++ assert!(Nip05Name::try_from("foo‐bar@example.com").is_err()); ++ // emoji ++ assert!(Nip05Name::try_from("foo😭bar@example.com").is_err()); ++ } ++ #[test] ++ fn invalid_domain_name() { ++ // non-permitted ascii chars ++ assert!(Nip05Name::try_from("foo@examp!e.com").is_err()); ++ assert!(Nip05Name::try_from("foo@ example.com").is_err()); ++ assert!(Nip05Name::try_from("foo@exa mple.com").is_err()); ++ assert!(Nip05Name::try_from("foo@example .com").is_err()); 
++        assert!(Nip05Name::try_from("foo@exa<mple.com").is_err());
++        // unicode dash
++        assert!(Nip05Name::try_from("foo@exa‐mple.com").is_err());
++        // emoji
++        assert!(Nip05Name::try_from("foo@exa😭mple.com").is_err());
++    }
++}
+diff --git a/src/notice.rs b/src/notice.rs
+new file mode 100644
+--- /dev/null
++++ b/src/notice.rs
++//! User notice messages
++
++pub enum EventResultStatus {
++    Saved,
++    Duplicate,
++    Invalid,
++    Blocked,
++    RateLimited,
++    Error,
++    Restricted,
++}
++
++pub struct EventResult {
++    pub id: String,
++    pub msg: String,
++    pub status: EventResultStatus,
++}
++
++pub enum Notice {
++    Message(String),
++    EventResult(EventResult),
++}
++
++impl EventResultStatus {
++    #[must_use]
++    pub fn was_successful(&self) -> bool {
++        match self {
++            Self::Duplicate | Self::Saved => true,
++            Self::Invalid | Self::Blocked | Self::RateLimited | Self::Error | Self::Restricted => false,
++        }
++    }
++
++    #[must_use]
++    pub fn prefix(&self) -> &'static str {
++        match self {
++            Self::Saved => "saved",
++            Self::Duplicate => "duplicate",
++            Self::Invalid => "invalid",
++            Self::Blocked => "blocked",
++            Self::RateLimited => "rate-limited",
++            Self::Error => "error",
++            Self::Restricted => "restricted",
++        }
++    }
++}
++
++impl Notice {
++    //pub fn err(err: error::Error, id: String) -> Notice {
++    //    Notice::err_msg(format!("{}", err), id)
++    //}
++
++    #[must_use]
++    pub fn message(msg: String) -> Notice {
++        Notice::Message(msg)
++    }
++
++    fn prefixed(id: String, msg: &str, status: EventResultStatus) -> Notice {
++        let msg = format!("{}: {}", status.prefix(), msg);
++        Notice::EventResult(EventResult { id, msg, status })
++    }
++
++    #[must_use]
++    pub fn invalid(id: String, msg: &str) -> Notice {
++        Notice::prefixed(id, msg, EventResultStatus::Invalid)
++    }
++
++    #[must_use]
++    pub fn blocked(id: String, msg: &str) -> Notice {
++        Notice::prefixed(id, msg, EventResultStatus::Blocked)
++    }
++
++    #[must_use]
++    pub fn rate_limited(id: String, msg: &str) -> Notice {
++        Notice::prefixed(id, msg, EventResultStatus::RateLimited)
++    }
++
++    #[must_use]
++    pub fn duplicate(id: String) -> Notice {
++        Notice::prefixed(id, "", EventResultStatus::Duplicate)
++    }
++
++    #[must_use]
++    pub fn error(id: String, msg: &str) -> Notice {
++        Notice::prefixed(id, msg, EventResultStatus::Error)
++    }
++
++    #[must_use]
++    pub fn restricted(id: String, msg: &str) -> Notice {
++        Notice::prefixed(id, msg, EventResultStatus::Restricted)
++    }
++
++    #[must_use]
++    pub fn saved(id: String) -> Notice {
++        Notice::EventResult(EventResult {
++            id,
++            msg: "".into(),
++            status: EventResultStatus::Saved,
++        })
++    }
++}
+diff --git a/src/payment/lnbits.rs b/src/payment/lnbits.rs
+new file mode 100644
+index 0000000..a5c15d7
+--- /dev/null
++++ b/src/payment/lnbits.rs
+@@ -0,0 +1,176 @@
++//! LNBits payment processor
++use http::Uri;
++use hyper::client::connect::HttpConnector;
++use hyper::Client;
++use hyper_rustls::HttpsConnector;
++use nostr::Keys;
++use serde::{Deserialize, Serialize};
++use serde_json::Value;
++
++use async_trait::async_trait;
++use rand::Rng;
++
++use std::str::FromStr;
++use url::Url;
++
++use crate::{config::Settings, error::Error};
++
++use super::{InvoiceInfo, InvoiceStatus, PaymentProcessor};
++
++const APIPATH: &str = "/api/v1/payments/";
++
++/// Info LNBits expects in create invoice request
++#[derive(Serialize, Deserialize, Debug)]
++pub struct LNBitsCreateInvoice {
++    out: bool,
++    amount: u64,
++    memo: String,
++    webhook: String,
++    unit: String,
++    internal: bool,
++    expiry: u64,
++}
++
++/// Invoice response from LNBits
++#[derive(Debug, Serialize, Deserialize)]
++pub struct LNBitsCreateInvoiceResponse {
++    payment_hash: String,
++    payment_request: String,
++}
++
++/// LNBits callback response
++/// Used when an invoice is paid:
++/// LNBits posts the status change to the relay
++#[derive(Serialize, Deserialize, Debug, Clone)]
++pub struct LNBitsCallback {
++    pub checking_id: String,
++    pub pending: bool,
++    pub amount: u64,
++    pub memo: String,
++    pub time: u64,
++    pub bolt11: String,
++    pub preimage: String,
++    pub payment_hash: String,
++    pub wallet_id: String,
++    pub webhook: String,
++    pub webhook_status: Option<String>,
++}
++
++/// LNBits response for the check invoice endpoint
++#[derive(Serialize, Deserialize, Debug, Clone)]
++pub struct LNBitsCheckInvoiceResponse {
++    paid: bool,
++}
++
++#[derive(Clone)]
++pub struct LNBitsPaymentProcessor {
++    /// HTTP client
++    client: hyper::Client<HttpsConnector<HttpConnector>, hyper::Body>,
++    settings: Settings,
++}
++
++impl LNBitsPaymentProcessor {
++    pub fn new(settings: &Settings) -> Self {
++        // setup hyper client
++        let https = hyper_rustls::HttpsConnectorBuilder::new()
++            .with_native_roots()
++            .https_only()
++            .enable_http1()
++            .build();
++        let client = Client::builder().build::<_, hyper::Body>(https);
++
++        Self {
++            client,
++            settings: settings.clone(),
++        }
++    }
++}
++
++#[async_trait]
++impl PaymentProcessor for LNBitsPaymentProcessor {
++    /// Calls the LNBits API to get a new invoice
++    async fn get_invoice(&self, key: &Keys, amount: u64) -> Result<InvoiceInfo, Error> {
++        let random_number: u16 = rand::thread_rng().gen();
++        let memo = format!("{}: {}", random_number, key.public_key());
++
++        let callback_url = Url::parse(
++            &self
++                .settings
++                .info
++                .relay_url
++                .clone()
++                .unwrap()
++                .replace("ws", "http"),
++        )?
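// Sketch of the JSON body this request will carry, per the
// LNBitsCreateInvoice struct above (all values illustrative):
//
//   {"out": false, "amount": 1000, "memo": "12345: <pubkey>",
//    "webhook": "https://relay.example.com/lnbits", "unit": "sat",
//    "internal": false, "expiry": 3600}
//
// The webhook URL is the relay's own /lnbits endpoint, derived from
// relay_url by swapping the ws scheme for http and joined with "lnbits"
// just below.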
++ .join("lnbits")?; ++ ++ let body = LNBitsCreateInvoice { ++ out: false, ++ amount, ++ memo: memo.clone(), ++ webhook: callback_url.to_string(), ++ unit: "sat".to_string(), ++ internal: false, ++ expiry: 3600, ++ }; ++ let url = Url::parse(&self.settings.pay_to_relay.node_url)?.join(APIPATH)?; ++ let uri = Uri::from_str(url.as_str().strip_suffix('/').unwrap_or(url.as_str())).unwrap(); ++ ++ let req = hyper::Request::builder() ++ .method(hyper::Method::POST) ++ .uri(uri) ++ .header("X-Api-Key", &self.settings.pay_to_relay.api_secret) ++ .body(hyper::Body::from(serde_json::to_string(&body)?)) ++ .expect("request builder"); ++ ++ let res = self.client.request(req).await?; ++ ++ // Json to Struct of LNbits callback ++ let body = hyper::body::to_bytes(res.into_body()).await?; ++ let invoice_response: LNBitsCreateInvoiceResponse = serde_json::from_slice(&body)?; ++ ++ Ok(InvoiceInfo { ++ pubkey: key.public_key().to_string(), ++ payment_hash: invoice_response.payment_hash, ++ bolt11: invoice_response.payment_request, ++ amount, ++ memo, ++ status: InvoiceStatus::Unpaid, ++ confirmed_at: None, ++ }) ++ } ++ ++ /// Calls LNBits Api to check the payment status of invoice ++ async fn check_invoice(&self, payment_hash: &str) -> Result { ++ let url = Url::parse(&self.settings.pay_to_relay.node_url)? ++ .join(APIPATH)? ++ .join(payment_hash)?; ++ let uri = Uri::from_str(url.as_str()).unwrap(); ++ ++ let req = hyper::Request::builder() ++ .method(hyper::Method::GET) ++ .uri(uri) ++ .header("X-Api-Key", &self.settings.pay_to_relay.api_secret) ++ .body(hyper::Body::empty()) ++ .expect("request builder"); ++ ++ let res = self.client.request(req).await?; ++ // Json to Struct of LNbits callback ++ let body = hyper::body::to_bytes(res.into_body()).await?; ++ let invoice_response: Value = serde_json::from_slice(&body)?; ++ ++ let status = if let Ok(invoice_response) = ++ serde_json::from_value::(invoice_response) ++ { ++ if invoice_response.paid { ++ InvoiceStatus::Paid ++ } else { ++ InvoiceStatus::Unpaid ++ } ++ } else { ++ InvoiceStatus::Expired ++ }; ++ ++ Ok(status) ++ } ++} +diff --git a/src/payment/mod.rs b/src/payment/mod.rs +new file mode 100644 +index 0000000..0158cf8 +--- /dev/null ++++ b/src/payment/mod.rs +@@ -0,0 +1,276 @@ ++use crate::error::{Error, Result}; ++use crate::event::Event; ++use crate::payment::lnbits::LNBitsPaymentProcessor; ++use crate::repo::NostrRepo; ++use serde::{Deserialize, Serialize}; ++use std::sync::Arc; ++use tracing::{info, warn}; ++ ++use async_trait::async_trait; ++use nostr::key::{FromPkStr, FromSkStr}; ++use nostr::{key::Keys, Event as NostrEvent, EventBuilder}; ++ ++pub mod lnbits; ++ ++/// Payment handler ++pub struct Payment { ++ /// Repository for saving/retrieving events and events ++ repo: Arc, ++ /// Newly validated events get written and then broadcast on this channel to subscribers ++ event_tx: tokio::sync::broadcast::Sender, ++ /// Payment message sender ++ payment_tx: tokio::sync::broadcast::Sender, ++ /// Payment message receiver ++ payment_rx: tokio::sync::broadcast::Receiver, ++ /// Settings ++ settings: crate::config::Settings, ++ // Nostr Keys ++ nostr_keys: Option, ++ /// Payment Processor ++ processor: Arc, ++} ++ ++#[async_trait] ++pub trait PaymentProcessor: Send + Sync { ++ /// Get invoice from processor ++ async fn get_invoice(&self, keys: &Keys, amount: u64) -> Result; ++ /// Check payment status of an invoice ++ async fn check_invoice(&self, payment_hash: &str) -> Result; ++} ++ ++#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] 
++pub enum Processor { ++ LNBits, ++} ++ ++/// Possible states of an invoice ++#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, sqlx::Type)] ++#[sqlx(type_name = "status")] ++pub enum InvoiceStatus { ++ Unpaid, ++ Paid, ++ Expired, ++} ++ ++impl ToString for InvoiceStatus { ++ fn to_string(&self) -> String { ++ match self { ++ InvoiceStatus::Paid => "Paid".to_string(), ++ InvoiceStatus::Unpaid => "Unpaid".to_string(), ++ InvoiceStatus::Expired => "Expired".to_string(), ++ } ++ } ++} ++ ++/// Invoice information ++#[derive(Debug, Clone)] ++pub struct InvoiceInfo { ++ pub pubkey: String, ++ pub payment_hash: String, ++ pub bolt11: String, ++ pub amount: u64, ++ pub status: InvoiceStatus, ++ pub memo: String, ++ pub confirmed_at: Option, ++} ++ ++/// Message variants for the payment channel ++#[derive(Debug, Clone)] ++pub enum PaymentMessage { ++ /// New account ++ NewAccount(String), ++ /// Check account, ++ CheckAccount(String), ++ /// Account Admitted ++ AccountAdmitted(String), ++ /// Invoice generated ++ Invoice(String, InvoiceInfo), ++ /// Invoice call back ++ /// Payment hash is passed ++ // This may have to be changed to better support other processors ++ InvoicePaid(String), ++} ++ ++impl Payment { ++ pub fn new( ++ repo: Arc, ++ payment_tx: tokio::sync::broadcast::Sender, ++ payment_rx: tokio::sync::broadcast::Receiver, ++ event_tx: tokio::sync::broadcast::Sender, ++ settings: crate::config::Settings, ++ ) -> Result { ++ info!("Create payment handler"); ++ ++ // Create nostr key from sk string ++ let nostr_keys = if let Some(secret_key) = &settings.pay_to_relay.secret_key { ++ Some(Keys::from_sk_str(secret_key)?) ++ } else { ++ None ++ }; ++ ++ // Create processor kind defined in settings ++ let processor = match &settings.pay_to_relay.processor { ++ Processor::LNBits => Arc::new(LNBitsPaymentProcessor::new(&settings)), ++ }; ++ ++ Ok(Payment { ++ repo, ++ payment_tx, ++ payment_rx, ++ event_tx, ++ settings, ++ nostr_keys, ++ processor, ++ }) ++ } ++ ++ /// Perform Payment tasks ++ pub async fn run(&mut self) { ++ loop { ++ let res = self.run_internal().await; ++ if let Err(e) = res { ++ info!("error in payment: {:?}", e); ++ } ++ } ++ } ++ ++ /// Internal select loop for preforming payment operations ++ async fn run_internal(&mut self) -> Result<()> { ++ tokio::select! { ++ m = self.payment_rx.recv() => { ++ match m { ++ Ok(PaymentMessage::NewAccount(pubkey)) => { ++ info!("payment event for {:?}", pubkey); ++ // REVIEW: This will need to change for cost per event ++ let amount = self.settings.pay_to_relay.admission_cost; ++ let invoice_info = self.get_invoice_info(&pubkey, amount).await?; ++ // TODO: should handle this error ++ self.payment_tx.send(PaymentMessage::Invoice(pubkey, invoice_info)).ok(); ++ }, ++ // Gets the most recent unpaid invoice from database ++ // Checks LNbits to verify if paid/unpaid ++ Ok(PaymentMessage::CheckAccount(pubkey)) => { ++ let keys = Keys::from_pk_str(&pubkey)?; ++ ++ if let Ok(Some(invoice_info)) = self.repo.get_unpaid_invoice(&keys).await { ++ match self.check_invoice_status(&invoice_info.payment_hash).await? 
{
++                                InvoiceStatus::Paid => {
++                                    self.repo.admit_account(&keys, self.settings.pay_to_relay.admission_cost).await?;
++                                    self.payment_tx.send(PaymentMessage::AccountAdmitted(pubkey)).ok();
++                                }
++                                _ => {
++                                    self.payment_tx.send(PaymentMessage::Invoice(pubkey, invoice_info)).ok();
++                                }
++                            }
++                        } else {
++                            let amount = self.settings.pay_to_relay.admission_cost;
++                            let invoice_info = self.get_invoice_info(&pubkey, amount).await?;
++                            self.payment_tx.send(PaymentMessage::Invoice(pubkey, invoice_info)).ok();
++                        }
++                    }
++                    Ok(PaymentMessage::InvoicePaid(payment_hash)) => {
++                        if self.check_invoice_status(&payment_hash).await?.eq(&InvoiceStatus::Paid) {
++                            let pubkey = self.repo
++                                .update_invoice(&payment_hash, InvoiceStatus::Paid)
++                                .await?;
++
++                            let key = Keys::from_pk_str(&pubkey)?;
++                            self.repo.admit_account(&key, self.settings.pay_to_relay.admission_cost).await?;
++                        }
++                    }
++                    Ok(_) => {
++                        // Nothing needs to be done for this variant here;
++                        // it is used by `server`
++                    }
++                    Err(err) => warn!("Payment RX: {err}")
++                }
++            }
++        }
++
++        Ok(())
++    }
++
++    /// Sends a Nostr DM to the pubkey that requested the invoice:
++    /// two events, the terms of service followed by the bolt11 invoice
++    pub async fn send_admission_message(
++        &self,
++        pubkey: &str,
++        invoice_info: &InvoiceInfo,
++    ) -> Result<()> {
++        let nostr_keys = match &self.nostr_keys {
++            Some(key) => key,
++            None => return Err(Error::CustomError("Nostr key not defined".to_string())),
++        };
++
++        // Create Nostr key from pk
++        let key = Keys::from_pk_str(pubkey)?;
++
++        let pubkey = key.public_key();
++
++        // Event DM with terms of service
++        let message_event: NostrEvent = EventBuilder::new_encrypted_direct_msg(
++            nostr_keys,
++            pubkey,
++            &self.settings.pay_to_relay.terms_message,
++        )?
++        .to_event(nostr_keys)?;
++
++        // Event DM with invoice
++        let invoice_event: NostrEvent =
++            EventBuilder::new_encrypted_direct_msg(nostr_keys, pubkey, &invoice_info.bolt11)?
++                .to_event(nostr_keys)?;
++
++        // Persist DM events to DB
++        self.repo.write_event(&message_event.clone().into()).await?;
++        self.repo.write_event(&invoice_event.clone().into()).await?;
++
++        // Broadcast DM events
++        self.event_tx.send(message_event.clone().into()).ok();
++        self.event_tx.send(invoice_event.clone().into()).ok();
++
++        Ok(())
++    }
++
++    /// Get Invoice Info
++    /// If the user has an active invoice, that is returned;
++    /// otherwise a new invoice is generated by the payment processor
++    pub async fn get_invoice_info(&self, pubkey: &str, amount: u64) -> Result<InvoiceInfo> {
++        // If the user is already in the DB this will be false,
++        // which avoids recreating admission invoices
++        // (it will likely continue to send DMs with the same invoice
++        // if the client keeps trying to write to the relay)
++        let key = Keys::from_pk_str(pubkey)?;
++        if !self.repo.create_account(&key).await?
{ ++ if let Ok(Some(invoice_info)) = self.repo.get_unpaid_invoice(&key).await { ++ return Ok(invoice_info); ++ } ++ } ++ ++ let key = Keys::from_pk_str(pubkey)?; ++ ++ let invoice_info = self.processor.get_invoice(&key, amount).await?; ++ ++ // Persist invoice to DB ++ self.repo ++ .create_invoice_record(&key, invoice_info.clone()) ++ .await?; ++ ++ if self.settings.pay_to_relay.direct_message { ++ // Admission event invoice and terms to pubkey that is joining ++ self.send_admission_message(pubkey, &invoice_info).await?; ++ } ++ ++ Ok(invoice_info) ++ } ++ ++ /// Check paid status of invoice with LNbits ++ pub async fn check_invoice_status(&self, payment_hash: &str) -> Result { ++ // Check base if passed expiry time ++ let status = self.processor.check_invoice(payment_hash).await?; ++ self.repo ++ .update_invoice(payment_hash, status.clone()) ++ .await?; ++ ++ Ok(status) ++ } ++} +diff --git a/src/repo/mod.rs b/src/repo/mod.rs +new file mode 100644 +index 0000000..b4dafb9 +--- /dev/null ++++ b/src/repo/mod.rs +@@ -0,0 +1,98 @@ ++use crate::db::QueryResult; ++use crate::error::Result; ++use crate::event::Event; ++use crate::nip05::VerificationRecord; ++use crate::payment::{InvoiceInfo, InvoiceStatus}; ++use crate::subscription::Subscription; ++use crate::utils::unix_time; ++use async_trait::async_trait; ++use nostr::Keys; ++use rand::Rng; ++ ++pub mod postgres; ++pub mod postgres_migration; ++pub mod sqlite; ++pub mod sqlite_migration; ++ ++#[async_trait] ++pub trait NostrRepo: Send + Sync { ++ /// Start the repository (any initialization or maintenance tasks can be kicked off here) ++ async fn start(&self) -> Result<()>; ++ ++ /// Run migrations and return current version ++ async fn migrate_up(&self) -> Result; ++ ++ /// Persist event to database ++ async fn write_event(&self, e: &Event) -> Result; ++ ++ /// Perform a database query using a subscription. ++ /// ++ /// The [`Subscription`] is converted into a SQL query. Each result ++ /// is published on the `query_tx` channel as it is returned. If a ++ /// message becomes available on the `abandon_query_rx` channel, the ++ /// query is immediately aborted. ++ async fn query_subscription( ++ &self, ++ sub: Subscription, ++ client_id: String, ++ query_tx: tokio::sync::mpsc::Sender, ++ mut abandon_query_rx: tokio::sync::oneshot::Receiver<()>, ++ ) -> Result<()>; ++ ++ /// Perform normal maintenance ++ async fn optimize_db(&self) -> Result<()>; ++ ++ /// Create a new verification record connected to a specific event ++ async fn create_verification_record(&self, event_id: &str, name: &str) -> Result<()>; ++ ++ /// Update verification timestamp ++ async fn update_verification_timestamp(&self, id: u64) -> Result<()>; ++ ++ /// Update verification record as failed ++ async fn fail_verification(&self, id: u64) -> Result<()>; ++ ++ /// Delete verification record ++ async fn delete_verification(&self, id: u64) -> Result<()>; ++ ++ /// Get the latest verification record for a given pubkey. 
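// A hedged sketch of driving query_subscription() as documented above:
// results stream over the mpsc channel, and firing (or dropping) the oneshot
// aborts the query. The channel capacity and client id are illustrative.
async fn demo_query(repo: &dyn NostrRepo, sub: Subscription) -> Result<()> {
    let (query_tx, mut query_rx) = tokio::sync::mpsc::channel::<QueryResult>(256);
    let (abandon_tx, abandon_rx) = tokio::sync::oneshot::channel::<()>();
    let printer = tokio::spawn(async move {
        while let Some(res) = query_rx.recv().await {
            // an "EOSE" event string marks the end of stored events
            println!("{}: {}", res.sub_id, res.event);
        }
    });
    repo.query_subscription(sub, "deadbeef".into(), query_tx, abandon_rx).await?;
    drop(abandon_tx); // never aborted in this sketch
    printer.await.ok();
    Ok(())
}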
++ async fn get_latest_user_verification(&self, pub_key: &str) -> Result; ++ ++ /// Get oldest verification before timestamp ++ async fn get_oldest_user_verification(&self, before: u64) -> Result; ++ ++ /// Create a new account ++ async fn create_account(&self, pubkey: &Keys) -> Result; ++ ++ /// Admit an account ++ async fn admit_account(&self, pubkey: &Keys, admission_cost: u64) -> Result<()>; ++ ++ /// Gets user balance if they are an admitted pubkey ++ async fn get_account_balance(&self, pubkey: &Keys) -> Result<(bool, u64)>; ++ ++ /// Update account balance ++ async fn update_account_balance( ++ &self, ++ pub_key: &Keys, ++ positive: bool, ++ new_balance: u64, ++ ) -> Result<()>; ++ ++ /// Create invoice record ++ async fn create_invoice_record(&self, pubkey: &Keys, invoice_info: InvoiceInfo) -> Result<()>; ++ ++ /// Update Invoice for given payment hash ++ async fn update_invoice(&self, payment_hash: &str, status: InvoiceStatus) -> Result; ++ ++ /// Get the most recent invoice for a given pubkey ++ /// invoice must be unpaid and not expired ++ async fn get_unpaid_invoice(&self, pubkey: &Keys) -> Result>; ++} ++ ++// Current time, with a slight forward jitter in seconds ++pub(crate) fn now_jitter(sec: u64) -> u64 { ++ // random time between now, and 10min in future. ++ let mut rng = rand::thread_rng(); ++ let jitter_amount = rng.gen_range(0..sec); ++ let now = unix_time(); ++ now.saturating_add(jitter_amount) ++} +diff --git a/src/repo/postgres.rs b/src/repo/postgres.rs +new file mode 100644 +index 0000000..05b3832 +--- /dev/null ++++ b/src/repo/postgres.rs +@@ -0,0 +1,965 @@ ++use crate::db::QueryResult; ++use crate::error::Result; ++use crate::event::{single_char_tagname, Event}; ++use crate::nip05::{Nip05Name, VerificationRecord}; ++use crate::payment::{InvoiceInfo, InvoiceStatus}; ++use crate::repo::{now_jitter, NostrRepo}; ++use crate::subscription::{ReqFilter, Subscription}; ++use async_std::stream::StreamExt; ++use async_trait::async_trait; ++use chrono::{DateTime, TimeZone, Utc}; ++use sqlx::postgres::PgRow; ++use sqlx::Error::RowNotFound; ++use sqlx::{Error, Execute, FromRow, Postgres, QueryBuilder, Row}; ++use std::time::{Duration, Instant}; ++ ++use crate::error; ++use crate::hexrange::{hex_range, HexSearch}; ++use crate::repo::postgres_migration::run_migrations; ++use crate::server::NostrMetrics; ++use crate::utils::{self, is_hex, is_lower_hex}; ++use nostr::key::Keys; ++use tokio::sync::mpsc::Sender; ++use tokio::sync::oneshot::Receiver; ++use tracing::{debug, error, info, trace, warn}; ++ ++pub type PostgresPool = sqlx::pool::Pool; ++ ++pub struct PostgresRepo { ++ conn: PostgresPool, ++ conn_write: PostgresPool, ++ metrics: NostrMetrics, ++} ++ ++impl PostgresRepo { ++ pub fn new(c: PostgresPool, cw: PostgresPool, m: NostrMetrics) -> PostgresRepo { ++ PostgresRepo { ++ conn: c, ++ conn_write: cw, ++ metrics: m, ++ } ++ } ++} ++ ++/// Cleanup expired events on a regular basis ++async fn cleanup_expired(conn: PostgresPool, frequency: Duration) -> Result<()> { ++ tokio::task::spawn(async move { ++ loop { ++ tokio::select! 
{ ++ _ = tokio::time::sleep(frequency) => { ++ let start = Instant::now(); ++ let exp_res = delete_expired(conn.clone()).await; ++ match exp_res { ++ Ok(exp_count) => { ++ if exp_count > 0 { ++ info!("removed {} expired events in: {:?}", exp_count, start.elapsed()); ++ } ++ }, ++ Err(e) => { ++ warn!("could not remove expired events due to error: {:?}", e); ++ } ++ } ++ } ++ }; ++ } ++ }); ++ Ok(()) ++} ++ ++/// One-time deletion of all expired events ++async fn delete_expired(conn: PostgresPool) -> Result { ++ let mut tx = conn.begin().await?; ++ let update_count = sqlx::query("DELETE FROM \"event\" WHERE expires_at <= $1;") ++ .bind(Utc.timestamp_opt(utils::unix_time() as i64, 0).unwrap()) ++ .execute(&mut tx) ++ .await? ++ .rows_affected(); ++ tx.commit().await?; ++ Ok(update_count) ++} ++ ++#[async_trait] ++impl NostrRepo for PostgresRepo { ++ async fn start(&self) -> Result<()> { ++ // begin a cleanup task for expired events. ++ cleanup_expired(self.conn_write.clone(), Duration::from_secs(600)).await?; ++ Ok(()) ++ } ++ ++ async fn migrate_up(&self) -> Result { ++ Ok(run_migrations(&self.conn_write).await?) ++ } ++ ++ async fn write_event(&self, e: &Event) -> Result { ++ // start transaction ++ let mut tx = self.conn_write.begin().await?; ++ let start = Instant::now(); ++ ++ // get relevant fields from event and convert to blobs. ++ let id_blob = hex::decode(&e.id).ok(); ++ let pubkey_blob: Option> = hex::decode(&e.pubkey).ok(); ++ let delegator_blob: Option> = ++ e.delegated_by.as_ref().and_then(|d| hex::decode(d).ok()); ++ let event_str = serde_json::to_string(&e).unwrap(); ++ ++ // determine if this event would be shadowed by an existing ++ // replaceable event or parameterized replaceable event. ++ if e.is_replaceable() { ++ let repl_count = sqlx::query( ++ "SELECT e.id FROM event e WHERE e.pub_key=$1 AND e.kind=$2 AND e.created_at >= $3 LIMIT 1;") ++ .bind(&pubkey_blob) ++ .bind(e.kind as i64) ++ .bind(Utc.timestamp_opt(e.created_at as i64, 0).unwrap()) ++ .fetch_optional(&mut tx) ++ .await?; ++ if repl_count.is_some() { ++ return Ok(0); ++ } ++ } ++ if let Some(d_tag) = e.distinct_param() { ++ let repl_count: i64 = if is_lower_hex(&d_tag) && (d_tag.len() % 2 == 0) { ++ sqlx::query_scalar( ++ "SELECT count(*) AS count FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.pub_key=$1 AND e.kind=$2 AND t.name='d' AND t.value_hex=$3 AND e.created_at >= $4 LIMIT 1;") ++ .bind(hex::decode(&e.pubkey).ok()) ++ .bind(e.kind as i64) ++ .bind(hex::decode(d_tag).ok()) ++ .bind(Utc.timestamp_opt(e.created_at as i64, 0).unwrap()) ++ .fetch_one(&mut tx) ++ .await? ++ } else { ++ sqlx::query_scalar( ++ "SELECT count(*) AS count FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.pub_key=$1 AND e.kind=$2 AND t.name='d' AND t.value=$3 AND e.created_at >= $4 LIMIT 1;") ++ .bind(hex::decode(&e.pubkey).ok()) ++ .bind(e.kind as i64) ++ .bind(d_tag.as_bytes()) ++ .bind(Utc.timestamp_opt(e.created_at as i64, 0).unwrap()) ++ .fetch_one(&mut tx) ++ .await? ++ }; ++ // if any rows were returned, then some newer event with ++ // the same author/kind/tag value exist, and we can ignore ++ // this event. ++ if repl_count > 0 { ++ return Ok(0); ++ } ++ } ++ // ignore if the event hash is a duplicate. 
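// Sketch of the rule applied in both branches above, and again for tag
// storage below: a `d` tag (NIP-33) or tag value is matched against the
// binary `value_hex` column only when it is lowercase hex of even length;
// everything else is compared against the text `value` column. A
// hypothetical helper spelling out the same predicate:
fn is_hex_addressable(val: &str) -> bool {
    // mirrors `is_lower_hex(val) && (val.len() % 2 == 0)` from utils
    val.len() % 2 == 0
        && val
            .chars()
            .all(|c| c.is_ascii_hexdigit() && !c.is_ascii_uppercase())
}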
++ let mut ins_count = sqlx::query( ++ r#"INSERT INTO "event" ++(id, pub_key, created_at, expires_at, kind, "content", delegated_by) ++VALUES($1, $2, $3, $4, $5, $6, $7) ++ON CONFLICT (id) DO NOTHING"#, ++ ) ++ .bind(&id_blob) ++ .bind(&pubkey_blob) ++ .bind(Utc.timestamp_opt(e.created_at as i64, 0).unwrap()) ++ .bind( ++ e.expiration() ++ .and_then(|x| Utc.timestamp_opt(x as i64, 0).latest()), ++ ) ++ .bind(e.kind as i64) ++ .bind(event_str.into_bytes()) ++ .bind(delegator_blob) ++ .execute(&mut tx) ++ .await? ++ .rows_affected(); ++ ++ if ins_count == 0 { ++ // if the event was a duplicate, no need to insert event or ++ // pubkey references. This will abort the txn. ++ return Ok(0); ++ } ++ ++ // add all tags to the tag table ++ for tag in e.tags.iter() { ++ // ensure we have 2 values. ++ if tag.len() >= 2 { ++ let tag_name = &tag[0]; ++ let tag_val = &tag[1]; ++ // only single-char tags are searchable ++ let tag_char_opt = single_char_tagname(tag_name); ++ match &tag_char_opt { ++ Some(_) => { ++ // if tag value is lowercase hex; ++ if is_lower_hex(tag_val) && (tag_val.len() % 2 == 0) { ++ sqlx::query("INSERT INTO tag (event_id, \"name\", value, value_hex) VALUES($1, $2, NULL, $3) \ ++ ON CONFLICT (event_id, \"name\", value, value_hex) DO NOTHING") ++ .bind(&id_blob) ++ .bind(tag_name) ++ .bind(hex::decode(tag_val).ok()) ++ .execute(&mut tx) ++ .await ++ .unwrap(); ++ } else { ++ sqlx::query("INSERT INTO tag (event_id, \"name\", value, value_hex) VALUES($1, $2, $3, NULL) \ ++ ON CONFLICT (event_id, \"name\", value, value_hex) DO NOTHING") ++ .bind(&id_blob) ++ .bind(tag_name) ++ .bind(tag_val.as_bytes()) ++ .execute(&mut tx) ++ .await ++ .unwrap(); ++ } ++ } ++ None => {} ++ } ++ } ++ } ++ if e.is_replaceable() { ++ let update_count = sqlx::query("DELETE FROM \"event\" WHERE kind=$1 and pub_key = $2 and id not in (select id from \"event\" where kind=$1 and pub_key=$2 order by created_at desc limit 1);") ++ .bind(e.kind as i64) ++ .bind(hex::decode(&e.pubkey).ok()) ++ .execute(&mut tx) ++ .await?.rows_affected(); ++ if update_count > 0 { ++ info!( ++ "hid {} older replaceable kind {} events for author: {:?}", ++ update_count, ++ e.kind, ++ e.get_author_prefix() ++ ); ++ } ++ } ++ // parameterized replaceable events ++ // check for parameterized replaceable events that would be hidden; don't insert these either. ++ if let Some(d_tag) = e.distinct_param() { ++ let update_count = if is_lower_hex(&d_tag) && (d_tag.len() % 2 == 0) { ++ sqlx::query("DELETE FROM event WHERE kind=$1 AND pub_key=$2 AND id IN (SELECT e.id FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.kind=$1 AND e.pub_key=$2 AND t.name='d' AND t.value_hex=$3 ORDER BY created_at DESC OFFSET 1);") ++ .bind(e.kind as i64) ++ .bind(hex::decode(&e.pubkey).ok()) ++ .bind(hex::decode(d_tag).ok()) ++ .execute(&mut tx) ++ .await?.rows_affected() ++ } else { ++ sqlx::query("DELETE FROM event WHERE kind=$1 AND pub_key=$2 AND id IN (SELECT e.id FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.kind=$1 AND e.pub_key=$2 AND t.name='d' AND t.value=$3 ORDER BY created_at DESC OFFSET 1);") ++ .bind(e.kind as i64) ++ .bind(hex::decode(&e.pubkey).ok()) ++ .bind(d_tag.as_bytes()) ++ .execute(&mut tx) ++ .await?.rows_affected() ++ }; ++ if update_count > 0 { ++ info!( ++ "removed {} older parameterized replaceable kind {} events for author: {:?}", ++ update_count, ++ e.kind, ++ e.get_author_prefix() ++ ); ++ } ++ } ++ // if this event is a deletion, hide the referenced events from the same author. 
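// Sketch of the NIP-09 deletion event handled below (values illustrative):
//
//   {"kind": 5, "pubkey": "<author hex>",
//    "tags": [["e", "<id of event to delete>"], ["e", "<another id>"]],
//    "content": "reason (optional)"}
//
// Only "e" tag values that are 64-char hex survive the filter below, and
// only events by the same author are hidden.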
++ if e.kind == 5 { ++ let event_candidates = e.tag_values_by_name("e"); ++ let pub_keys: Vec> = event_candidates ++ .iter() ++ .filter(|x| is_hex(x) && x.len() == 64) ++ .filter_map(|x| hex::decode(x).ok()) ++ .collect(); ++ ++ let mut builder = QueryBuilder::new( ++ "UPDATE \"event\" SET hidden = 1::bit(1) WHERE kind != 5 AND pub_key = ", ++ ); ++ builder.push_bind(hex::decode(&e.pubkey).ok()); ++ builder.push(" AND id IN ("); ++ ++ let mut sep = builder.separated(", "); ++ for pk in pub_keys { ++ sep.push_bind(pk); ++ } ++ sep.push_unseparated(")"); ++ ++ let update_count = builder.build().execute(&mut tx).await?.rows_affected(); ++ info!( ++ "hid {} deleted events for author {:?}", ++ update_count, ++ e.get_author_prefix() ++ ); ++ } else { ++ // check if a deletion has already been recorded for this event. ++ // Only relevant for non-deletion events ++ let del_count = sqlx::query( ++ "SELECT e.id FROM \"event\" e \ ++ LEFT JOIN tag t ON e.id = t.event_id \ ++ WHERE e.pub_key = $1 AND t.\"name\" = 'e' AND e.kind = 5 AND t.value = $2 LIMIT 1", ++ ) ++ .bind(&pubkey_blob) ++ .bind(&id_blob) ++ .fetch_optional(&mut tx) ++ .await?; ++ ++ // check if a the query returned a result, meaning we should ++ // hid the current event ++ if del_count.is_some() { ++ // a deletion already existed, mark original event as hidden. ++ info!( ++ "hid event: {:?} due to existing deletion by author: {:?}", ++ e.get_event_id_prefix(), ++ e.get_author_prefix() ++ ); ++ sqlx::query("UPDATE \"event\" SET hidden = 1::bit(1) WHERE id = $1") ++ .bind(&id_blob) ++ .execute(&mut tx) ++ .await?; ++ // event was deleted, so let caller know nothing new ++ // arrived, preventing this from being sent to active ++ // subscriptions ++ ins_count = 0; ++ } ++ } ++ tx.commit().await?; ++ self.metrics ++ .write_events ++ .observe(start.elapsed().as_secs_f64()); ++ Ok(ins_count) ++ } ++ ++ async fn query_subscription( ++ &self, ++ sub: Subscription, ++ client_id: String, ++ query_tx: Sender, ++ mut abandon_query_rx: Receiver<()>, ++ ) -> Result<()> { ++ let start = Instant::now(); ++ let mut row_count: usize = 0; ++ let metrics = &self.metrics; ++ ++ for filter in sub.filters.iter() { ++ let start = Instant::now(); ++ // generate SQL query ++ let q_filter = query_from_filter(filter); ++ if q_filter.is_none() { ++ debug!("Failed to generate query!"); ++ continue; ++ } ++ ++ debug!("SQL generated in {:?}", start.elapsed()); ++ ++ // cutoff for displaying slow queries ++ let slow_cutoff = Duration::from_millis(2000); ++ ++ // any client that doesn't cause us to generate new rows in 5 ++ // seconds gets dropped. ++ let abort_cutoff = Duration::from_secs(5); ++ ++ let start = Instant::now(); ++ let mut slow_first_event; ++ let mut last_successful_send = Instant::now(); ++ ++ // execute the query. Don't cache, since queries vary so much. ++ let mut q_filter = q_filter.unwrap(); ++ let q_build = q_filter.build(); ++ let sql = q_build.sql(); ++ let mut results = q_build.fetch(&self.conn); ++ ++ let mut first_result = true; ++ while let Some(row) = results.next().await { ++ if let Err(e) = row { ++ error!("Query failed: {} {} {:?}", e, sql, filter); ++ break; ++ } ++ let first_event_elapsed = start.elapsed(); ++ slow_first_event = first_event_elapsed >= slow_cutoff; ++ if first_result { ++ debug!( ++ "first result in {:?} (cid: {}, sub: {:?})", ++ first_event_elapsed, client_id, sub.id ++ ); ++ first_result = false; ++ } ++ ++ // logging for slow queries; show sub and SQL. 
++ // to reduce logging; only show 1/16th of clients (leading 0) ++ if slow_first_event && client_id.starts_with("00") { ++ debug!( ++ "query req (slow): {:?} (cid: {}, sub: {:?})", ++ &sub, client_id, sub.id ++ ); ++ } else { ++ trace!( ++ "query req: {:?} (cid: {}, sub: {:?})", ++ &sub, ++ client_id, ++ sub.id ++ ); ++ } ++ ++ // check if this is still active; every 100 rows ++ if row_count % 100 == 0 && abandon_query_rx.try_recv().is_ok() { ++ debug!( ++ "query cancelled by client (cid: {}, sub: {:?})", ++ client_id, sub.id ++ ); ++ return Ok(()); ++ } ++ ++ row_count += 1; ++ let event_json: Vec = row.unwrap().get(0); ++ loop { ++ if query_tx.capacity() != 0 { ++ // we have capacity to add another item ++ break; ++ } else { ++ // the queue is full ++ trace!("db reader thread is stalled"); ++ if last_successful_send + abort_cutoff < Instant::now() { ++ // the queue has been full for too long, abort ++ info!("aborting database query due to slow client"); ++ metrics ++ .query_aborts ++ .with_label_values(&["slowclient"]) ++ .inc(); ++ return Ok(()); ++ } ++ // give the queue a chance to clear before trying again ++ async_std::task::sleep(Duration::from_millis(100)).await; ++ } ++ } ++ ++ // TODO: we could use try_send, but we'd have to juggle ++ // getting the query result back as part of the error ++ // result. ++ query_tx ++ .send(QueryResult { ++ sub_id: sub.get_id(), ++ event: String::from_utf8(event_json).unwrap(), ++ }) ++ .await ++ .ok(); ++ last_successful_send = Instant::now(); ++ } ++ } ++ query_tx ++ .send(QueryResult { ++ sub_id: sub.get_id(), ++ event: "EOSE".to_string(), ++ }) ++ .await ++ .ok(); ++ self.metrics ++ .query_sub ++ .observe(start.elapsed().as_secs_f64()); ++ debug!( ++ "query completed in {:?} (cid: {}, sub: {:?}, db_time: {:?}, rows: {})", ++ start.elapsed(), ++ client_id, ++ sub.id, ++ start.elapsed(), ++ row_count ++ ); ++ Ok(()) ++ } ++ ++ async fn optimize_db(&self) -> Result<()> { ++ // Not implemented ++ Ok(()) ++ } ++ ++ async fn create_verification_record(&self, event_id: &str, name: &str) -> Result<()> { ++ let mut tx = self.conn_write.begin().await?; ++ ++ sqlx::query("DELETE FROM user_verification WHERE \"name\" = $1") ++ .bind(name) ++ .execute(&mut tx) ++ .await?; ++ ++ sqlx::query("INSERT INTO user_verification (event_id, \"name\", verified_at) VALUES ($1, $2, now())") ++ .bind(hex::decode(event_id).ok()) ++ .bind(name) ++ .execute(&mut tx) ++ .await?; ++ ++ tx.commit().await?; ++ info!("saved new verification record for ({:?})", name); ++ Ok(()) ++ } ++ ++ async fn update_verification_timestamp(&self, id: u64) -> Result<()> { ++ // add some jitter to the verification to prevent everything from stacking up together. 
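// now_jitter(600), defined in repo/mod.rs above, returns unix_time() plus a
// random 0..600s offset, so records verified at the same moment drift apart
// instead of all coming due together: verified_at lands anywhere in
// [now, now + 10min).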
++ let verify_time = now_jitter(600);
++
++ // update verification time and reset any failure count
++ sqlx::query("UPDATE user_verification SET verified_at = $1, fail_count = 0 WHERE id = $2")
++ .bind(Utc.timestamp_opt(verify_time as i64, 0).unwrap())
++ .bind(id as i64)
++ .execute(&self.conn_write)
++ .await?;
++
++ info!("verification updated for {}", id);
++ Ok(())
++ }
++
++ async fn fail_verification(&self, id: u64) -> Result<()> {
++ sqlx::query("UPDATE user_verification SET failed_at = now(), fail_count = fail_count + 1 WHERE id = $1")
++ .bind(id as i64)
++ .execute(&self.conn_write)
++ .await?;
++ Ok(())
++ }
++
++ async fn delete_verification(&self, id: u64) -> Result<()> {
++ sqlx::query("DELETE FROM user_verification WHERE id = $1")
++ .bind(id as i64)
++ .execute(&self.conn_write)
++ .await?;
++ Ok(())
++ }
++
++ async fn get_latest_user_verification(&self, pub_key: &str) -> Result<VerificationRecord> {
++ let query = r#"SELECT
++ v.id,
++ v."name",
++ e.id as event_id,
++ e.pub_key,
++ e.created_at,
++ v.verified_at,
++ v.failed_at,
++ v.fail_count
++ FROM user_verification v
++ LEFT JOIN "event" e ON e.id = v.event_id
++ WHERE e.pub_key = $1
++ ORDER BY e.created_at DESC, v.verified_at DESC, v.failed_at DESC
++ LIMIT 1"#;
++ sqlx::query_as::<_, VerificationRecord>(query)
++ .bind(hex::decode(pub_key).ok())
++ .fetch_optional(&self.conn)
++ .await?
++ .ok_or(error::Error::SqlxError(RowNotFound))
++ }
++
++ async fn get_oldest_user_verification(&self, before: u64) -> Result<VerificationRecord> {
++ let query = r#"SELECT
++ v.id,
++ v."name",
++ e.id as event_id,
++ e.pub_key,
++ e.created_at,
++ v.verified_at,
++ v.failed_at,
++ v.fail_count
++ FROM user_verification v
++ LEFT JOIN "event" e ON e.id = v.event_id
++ WHERE (v.verified_at < $1 OR v.verified_at IS NULL)
++ AND (v.failed_at < $1 OR v.failed_at IS NULL)
++ ORDER BY v.verified_at ASC, v.failed_at ASC
++ LIMIT 1"#;
++ sqlx::query_as::<_, VerificationRecord>(query)
++ .bind(Utc.timestamp_opt(before as i64, 0).unwrap())
++ .fetch_optional(&self.conn)
++ .await?
++ .ok_or(error::Error::SqlxError(RowNotFound))
++ }
++
++ async fn create_account(&self, pub_key: &Keys) -> Result<bool> {
++ let pub_key = pub_key.public_key().to_string();
++ let mut tx = self.conn_write.begin().await?;
++
++ let result = sqlx::query("INSERT INTO account (pubkey, balance) VALUES ($1, 0);")
++ .bind(pub_key)
++ .execute(&mut tx)
++ .await;
++
++ let success = match result {
++ Ok(res) => {
++ tx.commit().await?;
++ res.rows_affected() == 1
++ }
++ Err(_err) => false,
++ };
++
++ Ok(success)
++ }
++
++ /// Admit account
++ async fn admit_account(&self, pub_key: &Keys, admission_cost: u64) -> Result<()> {
++ let pub_key = pub_key.public_key().to_string();
++ sqlx::query(
++ "UPDATE account SET is_admitted = TRUE, balance = balance - $1 WHERE pubkey = $2",
++ )
++ .bind(admission_cost as i64)
++ .bind(pub_key)
++ .execute(&self.conn_write)
++ .await?;
++ Ok(())
++ }
++
++ /// Get whether the account is admitted, along with its balance
++ async fn get_account_balance(&self, pub_key: &Keys) -> Result<(bool, u64)> {
++ let pub_key = pub_key.public_key().to_string();
++ let query = r#"SELECT
++ is_admitted,
++ balance
++ FROM account
++ WHERE pubkey = $1
++ LIMIT 1"#;
++
++ let result = sqlx::query_as::<_, (bool, i64)>(query)
++ .bind(pub_key)
++ .fetch_optional(&self.conn_write)
++ .await?
++ .ok_or(error::Error::SqlxError(RowNotFound))?;
++
++ Ok((result.0, result.1 as u64))
++ }
++
++ /// Update account balance
++ async fn update_account_balance(
++ &self,
++ pub_key: &Keys,
++ positive: bool,
++ new_balance: u64,
++ ) -> Result<()> {
++ let pub_key = pub_key.public_key().to_string();
++ match positive {
++ true => {
++ sqlx::query("UPDATE account SET balance = balance + $1 WHERE pubkey = $2")
++ .bind(new_balance as i64)
++ .bind(pub_key)
++ .execute(&self.conn_write)
++ .await?
++ }
++ false => {
++ sqlx::query("UPDATE account SET balance = balance - $1 WHERE pubkey = $2")
++ .bind(new_balance as i64)
++ .bind(pub_key)
++ .execute(&self.conn_write)
++ .await?
++ }
++ };
++ Ok(())
++ }
++
++ /// Create invoice record
++ async fn create_invoice_record(&self, pub_key: &Keys, invoice_info: InvoiceInfo) -> Result<()> {
++ let pub_key = pub_key.public_key().to_string();
++ let mut tx = self.conn_write.begin().await?;
++
++ sqlx::query(
++ "INSERT INTO invoice (pubkey, payment_hash, amount, status, description, created_at, invoice) VALUES ($1, $2, $3, $4, $5, now(), $6)",
++ )
++ .bind(pub_key)
++ .bind(invoice_info.payment_hash)
++ .bind(invoice_info.amount as i64)
++ .bind(invoice_info.status)
++ .bind(invoice_info.memo)
++ .bind(invoice_info.bolt11)
++ .execute(&mut tx)
++ .await.unwrap();
++
++ debug!("Invoice added");
++
++ tx.commit().await?;
++ Ok(())
++ }
++
++ /// Update invoice record
++ async fn update_invoice(&self, payment_hash: &str, status: InvoiceStatus) -> Result<String> {
++ debug!("Payment Hash: {}", payment_hash);
++ let query = "SELECT pubkey, status, amount FROM invoice WHERE payment_hash=$1;";
++ let (pubkey, prev_invoice_status, amount) =
++ sqlx::query_as::<_, (String, InvoiceStatus, i64)>(query)
++ .bind(payment_hash)
++ .fetch_optional(&self.conn_write)
++ .await?
++ .ok_or(error::Error::SqlxError(RowNotFound))?;
++
++ // If the invoice is paid, update the confirmed_at timestamp
++ let query = if status.eq(&InvoiceStatus::Paid) {
++ "UPDATE invoice SET status=$1, confirmed_at = now() WHERE payment_hash=$2;"
++ } else {
++ "UPDATE invoice SET status=$1 WHERE payment_hash=$2;"
++ };
++
++ sqlx::query(query)
++ .bind(&status)
++ .bind(payment_hash)
++ .execute(&self.conn_write)
++ .await?;
++
++ if prev_invoice_status.eq(&InvoiceStatus::Unpaid) && status.eq(&InvoiceStatus::Paid) {
++ sqlx::query("UPDATE account SET balance = balance + $1 WHERE pubkey = $2")
++ .bind(amount)
++ .bind(&pubkey)
++ .execute(&self.conn_write)
++ .await?;
++ }
++
++ Ok(pubkey)
++ }
++
++ /// Get the most recent invoice for a given pubkey
++ /// invoice must be unpaid and not expired
++ async fn get_unpaid_invoice(&self, pubkey: &Keys) -> Result<Option<InvoiceInfo>> {
++ let query = r#"
++SELECT amount, payment_hash, description, invoice
++FROM invoice
++WHERE pubkey = $1
++ORDER BY created_at DESC
++LIMIT 1;
++ "#;
++ match sqlx::query_as::<_, (i64, String, String, String)>(query)
++ .bind(pubkey.public_key().to_string())
++ .fetch_optional(&self.conn_write)
++ .await
++ .unwrap()
++ {
++ Some((amount, payment_hash, description, invoice)) => Ok(Some(InvoiceInfo {
++ pubkey: pubkey.public_key().to_string(),
++ payment_hash,
++ bolt11: invoice,
++ amount: amount as u64,
++ status: InvoiceStatus::Unpaid,
++ memo: description,
++ confirmed_at: None,
++ })),
++ None => Ok(None),
++ }
++ }
++}
++
++/// Create a dynamic SQL query and params from a subscription filter.
++fn query_from_filter(f: &ReqFilter) -> Option<QueryBuilder<Postgres>> {
++ // if the filter is malformed, don't return anything.
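++ //
++ // Illustrative sketch (added; not in the original patch): for a filter
++ // such as {"authors":["<64-char hex>"],"kinds":[1],"limit":10}, the
++ // builder below yields SQL shaped roughly like:
++ //
++ //   SELECT e."content", e.created_at FROM "event" e WHERE
++ //     (e.pub_key in ($1) OR e.delegated_by in ($2))
++ //     AND e.kind in ($3)
++ //     AND e.hidden != 1::bit(1)
++ //     AND (e.expires_at IS NULL OR e.expires_at > $4)
++ //   ORDER BY e.created_at DESC LIMIT 10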
++ if f.force_no_match { ++ return None; ++ } ++ ++ let mut query = QueryBuilder::new("SELECT e.\"content\", e.created_at FROM \"event\" e WHERE "); ++ ++ // This tracks whether we need to push a prefix AND before adding another clause ++ let mut push_and = false; ++ // Query for "authors", allowing prefix matches ++ if let Some(auth_vec) = &f.authors { ++ // filter out non-hex values ++ let auth_vec: Vec<&String> = auth_vec.iter().filter(|a| is_hex(a)).collect(); ++ ++ if !auth_vec.is_empty() { ++ query.push("("); ++ ++ // shortcut authors into "IN" query ++ let any_is_range = auth_vec.iter().any(|pk| pk.len() != 64); ++ if !any_is_range { ++ query.push("e.pub_key in ("); ++ let mut pk_sep = query.separated(", "); ++ for pk in auth_vec.iter() { ++ pk_sep.push_bind(hex::decode(pk).ok()); ++ } ++ query.push(") OR e.delegated_by in ("); ++ let mut pk_delegated_sep = query.separated(", "); ++ for pk in auth_vec.iter() { ++ pk_delegated_sep.push_bind(hex::decode(pk).ok()); ++ } ++ query.push(")"); ++ push_and = true; ++ } else { ++ let mut range_authors = query.separated(" OR "); ++ for auth in auth_vec { ++ match hex_range(auth) { ++ Some(HexSearch::Exact(ex)) => { ++ range_authors ++ .push("(e.pub_key = ") ++ .push_bind_unseparated(ex.clone()) ++ .push_unseparated(" OR e.delegated_by = ") ++ .push_bind_unseparated(ex) ++ .push_unseparated(")"); ++ } ++ Some(HexSearch::Range(lower, upper)) => { ++ range_authors ++ .push("((e.pub_key > ") ++ .push_bind_unseparated(lower.clone()) ++ .push_unseparated(" AND e.pub_key < ") ++ .push_bind_unseparated(upper.clone()) ++ .push_unseparated(") OR (e.delegated_by > ") ++ .push_bind_unseparated(lower) ++ .push_unseparated(" AND e.delegated_by < ") ++ .push_bind_unseparated(upper) ++ .push_unseparated("))"); ++ } ++ Some(HexSearch::LowerOnly(lower)) => { ++ range_authors ++ .push("(e.pub_key > ") ++ .push_bind_unseparated(lower.clone()) ++ .push_unseparated(" OR e.delegated_by > ") ++ .push_bind_unseparated(lower) ++ .push_unseparated(")"); ++ } ++ None => { ++ info!("Could not parse hex range from author {:?}", auth); ++ } ++ } ++ push_and = true; ++ } ++ } ++ query.push(")"); ++ } ++ } ++ ++ // Query for Kind ++ if let Some(ks) = &f.kinds { ++ if !ks.is_empty() { ++ if push_and { ++ query.push(" AND "); ++ } ++ push_and = true; ++ ++ query.push("e.kind in ("); ++ let mut list_query = query.separated(", "); ++ for k in ks.iter() { ++ list_query.push_bind(*k as i64); ++ } ++ query.push(")"); ++ } ++ } ++ ++ // Query for event, allowing prefix matches ++ if let Some(id_vec) = &f.ids { ++ // filter out non-hex values ++ let id_vec: Vec<&String> = id_vec.iter().filter(|a| is_hex(a)).collect(); ++ ++ if !id_vec.is_empty() { ++ if push_and { ++ query.push(" AND ("); ++ } else { ++ query.push("("); ++ } ++ push_and = true; ++ ++ // shortcut ids into "IN" query ++ let any_is_range = id_vec.iter().any(|pk| pk.len() != 64); ++ if !any_is_range { ++ query.push("id in ("); ++ let mut sep = query.separated(", "); ++ for id in id_vec.iter() { ++ sep.push_bind(hex::decode(id).ok()); ++ } ++ query.push(")"); ++ } else { ++ // take each author and convert to a hex search ++ let mut id_query = query.separated(" OR "); ++ for id in id_vec { ++ match hex_range(id) { ++ Some(HexSearch::Exact(ex)) => { ++ id_query ++ .push("(id = ") ++ .push_bind_unseparated(ex) ++ .push_unseparated(")"); ++ } ++ Some(HexSearch::Range(lower, upper)) => { ++ id_query ++ .push("(id > ") ++ .push_bind_unseparated(lower) ++ .push_unseparated(" AND id < ") ++ .push_bind_unseparated(upper) ++ 
.push_unseparated(")"); ++ } ++ Some(HexSearch::LowerOnly(lower)) => { ++ id_query ++ .push("(id > ") ++ .push_bind_unseparated(lower) ++ .push_unseparated(")"); ++ } ++ None => { ++ info!("Could not parse hex range from id {:?}", id); ++ } ++ } ++ } ++ } ++ ++ query.push(")"); ++ } ++ } ++ ++ // Query for tags ++ if let Some(map) = &f.tags { ++ if !map.is_empty() { ++ if push_and { ++ query.push(" AND "); ++ } ++ push_and = true; ++ ++ for (key, val) in map.iter() { ++ query.push("e.id IN (SELECT ee.id FROM \"event\" ee LEFT JOIN tag t on ee.id = t.event_id WHERE ee.hidden != 1::bit(1) and (t.\"name\" = ") ++ .push_bind(key.to_string()) ++ .push(" AND (value in ("); ++ ++ // plain value match first ++ let mut tag_query = query.separated(", "); ++ for v in val.iter() { ++ if (v.len() % 2 != 0) && !is_lower_hex(v) { ++ tag_query.push_bind(v.as_bytes()); ++ } else { ++ tag_query.push_bind(hex::decode(v).ok()); ++ } ++ } ++ query.push("))))"); ++ } ++ } ++ } ++ ++ // Query for timestamp ++ if f.since.is_some() { ++ if push_and { ++ query.push(" AND "); ++ } ++ push_and = true; ++ query ++ .push("e.created_at >= ") ++ .push_bind(Utc.timestamp_opt(f.since.unwrap() as i64, 0).unwrap()); ++ } ++ ++ // Query for timestamp ++ if f.until.is_some() { ++ if push_and { ++ query.push(" AND "); ++ } ++ push_and = true; ++ query ++ .push("e.created_at <= ") ++ .push_bind(Utc.timestamp_opt(f.until.unwrap() as i64, 0).unwrap()); ++ } ++ ++ // never display hidden events ++ if push_and { ++ query.push(" AND e.hidden != 1::bit(1)"); ++ } else { ++ query.push("e.hidden != 1::bit(1)"); ++ } ++ // never display expired events ++ query ++ .push(" AND (e.expires_at IS NULL OR e.expires_at > ") ++ .push_bind(Utc.timestamp_opt(utils::unix_time() as i64, 0).unwrap()) ++ .push(")"); ++ ++ // Apply per-filter limit to this query. ++ // The use of a LIMIT implies a DESC order, to capture only the most recent events. 
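++ // (added example: with "limit": 50 the 50 newest matching rows are
++ // returned; with no limit, rows stream oldest-first, capped at 1000 below.)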
++ if let Some(lim) = f.limit {
++ query.push(" ORDER BY e.created_at DESC LIMIT ");
++ query.push(lim.min(1000));
++ } else {
++ query.push(" ORDER BY e.created_at ASC LIMIT ");
++ query.push(1000);
++ }
++ Some(query)
++}
++
++impl FromRow<'_, PgRow> for VerificationRecord {
++ fn from_row(row: &'_ PgRow) -> std::result::Result<Self, sqlx::Error> {
++ let name = Nip05Name::try_from(row.get::<'_, &str, &str>("name")).or(Err(RowNotFound))?;
++ Ok(VerificationRecord {
++ rowid: row.get::<'_, i64, &str>("id") as u64,
++ name,
++ address: hex::encode(row.get::<'_, Vec<u8>, &str>("pub_key")),
++ event: hex::encode(row.get::<'_, Vec<u8>, &str>("event_id")),
++ event_created: row.get::<'_, DateTime<Utc>, &str>("created_at").timestamp() as u64,
++ last_success: match row.try_get::<'_, DateTime<Utc>, &str>("verified_at") {
++ Ok(x) => Some(x.timestamp() as u64),
++ _ => None,
++ },
++ last_failure: match row.try_get::<'_, DateTime<Utc>, &str>("failed_at") {
++ Ok(x) => Some(x.timestamp() as u64),
++ _ => None,
++ },
++ failure_count: row.get::<'_, i32, &str>("fail_count") as u64,
++ })
++ }
++}
+diff --git a/src/repo/postgres_migration.rs b/src/repo/postgres_migration.rs
+new file mode 100644
+index 0000000..27eb602
+--- /dev/null
++++ b/src/repo/postgres_migration.rs
+@@ -0,0 +1,320 @@
++use crate::repo::postgres::PostgresPool;
++use async_trait::async_trait;
++use sqlx::{Executor, Postgres, Transaction};
++
++#[async_trait]
++pub trait Migration {
++ fn serial_number(&self) -> i64;
++ async fn run(&self, tx: &mut Transaction<Postgres>);
++}
++
++struct SimpleSqlMigration {
++ pub serial_number: i64,
++ pub sql: Vec<&'static str>,
++}
++
++#[async_trait]
++impl Migration for SimpleSqlMigration {
++ fn serial_number(&self) -> i64 {
++ self.serial_number
++ }
++
++ async fn run(&self, tx: &mut Transaction<Postgres>) {
++ for sql in self.sql.iter() {
++ tx.execute(*sql).await.unwrap();
++ }
++ }
++}
++
++/// Execute all migrations on the database.
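++/// (Added note: each applied migration is recorded by serial number in
++/// the `migrations` table, so calling this on every startup is
++/// idempotent; see `run_migration` below.)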
++pub async fn run_migrations(db: &PostgresPool) -> crate::error::Result<usize> {
++ prepare_migrations_table(db).await;
++ run_migration(m001::migration(), db).await;
++ let m002_result = run_migration(m002::migration(), db).await;
++ if m002_result == MigrationResult::Upgraded {
++ m002::rebuild_tags(db).await?;
++ }
++ run_migration(m003::migration(), db).await;
++ run_migration(m004::migration(), db).await;
++ run_migration(m005::migration(), db).await;
++ Ok(current_version(db).await as usize)
++}
++
++async fn current_version(db: &PostgresPool) -> i64 {
++ sqlx::query_scalar("SELECT max(serial_number) FROM migrations;")
++ .fetch_one(db)
++ .await
++ .unwrap()
++}
++
++async fn prepare_migrations_table(db: &PostgresPool) {
++ sqlx::query("CREATE TABLE IF NOT EXISTS migrations (serial_number bigint)")
++ .execute(db)
++ .await
++ .unwrap();
++}
++
++// Running a migration was either unnecessary, or completed
++#[derive(PartialEq, Eq, Debug, Clone)]
++enum MigrationResult {
++ Upgraded,
++ NotNeeded,
++}
++
++async fn run_migration(migration: impl Migration, db: &PostgresPool) -> MigrationResult {
++ let row: i64 =
++ sqlx::query_scalar("SELECT COUNT(*) AS count FROM migrations WHERE serial_number = $1")
++ .bind(migration.serial_number())
++ .fetch_one(db)
++ .await
++ .unwrap();
++
++ if row > 0 {
++ return MigrationResult::NotNeeded;
++ }
++
++ let mut transaction = db.begin().await.unwrap();
++ migration.run(&mut transaction).await;
++
++ sqlx::query("INSERT INTO migrations VALUES ($1)")
++ .bind(migration.serial_number())
++ .execute(&mut transaction)
++ .await
++ .unwrap();
++
++ transaction.commit().await.unwrap();
++ MigrationResult::Upgraded
++}
++
++mod m001 {
++ use crate::repo::postgres_migration::{Migration, SimpleSqlMigration};
++
++ pub const VERSION: i64 = 1;
++
++ pub fn migration() -> impl Migration {
++ SimpleSqlMigration {
++ serial_number: VERSION,
++ sql: vec![
++ r#"
++-- Events table
++CREATE TABLE "event" (
++ id bytea NOT NULL,
++ pub_key bytea NOT NULL,
++ created_at timestamp with time zone NOT NULL,
++ kind integer NOT NULL,
++ "content" bytea NOT NULL,
++ hidden bit(1) NOT NULL DEFAULT 0::bit(1),
++ delegated_by bytea NULL,
++ first_seen timestamp with time zone NOT NULL DEFAULT now(),
++ CONSTRAINT event_pkey PRIMARY KEY (id)
++);
++CREATE INDEX event_created_at_idx ON "event" (created_at,kind);
++CREATE INDEX event_pub_key_idx ON "event" (pub_key);
++CREATE INDEX event_delegated_by_idx ON "event" (delegated_by);
++
++-- Tags table
++CREATE TABLE "tag" (
++ id int8 NOT NULL GENERATED BY DEFAULT AS IDENTITY,
++ event_id bytea NOT NULL,
++ "name" varchar NOT NULL,
++ value bytea NOT NULL,
++ CONSTRAINT tag_fk FOREIGN KEY (event_id) REFERENCES "event"(id) ON DELETE CASCADE
++);
++CREATE INDEX tag_event_id_idx ON tag USING btree (event_id, name);
++CREATE INDEX tag_value_idx ON tag USING btree (value);
++
++-- NIP-05 Verification table
++CREATE TABLE "user_verification" (
++ id int8 NOT NULL GENERATED BY DEFAULT AS IDENTITY,
++ event_id bytea NOT NULL,
++ "name" varchar NOT NULL,
++ verified_at timestamptz NULL,
++ failed_at timestamptz NULL,
++ fail_count int4 NULL DEFAULT 0,
++ CONSTRAINT user_verification_pk PRIMARY KEY (id),
++ CONSTRAINT user_verification_fk FOREIGN KEY (event_id) REFERENCES "event"(id) ON DELETE CASCADE
++);
++CREATE INDEX user_verification_event_id_idx ON user_verification USING btree (event_id);
++CREATE INDEX user_verification_name_idx ON user_verification USING btree (name);
++ "#,
++ ],
++ }
++ }
++}
++
++mod m002 {
++ use async_std::stream::StreamExt;
++ use indicatif::{ProgressBar, ProgressStyle};
++ use sqlx::Row;
++ use std::time::Instant;
++ use tracing::info;
++
++ use crate::event::{single_char_tagname, Event};
++ use crate::repo::postgres::PostgresPool;
++ use crate::repo::postgres_migration::{Migration, SimpleSqlMigration};
++ use crate::utils::is_lower_hex;
++
++ pub const VERSION: i64 = 2;
++
++ pub fn migration() -> impl Migration {
++ SimpleSqlMigration {
++ serial_number: VERSION,
++ sql: vec![
++ r#"
++-- Add tag value column
++ALTER TABLE tag ADD COLUMN value_hex bytea;
++-- Remove not-null constraint
++ALTER TABLE tag ALTER COLUMN value DROP NOT NULL;
++-- Add value index
++CREATE INDEX tag_value_hex_idx ON tag USING btree (value_hex);
++ "#,
++ ],
++ }
++ }
++
++ pub async fn rebuild_tags(db: &PostgresPool) -> crate::error::Result<()> {
++ // Check how many events we have to process
++ let start = Instant::now();
++ let mut tx = db.begin().await.unwrap();
++ let mut update_tx = db.begin().await.unwrap();
++ // Clear out table
++ sqlx::query("DELETE FROM tag;")
++ .execute(&mut update_tx)
++ .await?;
++ {
++ let event_count: i64 = sqlx::query_scalar("SELECT COUNT(*) from event;")
++ .fetch_one(&mut tx)
++ .await
++ .unwrap();
++ let bar = ProgressBar::new(event_count.try_into().unwrap())
++ .with_message("rebuilding tags table");
++ bar.set_style(
++ ProgressStyle::with_template(
++ "[{elapsed_precise}] {bar:40.white/blue} {pos:>7}/{len:7} [{percent}%] {msg}",
++ )
++ .unwrap(),
++ );
++ let mut events =
++ sqlx::query("SELECT id, content FROM event ORDER BY id;").fetch(&mut tx);
++ while let Some(row) = events.next().await {
++ bar.inc(1);
++ // get the row id and content
++ let row = row.unwrap();
++ let event_id: Vec<u8> = row.get(0);
++ let event_bytes: Vec<u8> = row.get(1);
++ let event: Event = serde_json::from_str(&String::from_utf8(event_bytes).unwrap())?;
++
++ for t in event.tags.iter().filter(|x| x.len() > 1) {
++ let tagname = t.get(0).unwrap();
++ let tagnamechar_opt = single_char_tagname(tagname);
++ if tagnamechar_opt.is_none() {
++ continue;
++ }
++ // safe because len was > 1
++ let tagval = t.get(1).unwrap();
++ // insert as BLOB if we can restore it losslessly.
++ // this means it needs to be even length and lowercase.
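++ // (added example: "abcd1234" survives a decode/encode round trip,
++ // while "xyz" or "ABCD" would not, so those are kept as raw bytes.)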
++ if (tagval.len() % 2 == 0) && is_lower_hex(tagval) { ++ let q = "INSERT INTO tag (event_id, \"name\", value, value_hex) VALUES ($1, $2, NULL, $3) ON CONFLICT DO NOTHING;"; ++ sqlx::query(q) ++ .bind(&event_id) ++ .bind(tagname) ++ .bind(hex::decode(tagval).ok()) ++ .execute(&mut update_tx) ++ .await?; ++ } else { ++ let q = "INSERT INTO tag (event_id, \"name\", value, value_hex) VALUES ($1, $2, $3, NULL) ON CONFLICT DO NOTHING;"; ++ sqlx::query(q) ++ .bind(&event_id) ++ .bind(tagname) ++ .bind(tagval.as_bytes()) ++ .execute(&mut update_tx) ++ .await?; ++ } ++ } ++ } ++ update_tx.commit().await?; ++ bar.finish(); ++ } ++ info!("rebuilt tags in {:?}", start.elapsed()); ++ Ok(()) ++ } ++} ++ ++mod m003 { ++ use crate::repo::postgres_migration::{Migration, SimpleSqlMigration}; ++ ++ pub const VERSION: i64 = 3; ++ ++ pub fn migration() -> impl Migration { ++ SimpleSqlMigration { ++ serial_number: VERSION, ++ sql: vec![ ++ r#" ++-- Add unique constraint on tag ++ALTER TABLE tag ADD CONSTRAINT unique_constraint_name UNIQUE (event_id, "name", value, value_hex); ++ "#, ++ ], ++ } ++ } ++} ++ ++mod m004 { ++ use crate::repo::postgres_migration::{Migration, SimpleSqlMigration}; ++ ++ pub const VERSION: i64 = 4; ++ ++ pub fn migration() -> impl Migration { ++ SimpleSqlMigration { ++ serial_number: VERSION, ++ sql: vec![ ++ r#" ++-- Add expiration time for events ++ALTER TABLE event ADD COLUMN expires_at timestamp(0) with time zone; ++-- Index expiration time ++CREATE INDEX event_expires_at_idx ON "event" (expires_at); ++ "#, ++ ], ++ } ++ } ++} ++ ++mod m005 { ++ use crate::repo::postgres_migration::{Migration, SimpleSqlMigration}; ++ ++ pub const VERSION: i64 = 5; ++ ++ pub fn migration() -> impl Migration { ++ SimpleSqlMigration { ++ serial_number: VERSION, ++ sql: vec![ ++ r#" ++-- Create account table ++CREATE TABLE "account" ( ++ pubkey varchar NOT NULL, ++ is_admitted BOOLEAN NOT NULL DEFAULT FALSE, ++ balance BIGINT NOT NULL DEFAULT 0, ++ tos_accepted_at TIMESTAMP, ++ CONSTRAINT account_pkey PRIMARY KEY (pubkey) ++); ++ ++CREATE TYPE status AS ENUM ('Paid', 'Unpaid', 'Expired'); ++ ++ ++CREATE TABLE "invoice" ( ++ payment_hash varchar NOT NULL, ++ pubkey varchar NOT NULL, ++ invoice varchar NOT NULL, ++ amount BIGINT NOT NULL, ++ status status NOT NULL DEFAULT 'Unpaid', ++ description varchar, ++ created_at timestamp, ++ confirmed_at timestamp, ++ CONSTRAINT invoice_payment_hash PRIMARY KEY (payment_hash), ++ CONSTRAINT invoice_pubkey_fkey FOREIGN KEY (pubkey) REFERENCES account (pubkey) ON DELETE CASCADE ++); ++ "#, ++ ], ++ } ++ } ++} +diff --git a/src/repo/sqlite.rs b/src/repo/sqlite.rs +new file mode 100644 +index 0000000..c297847 +--- /dev/null ++++ b/src/repo/sqlite.rs +@@ -0,0 +1,1387 @@ ++//! 
Event persistence and querying
++//use crate::config::SETTINGS;
++use crate::config::Settings;
++use crate::db::QueryResult;
++use crate::error::{Error::SqlError, Result};
++use crate::event::{single_char_tagname, Event};
++use crate::hexrange::hex_range;
++use crate::hexrange::HexSearch;
++use crate::nip05::{Nip05Name, VerificationRecord};
++use crate::payment::{InvoiceInfo, InvoiceStatus};
++use crate::repo::sqlite_migration::{upgrade_db, STARTUP_SQL};
++use crate::server::NostrMetrics;
++use crate::subscription::{ReqFilter, Subscription};
++use crate::utils::{is_hex, unix_time};
++use async_trait::async_trait;
++use hex;
++use r2d2;
++use r2d2_sqlite::SqliteConnectionManager;
++use rusqlite::params;
++use rusqlite::types::ToSql;
++use rusqlite::OpenFlags;
++use std::fmt::Write as _;
++use std::path::Path;
++use std::sync::Arc;
++use std::thread;
++use std::time::Duration;
++use std::time::Instant;
++use tokio::sync::{Mutex, MutexGuard, Semaphore};
++use tokio::task;
++use tracing::{debug, info, trace, warn};
++
++use crate::repo::{now_jitter, NostrRepo};
++use nostr::key::Keys;
++
++pub type SqlitePool = r2d2::Pool<SqliteConnectionManager>;
++pub type PooledConnection = r2d2::PooledConnection<SqliteConnectionManager>;
++pub const DB_FILE: &str = "nostr.db";
++
++#[derive(Clone)]
++pub struct SqliteRepo {
++ /// Metrics
++ metrics: NostrMetrics,
++ /// Pool for reading events and NIP-05 status
++ read_pool: SqlitePool,
++ /// Pool for writing events and NIP-05 verification
++ write_pool: SqlitePool,
++ /// Pool for performing checkpoints/optimization
++ maint_pool: SqlitePool,
++ /// Flag to indicate a checkpoint is underway
++ checkpoint_in_progress: Arc<Mutex<u64>>,
++ /// Flag to limit writer concurrency
++ write_in_progress: Arc<Mutex<u64>>,
++ /// Semaphore for readers to acquire blocking threads
++ reader_threads_ready: Arc<Semaphore>,
++}
++
++impl SqliteRepo {
++ // build all the pools needed
++ #[must_use]
++ pub fn new(settings: &Settings, metrics: NostrMetrics) -> SqliteRepo {
++ let write_pool = build_pool(
++ "writer",
++ settings,
++ OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE,
++ 0,
++ 2,
++ false,
++ );
++ let maint_pool = build_pool(
++ "maintenance",
++ settings,
++ OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE,
++ 0,
++ 2,
++ true,
++ );
++ let read_pool = build_pool(
++ "reader",
++ settings,
++ OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE,
++ settings.database.min_conn,
++ settings.database.max_conn,
++ true,
++ );
++
++ // this is used to block new reads during critical checkpoints
++ let checkpoint_in_progress = Arc::new(Mutex::new(0));
++ // SQLite can only effectively write single threaded, so don't
++ // block multiple worker threads unnecessarily.
++ let write_in_progress = Arc::new(Mutex::new(0));
++ // configure the number of worker threads that can be spawned
++ // to match the number of database reader connections.
++ let max_conn = settings.database.max_conn as usize;
++ let reader_threads_ready = Arc::new(Semaphore::new(max_conn));
++ SqliteRepo {
++ metrics,
++ read_pool,
++ write_pool,
++ maint_pool,
++ checkpoint_in_progress,
++ write_in_progress,
++ reader_threads_ready,
++ }
++ }
++
++ /// Persist an event to the database, returning rows added.
++ pub fn persist_event(conn: &mut PooledConnection, e: &Event) -> Result<u64> {
++ // enable auto vacuum
++ conn.execute_batch("pragma auto_vacuum = FULL")?;
++
++ // start transaction
++ let tx = conn.transaction()?;
++ // get relevant fields from event and convert to blobs.
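++ // (added note: a valid 64-char hex id decodes to a 32-byte BLOB;
++ // using .ok() stores a failed decode as NULL instead of erroring.)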
++ let id_blob = hex::decode(&e.id).ok();
++ let pubkey_blob: Option<Vec<u8>> = hex::decode(&e.pubkey).ok();
++ let delegator_blob: Option<Vec<u8>> =
++ e.delegated_by.as_ref().and_then(|d| hex::decode(d).ok());
++ let event_str = serde_json::to_string(&e).ok();
++ // check for replaceable events that would hide this one; we won't even attempt to insert these.
++ if e.is_replaceable() {
++ let repl_count = tx.query_row(
++ "SELECT e.id FROM event e INDEXED BY author_index WHERE e.author=? AND e.kind=? AND e.created_at >= ? LIMIT 1;",
++ params![pubkey_blob, e.kind, e.created_at], |row| row.get::<usize, u64>(0));
++ if repl_count.ok().is_some() {
++ return Ok(0);
++ }
++ }
++ // check for parameterized replaceable events that would be hidden; don't insert these either.
++ if let Some(d_tag) = e.distinct_param() {
++ let repl_count = tx.query_row(
++ "SELECT e.id FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.author=? AND e.kind=? AND t.name='d' AND t.value=? AND e.created_at >= ? LIMIT 1;",
++ params![pubkey_blob, e.kind, d_tag, e.created_at],|row| row.get::<usize, u64>(0));
++ // if any rows were returned, then some newer event with
++ // the same author/kind/tag value exists, and we can ignore
++ // this event.
++ if repl_count.ok().is_some() {
++ return Ok(0);
++ }
++ }
++ // ignore if the event hash is a duplicate.
++ let mut ins_count = tx.execute(
++ "INSERT OR IGNORE INTO event (event_hash, created_at, expires_at, kind, author, delegated_by, content, first_seen, hidden) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, strftime('%s','now'), FALSE);",
++ params![id_blob, e.created_at, e.expiration(), e.kind, pubkey_blob, delegator_blob, event_str]
++ )? as u64;
++ if ins_count == 0 {
++ // if the event was a duplicate, no need to insert event or
++ // pubkey references.
++ tx.rollback().ok();
++ return Ok(ins_count);
++ }
++ // remember primary key of the event most recently inserted.
++ let ev_id = tx.last_insert_rowid();
++ // add all tags to the tag table
++ for tag in &e.tags {
++ // ensure we have 2 values.
++ if tag.len() >= 2 {
++ let tagname = &tag[0];
++ let tagval = &tag[1];
++ // only single-char tags are searchable
++ let tagchar_opt = single_char_tagname(tagname);
++ match &tagchar_opt {
++ Some(_) => {
++ tx.execute(
++ "INSERT OR IGNORE INTO tag (event_id, name, value, kind, created_at) VALUES (?1, ?2, ?3, ?4, ?5)",
++ params![ev_id, &tagname, &tagval, e.kind, e.created_at],
++ )?;
++ }
++ None => {}
++ }
++ }
++ }
++ // if this event is a replaceable update, remove other replaceable
++ // events with the same kind from the same author that were issued
++ // earlier than this.
++ if e.is_replaceable() {
++ let author = hex::decode(&e.pubkey).ok();
++ // this is a backwards check - hide any events that were older.
++ let update_count = tx.execute(
++ "DELETE FROM event WHERE kind=? and author=? and id NOT IN (SELECT id FROM event INDEXED BY author_kind_index WHERE kind=? AND author=? ORDER BY created_at DESC LIMIT 1)",
++ params![e.kind, author, e.kind, author],
++ )?;
++ if update_count > 0 {
++ info!(
++ "removed {} older replaceable kind {} events for author: {:?}",
++ update_count,
++ e.kind,
++ e.get_author_prefix()
++ );
++ }
++ }
++ // if this event is parameterized replaceable, remove other events.
++ if let Some(d_tag) = e.distinct_param() {
++ let update_count = tx.execute(
++ "DELETE FROM event WHERE kind=? AND author=? AND id IN (SELECT e.id FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.kind=? AND e.author=? AND t.name='d' AND t.value=? ORDER BY t.created_at DESC LIMIT -1 OFFSET 1);",
++ params![e.kind, pubkey_blob, e.kind, pubkey_blob, d_tag])?;
++ if update_count > 0 {
++ info!(
++ "removed {} older parameterized replaceable kind {} events for author: {:?}",
++ update_count,
++ e.kind,
++ e.get_author_prefix()
++ );
++ }
++ }
++ // if this event is a deletion, hide the referenced events from the same author.
++ if e.kind == 5 {
++ let event_candidates = e.tag_values_by_name("e");
++ // first parameter will be author
++ let mut params: Vec<Box<dyn ToSql>> = vec![Box::new(hex::decode(&e.pubkey)?)];
++ event_candidates
++ .iter()
++ .filter(|x| is_hex(x) && x.len() == 64)
++ .filter_map(|x| hex::decode(x).ok())
++ .for_each(|x| params.push(Box::new(x)));
++ let query = format!(
++ "UPDATE event SET hidden=TRUE WHERE kind!=5 AND author=? AND event_hash IN ({})",
++ repeat_vars(params.len() - 1)
++ );
++ let mut stmt = tx.prepare(&query)?;
++ let update_count = stmt.execute(rusqlite::params_from_iter(params))?;
++ info!(
++ "hid {} deleted events for author {:?}",
++ update_count,
++ e.get_author_prefix()
++ );
++ } else {
++ // check if a deletion has already been recorded for this event.
++ // Only relevant for non-deletion events
++ let del_count = tx.query_row(
++ "SELECT e.id FROM event e WHERE e.author=? AND e.id IN (SELECT t.event_id FROM tag t WHERE t.name='e' AND t.kind=5 AND t.value=?) LIMIT 1;",
++ params![pubkey_blob, e.id], |row| row.get::<usize, u64>(0));
++ // check if the query returned a result, meaning we should
++ // hide the current event
++ if del_count.ok().is_some() {
++ // a deletion already existed, mark original event as hidden.
++ info!(
++ "hid event: {:?} due to existing deletion by author: {:?}",
++ e.get_event_id_prefix(),
++ e.get_author_prefix()
++ );
++ let _update_count =
++ tx.execute("UPDATE event SET hidden=TRUE WHERE id=?", params![ev_id])?;
++ // event was deleted, so let caller know nothing new
++ // arrived, preventing this from being sent to active
++ // subscriptions
++ ins_count = 0;
++ }
++ }
++ tx.commit()?;
++ Ok(ins_count)
++ }
++}
++
++#[async_trait]
++impl NostrRepo for SqliteRepo {
++ async fn start(&self) -> Result<()> {
++ db_checkpoint_task(
++ self.maint_pool.clone(),
++ Duration::from_secs(60),
++ self.write_in_progress.clone(),
++ self.checkpoint_in_progress.clone(),
++ )
++ .await?;
++ cleanup_expired(
++ self.maint_pool.clone(),
++ Duration::from_secs(600),
++ self.write_in_progress.clone(),
++ )
++ .await
++ }
++
++ async fn migrate_up(&self) -> Result<usize> {
++ let _write_guard = self.write_in_progress.lock().await;
++ let mut conn = self.write_pool.get()?;
++ task::spawn_blocking(move || upgrade_db(&mut conn)).await?
++ }
++ /// Persist event to database
++ async fn write_event(&self, e: &Event) -> Result<u64> {
++ let start = Instant::now();
++ let max_write_attempts = 10;
++ let mut attempts = 0;
++ let _write_guard = self.write_in_progress.lock().await;
++ // spawn a blocking thread
++ //let mut conn = self.write_pool.get()?;
++ let pool = self.write_pool.clone();
++ let e = e.clone();
++ let event_count = task::spawn_blocking(move || {
++ let mut conn = pool.get()?;
++ // this could fail because the database was busy; try
++ // multiple times before giving up.
++ loop {
++ attempts += 1;
++ let wr = SqliteRepo::persist_event(&mut conn, &e);
++ match wr {
++ Err(SqlError(rusqlite::Error::SqliteFailure(e, _))) => {
++ // this basically means that NIP-05 or another
++ // writer was using the database between us
++ // reading and promoting the connection to a
++ // write lock.
++ info!(
++ "event write failed, DB locked (attempt: {}); sqlite err: {}",
++ attempts, e.extended_code
++ );
++ }
++ _ => {
++ return wr;
++ }
++ }
++ if attempts >= max_write_attempts {
++ return wr;
++ }
++ }
++ })
++ .await?;
++ self.metrics
++ .write_events
++ .observe(start.elapsed().as_secs_f64());
++ event_count
++ }
++
++ /// Perform a database query using a subscription.
++ ///
++ /// The [`Subscription`] is converted into a SQL query. Each result
++ /// is published on the `query_tx` channel as it is returned. If a
++ /// message becomes available on the `abandon_query_rx` channel, the
++ /// query is immediately aborted.
++ async fn query_subscription(
++ &self,
++ sub: Subscription,
++ client_id: String,
++ query_tx: tokio::sync::mpsc::Sender<QueryResult>,
++ mut abandon_query_rx: tokio::sync::oneshot::Receiver<()>,
++ ) -> Result<()> {
++ let pre_spawn_start = Instant::now();
++ // if we let every request spawn a thread, we'll exhaust the
++ // thread pool waiting for queries to finish under high load.
++ // Instead, don't bother spawning threads when they will just
++ // block on a database connection.
++ let sem = self
++ .reader_threads_ready
++ .clone()
++ .acquire_owned()
++ .await
++ .unwrap();
++ // clone the repo so the blocking task can own it (`self` cannot
++ // be rebound or moved into a 'static closure directly)
++ let repo = self.clone();
++ let metrics = self.metrics.clone();
++ task::spawn_blocking(move || {
++ {
++ // if we are waiting on a checkpoint, stop until it is complete
++ let _x = repo.checkpoint_in_progress.blocking_lock();
++ }
++ let db_queue_time = pre_spawn_start.elapsed();
++ // if the queue time was very long (>5 seconds), spare the DB and abort.
++ if db_queue_time > Duration::from_secs(5) {
++ info!(
++ "shedding DB query load queued for {:?} (cid: {}, sub: {:?})",
++ db_queue_time, client_id, sub.id
++ );
++ metrics.query_aborts.with_label_values(&["loadshed"]).inc();
++ return Ok(());
++ }
++ // otherwise, report queuing time if it is slow
++ else if db_queue_time > Duration::from_secs(1) {
++ debug!(
++ "(slow) DB query queued for {:?} (cid: {}, sub: {:?})",
++ db_queue_time, client_id, sub.id
++ );
++ }
++ // check before getting a DB connection if the client still wants the results
++ if abandon_query_rx.try_recv().is_ok() {
++ debug!(
++ "query cancelled by client (before execution) (cid: {}, sub: {:?})",
++ client_id, sub.id
++ );
++ return Ok(());
++ }
++
++ let start = Instant::now();
++ let mut row_count: usize = 0;
++ // cutoff for displaying slow queries
++ let slow_cutoff = Duration::from_millis(250);
++ let mut filter_count = 0;
++ // remove duplicates from the filter list.
++ if let Ok(mut conn) = repo.read_pool.get() {
++ {
++ let pool_state = repo.read_pool.state();
++ metrics
++ .db_connections
++ .set((pool_state.connections - pool_state.idle_connections).into());
++ }
++ for filter in sub.filters.iter() {
++ let filter_start = Instant::now();
++ filter_count += 1;
++ let sql_gen_elapsed = filter_start.elapsed();
++ let (q, p, idx) = query_from_filter(filter);
++ if sql_gen_elapsed > Duration::from_millis(10) {
++ debug!("SQL (slow) generated in {:?}", filter_start.elapsed());
++ }
++ // any client that doesn't cause us to generate new rows in 2
++ // seconds gets dropped.
++ let abort_cutoff = Duration::from_secs(2);
++ let mut slow_first_event;
++ let mut last_successful_send = Instant::now();
++ // execute the query.
++ // make the actual SQL query (with parameters inserted) available
++ conn.trace(Some(|x| trace!("SQL trace: {:?}", x)));
++ let mut stmt = conn.prepare_cached(&q)?;
++ let mut event_rows = stmt.query(rusqlite::params_from_iter(p))?;
++
++ let mut first_result = true;
++ while let Some(row) = event_rows.next()? {
++ let first_event_elapsed = filter_start.elapsed();
++ slow_first_event = first_event_elapsed >= slow_cutoff;
++ if first_result {
++ debug!(
++ "first result in {:?} (cid: {}, sub: {:?}, filter: {}) [used index: {:?}]",
++ first_event_elapsed, client_id, sub.id, filter_count, idx
++ );
++ // logging for slow queries; show filter and SQL.
++ // to reduce logging; only show 1/16th of clients (leading 0)
++ if slow_first_event && client_id.starts_with('0') {
++ debug!(
++ "filter first result in {:?} (slow): {} (cid: {}, sub: {:?})",
++ first_event_elapsed,
++ serde_json::to_string(&filter)?,
++ client_id,
++ sub.id
++ );
++ }
++ first_result = false;
++ }
++ // check if a checkpoint is trying to run, and abort
++ if row_count % 100 == 0 {
++ {
++ if repo.checkpoint_in_progress.try_lock().is_err() {
++ // lock was held, abort this query
++ debug!(
++ "query aborted due to checkpoint (cid: {}, sub: {:?})",
++ client_id, sub.id
++ );
++ metrics
++ .query_aborts
++ .with_label_values(&["checkpoint"])
++ .inc();
++ return Ok(());
++ }
++ }
++ }
++
++ // check if this is still active; every 100 rows
++ if row_count % 100 == 0 && abandon_query_rx.try_recv().is_ok() {
++ debug!(
++ "query cancelled by client (cid: {}, sub: {:?})",
++ client_id, sub.id
++ );
++ return Ok(());
++ }
++ row_count += 1;
++ let event_json = row.get(0)?;
++ loop {
++ if query_tx.capacity() != 0 {
++ // we have capacity to add another item
++ break;
++ }
++ // the queue is full
++ trace!("db reader thread is stalled");
++ if last_successful_send + abort_cutoff < Instant::now() {
++ // the queue has been full for too long, abort
++ info!("aborting database query due to slow client (cid: {}, sub: {:?})",
++ client_id, sub.id);
++ metrics
++ .query_aborts
++ .with_label_values(&["slowclient"])
++ .inc();
++ let ok: Result<()> = Ok(());
++ return ok;
++ }
++ // check if a checkpoint is trying to run, and abort
++ if repo.checkpoint_in_progress.try_lock().is_err() {
++ // lock was held, abort this query
++ debug!(
++ "query aborted due to checkpoint (cid: {}, sub: {:?})",
++ client_id, sub.id
++ );
++ metrics
++ .query_aborts
++ .with_label_values(&["checkpoint"])
++ .inc();
++ return Ok(());
++ }
++ // give the queue a chance to clear before trying again
++ debug!(
++ "query thread sleeping due to full query_tx (cid: {}, sub: {:?})",
++ client_id, sub.id
++ );
++ thread::sleep(Duration::from_millis(500));
++ }
++ // TODO: we could use try_send, but we'd have to juggle
++ // getting the query result back as part of the error
++ // result.
++ query_tx
++ .blocking_send(QueryResult {
++ sub_id: sub.get_id(),
++ event: event_json,
++ })
++ .ok();
++ last_successful_send = Instant::now();
++ }
++ metrics
++ .query_db
++ .observe(filter_start.elapsed().as_secs_f64());
++ // if the filter took too much db_time, print out the JSON.
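++ // (added note: db_time is the wall-clock time spent iterating this
++ // filter's rows, measured from filter_start above.)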
++ if filter_start.elapsed() > slow_cutoff && client_id.starts_with('0') { ++ debug!( ++ "query filter req (slow): {} (cid: {}, sub: {:?}, filter: {})", ++ serde_json::to_string(&filter)?, ++ client_id, ++ sub.id, ++ filter_count ++ ); ++ } ++ } ++ } else { ++ warn!("Could not get a database connection for querying"); ++ } ++ drop(sem); // new query can begin ++ debug!( ++ "query completed in {:?} (cid: {}, sub: {:?}, db_time: {:?}, rows: {})", ++ pre_spawn_start.elapsed(), ++ client_id, ++ sub.id, ++ start.elapsed(), ++ row_count ++ ); ++ query_tx ++ .blocking_send(QueryResult { ++ sub_id: sub.get_id(), ++ event: "EOSE".to_string(), ++ }) ++ .ok(); ++ metrics ++ .query_sub ++ .observe(pre_spawn_start.elapsed().as_secs_f64()); ++ let ok: Result<()> = Ok(()); ++ ok ++ }); ++ Ok(()) ++ } ++ ++ /// Perform normal maintenance ++ async fn optimize_db(&self) -> Result<()> { ++ let conn = self.write_pool.get()?; ++ task::spawn_blocking(move || { ++ let start = Instant::now(); ++ conn.execute_batch("PRAGMA optimize;").ok(); ++ info!("optimize ran in {:?}", start.elapsed()); ++ }) ++ .await?; ++ Ok(()) ++ } ++ ++ /// Create a new verification record connected to a specific event ++ async fn create_verification_record(&self, event_id: &str, name: &str) -> Result<()> { ++ let e = hex::decode(event_id).ok(); ++ let n = name.to_owned(); ++ let mut conn = self.write_pool.get()?; ++ let _write_guard = self.write_in_progress.lock().await; ++ tokio::task::spawn_blocking(move || { ++ let tx = conn.transaction()?; ++ { ++ // if we create a /new/ one, we should get rid of any old ones. or group the new ones by name and only consider the latest. ++ let query = "INSERT INTO user_verification (metadata_event, name, verified_at) VALUES ((SELECT id from event WHERE event_hash=?), ?, strftime('%s','now'));"; ++ let mut stmt = tx.prepare(query)?; ++ stmt.execute(params![e, n])?; ++ // get the row ID ++ let v_id = tx.last_insert_rowid(); ++ // delete everything else by this name ++ let del_query = "DELETE FROM user_verification WHERE name = ? AND id != ?;"; ++ let mut del_stmt = tx.prepare(del_query)?; ++ let count = del_stmt.execute(params![n,v_id])?; ++ if count > 0 { ++ info!("removed {} old verification records for ({:?})", count, n); ++ } ++ } ++ tx.commit()?; ++ info!("saved new verification record for ({:?})", n); ++ let ok: Result<()> = Ok(()); ++ ok ++ }).await? ++ } ++ ++ /// Update verification timestamp ++ async fn update_verification_timestamp(&self, id: u64) -> Result<()> { ++ let mut conn = self.write_pool.get()?; ++ let _write_guard = self.write_in_progress.lock().await; ++ tokio::task::spawn_blocking(move || { ++ // add some jitter to the verification to prevent everything from stacking up together. ++ let verif_time = now_jitter(600); ++ let tx = conn.transaction()?; ++ { ++ // update verification time and reset any failure count ++ let query = ++ "UPDATE user_verification SET verified_at=?, failure_count=0 WHERE id=?"; ++ let mut stmt = tx.prepare(query)?; ++ stmt.execute(params![verif_time, id])?; ++ } ++ tx.commit()?; ++ let ok: Result<()> = Ok(()); ++ ok ++ }) ++ .await? ++ } ++ ++ /// Update verification record as failed ++ async fn fail_verification(&self, id: u64) -> Result<()> { ++ let mut conn = self.write_pool.get()?; ++ let _write_guard = self.write_in_progress.lock().await; ++ tokio::task::spawn_blocking(move || { ++ // add some jitter to the verification to prevent everything from stacking up together. 
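++ // (added note: now_jitter(600) randomizes the timestamp within a
++ // ~10 minute window so re-verification work spreads out.)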
++ let fail_time = now_jitter(600);
++ let tx = conn.transaction()?;
++ {
++ let query = "UPDATE user_verification SET failed_at=?, failure_count=failure_count+1 WHERE id=?";
++ let mut stmt = tx.prepare(query)?;
++ stmt.execute(params![fail_time, id])?;
++ }
++ tx.commit()?;
++ let ok: Result<()> = Ok(());
++ ok
++ })
++ .await?
++ }
++
++ /// Delete verification record
++ async fn delete_verification(&self, id: u64) -> Result<()> {
++ let mut conn = self.write_pool.get()?;
++ let _write_guard = self.write_in_progress.lock().await;
++ tokio::task::spawn_blocking(move || {
++ let tx = conn.transaction()?;
++ {
++ let query = "DELETE FROM user_verification WHERE id=?;";
++ let mut stmt = tx.prepare(query)?;
++ stmt.execute(params![id])?;
++ }
++ tx.commit()?;
++ let ok: Result<()> = Ok(());
++ ok
++ })
++ .await?
++ }
++
++ /// Get the latest verification record for a given pubkey.
++ async fn get_latest_user_verification(&self, pub_key: &str) -> Result<VerificationRecord> {
++ let mut conn = self.read_pool.get()?;
++ let pub_key = pub_key.to_owned();
++ tokio::task::spawn_blocking(move || {
++ let tx = conn.transaction()?;
++ let query = "SELECT v.id, v.name, e.event_hash, e.created_at, v.verified_at, v.failed_at, v.failure_count FROM user_verification v LEFT JOIN event e ON e.id=v.metadata_event WHERE e.author=? ORDER BY e.created_at DESC, v.verified_at DESC, v.failed_at DESC LIMIT 1;";
++ let mut stmt = tx.prepare_cached(query)?;
++ let fields = stmt.query_row(params![hex::decode(&pub_key).ok()], |r| {
++ let rowid: u64 = r.get(0)?;
++ let rowname: String = r.get(1)?;
++ let eventid: Vec<u8> = r.get(2)?;
++ let created_at: u64 = r.get(3)?;
++ // create a tuple since we can't throw non-rusqlite errors in this closure
++ Ok((
++ rowid,
++ rowname,
++ eventid,
++ created_at,
++ r.get(4).ok(),
++ r.get(5).ok(),
++ r.get(6)?,
++ ))
++ })?;
++ Ok(VerificationRecord {
++ rowid: fields.0,
++ name: Nip05Name::try_from(&fields.1[..])?,
++ address: pub_key,
++ event: hex::encode(fields.2),
++ event_created: fields.3,
++ last_success: fields.4,
++ last_failure: fields.5,
++ failure_count: fields.6,
++ })
++ }).await?
++ }
++
++ /// Get oldest verification before timestamp
++ async fn get_oldest_user_verification(&self, before: u64) -> Result<VerificationRecord> {
++ let mut conn = self.read_pool.get()?;
++ tokio::task::spawn_blocking(move || {
++ let tx = conn.transaction()?;
++ let query = "SELECT v.id, v.name, e.event_hash, e.author, e.created_at, v.verified_at, v.failed_at, v.failure_count FROM user_verification v INNER JOIN event e ON e.id=v.metadata_event WHERE (v.verified_at < ? OR v.verified_at IS NULL) AND (v.failed_at < ? OR v.failed_at IS NULL) ORDER BY v.verified_at ASC, v.failed_at ASC LIMIT 1;";
++ let mut stmt = tx.prepare_cached(query)?;
++ let fields = stmt.query_row(params![before, before], |r| {
++ let rowid: u64 = r.get(0)?;
++ let rowname: String = r.get(1)?;
++ let eventid: Vec<u8> = r.get(2)?;
++ let pubkey: Vec<u8> = r.get(3)?;
++ let created_at: u64 = r.get(4)?;
++ // create a tuple since we can't throw non-rusqlite errors in this closure
++ Ok((
++ rowid,
++ rowname,
++ eventid,
++ pubkey,
++ created_at,
++ r.get(5).ok(),
++ r.get(6).ok(),
++ r.get(7)?,
++ ))
++ })?;
++ let vr = VerificationRecord {
++ rowid: fields.0,
++ name: Nip05Name::try_from(&fields.1[..])?,
++ address: hex::encode(fields.3),
++ event: hex::encode(fields.2),
++ event_created: fields.4,
++ last_success: fields.5,
++ last_failure: fields.6,
++ failure_count: fields.7,
++ };
++ Ok(vr)
++ }).await?
++ }
++
++ /// Create account
++ async fn create_account(&self, pub_key: &Keys) -> Result<bool> {
++ let pub_key = pub_key.public_key().to_string();
++
++ let mut conn = self.write_pool.get()?;
++ let ins_count = tokio::task::spawn_blocking(move || {
++ let tx = conn.transaction()?;
++ let ins_count: u64;
++ {
++ // Ignore if user is already in db
++ let query = "INSERT OR IGNORE INTO account (pubkey, is_admitted, balance) VALUES (?1, ?2, ?3);";
++ let mut stmt = tx.prepare(query)?;
++ ins_count = stmt.execute(params![&pub_key, false, 0])? as u64;
++ }
++ tx.commit()?;
++ let ok: Result<u64> = Ok(ins_count);
++ ok
++ }).await??;
++
++ if ins_count != 1 {
++ return Ok(false);
++ }
++
++ Ok(true)
++ }
++
++ /// Admit account
++ async fn admit_account(&self, pub_key: &Keys, admission_cost: u64) -> Result<()> {
++ let pub_key = pub_key.public_key().to_string();
++ let mut conn = self.write_pool.get()?;
++ let pub_key = pub_key.to_owned();
++ tokio::task::spawn_blocking(move || {
++ let tx = conn.transaction()?;
++ {
++ let query = "UPDATE account SET is_admitted = TRUE, tos_accepted_at = strftime('%s','now'), balance = balance - ?1 WHERE pubkey=?2;";
++ let mut stmt = tx.prepare(query)?;
++ stmt.execute(params![admission_cost, pub_key])?;
++ }
++ tx.commit()?;
++ let ok: Result<()> = Ok(());
++ ok
++ })
++ .await?
++ }
++
++ /// Get whether the account is admitted, along with its balance
++ async fn get_account_balance(&self, pub_key: &Keys) -> Result<(bool, u64)> {
++ let pub_key = pub_key.public_key().to_string();
++ let mut conn = self.write_pool.get()?;
++ let pub_key = pub_key.to_owned();
++ tokio::task::spawn_blocking(move || {
++ let tx = conn.transaction()?;
++ let query = "SELECT is_admitted, balance FROM account WHERE pubkey = ?1;";
++ let mut stmt = tx.prepare_cached(query)?;
++ let fields = stmt.query_row(params![pub_key], |r| {
++ let is_admitted: bool = r.get(0)?;
++ let balance: u64 = r.get(1)?;
++ // create a tuple since we can't throw non-rusqlite errors in this closure
++ Ok((is_admitted, balance))
++ })?;
++ Ok(fields)
++ })
++ .await?
++ }
++
++ /// Update account balance
++ async fn update_account_balance(
++ &self,
++ pub_key: &Keys,
++ positive: bool,
++ new_balance: u64,
++ ) -> Result<()> {
++ let pub_key = pub_key.public_key().to_string();
++
++ let mut conn = self.write_pool.get()?;
++ tokio::task::spawn_blocking(move || {
++ let tx = conn.transaction()?;
++ {
++ let query = if positive {
++ "UPDATE account SET balance=balance + ?1 WHERE pubkey=?2"
++ } else {
++ "UPDATE account SET balance=balance - ?1 WHERE pubkey=?2"
++ };
++ let mut stmt = tx.prepare(query)?;
++ stmt.execute(params![new_balance, pub_key])?;
++ }
++ tx.commit()?;
++ let ok: Result<()> = Ok(());
++ ok
++ })
++ .await?
++ }
++
++ /// Create invoice record
++ async fn create_invoice_record(&self, pub_key: &Keys, invoice_info: InvoiceInfo) -> Result<()> {
++ let pub_key = pub_key.public_key().to_string();
++ let pub_key = pub_key.to_owned();
++ let mut conn = self.write_pool.get()?;
++ tokio::task::spawn_blocking(move || {
++ let tx = conn.transaction()?;
++ {
++ let query = "INSERT INTO invoice (pubkey, payment_hash, amount, status, description, created_at, invoice) VALUES (?1, ?2, ?3, ?4, ?5, strftime('%s','now'), ?6);";
++ let mut stmt = tx.prepare(query)?;
++ stmt.execute(params![&pub_key, invoice_info.payment_hash, invoice_info.amount, invoice_info.status.to_string(), invoice_info.memo, invoice_info.bolt11])?;
++ }
++ tx.commit()?;
++ let ok: Result<()> = Ok(());
++ ok
++ }).await??;
++
++ Ok(())
++ }
++
++ /// Update invoice record
++ async fn update_invoice(&self, payment_hash: &str, status: InvoiceStatus) -> Result<String> {
++ let mut conn = self.write_pool.get()?;
++ let payment_hash = payment_hash.to_owned();
++
++ tokio::task::spawn_blocking(move || {
++ let tx = conn.transaction()?;
++ let pubkey: String;
++ {
++ // Get required invoice info for given payment hash
++ let query = "SELECT pubkey, status, amount FROM invoice WHERE payment_hash=?1;";
++ let mut stmt = tx.prepare(query)?;
++ let (pub_key, prev_status, amount) = stmt.query_row(params![payment_hash], |r| {
++ let pub_key: String = r.get(0)?;
++ let status: String = r.get(1)?;
++ let amount: u64 = r.get(2)?;
++ Ok((pub_key, status, amount))
++ })?;
++
++ // If the invoice is paid, update the confirmed_at timestamp
++ let query = if status.eq(&InvoiceStatus::Paid) {
++ "UPDATE invoice SET status=?1, confirmed_at = strftime('%s', 'now') WHERE payment_hash=?2;"
++ } else {
++ "UPDATE invoice SET status=?1 WHERE payment_hash=?2;"
++ };
++ let mut stmt = tx.prepare(query)?;
++ stmt.execute(params![status.to_string(), payment_hash])?;
++
++ // Increase account balance by given invoice amount
++ if prev_status == "Unpaid" && status.eq(&InvoiceStatus::Paid) {
++ let query =
++ "UPDATE account SET balance = balance + ?1 WHERE pubkey = ?2;";
++ let mut stmt = tx.prepare(query)?;
++ stmt.execute(params![amount, pub_key])?;
++ }
++
++ pubkey = pub_key;
++ }
++
++ tx.commit()?;
++ let ok: Result<String> = Ok(pubkey);
++ ok
++ })
++ .await?
++ }
++
++ /// Get the most recent invoice for a given pubkey
++ /// invoice must be unpaid and not expired
++ async fn get_unpaid_invoice(&self, pubkey: &Keys) -> Result<Option<InvoiceInfo>> {
++ let mut conn = self.write_pool.get()?;
++
++ let pubkey = pubkey.to_owned();
++ let pubkey_str = pubkey.clone().public_key().to_string();
++ let (payment_hash, invoice, amount, description) = tokio::task::spawn_blocking(move || {
++ let tx = conn.transaction()?;
++
++ let query = r#"
++SELECT amount, payment_hash, description, invoice
++FROM invoice
++WHERE pubkey = ?1 AND status = 'Unpaid'
++ORDER BY created_at DESC
++LIMIT 1;
++ "#;
++ let mut stmt = tx.prepare(query).unwrap();
++ stmt.query_row(params![&pubkey_str], |r| {
++ let amount: u64 = r.get(0)?;
++ let payment_hash: String = r.get(1)?;
++ let description: String = r.get(2)?;
++ let invoice: String = r.get(3)?;
++
++ Ok((payment_hash, invoice, amount, description))
++ })
++ })
++ .await??;
++
++ Ok(Some(InvoiceInfo {
++ pubkey: pubkey.public_key().to_string(),
++ payment_hash,
++ bolt11: invoice,
++ amount,
++ status: InvoiceStatus::Unpaid,
++ memo: description,
++ confirmed_at: None,
++ }))
++ }
++}
++
++/// Decide if there is an index that should be used explicitly
++fn override_index(f: &ReqFilter) -> Option<String> {
++ if f.ids.is_some() {
++ return Some("event_hash_index".into());
++ }
++ // queries for multiple kinds default to kind_index, which is
++ // significantly slower than kind_created_at_index.
++ if let Some(ks) = &f.kinds {
++ if f.ids.is_none()
++ && ks.len() > 1
++ && f.since.is_none()
++ && f.until.is_none()
++ && f.tags.is_none()
++ && f.authors.is_none()
++ {
++ return Some("kind_created_at_index".into());
++ }
++ }
++ // if there is an author, it is much better to force the authors index.
++ if f.authors.is_some() {
++ if f.since.is_none() && f.until.is_none() && f.limit.is_none() {
++ if f.kinds.is_none() {
++ // with no use of kinds/created_at, just author
++ return Some("author_index".into());
++ }
++ // prefer author_kind if there are kinds
++ return Some("author_kind_index".into());
++ }
++ // finally, prefer author_created_at if time is provided
++ return Some("author_created_at_index".into());
++ }
++ None
++}
++
++/// Create a dynamic SQL subquery and params from a subscription filter (and optional explicit index used)
++fn query_from_filter(f: &ReqFilter) -> (String, Vec<Box<dyn ToSql>>, Option<String>) {
++ // build a dynamic SQL query. all user-input is either an integer
++ // (sqli-safe), or a string that is filtered to only contain
++ // hexadecimal characters. Strings that require escaping (tag
++ // names/values) use parameters.
++
++ // if the filter is malformed, don't return anything.
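++ //
++ // Illustrative sketch (added; not in the original patch): an author
++ // prefix such as "ab9" is expanded by hex_range() into a half-open
++ // range, producing a clause like "(author>? AND author<?)" with the
++ // decoded lower/upper bounds bound as parameters.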
++ if f.force_no_match {
++ let empty_query = "SELECT e.content FROM event e WHERE 1=0".to_owned();
++ // query parameters for SQLite
++ let empty_params: Vec<Box<dyn ToSql>> = vec![];
++ return (empty_query, empty_params, None);
++ }
++
++ // check if the index needs to be overridden
++ let idx_name = override_index(f);
++ let idx_stmt = idx_name
++ .as_ref()
++ .map_or_else(|| "".to_owned(), |i| format!("INDEXED BY {i}"));
++ let mut query = format!("SELECT e.content FROM event e {idx_stmt}");
++ // query parameters for SQLite
++ let mut params: Vec<Box<dyn ToSql>> = vec![];
++
++ // individual filter components (single conditions such as an author or event ID)
++ let mut filter_components: Vec<String> = Vec::new();
++ // Query for "authors", allowing prefix matches
++ if let Some(authvec) = &f.authors {
++ // take each author and convert to a hexsearch
++ let mut auth_searches: Vec<String> = vec![];
++ for auth in authvec {
++ match hex_range(auth) {
++ Some(HexSearch::Exact(ex)) => {
++ auth_searches.push("author=?".to_owned());
++ params.push(Box::new(ex));
++ }
++ Some(HexSearch::Range(lower, upper)) => {
++ auth_searches.push("(author>? AND author<?)".to_owned());
++ params.push(Box::new(lower));
++ params.push(Box::new(upper));
++ }
++ Some(HexSearch::LowerOnly(lower)) => {
++ auth_searches.push("author>?".to_owned());
++ params.push(Box::new(lower));
++ }
++ None => {
++ trace!("Could not parse hex range from author {:?}", auth);
++ }
++ }
++ }
++ if !authvec.is_empty() {
++ let auth_clause = format!("({})", auth_searches.join(" OR "));
++ filter_components.push(auth_clause);
++ } else {
++ filter_components.push("false".to_owned());
++ }
++ }
++ // Query for Kind
++ if let Some(ks) = &f.kinds {
++ // kind is number, no escaping needed
++ let str_kinds: Vec<String> = ks.iter().map(std::string::ToString::to_string).collect();
++ let kind_clause = format!("kind IN ({})", str_kinds.join(", "));
++ filter_components.push(kind_clause);
++ }
++ // Query for event, allowing prefix matches
++ if let Some(idvec) = &f.ids {
++ // take each id and convert to a hexsearch
++ let mut id_searches: Vec<String> = vec![];
++ for id in idvec {
++ match hex_range(id) {
++ Some(HexSearch::Exact(ex)) => {
++ id_searches.push("event_hash=?".to_owned());
++ params.push(Box::new(ex));
++ }
++ Some(HexSearch::Range(lower, upper)) => {
++ id_searches.push("(event_hash>? AND event_hash<?)".to_owned());
++ params.push(Box::new(lower));
++ params.push(Box::new(upper));
++ }
++ Some(HexSearch::LowerOnly(lower)) => {
++ id_searches.push("event_hash>?".to_owned());
++ params.push(Box::new(lower));
++ }
++ None => {
++ info!("Could not parse hex range from id {:?}", id);
++ }
++ }
++ }
++ if idvec.is_empty() {
++ // if the ids list was empty, we should never return
++ // any results.
++ filter_components.push("false".to_owned());
++ } else {
++ let id_clause = format!("({})", id_searches.join(" OR "));
++ filter_components.push(id_clause);
++ }
++ }
++ // Query for tags
++ if let Some(map) = &f.tags {
++ for (key, val) in map.iter() {
++ let mut str_vals: Vec<Box<dyn ToSql>> = vec![];
++ for v in val {
++ str_vals.push(Box::new(v.clone()));
++ }
++ // create clauses with "?" params for each tag value being searched
++ let str_clause = format!("AND value IN ({})", repeat_vars(str_vals.len()));
++ // find evidence of the target tag name/value existing for this event.
++ // Query for Kind/Since/Until additionally, to reduce the number of tags that come back.
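++ // (added example: two values under tag name "e" produce
++ //  "e.id IN (SELECT t.event_id FROM tag t WHERE (name=? AND value IN (?, ?) ...))".)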
++ // Query for tags
++ if let Some(map) = &f.tags {
++ for (key, val) in map.iter() {
++ let mut str_vals: Vec<Box<dyn ToSql>> = vec![];
++ for v in val {
++ str_vals.push(Box::new(v.clone()));
++ }
++ // create clauses with "?" params for each tag value being searched
++ let str_clause = format!("AND value IN ({})", repeat_vars(str_vals.len()));
++ // find evidence of the target tag name/value existing for this event.
++ // Query for Kind/Since/Until additionally, to reduce the number of tags that come back.
++ let kind_clause;
++ if let Some(ks) = &f.kinds {
++ // kind is a number, no escaping needed
++ let str_kinds: Vec<String> =
++ ks.iter().map(std::string::ToString::to_string).collect();
++ kind_clause = format!("AND kind IN ({})", str_kinds.join(", "));
++ } else {
++ kind_clause = String::new();
++ };
++ // Query for since timestamp
++ let since_clause = if f.since.is_some() {
++ format!("AND created_at >= {}", f.since.unwrap())
++ } else {
++ String::new()
++ };
++ // Query for until timestamp
++ let until_clause = if f.until.is_some() {
++ format!("AND created_at <= {}", f.until.unwrap())
++ } else {
++ String::new()
++ };
++
++ let tag_clause = format!(
++ "e.id IN (SELECT t.event_id FROM tag t WHERE (name=? {str_clause} {kind_clause} {since_clause} {until_clause}))"
++ );
++
++ // add the tag name as the first parameter
++ params.push(Box::new(key.to_string()));
++ // add all tag values that are blobs as params
++ params.append(&mut str_vals);
++ filter_components.push(tag_clause);
++ }
++ }
++ // Query for since timestamp
++ if f.since.is_some() {
++ let created_clause = format!("created_at >= {}", f.since.unwrap());
++ filter_components.push(created_clause);
++ }
++ // Query for until timestamp
++ if f.until.is_some() {
++ let until_clause = format!("created_at <= {}", f.until.unwrap());
++ filter_components.push(until_clause);
++ }
++ // never display hidden events
++ query.push_str(" WHERE hidden!=TRUE");
++ // never display expired events
++ filter_components.push("(expires_at IS NULL OR expires_at > ?)".to_string());
++ params.push(Box::new(unix_time()));
++ // build filter component conditions
++ if !filter_components.is_empty() {
++ query.push_str(" AND ");
++ query.push_str(&filter_components.join(" AND "));
++ }
++ // Apply per-filter limit to this subquery.
++ // The use of a LIMIT implies a DESC order, to capture only the most recent events.
++ if let Some(lim) = f.limit {
++ let _ = write!(query, " ORDER BY e.created_at DESC LIMIT {lim}");
++ } else {
++ query.push_str(" ORDER BY e.created_at ASC");
++ }
++ (query, params, idx_name)
++}
++
++/// Create a dynamic SQL query string and params from a subscription.
++fn _query_from_sub(sub: &Subscription) -> (String, Vec<Box<dyn ToSql>>, Vec<String>) {
++ // build a dynamic SQL query for an entire subscription, based on
++ // SQL subqueries for filters.
++ let mut subqueries: Vec<String> = Vec::new();
++ let mut indexes = vec![];
++ // subquery params
++ let mut params: Vec<Box<dyn ToSql>> = vec![];
++ // for every filter in the subscription, generate a subquery
++ for f in &sub.filters {
++ let (f_subquery, mut f_params, index) = query_from_filter(f);
++ if let Some(i) = index {
++ indexes.push(i);
++ }
++ subqueries.push(f_subquery);
++ params.append(&mut f_params);
++ }
++ // encapsulate subqueries into select statements
++ let subqueries_selects: Vec<String> = subqueries
++ .iter()
++ .map(|s| format!("SELECT distinct content, created_at FROM ({s})"))
++ .collect();
++ let query: String = subqueries_selects.join(" UNION ");
++ (query, params, indexes)
++}
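To make the output of `query_from_filter` concrete, here is roughly what a simple filter produces (illustrative, traced by hand from the code above rather than captured from a run):

// For the filter {"kinds":[1], "since":1700000000}, query_from_filter
// returns approximately:
//   SELECT e.content FROM event e WHERE hidden!=TRUE
//     AND kind IN (1)
//     AND created_at >= 1700000000
//     AND (expires_at IS NULL OR expires_at > ?)
//   ORDER BY e.created_at ASC
// with params = [<current unix time>] and no index override, since a
// single kind does not trigger the kind_created_at_index rule above.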
++/// Build a database connection pool.
++/// # Panics
++///
++/// Will panic if the pool could not be created.
++#[must_use]
++pub fn build_pool(
++ name: &str,
++ settings: &Settings,
++ flags: OpenFlags,
++ min_size: u32,
++ max_size: u32,
++ wait_for_db: bool,
++) -> SqlitePool {
++ let db_dir = &settings.database.data_directory;
++ let full_path = Path::new(db_dir).join(DB_FILE);
++
++ // small hack; if the database doesn't exist yet, that means the
++ // writer thread hasn't finished. Give it a chance to work. This
++ // is only an issue the first time we run.
++ if !settings.database.in_memory {
++ while !full_path.exists() && wait_for_db {
++ debug!("Database reader pool is waiting on the database to be created...");
++ thread::sleep(Duration::from_millis(500));
++ }
++ }
++ let manager = if settings.database.in_memory {
++ SqliteConnectionManager::file("file::memory:?cache=shared")
++ .with_flags(flags)
++ .with_init(|c| c.execute_batch(STARTUP_SQL))
++ } else {
++ SqliteConnectionManager::file(&full_path)
++ .with_flags(flags)
++ .with_init(|c| c.execute_batch(STARTUP_SQL))
++ };
++ let pool: SqlitePool = r2d2::Pool::builder()
++ .test_on_check_out(true) // no noticeable performance hit
++ .min_idle(Some(min_size))
++ .max_size(max_size)
++ .idle_timeout(Some(Duration::from_secs(10)))
++ .max_lifetime(Some(Duration::from_secs(30)))
++ .build(manager)
++ .unwrap();
++ // retrieve a connection to ensure the startup statements run immediately
++ {
++ let _ = pool.get();
++ }
++
++ info!(
++ "Built a connection pool {:?} (min={}, max={})",
++ name, min_size, max_size
++ );
++ pool
++}
++
++/// Clean up expired events on a regular basis
++async fn cleanup_expired(
++ pool: SqlitePool,
++ frequency: Duration,
++ write_in_progress: Arc<Mutex<u64>>,
++) -> Result<()> {
++ tokio::task::spawn(async move {
++ loop {
++ tokio::select! {
++ _ = tokio::time::sleep(frequency) => {
++ if let Ok(mut conn) = pool.get() {
++ let mut _guard: Option<MutexGuard<u64>> = None;
++ // take a write lock to prevent event writes
++ // from proceeding while we are deleting
++ // events. This isn't strictly necessary, but
++ // minimizes the chances of forcing event
++ // persistence to be retried.
++ _guard = Some(write_in_progress.lock().await);
++ let start = Instant::now();
++ let exp_res = tokio::task::spawn_blocking(move || {
++ delete_expired(&mut conn)
++ }).await;
++ match exp_res {
++ Ok(Ok(count)) => {
++ if count > 0 {
++ info!("removed {} expired events in: {:?}", count, start.elapsed());
++ }
++ },
++ _ => {
++ // either the task or the underlying query failed
++ info!("there was an error cleaning up expired events: {:?}", exp_res);
++ }
++ }
++ }
++ }
++ };
++ }
++ });
++ Ok(())
++}
++
++/// Execute a query to delete all expired events
++pub fn delete_expired(conn: &mut PooledConnection) -> Result<usize> {
++ let tx = conn.transaction()?;
++ let update_count = tx.execute(
++ "DELETE FROM event WHERE expires_at <= ?",
++ params![unix_time()],
++ )?;
++ tx.commit()?;
++ Ok(update_count)
++}
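A sketch of how `cleanup_expired` might be wired up at startup (the pool name, lock, and ten-minute frequency here are illustrative assumptions, not values taken from this patch):

// Illustrative wiring only; names and values here are assumptions.
async fn start_expiry_sweeper(maintenance_pool: SqlitePool) -> Result<()> {
    let write_lock = Arc::new(Mutex::new(0u64)); // shared writer lock
    cleanup_expired(
        maintenance_pool,          // pool used for maintenance queries
        Duration::from_secs(600),  // sweep for expired events every 10 minutes
        write_lock,
    )
    .await // spawns the background loop and returns immediately
}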
++/// Perform a database WAL checkpoint on a regular basis
++pub async fn db_checkpoint_task(
++ pool: SqlitePool,
++ frequency: Duration,
++ write_in_progress: Arc<Mutex<u64>>,
++ checkpoint_in_progress: Arc<Mutex<u64>>,
++) -> Result<()> {
++ // TODO: use acquire_many on the reader semaphore to stop readers from interrupting this.
++ tokio::task::spawn(async move {
++ // WAL size in pages.
++ let mut current_wal_size = 0;
++ // WAL threshold for more aggressive checkpointing (10,000 pages, or about 40MB)
++ let wal_threshold = 1000 * 10;
++ // default threshold for the busy timer
++ let busy_wait_default = Duration::from_secs(1);
++ // if the WAL file is getting too big, switch to this
++ let busy_wait_default_long = Duration::from_secs(10);
++ loop {
++ tokio::select! {
++ _ = tokio::time::sleep(frequency) => {
++ if let Ok(mut conn) = pool.get() {
++ // block all other writers
++ let _write_guard = write_in_progress.lock().await;
++ let mut _guard: Option<MutexGuard<u64>> = None;
++ // the busy timer will block writers, so don't set
++ // this any higher than you want max latency for event
++ // writes.
++ if current_wal_size <= wal_threshold {
++ conn.busy_timeout(busy_wait_default).ok();
++ } else {
++ // if the WAL size has exceeded the threshold, increase the busy timeout.
++ conn.busy_timeout(busy_wait_default_long).ok();
++ // take a lock that will prevent new readers.
++ info!("blocking new readers to perform wal_checkpoint");
++ _guard = Some(checkpoint_in_progress.lock().await);
++ }
++ debug!("running wal_checkpoint(TRUNCATE)");
++ if let Ok(new_size) = checkpoint_db(&mut conn) {
++ current_wal_size = new_size;
++ }
++ }
++ }
++ };
++ }
++ });
++
++ Ok(())
++}
++
++#[derive(Debug)]
++enum SqliteStatus {
++ Ok,
++ Busy,
++ Error,
++ Other(u64),
++}
++
++/// Checkpoint/Truncate WAL. Returns the number of WAL pages remaining.
++pub fn checkpoint_db(conn: &mut PooledConnection) -> Result<usize> {
++ let query = "PRAGMA wal_checkpoint(TRUNCATE);";
++ let start = Instant::now();
++ let (cp_result, wal_size, _frames_checkpointed) = conn.query_row(query, [], |row| {
++ let checkpoint_result: u64 = row.get(0)?;
++ let wal_size: u64 = row.get(1)?;
++ let frames_checkpointed: u64 = row.get(2)?;
++ Ok((checkpoint_result, wal_size, frames_checkpointed))
++ })?;
++ let result = match cp_result {
++ 0 => SqliteStatus::Ok,
++ 1 => SqliteStatus::Busy,
++ 2 => SqliteStatus::Error,
++ x => SqliteStatus::Other(x),
++ };
++ info!(
++ "checkpoint ran in {:?} (result: {:?}, WAL size: {})",
++ start.elapsed(),
++ result,
++ wal_size
++ );
++ Ok(wal_size as usize)
++}
++
++/// Produce an arbitrary list of '?' parameters.
++fn repeat_vars(count: usize) -> String {
++ if count == 0 {
++ return "".to_owned();
++ }
++ let mut s = "?,".repeat(count);
++ // Remove the trailing comma
++ s.pop();
++ s
++}
++
++/// Display database pool stats every 1 minute
++pub async fn monitor_pool(name: &str, pool: SqlitePool) {
++ let sleep_dur = Duration::from_secs(60);
++ loop {
++ log_pool_stats(name, &pool);
++ tokio::time::sleep(sleep_dur).await;
++ }
++}
++
++/// Log pool stats
++fn log_pool_stats(name: &str, pool: &SqlitePool) {
++ let state: r2d2::State = pool.state();
++ let in_use_cxns = state.connections - state.idle_connections;
++ debug!(
++ "DB pool {:?} usage (in_use: {}, available: {}, max: {})",
++ name,
++ in_use_cxns,
++ state.connections,
++ pool.max_size()
++ );
++}
++
++/// Check if the pool is fully utilized
++fn _pool_at_capacity(pool: &SqlitePool) -> bool {
++ let state: r2d2::State = pool.state();
++ state.idle_connections == 0
++}
+diff --git a/src/repo/sqlite_migration.rs b/src/repo/sqlite_migration.rs
+new file mode 100644
+index 0000000..f2ccab0
+--- /dev/null
++++ b/src/repo/sqlite_migration.rs
+@@ -0,0 +1,841 @@
++//!
Database schema and migrations ++use crate::db::PooledConnection; ++use crate::error::Result; ++use crate::event::{single_char_tagname, Event}; ++use crate::utils::is_lower_hex; ++use const_format::formatcp; ++use indicatif::{ProgressBar, ProgressStyle}; ++use rusqlite::limits::Limit; ++use rusqlite::params; ++use rusqlite::Connection; ++use std::cmp::Ordering; ++use std::time::Instant; ++use tracing::{debug, error, info}; ++ ++/// Startup DB Pragmas ++pub const STARTUP_SQL: &str = r##" ++PRAGMA main.synchronous = NORMAL; ++PRAGMA foreign_keys = ON; ++PRAGMA journal_size_limit = 32768; ++PRAGMA temp_store = 2; -- use memory, not temp files ++PRAGMA main.cache_size = 20000; -- 80MB max cache size per conn ++pragma mmap_size = 0; -- disable mmap (default) ++"##; ++ ++/// Latest database version ++pub const DB_VERSION: usize = 18; ++ ++/// Schema definition ++const INIT_SQL: &str = formatcp!( ++ r##" ++-- Database settings ++PRAGMA encoding = "UTF-8"; ++PRAGMA journal_mode = WAL; ++PRAGMA auto_vacuum = FULL; ++PRAGMA main.synchronous=NORMAL; ++PRAGMA foreign_keys = ON; ++PRAGMA application_id = 1654008667; ++PRAGMA user_version = {}; ++ ++-- Event Table ++CREATE TABLE IF NOT EXISTS event ( ++id INTEGER PRIMARY KEY, ++event_hash BLOB NOT NULL, -- 4-byte hash ++first_seen INTEGER NOT NULL, -- when the event was first seen (not authored!) (seconds since 1970) ++created_at INTEGER NOT NULL, -- when the event was authored ++expires_at INTEGER, -- when the event expires and may be deleted ++author BLOB NOT NULL, -- author pubkey ++delegated_by BLOB, -- delegator pubkey (NIP-26) ++kind INTEGER NOT NULL, -- event kind ++hidden INTEGER, -- relevant for queries ++content TEXT NOT NULL -- serialized json of event object ++); ++ ++-- Event Indexes ++CREATE UNIQUE INDEX IF NOT EXISTS event_hash_index ON event(event_hash); ++CREATE INDEX IF NOT EXISTS author_index ON event(author); ++CREATE INDEX IF NOT EXISTS kind_index ON event(kind); ++CREATE INDEX IF NOT EXISTS created_at_index ON event(created_at); ++CREATE INDEX IF NOT EXISTS delegated_by_index ON event(delegated_by); ++CREATE INDEX IF NOT EXISTS event_composite_index ON event(kind,created_at); ++CREATE INDEX IF NOT EXISTS kind_author_index ON event(kind,author); ++CREATE INDEX IF NOT EXISTS kind_created_at_index ON event(kind,created_at); ++CREATE INDEX IF NOT EXISTS author_created_at_index ON event(author,created_at); ++CREATE INDEX IF NOT EXISTS author_kind_index ON event(author,kind); ++CREATE INDEX IF NOT EXISTS event_expiration ON event(expires_at); ++ ++-- Tag Table ++-- Tag values are stored as either a BLOB (if they come in as a ++-- hex-string), or TEXT otherwise. ++-- This means that searches need to select the appropriate column. ++-- We duplicate the kind/created_at to make indexes much more efficient. ++CREATE TABLE IF NOT EXISTS tag ( ++id INTEGER PRIMARY KEY, ++event_id INTEGER NOT NULL, -- an event ID that contains a tag. ++name TEXT, -- the tag name ("p", "e", whatever) ++value TEXT, -- the tag value, if not hex. ++value_hex BLOB, -- the tag value, if it can be interpreted as a lowercase hex string. 
++created_at INTEGER NOT NULL, -- when the event was authored ++kind INTEGER NOT NULL, -- event kind ++FOREIGN KEY(event_id) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE ++); ++CREATE INDEX IF NOT EXISTS tag_val_index ON tag(value); ++CREATE INDEX IF NOT EXISTS tag_composite_index ON tag(event_id,name,value); ++CREATE INDEX IF NOT EXISTS tag_name_eid_index ON tag(name,event_id,value); ++CREATE INDEX IF NOT EXISTS tag_covering_index ON tag(name,kind,value,created_at,event_id); ++ ++-- NIP-05 User Validation ++CREATE TABLE IF NOT EXISTS user_verification ( ++id INTEGER PRIMARY KEY, ++metadata_event INTEGER NOT NULL, -- the metadata event used for this validation. ++name TEXT NOT NULL, -- the nip05 field value (user@domain). ++verified_at INTEGER, -- timestamp this author/nip05 was most recently verified. ++failed_at INTEGER, -- timestamp a verification attempt failed (host down). ++failure_count INTEGER DEFAULT 0, -- number of consecutive failures. ++FOREIGN KEY(metadata_event) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE ++); ++CREATE INDEX IF NOT EXISTS user_verification_name_index ON user_verification(name); ++CREATE INDEX IF NOT EXISTS user_verification_event_index ON user_verification(metadata_event); ++ ++-- Create account table ++CREATE TABLE IF NOT EXISTS account ( ++pubkey TEXT PRIMARY KEY, ++is_admitted INTEGER NOT NULL DEFAULT 0, ++balance INTEGER NOT NULL DEFAULT 0, ++tos_accepted_at INTEGER ++); ++ ++-- Create account index ++CREATE INDEX IF NOT EXISTS user_pubkey_index ON account(pubkey); ++ ++-- Invoice table ++CREATE TABLE IF NOT EXISTS invoice ( ++payment_hash TEXT PRIMARY KEY, ++pubkey TEXT NOT NULL, ++invoice TEXT NOT NULL, ++amount INTEGER NOT NULL, ++status TEXT CHECK ( status IN ('Paid', 'Unpaid', 'Expired' ) ) NOT NUll DEFAULT 'Unpaid', ++description TEXT, ++created_at INTEGER NOT NULL, ++confirmed_at INTEGER, ++CONSTRAINT invoice_pubkey_fkey FOREIGN KEY (pubkey) REFERENCES account (pubkey) ON DELETE CASCADE ++); ++ ++-- Create invoice index ++CREATE INDEX IF NOT EXISTS invoice_pubkey_index ON invoice(pubkey); ++ ++ ++"##, ++ DB_VERSION ++); ++ ++/// Determine the current application database schema version. ++pub fn curr_db_version(conn: &mut Connection) -> Result { ++ let query = "PRAGMA user_version;"; ++ let curr_version = conn.query_row(query, [], |row| row.get(0))?; ++ Ok(curr_version) ++} ++ ++/// Determine event count ++pub fn db_event_count(conn: &mut Connection) -> Result { ++ let query = "SELECT count(*) FROM event;"; ++ let count = conn.query_row(query, [], |row| row.get(0))?; ++ Ok(count) ++} ++ ++/// Determine tag count ++pub fn db_tag_count(conn: &mut Connection) -> Result { ++ let query = "SELECT count(*) FROM tag;"; ++ let count = conn.query_row(query, [], |row| row.get(0))?; ++ Ok(count) ++} ++ ++fn mig_init(conn: &mut PooledConnection) -> usize { ++ match conn.execute_batch(INIT_SQL) { ++ Ok(()) => { ++ info!( ++ "database pragma/schema initialized to v{}, and ready", ++ DB_VERSION ++ ); ++ } ++ Err(err) => { ++ error!("update failed: {}", err); ++ panic!("database could not be initialized"); ++ } ++ } ++ DB_VERSION ++} ++ ++/// Upgrade DB to latest version, and execute pragma settings ++pub fn upgrade_db(conn: &mut PooledConnection) -> Result { ++ // check the version. 
++ let mut curr_version = curr_db_version(conn)?; ++ info!("DB version = {:?}", curr_version); ++ ++ debug!( ++ "SQLite max query parameters: {}", ++ conn.limit(Limit::SQLITE_LIMIT_VARIABLE_NUMBER) ++ ); ++ debug!( ++ "SQLite max table/blob/text length: {} MB", ++ (f64::from(conn.limit(Limit::SQLITE_LIMIT_LENGTH)) / f64::from(1024 * 1024)).floor() ++ ); ++ debug!( ++ "SQLite max SQL length: {} MB", ++ (f64::from(conn.limit(Limit::SQLITE_LIMIT_SQL_LENGTH)) / f64::from(1024 * 1024)).floor() ++ ); ++ ++ match curr_version.cmp(&DB_VERSION) { ++ // Database is new or not current ++ Ordering::Less => { ++ // initialize from scratch ++ if curr_version == 0 { ++ curr_version = mig_init(conn); ++ } ++ // for initialized but out-of-date schemas, proceed to ++ // upgrade sequentially until we are current. ++ if curr_version == 1 { ++ curr_version = mig_1_to_2(conn)?; ++ } ++ if curr_version == 2 { ++ curr_version = mig_2_to_3(conn)?; ++ } ++ if curr_version == 3 { ++ curr_version = mig_3_to_4(conn)?; ++ } ++ if curr_version == 4 { ++ curr_version = mig_4_to_5(conn)?; ++ } ++ if curr_version == 5 { ++ curr_version = mig_5_to_6(conn)?; ++ } ++ if curr_version == 6 { ++ curr_version = mig_6_to_7(conn)?; ++ } ++ if curr_version == 7 { ++ curr_version = mig_7_to_8(conn)?; ++ } ++ if curr_version == 8 { ++ curr_version = mig_8_to_9(conn)?; ++ } ++ if curr_version == 9 { ++ curr_version = mig_9_to_10(conn)?; ++ } ++ if curr_version == 10 { ++ curr_version = mig_10_to_11(conn)?; ++ } ++ if curr_version == 11 { ++ curr_version = mig_11_to_12(conn)?; ++ } ++ if curr_version == 12 { ++ curr_version = mig_12_to_13(conn)?; ++ } ++ if curr_version == 13 { ++ curr_version = mig_13_to_14(conn)?; ++ } ++ if curr_version == 14 { ++ curr_version = mig_14_to_15(conn)?; ++ } ++ if curr_version == 15 { ++ curr_version = mig_15_to_16(conn)?; ++ } ++ if curr_version == 16 { ++ curr_version = mig_16_to_17(conn)?; ++ } ++ if curr_version == 17 { ++ curr_version = mig_17_to_18(conn)?; ++ } ++ ++ if curr_version == DB_VERSION { ++ info!( ++ "All migration scripts completed successfully. Welcome to v{}.", ++ DB_VERSION ++ ); ++ } ++ } ++ // Database is current, all is good ++ Ordering::Equal => { ++ debug!("Database version was already current (v{DB_VERSION})"); ++ } ++ // Database is newer than what this code understands, abort ++ Ordering::Greater => { ++ panic!( ++ "Database version is newer than supported by this executable (v{curr_version} > v{DB_VERSION})", ++ ); ++ } ++ } ++ ++ // Setup PRAGMA ++ conn.execute_batch(STARTUP_SQL)?; ++ debug!("SQLite PRAGMA startup completed"); ++ Ok(DB_VERSION) ++} ++ ++pub fn rebuild_tags(conn: &mut PooledConnection) -> Result<()> { ++ // Check how many events we have to process ++ let count = db_event_count(conn)?; ++ let update_each_percent = 0.05; ++ let mut percent_done = 0.0; ++ let mut events_processed = 0; ++ let start = Instant::now(); ++ let tx = conn.transaction()?; ++ { ++ // Clear out table ++ tx.execute("DELETE FROM tag;", [])?; ++ let mut stmt = tx.prepare("select id, content from event order by id;")?; ++ let mut tag_rows = stmt.query([])?; ++ while let Some(row) = tag_rows.next()? { ++ if (events_processed as f32) / (count as f32) > percent_done { ++ info!("Tag update {}% complete...", (100.0 * percent_done).round()); ++ percent_done += update_each_percent; ++ } ++ // we want to capture the event_id that had the tag, the tag name, and the tag hex value. 
++ let event_id: u64 = row.get(0)?; ++ let event_json: String = row.get(1)?; ++ let event: Event = serde_json::from_str(&event_json)?; ++ // look at each event, and each tag, creating new tag entries if appropriate. ++ for t in event.tags.iter().filter(|x| x.len() > 1) { ++ let tagname = t.get(0).unwrap(); ++ let tagnamechar_opt = single_char_tagname(tagname); ++ if tagnamechar_opt.is_none() { ++ continue; ++ } ++ // safe because len was > 1 ++ let tagval = t.get(1).unwrap(); ++ // insert as BLOB if we can restore it losslessly. ++ // this means it needs to be even length and lowercase. ++ if (tagval.len() % 2 == 0) && is_lower_hex(tagval) { ++ tx.execute( ++ "INSERT INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3);", ++ params![event_id, tagname, hex::decode(tagval).ok()], ++ )?; ++ } else { ++ // otherwise, insert as text ++ tx.execute( ++ "INSERT INTO tag (event_id, name, value) VALUES (?1, ?2, ?3);", ++ params![event_id, tagname, &tagval], ++ )?; ++ } ++ } ++ events_processed += 1; ++ } ++ } ++ tx.commit()?; ++ info!("rebuilt tags in {:?}", start.elapsed()); ++ Ok(()) ++} ++ ++//// Migration Scripts ++ ++fn mig_1_to_2(conn: &mut PooledConnection) -> Result { ++ // only change is adding a hidden column to events. ++ let upgrade_sql = r##" ++ALTER TABLE event ADD hidden INTEGER; ++UPDATE event SET hidden=FALSE; ++PRAGMA user_version = 2; ++"##; ++ match conn.execute_batch(upgrade_sql) { ++ Ok(()) => { ++ info!("database schema upgraded v1 -> v2"); ++ } ++ Err(err) => { ++ error!("update failed: {}", err); ++ panic!("database could not be upgraded"); ++ } ++ } ++ Ok(2) ++} ++ ++fn mig_2_to_3(conn: &mut PooledConnection) -> Result { ++ // this version lacks the tag column ++ info!("database schema needs update from 2->3"); ++ let upgrade_sql = r##" ++CREATE TABLE IF NOT EXISTS tag ( ++id INTEGER PRIMARY KEY, ++event_id INTEGER NOT NULL, -- an event ID that contains a tag. ++name TEXT, -- the tag name ("p", "e", whatever) ++value TEXT, -- the tag value, if not hex. ++value_hex BLOB, -- the tag value, if it can be interpreted as a hex string. ++FOREIGN KEY(event_id) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE ++); ++PRAGMA user_version = 3; ++"##; ++ // TODO: load existing refs into tag table ++ match conn.execute_batch(upgrade_sql) { ++ Ok(()) => { ++ info!("database schema upgraded v2 -> v3"); ++ } ++ Err(err) => { ++ error!("update failed: {}", err); ++ panic!("database could not be upgraded"); ++ } ++ } ++ // iterate over every event/pubkey tag ++ let tx = conn.transaction()?; ++ { ++ let mut stmt = tx.prepare("select event_id, \"e\", lower(hex(referenced_event)) from event_ref union select event_id, \"p\", lower(hex(referenced_pubkey)) from pubkey_ref;")?; ++ let mut tag_rows = stmt.query([])?; ++ while let Some(row) = tag_rows.next()? { ++ // we want to capture the event_id that had the tag, the tag name, and the tag hex value. ++ let event_id: u64 = row.get(0)?; ++ let tag_name: String = row.get(1)?; ++ let tag_value: String = row.get(2)?; ++ // this will leave behind p/e tags that were non-hex, but they are invalid anyways. 
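The `is_lower_hex` check below is the lossless-storage test used throughout this file; a minimal equivalent of the helper in `crate::utils` (a sketch, assuming its documented behavior):

// sketch of utils::is_lower_hex: true only for ASCII digits and a-f
fn is_lower_hex_sketch(s: &str) -> bool {
    s.chars()
        .all(|c| c.is_ascii_digit() || ('a'..='f').contains(&c))
}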
++ if is_lower_hex(&tag_value) { ++ tx.execute( ++ "INSERT INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3);", ++ params![event_id, tag_name, hex::decode(&tag_value).ok()], ++ )?; ++ } ++ } ++ } ++ info!("Updated tag values"); ++ tx.commit()?; ++ Ok(3) ++} ++ ++fn mig_3_to_4(conn: &mut PooledConnection) -> Result { ++ info!("database schema needs update from 3->4"); ++ let upgrade_sql = r##" ++-- incoming metadata events with nip05 ++CREATE TABLE IF NOT EXISTS user_verification ( ++id INTEGER PRIMARY KEY, ++metadata_event INTEGER NOT NULL, -- the metadata event used for this validation. ++name TEXT NOT NULL, -- the nip05 field value (user@domain). ++verified_at INTEGER, -- timestamp this author/nip05 was most recently verified. ++failed_at INTEGER, -- timestamp a verification attempt failed (host down). ++failure_count INTEGER DEFAULT 0, -- number of consecutive failures. ++FOREIGN KEY(metadata_event) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE ++); ++CREATE INDEX IF NOT EXISTS user_verification_name_index ON user_verification(name); ++CREATE INDEX IF NOT EXISTS user_verification_event_index ON user_verification(metadata_event); ++PRAGMA user_version = 4; ++"##; ++ match conn.execute_batch(upgrade_sql) { ++ Ok(()) => { ++ info!("database schema upgraded v3 -> v4"); ++ } ++ Err(err) => { ++ error!("update failed: {}", err); ++ panic!("database could not be upgraded"); ++ } ++ } ++ Ok(4) ++} ++ ++fn mig_4_to_5(conn: &mut PooledConnection) -> Result { ++ info!("database schema needs update from 4->5"); ++ let upgrade_sql = r##" ++DROP TABLE IF EXISTS event_ref; ++DROP TABLE IF EXISTS pubkey_ref; ++PRAGMA user_version=5; ++"##; ++ match conn.execute_batch(upgrade_sql) { ++ Ok(()) => { ++ info!("database schema upgraded v4 -> v5"); ++ } ++ Err(err) => { ++ error!("update failed: {}", err); ++ panic!("database could not be upgraded"); ++ } ++ } ++ Ok(5) ++} ++ ++fn mig_5_to_6(conn: &mut PooledConnection) -> Result { ++ info!("database schema needs update from 5->6"); ++ // We need to rebuild the tags table. iterate through the ++ // event table. build event from json, insert tags into a ++ // fresh tag table. This was needed due to a logic error in ++ // how hex-like tags got indexed. ++ let start = Instant::now(); ++ let tx = conn.transaction()?; ++ { ++ // Clear out table ++ tx.execute("DELETE FROM tag;", [])?; ++ let mut stmt = tx.prepare("select id, content from event order by id;")?; ++ let mut tag_rows = stmt.query([])?; ++ while let Some(row) = tag_rows.next()? { ++ let event_id: u64 = row.get(0)?; ++ let event_json: String = row.get(1)?; ++ let event: Event = serde_json::from_str(&event_json)?; ++ // look at each event, and each tag, creating new tag entries if appropriate. ++ for t in event.tags.iter().filter(|x| x.len() > 1) { ++ let tagname = t.get(0).unwrap(); ++ let tagnamechar_opt = single_char_tagname(tagname); ++ if tagnamechar_opt.is_none() { ++ continue; ++ } ++ // safe because len was > 1 ++ let tagval = t.get(1).unwrap(); ++ // insert as BLOB if we can restore it losslessly. ++ // this means it needs to be even length and lowercase. 
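Why "even length and lowercase" matters: `hex::decode`/`hex::encode` round-trips exactly under those two conditions, so storing the decoded bytes in the BLOB column loses nothing. A small illustrative helper that mirrors the condition checked below:

// Quick demonstration of the losslessness rule (illustrative helper):
fn round_trips_losslessly(tagval: &str) -> bool {
    match hex::decode(tagval) {
        // true iff the input was even-length, lowercase hex
        Ok(bytes) => hex::encode(bytes) == tagval,
        // odd-length or non-hex input never decodes
        Err(_) => false,
    }
}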
++ if (tagval.len() % 2 == 0) && is_lower_hex(tagval) { ++ tx.execute( ++ "INSERT INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3);", ++ params![event_id, tagname, hex::decode(tagval).ok()], ++ )?; ++ } else { ++ // otherwise, insert as text ++ tx.execute( ++ "INSERT INTO tag (event_id, name, value) VALUES (?1, ?2, ?3);", ++ params![event_id, tagname, &tagval], ++ )?; ++ } ++ } ++ } ++ tx.execute("PRAGMA user_version = 6;", [])?; ++ } ++ tx.commit()?; ++ info!("database schema upgraded v5 -> v6 in {:?}", start.elapsed()); ++ // vacuum after large table modification ++ let start = Instant::now(); ++ conn.execute("VACUUM;", [])?; ++ info!("vacuumed DB after tags rebuild in {:?}", start.elapsed()); ++ Ok(6) ++} ++ ++fn mig_6_to_7(conn: &mut PooledConnection) -> Result { ++ info!("database schema needs update from 6->7"); ++ let upgrade_sql = r##" ++ALTER TABLE event ADD delegated_by BLOB; ++CREATE INDEX IF NOT EXISTS delegated_by_index ON event(delegated_by); ++PRAGMA user_version = 7; ++"##; ++ match conn.execute_batch(upgrade_sql) { ++ Ok(()) => { ++ info!("database schema upgraded v6 -> v7"); ++ } ++ Err(err) => { ++ error!("update failed: {}", err); ++ panic!("database could not be upgraded"); ++ } ++ } ++ Ok(7) ++} ++ ++fn mig_7_to_8(conn: &mut PooledConnection) -> Result { ++ info!("database schema needs update from 7->8"); ++ // Remove redundant indexes, and add a better multi-column index. ++ let upgrade_sql = r##" ++DROP INDEX IF EXISTS created_at_index; ++DROP INDEX IF EXISTS kind_index; ++CREATE INDEX IF NOT EXISTS event_composite_index ON event(kind,created_at); ++PRAGMA user_version = 8; ++"##; ++ match conn.execute_batch(upgrade_sql) { ++ Ok(()) => { ++ info!("database schema upgraded v7 -> v8"); ++ } ++ Err(err) => { ++ error!("update failed: {}", err); ++ panic!("database could not be upgraded"); ++ } ++ } ++ Ok(8) ++} ++ ++fn mig_8_to_9(conn: &mut PooledConnection) -> Result { ++ info!("database schema needs update from 8->9"); ++ // Those old indexes were actually helpful... ++ let upgrade_sql = r##" ++CREATE INDEX IF NOT EXISTS created_at_index ON event(created_at); ++CREATE INDEX IF NOT EXISTS event_composite_index ON event(kind,created_at); ++PRAGMA user_version = 9; ++"##; ++ match conn.execute_batch(upgrade_sql) { ++ Ok(()) => { ++ info!("database schema upgraded v8 -> v9"); ++ } ++ Err(err) => { ++ error!("update failed: {}", err); ++ panic!("database could not be upgraded"); ++ } ++ } ++ Ok(9) ++} ++ ++fn mig_9_to_10(conn: &mut PooledConnection) -> Result { ++ info!("database schema needs update from 9->10"); ++ // Those old indexes were actually helpful... ++ let upgrade_sql = r##" ++CREATE INDEX IF NOT EXISTS tag_composite_index ON tag(event_id,name,value_hex,value); ++PRAGMA user_version = 10; ++"##; ++ match conn.execute_batch(upgrade_sql) { ++ Ok(()) => { ++ info!("database schema upgraded v9 -> v10"); ++ } ++ Err(err) => { ++ error!("update failed: {}", err); ++ panic!("database could not be upgraded"); ++ } ++ } ++ Ok(10) ++} ++ ++fn mig_10_to_11(conn: &mut PooledConnection) -> Result { ++ info!("database schema needs update from 10->11"); ++ // Those old indexes were actually helpful... 
++ let upgrade_sql = r##" ++CREATE INDEX IF NOT EXISTS tag_name_eid_index ON tag(name,event_id,value_hex); ++reindex; ++pragma optimize; ++PRAGMA user_version = 11; ++"##; ++ match conn.execute_batch(upgrade_sql) { ++ Ok(()) => { ++ info!("database schema upgraded v10 -> v11"); ++ } ++ Err(err) => { ++ error!("update failed: {}", err); ++ panic!("database could not be upgraded"); ++ } ++ } ++ Ok(11) ++} ++ ++fn mig_11_to_12(conn: &mut PooledConnection) -> Result { ++ info!("database schema needs update from 11->12"); ++ let start = Instant::now(); ++ let tx = conn.transaction()?; ++ { ++ // Lookup every replaceable event ++ let mut stmt = tx.prepare("select kind,author from event where kind in (0,3,41) or (kind>=10000 and kind<20000) order by id;")?; ++ let mut replaceable_rows = stmt.query([])?; ++ info!("updating replaceable events; this could take awhile..."); ++ while let Some(row) = replaceable_rows.next()? { ++ // we want to capture the event_id that had the tag, the tag name, and the tag hex value. ++ let event_kind: u64 = row.get(0)?; ++ let event_author: Vec = row.get(1)?; ++ tx.execute( ++ "UPDATE event SET hidden=TRUE WHERE hidden!=TRUE and kind=? and author=? and id NOT IN (SELECT id FROM event WHERE kind=? AND author=? ORDER BY created_at DESC LIMIT 1)", ++ params![event_kind, event_author, event_kind, event_author], ++ )?; ++ } ++ tx.execute("PRAGMA user_version = 12;", [])?; ++ } ++ tx.commit()?; ++ info!( ++ "database schema upgraded v11 -> v12 in {:?}", ++ start.elapsed() ++ ); ++ // vacuum after large table modification ++ let start = Instant::now(); ++ conn.execute("VACUUM;", [])?; ++ info!( ++ "vacuumed DB after hidden event cleanup in {:?}", ++ start.elapsed() ++ ); ++ Ok(12) ++} ++ ++fn mig_12_to_13(conn: &mut PooledConnection) -> Result { ++ info!("database schema needs update from 12->13"); ++ let upgrade_sql = r##" ++CREATE INDEX IF NOT EXISTS kind_author_index ON event(kind,author); ++reindex; ++pragma optimize; ++PRAGMA user_version = 13; ++"##; ++ match conn.execute_batch(upgrade_sql) { ++ Ok(()) => { ++ info!("database schema upgraded v12 -> v13"); ++ } ++ Err(err) => { ++ error!("update failed: {}", err); ++ panic!("database could not be upgraded"); ++ } ++ } ++ Ok(13) ++} ++ ++fn mig_13_to_14(conn: &mut PooledConnection) -> Result { ++ info!("database schema needs update from 13->14"); ++ let upgrade_sql = r##" ++CREATE INDEX IF NOT EXISTS kind_index ON event(kind); ++CREATE INDEX IF NOT EXISTS kind_created_at_index ON event(kind,created_at); ++pragma optimize; ++PRAGMA user_version = 14; ++"##; ++ match conn.execute_batch(upgrade_sql) { ++ Ok(()) => { ++ info!("database schema upgraded v13 -> v14"); ++ } ++ Err(err) => { ++ error!("update failed: {}", err); ++ panic!("database could not be upgraded"); ++ } ++ } ++ Ok(14) ++} ++ ++fn mig_14_to_15(conn: &mut PooledConnection) -> Result { ++ info!("database schema needs update from 14->15"); ++ let upgrade_sql = r##" ++CREATE INDEX IF NOT EXISTS author_created_at_index ON event(author,created_at); ++CREATE INDEX IF NOT EXISTS author_kind_index ON event(author,kind); ++PRAGMA user_version = 15; ++"##; ++ match conn.execute_batch(upgrade_sql) { ++ Ok(()) => { ++ info!("database schema upgraded v14 -> v15"); ++ } ++ Err(err) => { ++ error!("update failed: {}", err); ++ panic!("database could not be upgraded"); ++ } ++ } ++ // clear out hidden events ++ let clear_hidden_sql = r##"DELETE FROM event WHERE HIDDEN=true;"##; ++ info!("removing hidden events; this may take awhile..."); ++ match 
conn.execute_batch(clear_hidden_sql) { ++ Ok(()) => { ++ info!("all hidden events removed"); ++ } ++ Err(err) => { ++ error!("delete failed: {}", err); ++ panic!("could not remove hidden events"); ++ } ++ } ++ Ok(15) ++} ++ ++fn mig_15_to_16(conn: &mut PooledConnection) -> Result { ++ let count = db_event_count(conn)?; ++ info!("database schema needs update from 15->16 (this may take a few minutes)"); ++ let upgrade_sql = r##" ++DROP TABLE tag; ++CREATE TABLE tag ( ++id INTEGER PRIMARY KEY, ++event_id INTEGER NOT NULL, -- an event ID that contains a tag. ++name TEXT, -- the tag name ("p", "e", whatever) ++value TEXT, -- the tag value, if not hex. ++created_at INTEGER NOT NULL, -- when the event was authored ++kind INTEGER NOT NULL, -- event kind ++FOREIGN KEY(event_id) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE ++); ++CREATE INDEX IF NOT EXISTS tag_val_index ON tag(value); ++CREATE INDEX IF NOT EXISTS tag_composite_index ON tag(event_id,name,value); ++CREATE INDEX IF NOT EXISTS tag_name_eid_index ON tag(name,event_id,value); ++CREATE INDEX IF NOT EXISTS tag_covering_index ON tag(name,kind,value,created_at,event_id); ++"##; ++ ++ let start = Instant::now(); ++ let tx = conn.transaction()?; ++ ++ let bar = ProgressBar::new(count.try_into().unwrap()).with_message("rebuilding tags table"); ++ bar.set_style( ++ ProgressStyle::with_template( ++ "[{elapsed_precise}] {bar:40.white/blue} {pos:>7}/{len:7} [{percent}%] {msg}", ++ ) ++ .unwrap(), ++ ); ++ { ++ tx.execute_batch(upgrade_sql)?; ++ let mut stmt = ++ tx.prepare("select id, kind, created_at, content from event order by id;")?; ++ let mut tag_rows = stmt.query([])?; ++ let mut count = 0; ++ while let Some(row) = tag_rows.next()? { ++ count += 1; ++ if count % 10 == 0 { ++ bar.inc(10); ++ } ++ let event_id: u64 = row.get(0)?; ++ let kind: u64 = row.get(1)?; ++ let created_at: u64 = row.get(2)?; ++ let event_json: String = row.get(3)?; ++ let event: Event = serde_json::from_str(&event_json)?; ++ // look at each event, and each tag, creating new tag entries if appropriate. 
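The loop below keeps only single-character tag names ("e", "p", "t", ...), the names that are queryable via generic tag filters; a sketch of what `single_char_tagname` (from `crate::event`) is assumed to do:

// sketch of event::single_char_tagname: Some(first char) iff the
// tag name is exactly one character long, otherwise None
fn single_char_tagname_sketch(tagname: &str) -> Option<char> {
    let mut chars = tagname.chars();
    match (chars.next(), chars.next()) {
        (Some(c), None) => Some(c),
        _ => None,
    }
}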
++ for t in event.tags.iter().filter(|x| x.len() > 1) { ++ let tagname = t.get(0).unwrap(); ++ let tagnamechar_opt = single_char_tagname(tagname); ++ if tagnamechar_opt.is_none() { ++ continue; ++ } ++ // safe because len was > 1 ++ let tagval = t.get(1).unwrap(); ++ // otherwise, insert as text ++ tx.execute( ++ "INSERT INTO tag (event_id, name, value, kind, created_at) VALUES (?1, ?2, ?3, ?4, ?5);", ++ params![event_id, tagname, &tagval, kind, created_at], ++ )?; ++ } ++ } ++ tx.execute("PRAGMA user_version = 16;", [])?; ++ } ++ bar.finish(); ++ tx.commit()?; ++ info!( ++ "database schema upgraded v15 -> v16 in {:?}", ++ start.elapsed() ++ ); ++ Ok(16) ++} ++ ++fn mig_16_to_17(conn: &mut PooledConnection) -> Result { ++ info!("database schema needs update from 16->17"); ++ let upgrade_sql = r##" ++ALTER TABLE event ADD COLUMN expires_at INTEGER; ++CREATE INDEX IF NOT EXISTS event_expiration ON event(expires_at); ++PRAGMA user_version = 17; ++"##; ++ match conn.execute_batch(upgrade_sql) { ++ Ok(()) => { ++ info!("database schema upgraded v16 -> v17"); ++ } ++ Err(err) => { ++ error!("update failed: {}", err); ++ panic!("database could not be upgraded"); ++ } ++ } ++ Ok(17) ++} ++ ++fn mig_17_to_18(conn: &mut PooledConnection) -> Result { ++ info!("database schema needs update from 17->18"); ++ let upgrade_sql = r##" ++-- Create invoices table ++CREATE TABLE IF NOT EXISTS invoice ( ++payment_hash TEXT PRIMARY KEY, ++pubkey TEXT NOT NULL, ++invoice TEXT NOT NULL, ++amount INTEGER NOT NULL, ++status TEXT CHECK ( status IN ('Paid', 'Unpaid', 'Expired' ) ) NOT NUll DEFAULT 'Unpaid', ++description TEXT, ++created_at INTEGER NOT NULL, ++confirmed_at INTEGER, ++CONSTRAINT invoice_pubkey_fkey FOREIGN KEY (pubkey) REFERENCES account (pubkey) ON DELETE CASCADE ++); ++ ++-- Create invoice index ++CREATE INDEX IF NOT EXISTS invoice_pubkey_index ON invoice(pubkey); ++ ++-- Create account table ++ ++CREATE TABLE IF NOT EXISTS account ( ++pubkey TEXT PRIMARY KEY, ++is_admitted INTEGER NOT NULL DEFAULT 0, ++balance INTEGER NOT NULL DEFAULT 0, ++tos_accepted_at INTEGER ++); ++ ++-- Create account index ++CREATE INDEX IF NOT EXISTS account_pubkey_index ON account(pubkey); ++ ++ ++pragma optimize; ++PRAGMA user_version = 18; ++"##; ++ match conn.execute_batch(upgrade_sql) { ++ Ok(()) => { ++ info!("database schema upgraded v17 -> v18"); ++ } ++ Err(err) => { ++ error!("update failed: {}", err); ++ panic!("database could not be upgraded"); ++ } ++ } ++ Ok(18) ++} +diff --git a/src/server.rs b/src/server.rs +new file mode 100644 +index 0000000..3c2b9f2 +--- /dev/null ++++ b/src/server.rs +@@ -0,0 +1,1434 @@ ++//! 
Server process ++use crate::close::Close; ++use crate::close::CloseCmd; ++use crate::config::{Settings, VerifiedUsersMode}; ++use crate::conn; ++use crate::db; ++use crate::db::SubmittedEvent; ++use crate::error::{Error, Result}; ++use crate::event::Event; ++use crate::event::EventCmd; ++use crate::event::EventWrapper; ++use crate::info::RelayInfo; ++use crate::nip05; ++use crate::notice::Notice; ++use crate::payment; ++use crate::payment::InvoiceInfo; ++use crate::payment::PaymentMessage; ++use crate::repo::NostrRepo; ++use crate::server::Error::CommandUnknownError; ++use crate::server::EventWrapper::{WrappedAuth, WrappedEvent}; ++use crate::subscription::Subscription; ++use futures::SinkExt; ++use futures::StreamExt; ++use governor::{Jitter, Quota, RateLimiter}; ++use http::header::HeaderMap; ++use hyper::body::to_bytes; ++use hyper::header::ACCEPT; ++use hyper::service::{make_service_fn, service_fn}; ++use hyper::upgrade::Upgraded; ++use hyper::{ ++ header, server::conn::AddrStream, upgrade, Body, Request, Response, Server, StatusCode, ++}; ++use nostr::key::FromPkStr; ++use nostr::key::Keys; ++use prometheus::IntCounterVec; ++use prometheus::IntGauge; ++use prometheus::{Encoder, Histogram, HistogramOpts, IntCounter, Opts, Registry, TextEncoder}; ++use qrcode::render::svg; ++use qrcode::QrCode; ++use serde::{Deserialize, Serialize}; ++use serde_json::json; ++use std::collections::HashMap; ++use std::convert::Infallible; ++use std::fs::File; ++use std::io::BufReader; ++use std::io::Read; ++use std::net::SocketAddr; ++use std::path::Path; ++use std::sync::atomic::Ordering; ++use std::sync::mpsc::Receiver as MpscReceiver; ++use std::sync::Arc; ++use std::time::Duration; ++use std::time::Instant; ++use tokio::runtime::Builder; ++use tokio::sync::broadcast::{self, Receiver, Sender}; ++use tokio::sync::mpsc; ++use tokio::sync::oneshot; ++use tokio_tungstenite::WebSocketStream; ++use tracing::{debug, error, info, trace, warn}; ++use tungstenite::error::CapacityError::MessageTooLong; ++use tungstenite::error::Error as WsError; ++use tungstenite::handshake; ++use tungstenite::protocol::Message; ++use tungstenite::protocol::WebSocketConfig; ++ ++/// Handle arbitrary HTTP requests, including for `WebSocket` upgrades. 
++#[allow(clippy::too_many_arguments)]
++async fn handle_web_request(
++ mut request: Request<Body>,
++ repo: Arc<dyn NostrRepo>,
++ settings: Settings,
++ remote_addr: SocketAddr,
++ broadcast: Sender<Event>,
++ event_tx: tokio::sync::mpsc::Sender<SubmittedEvent>,
++ payment_tx: tokio::sync::broadcast::Sender<PaymentMessage>,
++ shutdown: Receiver<()>,
++ favicon: Option<Vec<u8>>,
++ registry: Registry,
++ metrics: NostrMetrics,
++) -> Result<Response<Body>, Infallible> {
++ match (
++ request.uri().path(),
++ request.headers().contains_key(header::UPGRADE),
++ ) {
++ // Request for / as websocket
++ ("/", true) => {
++ trace!("websocket with upgrade request");
++ // assume the request is a handshake, so create the handshake response
++ let response = match handshake::server::create_response_with_body(&request, || {
++ Body::empty()
++ }) {
++ Ok(response) => {
++ // if the handshake response creation succeeds,
++ // spawn a task to handle the websocket connection
++ tokio::spawn(async move {
++ // using the hyper feature of upgrading a connection
++ match upgrade::on(&mut request).await {
++ // if successfully upgraded
++ Ok(upgraded) => {
++ // set WebSocket configuration options
++ let config = WebSocketConfig {
++ max_send_queue: Some(1024),
++ max_message_size: settings.limits.max_ws_message_bytes,
++ max_frame_size: settings.limits.max_ws_frame_bytes,
++ ..Default::default()
++ };
++ // create a websocket stream from the upgraded object
++ let ws_stream = WebSocketStream::from_raw_socket(
++ // pass the upgraded object
++ // as the base layer stream of the Websocket
++ upgraded,
++ tokio_tungstenite::tungstenite::protocol::Role::Server,
++ Some(config),
++ )
++ .await;
++ let origin = get_header_string("origin", request.headers());
++ let user_agent = get_header_string("user-agent", request.headers());
++ // determine the remote IP from headers if they exist
++ let header_ip = settings
++ .network
++ .remote_ip_header
++ .as_ref()
++ .and_then(|x| get_header_string(x, request.headers()));
++ // use the socket addr as a backup
++ let remote_ip =
++ header_ip.unwrap_or_else(|| remote_addr.ip().to_string());
++ let client_info = ClientInfo {
++ remote_ip,
++ user_agent,
++ origin,
++ };
++ // spawn a nostr server with our websocket
++ tokio::spawn(nostr_server(
++ repo,
++ client_info,
++ settings,
++ ws_stream,
++ broadcast,
++ event_tx,
++ shutdown,
++ metrics,
++ ));
++ }
++ // TODO: trace, don't print...
++ Err(e) => println!(
++ "error when trying to upgrade connection \
++ from address {remote_addr} to websocket connection.
\ ++ Error is: {e}", ++ ), ++ } ++ }); ++ //return the response to the handshake request ++ response ++ } ++ Err(error) => { ++ warn!("websocket response failed"); ++ let mut res = ++ Response::new(Body::from(format!("Failed to create websocket: {error}"))); ++ *res.status_mut() = StatusCode::BAD_REQUEST; ++ return Ok(res); ++ } ++ }; ++ Ok::<_, Infallible>(response) ++ } ++ // Request for Relay info ++ ("/", false) => { ++ // handle request at root with no upgrade header ++ // Check if this is a nostr server info request ++ let accept_header = &request.headers().get(ACCEPT); ++ // check if application/nostr+json is included ++ if let Some(media_types) = accept_header { ++ if let Ok(mt_str) = media_types.to_str() { ++ if mt_str.contains("application/nostr+json") { ++ // build a relay info response ++ debug!("Responding to server info request"); ++ let rinfo = RelayInfo::from(settings); ++ let b = Body::from(serde_json::to_string_pretty(&rinfo).unwrap()); ++ return Ok(Response::builder() ++ .status(200) ++ .header("Content-Type", "application/nostr+json") ++ .header("Access-Control-Allow-Origin", "*") ++ .body(b) ++ .unwrap()); ++ } ++ } ++ } ++ ++ // Redirect users to join page when pay to relay enabled ++ if settings.pay_to_relay.enabled { ++ return Ok(Response::builder() ++ .status(StatusCode::TEMPORARY_REDIRECT) ++ .header("location", "/join") ++ .body(Body::empty()) ++ .unwrap()); ++ } ++ ++ Ok(Response::builder() ++ .status(200) ++ .header("Content-Type", "text/plain") ++ .body(Body::from("Please use a Nostr client to connect.")) ++ .unwrap()) ++ } ++ ("/metrics", false) => { ++ let mut buffer = vec![]; ++ let encoder = TextEncoder::new(); ++ let metric_families = registry.gather(); ++ encoder.encode(&metric_families, &mut buffer).unwrap(); ++ ++ Ok(Response::builder() ++ .status(StatusCode::OK) ++ .header("Content-Type", "text/plain") ++ .body(Body::from(buffer)) ++ .unwrap()) ++ } ++ ("/favicon.ico", false) => { ++ if let Some(favicon_bytes) = favicon { ++ info!("returning favicon"); ++ Ok(Response::builder() ++ .status(StatusCode::OK) ++ .header("Content-Type", "image/x-icon") ++ // 1 month cache ++ .header("Cache-Control", "public, max-age=2419200") ++ .body(Body::from(favicon_bytes)) ++ .unwrap()) ++ } else { ++ Ok(Response::builder() ++ .status(StatusCode::NOT_FOUND) ++ .body(Body::from("")) ++ .unwrap()) ++ } ++ } ++ // LN bits callback endpoint for paid invoices ++ ("/lnbits", false) => { ++ let callback: payment::lnbits::LNBitsCallback = ++ serde_json::from_slice(&to_bytes(request.into_body()).await.unwrap()).unwrap(); ++ debug!("LNBits callback: {callback:?}"); ++ ++ if let Err(e) = payment_tx.send(PaymentMessage::InvoicePaid(callback.payment_hash)) { ++ warn!("Could not send invoice update: {}", e); ++ return Ok(Response::builder() ++ .status(StatusCode::INTERNAL_SERVER_ERROR) ++ .body(Body::from("Error processing callback")) ++ .unwrap()); ++ } ++ ++ Ok(Response::builder() ++ .status(StatusCode::OK) ++ .body(Body::from("ok")) ++ .unwrap()) ++ } ++ // Endpoint for relays terms ++ ("/terms", false) => Ok(Response::builder() ++ .status(200) ++ .header("Content-Type", "text/plain") ++ .body(Body::from(settings.pay_to_relay.terms_message)) ++ .unwrap()), ++ // Endpoint to allow users to sign up ++ ("/join", false) => { ++ // Stops sign ups if disabled ++ if !settings.pay_to_relay.sign_ups { ++ return Ok(Response::builder() ++ .status(401) ++ .header("Content-Type", "text/plain") ++ .body(Body::from("Sorry, joining is not allowed at the moment")) ++ .unwrap()); ++ } ++ ++ let 
html = r#"
[join page markup elided in this snapshot: extraction stripped the HTML tags.
Recoverable content: a minimal page with a form labeled "Enter your pubkey",
which presumably submits the pubkey to the /invoice endpoint handled below.]
++ ++ ++ ++ "#; ++ Ok(Response::builder() ++ .status(StatusCode::OK) ++ .body(Body::from(html)) ++ .unwrap()) ++ } ++ // Endpoint to display invoice ++ ("/invoice", false) => { ++ // Stops sign ups if disabled ++ if !settings.pay_to_relay.sign_ups { ++ return Ok(Response::builder() ++ .status(401) ++ .header("Content-Type", "text/plain") ++ .body(Body::from("Sorry, joining is not allowed at the moment")) ++ .unwrap()); ++ } ++ ++ // Get query pubkey from query string ++ let pubkey = get_pubkey(request); ++ ++ // Redirect back to join page if no pub key is found in query string ++ if pubkey.is_none() { ++ return Ok(Response::builder() ++ .status(404) ++ .header("location", "/join") ++ .body(Body::empty()) ++ .unwrap()); ++ } ++ ++ // Checks key is valid ++ let pubkey = pubkey.unwrap(); ++ let key = Keys::from_pk_str(&pubkey); ++ if key.is_err() { ++ return Ok(Response::builder() ++ .status(401) ++ .header("Content-Type", "text/plain") ++ .body(Body::from("Looks like your key is invalid")) ++ .unwrap()); ++ } ++ ++ // Checks if user is already admitted ++ let payment_message; ++ if let Ok((admission_status, _)) = repo.get_account_balance(&key.unwrap()).await { ++ if admission_status { ++ return Ok(Response::builder() ++ .status(StatusCode::OK) ++ .body(Body::from("Already admitted")) ++ .unwrap()); ++ } else { ++ payment_message = PaymentMessage::CheckAccount(pubkey.clone()); ++ } ++ } else { ++ payment_message = PaymentMessage::NewAccount(pubkey.clone()); ++ } ++ ++ // Send message on payment channel requesting invoice ++ if payment_tx.send(payment_message).is_err() { ++ warn!("Could not send payment tx"); ++ return Ok(Response::builder() ++ .status(501) ++ .header("Content-Type", "text/plain") ++ .body(Body::from("Sorry, something went wrong")) ++ .unwrap()); ++ } ++ ++ // wait for message with invoice back that matched the pub key ++ let mut invoice_info: Option = None; ++ while let Ok(msg) = payment_tx.subscribe().recv().await { ++ match msg { ++ PaymentMessage::Invoice(m_pubkey, m_invoice_info) => { ++ if m_pubkey == pubkey.clone() { ++ invoice_info = Some(m_invoice_info); ++ break; ++ } ++ } ++ PaymentMessage::AccountAdmitted(m_pubkey) => { ++ if m_pubkey == pubkey.clone() { ++ return Ok(Response::builder() ++ .status(StatusCode::OK) ++ .body(Body::from("Already admitted")) ++ .unwrap()); ++ } ++ } ++ _ => (), ++ } ++ } ++ ++ // Return early if cant get invoice ++ if invoice_info.is_none() { ++ return Ok(Response::builder() ++ .status(StatusCode::INTERNAL_SERVER_ERROR) ++ .body(Body::from("Sorry, could not get invoice")) ++ .unwrap()); ++ } ++ ++ // Since invoice is checked to be not none, unwrap ++ let invoice_info = invoice_info.unwrap(); ++ ++ let qr_code: String; ++ if let Ok(code) = QrCode::new(invoice_info.bolt11.as_bytes()) { ++ qr_code = code ++ .render() ++ .min_dimensions(200, 200) ++ .dark_color(svg::Color("#800000")) ++ .light_color(svg::Color("#ffff80")) ++ .build(); ++ } else { ++ qr_code = "Could not render image".to_string(); ++ } ++ ++ let html_result = format!( ++ r#" ++ ++ ++ ++ ++ ++ ++ ++
[invoice page markup elided in this snapshot: extraction stripped the HTML
tags. Recoverable content and format placeholders, in order: an admission
notice ("To use this relay, an admission fee of {} sats is required. By
paying the fee, you agree to the terms."), a QR code slot ({}), the bolt11
invoice text ({}), the note "This page will not refresh", and a link
"Verify admission here once you have paid" built from the pubkey; see the
format arguments that follow the closing quote below.]
++ ++ ++ ++ ++ ++"#, ++ settings.pay_to_relay.admission_cost, ++ qr_code, ++ invoice_info.bolt11, ++ pubkey, ++ invoice_info.bolt11 ++ ); ++ ++ Ok(Response::builder() ++ .status(StatusCode::OK) ++ .body(Body::from(html_result)) ++ .unwrap()) ++ } ++ ("/account", false) => { ++ // Stops sign ups if disabled ++ if !settings.pay_to_relay.enabled { ++ return Ok(Response::builder() ++ .status(401) ++ .header("Content-Type", "text/plain") ++ .body(Body::from("This relay is not paid")) ++ .unwrap()); ++ } ++ ++ // Gets the pubkey from query string ++ let pubkey = get_pubkey(request); ++ ++ // Redirect back to join page if no pub key is found in query string ++ if pubkey.is_none() { ++ return Ok(Response::builder() ++ .status(404) ++ .header("location", "/join") ++ .body(Body::empty()) ++ .unwrap()); ++ } ++ ++ // Checks key is valid ++ let pubkey = pubkey.unwrap(); ++ let key = Keys::from_pk_str(&pubkey); ++ if key.is_err() { ++ return Ok(Response::builder() ++ .status(401) ++ .header("Content-Type", "text/plain") ++ .body(Body::from("Looks like your key is invalid")) ++ .unwrap()); ++ } ++ ++ // Checks if user is already admitted ++ let text = ++ if let Ok((admission_status, _)) = repo.get_account_balance(&key.unwrap()).await { ++ if admission_status { ++ r#"is"# ++ } else { ++ r#"is not"# ++ } ++ } else { ++ "Could not get admission status" ++ }; ++ ++ let html_result = format!( ++ r#" ++ ++ ++ ++ ++ ++ ++ ++
[account page markup elided in this snapshot: extraction stripped the HTML
tags. Recoverable content: a status line of the form "{pubkey} {is|is not}
admitted", built from the two format arguments below.]
++ ++ ++ ++ ++ "#, ++ pubkey, text ++ ); ++ ++ Ok(Response::builder() ++ .status(StatusCode::OK) ++ .body(Body::from(html_result)) ++ .unwrap()) ++ } ++ // later balance ++ (_, _) => { ++ // handle any other url ++ Ok(Response::builder() ++ .status(StatusCode::NOT_FOUND) ++ .body(Body::from("Nothing here.")) ++ .unwrap()) ++ } ++ } ++} ++ ++// Get pubkey from request query string ++fn get_pubkey(request: Request) -> Option { ++ let query = request.uri().query().unwrap_or("").to_string(); ++ ++ // Gets the pubkey value from query string ++ query.split('&').fold(None, |acc, pair| { ++ let mut parts = pair.splitn(2, '='); ++ let key = parts.next(); ++ let value = parts.next(); ++ if key == Some("pubkey") { ++ return value.map(|s| s.to_owned()); ++ } ++ acc ++ }) ++} ++ ++fn get_header_string(header: &str, headers: &HeaderMap) -> Option { ++ headers ++ .get(header) ++ .and_then(|x| x.to_str().ok().map(std::string::ToString::to_string)) ++} ++ ++// return on a control-c or internally requested shutdown signal ++async fn ctrl_c_or_signal(mut shutdown_signal: Receiver<()>) { ++ let mut term_signal = tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate()) ++ .expect("could not define signal"); ++ loop { ++ tokio::select! { ++ _ = shutdown_signal.recv() => { ++ info!("Shutting down webserver as requested"); ++ // server shutting down, exit loop ++ break; ++ }, ++ _ = tokio::signal::ctrl_c() => { ++ info!("Shutting down webserver due to SIGINT"); ++ break; ++ }, ++ _ = term_signal.recv() => { ++ info!("Shutting down webserver due to SIGTERM"); ++ break; ++ }, ++ } ++ } ++} ++ ++fn create_metrics() -> (Registry, NostrMetrics) { ++ // setup prometheus registry ++ let registry = Registry::new(); ++ ++ let query_sub = Histogram::with_opts(HistogramOpts::new( ++ "nostr_query_seconds", ++ "Subscription response times", ++ )) ++ .unwrap(); ++ let query_db = Histogram::with_opts(HistogramOpts::new( ++ "nostr_filter_seconds", ++ "Filter SQL query times", ++ )) ++ .unwrap(); ++ let write_events = Histogram::with_opts(HistogramOpts::new( ++ "nostr_events_write_seconds", ++ "Event writing response times", ++ )) ++ .unwrap(); ++ let sent_events = IntCounterVec::new( ++ Opts::new("nostr_events_sent_total", "Events sent to clients"), ++ vec!["source"].as_slice(), ++ ) ++ .unwrap(); ++ let connections = ++ IntCounter::with_opts(Opts::new("nostr_connections_total", "New connections")).unwrap(); ++ let db_connections = IntGauge::with_opts(Opts::new( ++ "nostr_db_connections", ++ "Active database connections", ++ )) ++ .unwrap(); ++ let query_aborts = IntCounterVec::new( ++ Opts::new("nostr_query_abort_total", "Aborted queries"), ++ vec!["reason"].as_slice(), ++ ) ++ .unwrap(); ++ let cmd_req = IntCounter::with_opts(Opts::new("nostr_cmd_req_total", "REQ commands")).unwrap(); ++ let cmd_event = ++ IntCounter::with_opts(Opts::new("nostr_cmd_event_total", "EVENT commands")).unwrap(); ++ let cmd_close = ++ IntCounter::with_opts(Opts::new("nostr_cmd_close_total", "CLOSE commands")).unwrap(); ++ let cmd_auth = ++ IntCounter::with_opts(Opts::new("nostr_cmd_auth_total", "AUTH commands")).unwrap(); ++ let disconnects = IntCounterVec::new( ++ Opts::new("nostr_disconnects_total", "Client disconnects"), ++ vec!["reason"].as_slice(), ++ ) ++ .unwrap(); ++ registry.register(Box::new(query_sub.clone())).unwrap(); ++ registry.register(Box::new(query_db.clone())).unwrap(); ++ registry.register(Box::new(write_events.clone())).unwrap(); ++ registry.register(Box::new(sent_events.clone())).unwrap(); ++ 
registry.register(Box::new(connections.clone())).unwrap(); ++ registry.register(Box::new(db_connections.clone())).unwrap(); ++ registry.register(Box::new(query_aborts.clone())).unwrap(); ++ registry.register(Box::new(cmd_req.clone())).unwrap(); ++ registry.register(Box::new(cmd_event.clone())).unwrap(); ++ registry.register(Box::new(cmd_close.clone())).unwrap(); ++ registry.register(Box::new(cmd_auth.clone())).unwrap(); ++ registry.register(Box::new(disconnects.clone())).unwrap(); ++ let metrics = NostrMetrics { ++ query_sub, ++ query_db, ++ write_events, ++ sent_events, ++ connections, ++ db_connections, ++ disconnects, ++ query_aborts, ++ cmd_req, ++ cmd_event, ++ cmd_close, ++ cmd_auth, ++ }; ++ (registry, metrics) ++} ++ ++fn file_bytes(path: &str) -> Result> { ++ let f = File::open(path)?; ++ let mut reader = BufReader::new(f); ++ let mut buffer = Vec::new(); ++ // Read file into vector. ++ reader.read_to_end(&mut buffer)?; ++ Ok(buffer) ++} ++ ++/// Start running a Nostr relay server. ++pub fn start_server(settings: &Settings, shutdown_rx: MpscReceiver<()>) -> Result<(), Error> { ++ trace!("Config: {:?}", settings); ++ // do some config validation. ++ if !Path::new(&settings.database.data_directory).is_dir() { ++ error!("Database directory does not exist"); ++ return Err(Error::DatabaseDirError); ++ } ++ let addr = format!( ++ "{}:{}", ++ settings.network.address.trim(), ++ settings.network.port ++ ); ++ let socket_addr = addr.parse().expect("listening address not valid"); ++ // address whitelisting settings ++ if let Some(addr_whitelist) = &settings.authorization.pubkey_whitelist { ++ info!( ++ "Event publishing restricted to {} pubkey(s)", ++ addr_whitelist.len() ++ ); ++ } ++ // check if NIP-05 enforced user verification is on ++ if settings.verified_users.is_active() { ++ info!( ++ "NIP-05 user verification mode:{:?}", ++ settings.verified_users.mode ++ ); ++ if let Some(d) = settings.verified_users.verify_update_duration() { ++ info!("NIP-05 check user verification every: {:?}", d); ++ } ++ if let Some(d) = settings.verified_users.verify_expiration_duration() { ++ info!("NIP-05 user verification expires after: {:?}", d); ++ } ++ if let Some(wl) = &settings.verified_users.domain_whitelist { ++ info!("NIP-05 domain whitelist: {:?}", wl); ++ } ++ if let Some(bl) = &settings.verified_users.domain_blacklist { ++ info!("NIP-05 domain blacklist: {:?}", bl); ++ } ++ } ++ // configure tokio runtime ++ let rt = Builder::new_multi_thread() ++ .enable_all() ++ .thread_name_fn(|| { ++ // give each thread a unique numeric name ++ static ATOMIC_ID: std::sync::atomic::AtomicUsize = ++ std::sync::atomic::AtomicUsize::new(0); ++ let id = ATOMIC_ID.fetch_add(1, Ordering::SeqCst); ++ format!("tokio-ws-{id}") ++ }) ++ // limit concurrent SQLite blocking threads ++ .max_blocking_threads(settings.limits.max_blocking_threads) ++ .on_thread_start(|| { ++ trace!("started new thread: {:?}", std::thread::current().name()); ++ }) ++ .on_thread_stop(|| { ++ trace!("stopped thread: {:?}", std::thread::current().name()); ++ }) ++ .build() ++ .unwrap(); ++ // start tokio ++ rt.block_on(async { ++ let broadcast_buffer_limit = settings.limits.broadcast_buffer; ++ let persist_buffer_limit = settings.limits.event_persist_buffer; ++ let verified_users_active = settings.verified_users.is_active(); ++ let settings = settings.clone(); ++ info!("listening on: {}", socket_addr); ++ // all client-submitted valid events are broadcast to every ++ // other client on this channel. 
This should be large enough
++        // to accommodate slower readers (messages are dropped if
++        // clients can not keep up).
++        let (bcast_tx, _) = broadcast::channel::<Event>(broadcast_buffer_limit);
++        // validated events that need to be persisted are sent to the
++        // database via this channel.
++        let (event_tx, event_rx) = mpsc::channel::<SubmittedEvent>(persist_buffer_limit);
++        // establish a channel for letting all threads know about a
++        // requested server shutdown.
++        let (invoke_shutdown, shutdown_listen) = broadcast::channel::<()>(1);
++        // create a channel for sending any new metadata event. These
++        // will get processed relatively slowly (a potentially
++        // multi-second blocking HTTP call) on a single thread, so we
++        // buffer requests on the channel. No harm in dropping events
++        // here, since we are protecting against DoS. This can make
++        // it difficult to set up initial metadata in bulk, since
++        // overwhelming this will drop events and won't register
++        // metadata events.
++        let (metadata_tx, metadata_rx) = broadcast::channel::<Event>(4096);
++
++        let (payment_tx, payment_rx) = broadcast::channel::<PaymentMessage>(4096);
++
++        let (registry, metrics) = create_metrics();
++
++        // build a repository for events
++        let repo = db::build_repo(&settings, metrics.clone()).await;
++        // start the database writer task. Give it a channel for
++        // writing events, and for publishing events that have been
++        // written (to all connected clients).
++        tokio::task::spawn(db::db_writer(
++            repo.clone(),
++            settings.clone(),
++            event_rx,
++            bcast_tx.clone(),
++            metadata_tx.clone(),
++            payment_tx.clone(),
++            shutdown_listen,
++        ));
++        info!("db writer created");
++
++        // create a nip-05 verifier thread, if enabled.
++        if settings.verified_users.mode != VerifiedUsersMode::Disabled {
++            let verifier_opt = nip05::Verifier::new(
++                repo.clone(),
++                metadata_rx,
++                bcast_tx.clone(),
++                settings.clone(),
++            );
++            if let Ok(mut v) = verifier_opt {
++                if verified_users_active {
++                    tokio::task::spawn(async move {
++                        info!("starting up NIP-05 verifier...");
++                        v.run().await;
++                    });
++                }
++            }
++        }
++
++        // Create payments thread if pay to relay enabled
++        if settings.pay_to_relay.enabled {
++            let payment_opt = payment::Payment::new(
++                repo.clone(),
++                payment_tx.clone(),
++                payment_rx,
++                bcast_tx.clone(),
++                settings.clone(),
++            );
++            if let Ok(mut p) = payment_opt {
++                tokio::task::spawn(async move {
++                    info!("starting payment process ...");
++                    p.run().await;
++                });
++            }
++        }
++
++        // listen for (external to tokio) shutdown request
++        let controlled_shutdown = invoke_shutdown.clone();
++        tokio::spawn(async move {
++            info!("control message listener started");
++            match shutdown_rx.recv() {
++                Ok(()) => {
++                    info!("control message requesting shutdown");
++                    controlled_shutdown.send(()).ok();
++                }
++                Err(std::sync::mpsc::RecvError) => {
++                    trace!("shutdown requestor is disconnected (this is normal)");
++                }
++            };
++        });
++        // listen for ctrl-c interrupts
++        let ctrl_c_shutdown = invoke_shutdown.clone();
++        // listener for webserver shutdown
++        let webserver_shutdown_listen = invoke_shutdown.subscribe();
++
++        tokio::spawn(async move {
++            tokio::signal::ctrl_c().await.unwrap();
++            info!("shutting down due to SIGINT (main)");
++            ctrl_c_shutdown.send(()).ok();
++        });
++        // spawn a task to check the pool size.
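++        // (added note) the block below is carried over from upstream
++        // nostr-rs-relay but left disabled; when enabled, it periodically
++        // logged usage of the "reader" database connection pool.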
++        //let pool_monitor = pool.clone();
++        //tokio::spawn(async move {db::monitor_pool("reader", pool_monitor).await;});
++
++        // Read in the favicon if it exists
++        let favicon = settings.info.favicon.as_ref().and_then(|x| {
++            info!("reading favicon...");
++            file_bytes(x).ok()
++        });
++
++        // A `Service` is needed for every connection, so this
++        // creates one from our `handle_request` function.
++        let make_svc = make_service_fn(|conn: &AddrStream| {
++            let repo = repo.clone();
++            let remote_addr = conn.remote_addr();
++            let bcast = bcast_tx.clone();
++            let event = event_tx.clone();
++            let payment_tx = payment_tx.clone();
++            let stop = invoke_shutdown.clone();
++            let settings = settings.clone();
++            let favicon = favicon.clone();
++            let registry = registry.clone();
++            let metrics = metrics.clone();
++            async move {
++                // service_fn converts our function into a `Service`
++                Ok::<_, Infallible>(service_fn(move |request: Request<Body>| {
++                    handle_web_request(
++                        request,
++                        repo.clone(),
++                        settings.clone(),
++                        remote_addr,
++                        bcast.clone(),
++                        event.clone(),
++                        payment_tx.clone(),
++                        stop.subscribe(),
++                        favicon.clone(),
++                        registry.clone(),
++                        metrics.clone(),
++                    )
++                }))
++            }
++        });
++        let server = Server::bind(&socket_addr)
++            .serve(make_svc)
++            .with_graceful_shutdown(ctrl_c_or_signal(webserver_shutdown_listen));
++        // run hyper in this thread. This is why the thread does not return.
++        if let Err(e) = server.await {
++            eprintln!("server error: {e}");
++        }
++    });
++    Ok(())
++}
++
++/// Nostr protocol messages from a client
++#[derive(Deserialize, Serialize, Clone, PartialEq, Eq, Debug)]
++#[serde(untagged)]
++pub enum NostrMessage {
++    /// `EVENT` and `AUTH` messages
++    EventMsg(EventCmd),
++    /// A `REQ` message
++    SubMsg(Subscription),
++    /// A `CLOSE` message
++    CloseMsg(CloseCmd),
++}
++
++/// Convert Message to `NostrMessage`
++fn convert_to_msg(msg: &str, max_bytes: Option<usize>) -> Result<NostrMessage> {
++    let parsed_res: Result<NostrMessage> =
++        serde_json::from_str(msg).map_err(std::convert::Into::into);
++    match parsed_res {
++        Ok(m) => {
++            if let NostrMessage::SubMsg(_) = m {
++                // note; this only prints the first 16k of a REQ and then truncates.
++                trace!("REQ: {:?}", msg);
++            };
++            if let NostrMessage::EventMsg(_) = m {
++                if let Some(max_size) = max_bytes {
++                    // check length, ensure that some max size is set.
++                    if msg.len() > max_size && max_size > 0 {
++                        return Err(Error::EventMaxLengthError(msg.len()));
++                    }
++                }
++            }
++            Ok(m)
++        }
++        Err(e) => {
++            trace!("proto parse error: {:?}", e);
++            trace!("parse error on message: {:?}", msg.trim());
++            Err(Error::ProtoParseError)
++        }
++    }
++}
++
++/// Turn a string into a NOTICE message ready to send over a `WebSocket`
++fn make_notice_message(notice: &Notice) -> Message {
++    let json = match notice {
++        Notice::Message(ref msg) => json!(["NOTICE", msg]),
++        Notice::EventResult(ref res) => json!(["OK", res.id, res.status.to_bool(), res.msg]),
++        Notice::AuthChallenge(ref challenge) => json!(["AUTH", challenge]),
++    };
++
++    Message::text(json.to_string())
++}
++
++fn allowed_to_send(event_str: &str, conn: &conn::ClientConn, settings: &Settings) -> bool {
++    // TODO: pass in kind so that we can avoid deserialization for most events
++    if settings.authorization.nip42_dms {
++        match serde_json::from_str::<Event>(event_str) {
++            Ok(event) => {
++                if event.kind == 4 || event.kind == 44 || event.kind == 1059 {
++                    match (conn.auth_pubkey(), event.tag_values_by_name("p").first()) {
++                        (Some(auth_pubkey), Some(recipient_pubkey)) => {
++                            recipient_pubkey == auth_pubkey || &event.pubkey == auth_pubkey
++                        }
++                        (_, _) => false,
++                    }
++                } else {
++                    true
++                }
++            }
++            Err(_) => false,
++        }
++    } else {
++        true
++    }
++}
++
++struct ClientInfo {
++    remote_ip: String,
++    user_agent: Option<String>,
++    origin: Option<String>,
++}
++
++/// Handle new client connections. This runs through an event loop
++/// for all client communication.
++#[allow(clippy::too_many_arguments)]
++async fn nostr_server(
++    repo: Arc<dyn NostrRepo>,
++    client_info: ClientInfo,
++    settings: Settings,
++    mut ws_stream: WebSocketStream<Upgraded>,
++    broadcast: Sender<Event>,
++    event_tx: mpsc::Sender<SubmittedEvent>,
++    mut shutdown: Receiver<()>,
++    metrics: NostrMetrics,
++) {
++    // the time this websocket nostr server started
++    let orig_start = Instant::now();
++    // get a broadcast channel for clients to communicate on
++    let mut bcast_rx = broadcast.subscribe();
++    // Track internal client state
++    let mut conn = conn::ClientConn::new(client_info.remote_ip);
++    // subscription creation rate limiting
++    let mut sub_lim_opt = None;
++    // 100ms jitter when the rate limiter returns
++    let jitter = Jitter::up_to(Duration::from_millis(100));
++    let sub_per_min_setting = settings.limits.subscriptions_per_min;
++    if let Some(sub_per_min) = sub_per_min_setting {
++        if sub_per_min > 0 {
++            trace!("Rate limits for sub creation ({}/min)", sub_per_min);
++            let quota_time = core::num::NonZeroU32::new(sub_per_min).unwrap();
++            let quota = Quota::per_minute(quota_time);
++            sub_lim_opt = Some(RateLimiter::direct(quota));
++        }
++    }
++    // Use the remote IP as the client identifier
++    let cid = conn.get_client_prefix();
++    // Create a channel for receiving query results from the database.
++    // we will send out the tx handle to any query we generate.
++    // this has capacity for some of the larger requests we see, which
++    // should allow the DB thread to release the handle earlier.
++    let (query_tx, mut query_rx) = mpsc::channel::<QueryResult>(20_000);
++    // Create channel for receiving NOTICEs
++    let (notice_tx, mut notice_rx) = mpsc::channel::<Notice>(128);
++
++    // last time this client sent data (message, ping, etc.)
++    let mut last_message_time = Instant::now();
++
++    // ping interval (every 5 minutes)
++    let default_ping_dur = Duration::from_secs(settings.network.ping_interval_seconds.into());
++
++    // disconnect after 20 minutes without a ping response or event.
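++    // (added note) with the default 5-minute ping interval noted above,
++    // this allows roughly four unanswered pings before the connection
++    // is dropped.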
++    let max_quiet_time = Duration::from_secs(60 * 20);
++
++    let start = tokio::time::Instant::now() + default_ping_dur;
++    let mut ping_interval = tokio::time::interval_at(start, default_ping_dur);
++
++    // maintain a hashmap of oneshot channels for active subscriptions.
++    // when these subscriptions are cancelled, make a message
++    // available to the executing query so it knows to stop.
++    let mut running_queries: HashMap<String, oneshot::Sender<()>> = HashMap::new();
++    // for stats, keep track of how many events the client published,
++    // and how many it received from queries.
++    let mut client_published_event_count: usize = 0;
++    let mut client_received_event_count: usize = 0;
++
++    let unspec = "<unspecified>".to_string();
++    info!("new client connection (cid: {}, ip: {:?})", cid, conn.ip());
++    let origin = client_info.origin.as_ref().unwrap_or(&unspec);
++    let user_agent = client_info.user_agent.as_ref().unwrap_or(&unspec);
++    info!(
++        "cid: {}, origin: {:?}, user-agent: {:?}",
++        cid, origin, user_agent
++    );
++
++    // Measure connections
++    metrics.connections.inc();
++
++    if settings.authorization.nip42_auth {
++        conn.generate_auth_challenge();
++        if let Some(challenge) = conn.auth_challenge() {
++            ws_stream
++                .send(make_notice_message(&Notice::AuthChallenge(
++                    challenge.to_string(),
++                )))
++                .await
++                .ok();
++        }
++    }
++
++    loop {
++        tokio::select! {
++            _ = shutdown.recv() => {
++                metrics.disconnects.with_label_values(&["shutdown"]).inc();
++                info!("closing connection due to shutdown, client: {}, ip: {:?}, connected: {:?}", cid, conn.ip(), orig_start.elapsed());
++                // server shutting down, exit loop
++                break;
++            },
++            _ = ping_interval.tick() => {
++                // check how long since we talked to client
++                // if it has been too long, disconnect
++                if last_message_time.elapsed() > max_quiet_time {
++                    debug!("ending connection due to lack of client ping response");
++                    metrics.disconnects.with_label_values(&["timeout"]).inc();
++                    break;
++                }
++                // Send a ping
++                ws_stream.send(Message::Ping(Vec::new())).await.ok();
++            },
++            Some(notice_msg) = notice_rx.recv() => {
++                ws_stream.send(make_notice_message(&notice_msg)).await.ok();
++            },
++            Some(query_result) = query_rx.recv() => {
++                // database informed us of a query result we asked for
++                let subesc = query_result.sub_id.replace('"', "");
++                if query_result.event == "EOSE" {
++                    let send_str = format!("[\"EOSE\",\"{subesc}\"]");
++                    ws_stream.send(Message::Text(send_str)).await.ok();
++                } else if allowed_to_send(&query_result.event, &conn, &settings) {
++                    metrics.sent_events.with_label_values(&["db"]).inc();
++                    client_received_event_count += 1;
++                    // send a result
++                    let send_str = format!("[\"EVENT\",\"{}\",{}]", subesc, &query_result.event);
++                    ws_stream.send(Message::Text(send_str)).await.ok();
++                }
++            },
++            // TODO: consider logging the LaggedRecv error
++            Ok(global_event) = bcast_rx.recv() => {
++                // an event has been broadcast to all clients
++                // first check if there is a subscription for this event.
++                for (s, sub) in conn.subscriptions() {
++                    if !sub.interested_in_event(&global_event) {
++                        continue;
++                    }
++                    // TODO: serialize at broadcast time, instead of
++                    // once for each consumer.
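++                    // (added sketch, not implemented here) the TODO above
++                    // could be addressed by serializing once at broadcast
++                    // time and sharing the result, e.g. sending an
++                    // Arc<String> frame alongside each Event:
++                    //     let frame = Arc::new(serde_json::to_string(&event)?);
++                    //     bcast_tx.send((event, frame)).ok();
++                    // so each consumer clones the Arc instead of re-serializing.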
++ if let Ok(event_str) = serde_json::to_string(&global_event) { ++ if allowed_to_send(&event_str, &conn, &settings) { ++ // create an event response and send it ++ trace!("sub match for client: {}, sub: {:?}, event: {:?}", ++ cid, s, ++ global_event.get_event_id_prefix()); ++ let subesc = s.replace('"', ""); ++ metrics.sent_events.with_label_values(&["realtime"]).inc(); ++ ws_stream.send(Message::Text(format!("[\"EVENT\",\"{subesc}\",{event_str}]"))).await.ok(); ++ } ++ } else { ++ warn!("could not serialize event: {:?}", global_event.get_event_id_prefix()); ++ } ++ } ++ }, ++ ws_next = ws_stream.next() => { ++ // update most recent message time for client ++ last_message_time = Instant::now(); ++ // Consume text messages from the client, parse into Nostr messages. ++ let nostr_msg = match ws_next { ++ Some(Ok(Message::Text(m))) => { ++ convert_to_msg(&m,settings.limits.max_event_bytes) ++ }, ++ Some(Ok(Message::Binary(_))) => { ++ ws_stream.send( ++ make_notice_message(&Notice::message("binary messages are not accepted".into()))).await.ok(); ++ continue; ++ }, ++ Some(Ok(Message::Ping(_) | Message::Pong(_))) => { ++ // get a ping/pong, ignore. tungstenite will ++ // send responses automatically. ++ continue; ++ }, ++ Some(Err(WsError::Capacity(MessageTooLong{size, max_size}))) => { ++ ws_stream.send( ++ make_notice_message(&Notice::message(format!("message too large ({size} > {max_size})")))).await.ok(); ++ continue; ++ }, ++ None | ++ Some(Ok(Message::Close(_)) | ++ Err(WsError::AlreadyClosed | WsError::ConnectionClosed | ++ WsError::Protocol(tungstenite::error::ProtocolError::ResetWithoutClosingHandshake))) ++ => { ++ debug!("websocket close from client (cid: {}, ip: {:?})",cid, conn.ip()); ++ metrics.disconnects.with_label_values(&["normal"]).inc(); ++ break; ++ }, ++ Some(Err(WsError::Io(e))) => { ++ // IO errors are considered fatal ++ warn!("IO error (cid: {}, ip: {:?}): {:?}", cid, conn.ip(), e); ++ metrics.disconnects.with_label_values(&["error"]).inc(); ++ ++ break; ++ } ++ x => { ++ // default condition on error is to close the client connection ++ info!("unknown error (cid: {}, ip: {:?}): {:?} (closing conn)", cid, conn.ip(), x); ++ metrics.disconnects.with_label_values(&["error"]).inc(); ++ ++ break; ++ } ++ }; ++ ++ // convert ws_next into proto_next ++ match nostr_msg { ++ Ok(NostrMessage::EventMsg(ec)) => { ++ // An EventCmd needs to be validated to be converted into an Event ++ // handle each type of message ++ let evid = ec.event_id().to_owned(); ++ let parsed : Result = Result::::from(ec); ++ metrics.cmd_event.inc(); ++ match parsed { ++ Ok(WrappedEvent(e)) => { ++ metrics.cmd_event.inc(); ++ let id_prefix:String = e.id.chars().take(8).collect(); ++ debug!("successfully parsed/validated event: {:?} (cid: {}, kind: {})", id_prefix, cid, e.kind); ++ // check if event is expired ++ if e.is_expired() { ++ let notice = Notice::invalid(e.id, "The event has already expired"); ++ ws_stream.send(make_notice_message(¬ice)).await.ok(); ++ // check if the event is too far in the future. ++ } else if e.is_valid_timestamp(settings.options.reject_future_seconds) { ++ // Write this to the database. 
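++ // (added note) the write is asynchronous: the ["OK", <event id>, ...]
++ // result reaches the client later through notice_tx, once db_writer
++ // has accepted or rejected the event.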
++ let auth_pubkey = conn.auth_pubkey().and_then(|pubkey| hex::decode(pubkey).ok()); ++ let submit_event = SubmittedEvent { ++ event: e.clone(), ++ notice_tx: notice_tx.clone(), ++ source_ip: conn.ip().to_string(), ++ origin: client_info.origin.clone(), ++ user_agent: client_info.user_agent.clone(), ++ auth_pubkey }; ++ event_tx.send(submit_event).await.ok(); ++ client_published_event_count += 1; ++ } else { ++ info!("client: {} sent a far future-dated event", cid); ++ if let Some(fut_sec) = settings.options.reject_future_seconds { ++ let msg = format!("The event created_at field is out of the acceptable range (+{fut_sec}sec) for this relay."); ++ let notice = Notice::invalid(e.id, &msg); ++ ws_stream.send(make_notice_message(¬ice)).await.ok(); ++ } ++ } ++ }, ++ Ok(WrappedAuth(event)) => { ++ metrics.cmd_auth.inc(); ++ if settings.authorization.nip42_auth { ++ let id_prefix:String = event.id.chars().take(8).collect(); ++ debug!("successfully parsed auth: {:?} (cid: {})", id_prefix, cid); ++ match &settings.info.relay_url { ++ None => { ++ error!("AUTH command received, but relay_url is not set in the config file (cid: {})", cid); ++ }, ++ Some(relay) => { ++ match conn.authenticate(&event, relay) { ++ Ok(_) => { ++ let pubkey = match conn.auth_pubkey() { ++ Some(k) => k.chars().take(8).collect(), ++ None => "".to_string(), ++ }; ++ info!("client is authenticated: (cid: {}, pubkey: {:?})", cid, pubkey); ++ }, ++ Err(e) => { ++ info!("authentication error: {} (cid: {})", e, cid); ++ ws_stream.send(make_notice_message(&Notice::restricted(event.id, format!("authentication error: {e}").as_str()))).await.ok(); ++ }, ++ } ++ } ++ } ++ } else { ++ let e = CommandUnknownError; ++ info!("client sent an invalid event (cid: {})", cid); ++ ws_stream.send(make_notice_message(&Notice::invalid(evid, &format!("{e}")))).await.ok(); ++ } ++ }, ++ Err(e) => { ++ metrics.cmd_event.inc(); ++ info!("client sent an invalid event (cid: {})", cid); ++ ws_stream.send(make_notice_message(&Notice::invalid(evid, &format!("{e}")))).await.ok(); ++ } ++ } ++ }, ++ Ok(NostrMessage::SubMsg(s)) => { ++ debug!("subscription requested (cid: {}, sub: {:?})", cid, s.id); ++ // subscription handling consists of: ++ // * check for rate limits ++ // * registering the subscription so future events can be matched ++ // * making a channel to cancel to request later ++ // * sending a request for a SQL query ++ // Do nothing if the sub already exists. ++ if conn.has_subscription(&s) { ++ info!("client sent duplicate subscription, ignoring (cid: {}, sub: {:?})", cid, s.id); ++ } else { ++ metrics.cmd_req.inc(); ++ if let Some(ref lim) = sub_lim_opt { ++ lim.until_ready_with_jitter(jitter).await; ++ } ++ let (abandon_query_tx, abandon_query_rx) = oneshot::channel::<()>(); ++ match conn.subscribe(s.clone()) { ++ Ok(()) => { ++ // when we insert, if there was a previous query running with the same name, cancel it. ++ if let Some(previous_query) = running_queries.insert(s.id.clone(), abandon_query_tx) { ++ previous_query.send(()).ok(); ++ } ++ if s.needs_historical_events() { ++ // start a database query. this spawns a blocking database query on a worker thread. 
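++ // (added note) abandon_query_rx is the cancellation signal for the
++ // spawned query: a CLOSE for this subscription, or a new REQ reusing
++ // its id, fires the oneshot sender stored in running_queries and the
++ // worker stops early.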
++                                repo.query_subscription(s, cid.clone(), query_tx.clone(), abandon_query_rx).await.ok();
++                            }
++                        },
++                        Err(e) => {
++                            info!("Subscription error: {} (cid: {}, sub: {:?})", e, cid, s.id);
++                            ws_stream.send(make_notice_message(&Notice::message(format!("Subscription error: {e}")))).await.ok();
++                        }
++                    }
++                }
++            },
++            Ok(NostrMessage::CloseMsg(cc)) => {
++                // closing a request simply removes the subscription.
++                let parsed : Result<Close> = Result::<Close>::from(cc);
++                if let Ok(c) = parsed {
++                    metrics.cmd_close.inc();
++                    // check if a query is currently
++                    // running, and remove it if so.
++                    let stop_tx = running_queries.remove(&c.id);
++                    if let Some(tx) = stop_tx {
++                        tx.send(()).ok();
++                    }
++                    // stop checking new events against
++                    // the subscription
++                    conn.unsubscribe(&c);
++                } else {
++                    info!("invalid command ignored");
++                    ws_stream.send(make_notice_message(&Notice::message("could not parse command".into()))).await.ok();
++                }
++            },
++            Err(Error::ConnError) => {
++                debug!("got connection close/error, disconnecting cid: {}, ip: {:?}", cid, conn.ip());
++                break;
++            }
++            Err(Error::EventMaxLengthError(s)) => {
++                info!("client sent command larger ({} bytes) than max size (cid: {})", s, cid);
++                ws_stream.send(make_notice_message(&Notice::message("event exceeded max size".into()))).await.ok();
++            },
++            Err(Error::ProtoParseError) => {
++                info!("client sent command that could not be parsed (cid: {})", cid);
++                ws_stream.send(make_notice_message(&Notice::message("could not parse command".into()))).await.ok();
++            },
++            Err(e) => {
++                info!("got non-fatal error from client (cid: {}, error: {:?})", cid, e);
++            },
++            }
++        },
++        }
++    }
++    // connection cleanup - ensure any still running queries are terminated.
++    for (_, stop_tx) in running_queries {
++        stop_tx.send(()).ok();
++    }
++    info!(
++        "stopping client connection (cid: {}, ip: {:?}, sent: {} events, recv: {} events, connected: {:?})",
++        cid,
++        conn.ip(),
++        client_published_event_count,
++        client_received_event_count,
++        orig_start.elapsed()
++    );
++}
++
++#[derive(Clone)]
++pub struct NostrMetrics {
++    pub query_sub: Histogram,        // response time of successful subscriptions
++    pub query_db: Histogram,         // individual database query execution time
++    pub db_connections: IntGauge,    // database connections in use
++    pub write_events: Histogram,     // response time of event writes
++    pub sent_events: IntCounterVec,  // count of events sent to clients
++    pub connections: IntCounter,     // count of websocket connections
++    pub disconnects: IntCounterVec,  // client disconnects
++    pub query_aborts: IntCounterVec, // count of queries aborted by server
++    pub cmd_req: IntCounter,         // count of REQ commands received
++    pub cmd_event: IntCounter,       // count of EVENT commands received
++    pub cmd_close: IntCounter,       // count of CLOSE commands received
++    pub cmd_auth: IntCounter,        // count of AUTH commands received
++}
+diff --git a/src/subscription.rs b/src/subscription.rs
+new file mode 100644
+index 0000000..17aaceb
+--- /dev/null
++++ b/src/subscription.rs
+@@ -0,0 +1,650 @@
++//! Subscription and filter parsing
++use crate::error::Result;
++use crate::event::Event;
++use serde::de::Unexpected;
++use serde::ser::SerializeMap;
++use serde::{Deserialize, Deserializer, Serialize, Serializer};
++use serde_json::Value;
++use std::collections::HashMap;
++use std::collections::HashSet;
++
++/// Subscription identifier and set of request filters
++#[derive(Serialize, PartialEq, Eq, Debug, Clone)]
++pub struct Subscription {
++    pub id: String,
++    pub filters: Vec<ReqFilter>,
++}
++
++/// Filter for requests
++///
++/// Corresponds to client-provided subscription request elements. Any
++/// element can be present if it should be used in filtering, or
++/// absent ([`None`]) if it should be ignored.
++#[derive(PartialEq, Eq, Debug, Clone)]
++pub struct ReqFilter {
++    /// Event hashes
++    pub ids: Option<Vec<String>>,
++    /// Event kinds
++    pub kinds: Option<Vec<u64>>,
++    /// Events published after this time
++    pub since: Option<u64>,
++    /// Events published before this time
++    pub until: Option<u64>,
++    /// List of author public keys
++    pub authors: Option<Vec<String>>,
++    /// Limit number of results
++    pub limit: Option<u64>,
++    /// Set of tags
++    pub tags: Option<HashMap<char, HashSet<String>>>,
++    /// Force no matches due to malformed data
++    // we can't represent it in the req filter, so we don't want to
++    // erroneously match. This basically indicates the req tried to
++    // do something invalid.
++    pub force_no_match: bool,
++}
++
++impl Serialize for ReqFilter {
++    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
++    where
++        S: Serializer,
++    {
++        let mut map = serializer.serialize_map(None)?;
++        if let Some(ids) = &self.ids {
++            map.serialize_entry("ids", &ids)?;
++        }
++        if let Some(kinds) = &self.kinds {
++            map.serialize_entry("kinds", &kinds)?;
++        }
++        if let Some(until) = &self.until {
++            map.serialize_entry("until", until)?;
++        }
++        if let Some(since) = &self.since {
++            map.serialize_entry("since", since)?;
++        }
++        if let Some(limit) = &self.limit {
++            map.serialize_entry("limit", limit)?;
++        }
++        if let Some(authors) = &self.authors {
++            map.serialize_entry("authors", &authors)?;
++        }
++        // serialize tags
++        if let Some(tags) = &self.tags {
++            for (k, v) in tags {
++                let vals: Vec<&String> = v.iter().collect();
++                map.serialize_entry(&format!("#{k}"), &vals)?;
++            }
++        }
++        map.end()
++    }
++}
++
++impl<'de> Deserialize<'de> for ReqFilter {
++    fn deserialize<D>(deserializer: D) -> Result<ReqFilter, D::Error>
++    where
++        D: Deserializer<'de>,
++    {
++        let received: Value = Deserialize::deserialize(deserializer)?;
++        let filter = received.as_object().ok_or_else(|| {
++            serde::de::Error::invalid_type(
++                Unexpected::Other("reqfilter is not an object"),
++                &"a json object",
++            )
++        })?;
++        let mut rf = ReqFilter {
++            ids: None,
++            kinds: None,
++            since: None,
++            until: None,
++            authors: None,
++            limit: None,
++            tags: None,
++            force_no_match: false,
++        };
++        let empty_string = "".into();
++        let mut ts = None;
++        // iterate through each key, and assign values that exist
++        for (key, val) in filter {
++            // ids
++            if key == "ids" {
++                let raw_ids: Option<Vec<String>> = Deserialize::deserialize(val).ok();
++                if let Some(a) = raw_ids.as_ref() {
++                    if a.contains(&empty_string) {
++                        return Err(serde::de::Error::invalid_type(
++                            Unexpected::Other("prefix matches must not be empty strings"),
++                            &"a json object",
++                        ));
++                    }
++                }
++                rf.ids = raw_ids;
++            } else if key == "kinds" {
++                rf.kinds = Deserialize::deserialize(val).ok();
++            } else if key == "since" {
++                rf.since = Deserialize::deserialize(val).ok();
++            } else if key == "until" {
++                rf.until = Deserialize::deserialize(val).ok();
++            } else if key ==
"limit" { ++ rf.limit = Deserialize::deserialize(val).ok(); ++ } else if key == "authors" { ++ let raw_authors: Option> = Deserialize::deserialize(val).ok(); ++ if let Some(a) = raw_authors.as_ref() { ++ if a.contains(&empty_string) { ++ return Err(serde::de::Error::invalid_type( ++ Unexpected::Other("prefix matches must not be empty strings"), ++ &"a json object", ++ )); ++ } ++ } ++ rf.authors = raw_authors; ++ } else if key.starts_with('#') && key.len() > 1 && val.is_array() { ++ if let Some(tag_search) = tag_search_char_from_filter(key) { ++ if ts.is_none() { ++ // Initialize the tag if necessary ++ ts = Some(HashMap::new()); ++ } ++ if let Some(m) = ts.as_mut() { ++ let tag_vals: Option> = Deserialize::deserialize(val).ok(); ++ if let Some(v) = tag_vals { ++ let hs = v.into_iter().collect::>(); ++ m.insert(tag_search.to_owned(), hs); ++ } ++ }; ++ } else { ++ // tag search that is multi-character, don't add to subscription ++ rf.force_no_match = true; ++ continue; ++ } ++ } ++ } ++ rf.tags = ts; ++ Ok(rf) ++ } ++} ++ ++/// Attempt to form a single-char identifier from a tag search filter ++fn tag_search_char_from_filter(tagname: &str) -> Option { ++ let tagname_nohash = &tagname[1..]; ++ // We return the tag character if and only if the tagname consists ++ // of a single char. ++ let mut tagnamechars = tagname_nohash.chars(); ++ let firstchar = tagnamechars.next(); ++ match firstchar { ++ Some(_) => { ++ // check second char ++ if tagnamechars.next().is_none() { ++ firstchar ++ } else { ++ None ++ } ++ } ++ None => None, ++ } ++} ++ ++impl<'de> Deserialize<'de> for Subscription { ++ /// Custom deserializer for subscriptions, which have a more ++ /// complex structure than the other message types. ++ fn deserialize(deserializer: D) -> Result ++ where ++ D: Deserializer<'de>, ++ { ++ let mut v: Value = Deserialize::deserialize(deserializer)?; ++ // this should be a 3-or-more element array. ++ // verify the first element is a String, REQ ++ // get the subscription from the second element. ++ // convert each of the remaining objects into filters ++ ++ // check for array ++ let va = v ++ .as_array_mut() ++ .ok_or_else(|| serde::de::Error::custom("not array"))?; ++ ++ // check length ++ if va.len() < 3 { ++ return Err(serde::de::Error::custom("not enough fields")); ++ } ++ let mut i = va.iter_mut(); ++ // get command ("REQ") and ensure it is a string ++ let req_cmd_str: serde_json::Value = i.next().unwrap().take(); ++ let req = req_cmd_str ++ .as_str() ++ .ok_or_else(|| serde::de::Error::custom("first element of request was not a string"))?; ++ if req != "REQ" { ++ return Err(serde::de::Error::custom("missing REQ command")); ++ } ++ ++ // ensure sub id is a string ++ let sub_id_str: serde_json::Value = i.next().unwrap().take(); ++ let sub_id = sub_id_str ++ .as_str() ++ .ok_or_else(|| serde::de::Error::custom("missing subscription id"))?; ++ ++ let mut filters = vec![]; ++ for fv in i { ++ let f: ReqFilter = serde_json::from_value(fv.take()) ++ .map_err(|_| serde::de::Error::custom("could not parse filter"))?; ++ // create indexes ++ filters.push(f); ++ } ++ filters.dedup(); ++ Ok(Subscription { ++ id: sub_id.to_owned(), ++ filters, ++ }) ++ } ++} ++ ++impl Subscription { ++ /// Get a copy of the subscription identifier. ++ #[must_use] ++ pub fn get_id(&self) -> String { ++ self.id.clone() ++ } ++ ++ /// Determine if any filter is requesting historical (database) ++ /// queries. If every filter has limit:0, we do not need to query the DB. 
++ #[must_use] ++ pub fn needs_historical_events(&self) -> bool { ++ self.filters.iter().any(|f| f.limit != Some(0)) ++ } ++ ++ /// Determine if this subscription matches a given [`Event`]. Any ++ /// individual filter match is sufficient. ++ #[must_use] ++ pub fn interested_in_event(&self, event: &Event) -> bool { ++ for f in &self.filters { ++ if f.interested_in_event(event) { ++ return true; ++ } ++ } ++ false ++ } ++} ++ ++fn prefix_match(prefixes: &[String], target: &str) -> bool { ++ for prefix in prefixes { ++ if target.starts_with(prefix) { ++ return true; ++ } ++ } ++ // none matched ++ false ++} ++ ++impl ReqFilter { ++ fn ids_match(&self, event: &Event) -> bool { ++ self.ids ++ .as_ref() ++ .map_or(true, |vs| prefix_match(vs, &event.id)) ++ } ++ ++ fn authors_match(&self, event: &Event) -> bool { ++ self.authors ++ .as_ref() ++ .map_or(true, |vs| prefix_match(vs, &event.pubkey)) ++ } ++ ++ fn delegated_authors_match(&self, event: &Event) -> bool { ++ if let Some(delegated_pubkey) = &event.delegated_by { ++ self.authors ++ .as_ref() ++ .map_or(true, |vs| prefix_match(vs, delegated_pubkey)) ++ } else { ++ false ++ } ++ } ++ ++ fn tag_match(&self, event: &Event) -> bool { ++ // get the hashset from the filter. ++ if let Some(map) = &self.tags { ++ for (key, val) in map.iter() { ++ let tag_match = event.generic_tag_val_intersect(*key, val); ++ // if there is no match for this tag, the match fails. ++ if !tag_match { ++ return false; ++ } ++ // if there was a match, we move on to the next one. ++ } ++ } ++ // if the tag map is empty, the match succeeds (there was no filter) ++ true ++ } ++ ++ /// Check if this filter either matches, or does not care about the kind. ++ fn kind_match(&self, kind: u64) -> bool { ++ self.kinds.as_ref().map_or(true, |ks| ks.contains(&kind)) ++ } ++ ++ /// Determine if all populated fields in this filter match the provided event. 
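++    /// (added note) conditions within a single filter are ANDed together
++    /// here, while the separate filters of one subscription are ORed by
++    /// `Subscription::interested_in_event` above.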
++ #[must_use] ++ pub fn interested_in_event(&self, event: &Event) -> bool { ++ // self.id.as_ref().map(|v| v == &event.id).unwrap_or(true) ++ self.ids_match(event) ++ && self.since.map_or(true, |t| event.created_at >= t) ++ && self.until.map_or(true, |t| event.created_at <= t) ++ && self.kind_match(event.kind) ++ && (self.authors_match(event) || self.delegated_authors_match(event)) ++ && self.tag_match(event) ++ && !self.force_no_match ++ } ++} ++ ++#[cfg(test)] ++mod tests { ++ use super::*; ++ ++ #[test] ++ fn empty_request_parse() -> Result<()> { ++ let raw_json = "[\"REQ\",\"some-id\",{}]"; ++ let s: Subscription = serde_json::from_str(raw_json)?; ++ assert_eq!(s.id, "some-id"); ++ assert_eq!(s.filters.len(), 1); ++ assert_eq!(s.filters.get(0).unwrap().authors, None); ++ Ok(()) ++ } ++ ++ #[test] ++ fn incorrect_header() { ++ let raw_json = "[\"REQUEST\",\"some-id\",\"{}\"]"; ++ assert!(serde_json::from_str::(raw_json).is_err()); ++ } ++ ++ #[test] ++ fn req_missing_filters() { ++ let raw_json = "[\"REQ\",\"some-id\"]"; ++ assert!(serde_json::from_str::(raw_json).is_err()); ++ } ++ ++ #[test] ++ fn req_empty_authors_prefix() { ++ let raw_json = "[\"REQ\",\"some-id\",{\"authors\": [\"\"]}]"; ++ assert!(serde_json::from_str::(raw_json).is_err()); ++ } ++ ++ #[test] ++ fn req_empty_ids_prefix() { ++ let raw_json = "[\"REQ\",\"some-id\",{\"ids\": [\"\"]}]"; ++ assert!(serde_json::from_str::(raw_json).is_err()); ++ } ++ ++ #[test] ++ fn req_empty_ids_prefix_mixed() { ++ let raw_json = "[\"REQ\",\"some-id\",{\"ids\": [\"\",\"aaa\"]}]"; ++ assert!(serde_json::from_str::(raw_json).is_err()); ++ } ++ ++ #[test] ++ fn legacy_filter() { ++ // legacy field in filter ++ let raw_json = "[\"REQ\",\"some-id\",{\"kind\": 3}]"; ++ assert!(serde_json::from_str::(raw_json).is_ok()); ++ } ++ ++ #[test] ++ fn dupe_filter() -> Result<()> { ++ let raw_json = r#"["REQ","some-id",{"kinds": [1984]}, {"kinds": [1984]}]"#; ++ let s: Subscription = serde_json::from_str(raw_json)?; ++ assert_eq!(s.filters.len(), 1); ++ Ok(()) ++ } ++ ++ #[test] ++ fn dupe_filter_many() -> Result<()> { ++ // duplicate filters in different order ++ let raw_json = r#"["REQ","some-id",{"kinds":[1984]},{"kinds":[1984]},{"kinds":[1984]},{"kinds":[1984]}]"#; ++ let s: Subscription = serde_json::from_str(raw_json)?; ++ assert_eq!(s.filters.len(), 1); ++ Ok(()) ++ } ++ ++ #[test] ++ fn author_filter() -> Result<()> { ++ let raw_json = r#"["REQ","some-id",{"authors": ["test-author-id"]}]"#; ++ let s: Subscription = serde_json::from_str(raw_json)?; ++ assert_eq!(s.id, "some-id"); ++ assert_eq!(s.filters.len(), 1); ++ let first_filter = s.filters.get(0).unwrap(); ++ assert_eq!( ++ first_filter.authors, ++ Some(vec!("test-author-id".to_owned())) ++ ); ++ Ok(()) ++ } ++ ++ #[test] ++ fn interest_author_prefix_match() -> Result<()> { ++ // subscription with a filter for ID ++ let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"authors": ["abc"]}]"#)?; ++ let e = Event { ++ id: "foo".to_owned(), ++ pubkey: "abcd".to_owned(), ++ delegated_by: None, ++ created_at: 0, ++ kind: 0, ++ tags: Vec::new(), ++ content: "".to_owned(), ++ sig: "".to_owned(), ++ tagidx: None, ++ }; ++ assert!(s.interested_in_event(&e)); ++ Ok(()) ++ } ++ ++ #[test] ++ fn interest_id_prefix_match() -> Result<()> { ++ // subscription with a filter for ID ++ let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"]}]"#)?; ++ let e = Event { ++ id: "abcd".to_owned(), ++ pubkey: "".to_owned(), ++ delegated_by: None, ++ created_at: 0, ++ kind: 0, ++ tags: 
Vec::new(), ++ content: "".to_owned(), ++ sig: "".to_owned(), ++ tagidx: None, ++ }; ++ assert!(s.interested_in_event(&e)); ++ Ok(()) ++ } ++ ++ #[test] ++ fn interest_id_nomatch() -> Result<()> { ++ // subscription with a filter for ID ++ let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"ids": ["xyz"]}]"#)?; ++ let e = Event { ++ id: "abcde".to_owned(), ++ pubkey: "".to_owned(), ++ delegated_by: None, ++ created_at: 0, ++ kind: 0, ++ tags: Vec::new(), ++ content: "".to_owned(), ++ sig: "".to_owned(), ++ tagidx: None, ++ }; ++ assert!(!s.interested_in_event(&e)); ++ Ok(()) ++ } ++ ++ #[test] ++ fn interest_until() -> Result<()> { ++ // subscription with a filter for ID and time ++ let s: Subscription = ++ serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "until": 1000}]"#)?; ++ let e = Event { ++ id: "abc".to_owned(), ++ pubkey: "".to_owned(), ++ delegated_by: None, ++ created_at: 50, ++ kind: 0, ++ tags: Vec::new(), ++ content: "".to_owned(), ++ sig: "".to_owned(), ++ tagidx: None, ++ }; ++ assert!(s.interested_in_event(&e)); ++ Ok(()) ++ } ++ ++ #[test] ++ fn interest_range() -> Result<()> { ++ // subscription with a filter for ID and time ++ let s_in: Subscription = ++ serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "since": 100, "until": 200}]"#)?; ++ let s_before: Subscription = ++ serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "since": 100, "until": 140}]"#)?; ++ let s_after: Subscription = ++ serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "since": 160, "until": 200}]"#)?; ++ let e = Event { ++ id: "abc".to_owned(), ++ pubkey: "".to_owned(), ++ delegated_by: None, ++ created_at: 150, ++ kind: 0, ++ tags: Vec::new(), ++ content: "".to_owned(), ++ sig: "".to_owned(), ++ tagidx: None, ++ }; ++ assert!(s_in.interested_in_event(&e)); ++ assert!(!s_before.interested_in_event(&e)); ++ assert!(!s_after.interested_in_event(&e)); ++ Ok(()) ++ } ++ ++ #[test] ++ fn interest_time_and_id() -> Result<()> { ++ // subscription with a filter for ID and time ++ let s: Subscription = ++ serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "since": 1000}]"#)?; ++ let e = Event { ++ id: "abc".to_owned(), ++ pubkey: "".to_owned(), ++ delegated_by: None, ++ created_at: 50, ++ kind: 0, ++ tags: Vec::new(), ++ content: "".to_owned(), ++ sig: "".to_owned(), ++ tagidx: None, ++ }; ++ assert!(!s.interested_in_event(&e)); ++ Ok(()) ++ } ++ ++ #[test] ++ fn interest_time_and_id2() -> Result<()> { ++ // subscription with a filter for ID and time ++ let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"id":"abc", "since": 1000}]"#)?; ++ let e = Event { ++ id: "abc".to_owned(), ++ pubkey: "".to_owned(), ++ delegated_by: None, ++ created_at: 1001, ++ kind: 0, ++ tags: Vec::new(), ++ content: "".to_owned(), ++ sig: "".to_owned(), ++ tagidx: None, ++ }; ++ assert!(s.interested_in_event(&e)); ++ Ok(()) ++ } ++ ++ #[test] ++ fn interest_id() -> Result<()> { ++ // subscription with a filter for ID ++ let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"id":"abc"}]"#)?; ++ let e = Event { ++ id: "abc".to_owned(), ++ pubkey: "".to_owned(), ++ delegated_by: None, ++ created_at: 0, ++ kind: 0, ++ tags: Vec::new(), ++ content: "".to_owned(), ++ sig: "".to_owned(), ++ tagidx: None, ++ }; ++ assert!(s.interested_in_event(&e)); ++ Ok(()) ++ } ++ ++ #[test] ++ fn authors_single() -> Result<()> { ++ // subscription with a filter for ID ++ let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"authors":["abc"]}]"#)?; ++ let e = Event { ++ id: "123".to_owned(), ++ pubkey: 
"abc".to_owned(), ++ delegated_by: None, ++ created_at: 0, ++ kind: 0, ++ tags: Vec::new(), ++ content: "".to_owned(), ++ sig: "".to_owned(), ++ tagidx: None, ++ }; ++ assert!(s.interested_in_event(&e)); ++ Ok(()) ++ } ++ ++ #[test] ++ fn authors_multi_pubkey() -> Result<()> { ++ // check for any of a set of authors, against the pubkey ++ let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"authors":["abc", "bcd"]}]"#)?; ++ let e = Event { ++ id: "123".to_owned(), ++ pubkey: "bcd".to_owned(), ++ delegated_by: None, ++ created_at: 0, ++ kind: 0, ++ tags: Vec::new(), ++ content: "".to_owned(), ++ sig: "".to_owned(), ++ tagidx: None, ++ }; ++ assert!(s.interested_in_event(&e)); ++ Ok(()) ++ } ++ ++ #[test] ++ fn authors_multi_no_match() -> Result<()> { ++ // check for any of a set of authors, against the pubkey ++ let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"authors":["abc", "bcd"]}]"#)?; ++ let e = Event { ++ id: "123".to_owned(), ++ pubkey: "xyz".to_owned(), ++ delegated_by: None, ++ created_at: 0, ++ kind: 0, ++ tags: Vec::new(), ++ content: "".to_owned(), ++ sig: "".to_owned(), ++ tagidx: None, ++ }; ++ assert!(!s.interested_in_event(&e)); ++ Ok(()) ++ } ++ ++ #[test] ++ fn serialize_filter() -> Result<()> { ++ let s: Subscription = serde_json::from_str( ++ r##"["REQ","xyz",{"authors":["abc", "bcd"], "since": 10, "until": 20, "limit":100, "#e": ["foo", "bar"], "#d": ["test"]}]"##, ++ )?; ++ let f = s.filters.get(0); ++ let serialized = serde_json::to_string(&f)?; ++ let serialized_wrapped = format!(r##"["REQ", "xyz",{}]"##, serialized); ++ let parsed: Subscription = serde_json::from_str(&serialized_wrapped)?; ++ let parsed_filter = parsed.filters.get(0); ++ if let Some(pf) = parsed_filter { ++ assert_eq!(pf.since, Some(10)); ++ assert_eq!(pf.until, Some(20)); ++ assert_eq!(pf.limit, Some(100)); ++ } else { ++ assert!(false, "filter could not be parsed"); ++ } ++ Ok(()) ++ } ++} +diff --git a/src/utils.rs b/src/utils.rs +new file mode 100644 +index 0000000..eae6846 +--- /dev/null ++++ b/src/utils.rs +@@ -0,0 +1,72 @@ ++//! Common utility functions ++use bech32::FromBase32; ++use std::time::SystemTime; ++use url::Url; ++ ++/// Seconds since 1970. ++#[must_use] ++pub fn unix_time() -> u64 { ++ SystemTime::now() ++ .duration_since(SystemTime::UNIX_EPOCH) ++ .map(|x| x.as_secs()) ++ .unwrap_or(0) ++} ++ ++/// Check if a string contains only hex characters. ++#[must_use] ++pub fn is_hex(s: &str) -> bool { ++ s.chars().all(|x| char::is_ascii_hexdigit(&x)) ++} ++ ++/// Check if string is a nip19 string ++pub fn is_nip19(s: &str) -> bool { ++ s.starts_with("npub") || s.starts_with("note") ++} ++ ++pub fn nip19_to_hex(s: &str) -> Result { ++ let (_hrp, data, _checksum) = bech32::decode(s)?; ++ let data = Vec::::from_base32(&data)?; ++ Ok(hex::encode(data)) ++} ++ ++/// Check if a string contains only lower-case hex chars. 
++#[must_use] ++pub fn is_lower_hex(s: &str) -> bool { ++ s.chars().all(|x| { ++ (char::is_ascii_lowercase(&x) || char::is_ascii_digit(&x)) && char::is_ascii_hexdigit(&x) ++ }) ++} ++ ++pub fn host_str(url: &str) -> Option { ++ Url::parse(url) ++ .ok() ++ .and_then(|u| u.host_str().map(|s| s.to_string())) ++} ++ ++#[cfg(test)] ++mod tests { ++ use super::*; ++ ++ #[test] ++ fn lower_hex() { ++ let hexstr = "abcd0123"; ++ assert!(is_lower_hex(hexstr)); ++ } ++ ++ #[test] ++ fn nip19() { ++ let hexkey = "3bf0c63fcb93463407af97a5e5ee64fa883d107ef9e558472c4eb9aaaefa459d"; ++ let nip19key = "npub180cvv07tjdrrgpa0j7j7tmnyl2yr6yr7l8j4s3evf6u64th6gkwsyjh6w6"; ++ assert!(!is_nip19(hexkey)); ++ assert!(is_nip19(nip19key)); ++ } ++ ++ #[test] ++ fn nip19_hex() { ++ let nip19key = "npub180cvv07tjdrrgpa0j7j7tmnyl2yr6yr7l8j4s3evf6u64th6gkwsyjh6w6"; ++ let expected = "3bf0c63fcb93463407af97a5e5ee64fa883d107ef9e558472c4eb9aaaefa459d"; ++ let got = nip19_to_hex(nip19key).unwrap(); ++ ++ assert_eq!(expected, got); ++ } ++} +diff --git a/tests/cli.rs b/tests/cli.rs +new file mode 100644 +index 0000000..694382a +--- /dev/null ++++ b/tests/cli.rs +@@ -0,0 +1,10 @@ ++#[cfg(test)] ++mod tests { ++ use gnostr_relay::cli::CLIArgs; ++ ++ #[test] ++ fn cli_tests() { ++ use clap::CommandFactory; ++ CLIArgs::command().debug_assert(); ++ } ++} +diff --git a/tests/common/mod.rs b/tests/common/mod.rs +new file mode 100644 +index 0000000..b04312f +--- /dev/null ++++ b/tests/common/mod.rs +@@ -0,0 +1,110 @@ ++use anyhow::{anyhow, Result}; ++use gnostr_relay::config; ++use gnostr_relay::server::start_server; ++//use http::{Request, Response}; ++use hyper::{Client, StatusCode, Uri}; ++use std::net::TcpListener; ++use std::sync::atomic::{AtomicU16, Ordering}; ++use std::sync::mpsc as syncmpsc; ++use std::sync::mpsc::{Receiver as MpscReceiver, Sender as MpscSender}; ++use std::thread; ++use std::thread::JoinHandle; ++use std::time::Duration; ++use tracing::{debug, info}; ++ ++pub struct Relay { ++ pub port: u16, ++ pub handle: JoinHandle<()>, ++ pub shutdown_tx: MpscSender<()>, ++} ++ ++pub fn start_relay() -> Result { ++ // setup tracing ++ let _trace_sub = tracing_subscriber::fmt::try_init(); ++ info!("Starting a new relay"); ++ // replace default settings ++ let mut settings = config::Settings::default(); ++ // identify open port ++ info!("Checking for address..."); ++ let port = get_available_port().unwrap(); ++ info!("Found open port: {}", port); ++ // bind to local interface only ++ settings.network.address = "127.0.0.1".to_owned(); ++ settings.network.port = port; ++ // create an in-memory DB with multiple readers ++ settings.database.in_memory = true; ++ settings.database.min_conn = 4; ++ settings.database.max_conn = 8; ++ let (shutdown_tx, shutdown_rx): (MpscSender<()>, MpscReceiver<()>) = syncmpsc::channel(); ++ let handle = thread::spawn(move || { ++ // server will block the thread it is run on. ++ let _ = start_server(&settings, shutdown_rx); ++ }); ++ // how do we know the relay has finished starting up? ++ Ok(Relay { ++ port, ++ handle, ++ shutdown_tx, ++ }) ++} ++ ++// check if the server is healthy via HTTP request ++async fn server_ready(relay: &Relay) -> Result { ++ let uri: String = format!("http://127.0.0.1:{}/", relay.port); ++ let client = Client::new(); ++ let uri: Uri = uri.parse().unwrap(); ++ let res = client.get(uri).await?; ++ Ok(res.status() == StatusCode::OK) ++} ++ ++pub async fn wait_for_healthy_relay(relay: &Relay) -> Result<()> { ++ // TODO: maximum time to wait for server to become healthy. 
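++    // (added sketch, assumption) a bounded wait could wrap the loop
++    // below, e.g. `for _ in 0..500 { ... }` to give up after roughly
++    // five seconds of 10ms retries instead of polling forever.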
++ // give it a little time to start up before we start polling ++ tokio::time::sleep(Duration::from_millis(10)).await; ++ loop { ++ let server_check = server_ready(relay).await; ++ match server_check { ++ Ok(true) => { ++ // server responded with 200-OK. ++ break; ++ } ++ Ok(false) => { ++ // server responded with an error, we're done. ++ return Err(anyhow!("Got non-200-OK from relay")); ++ } ++ Err(_) => { ++ // server is not yet ready, probably connection refused... ++ debug!("Relay not ready, will try again..."); ++ tokio::time::sleep(Duration::from_millis(10)).await; ++ } ++ } ++ } ++ info!("relay is ready"); ++ Ok(()) ++ // simple message sent to web browsers ++ //let mut request = Request::builder() ++ // .uri("https://www.rust-lang.org/") ++ // .header("User-Agent", "my-awesome-agent/1.0"); ++} ++ ++// from https://elliotekj.com/posts/2017/07/25/find-available-tcp-port-rust/ ++// This needed some modification; if multiple tasks all ask for open ports, they will tend to get the same one. ++// instead we should try to try these incrementally/globally. ++ ++static PORT_COUNTER: AtomicU16 = AtomicU16::new(4030); ++ ++fn get_available_port() -> Option { ++ let startsearch = PORT_COUNTER.fetch_add(10, Ordering::SeqCst); ++ if startsearch >= 20000 { ++ // wrap around ++ PORT_COUNTER.store(4030, Ordering::Relaxed); ++ } ++ (startsearch..20000).find(|port| port_is_available(*port)) ++} ++pub fn port_is_available(port: u16) -> bool { ++ info!("checking on port {}", port); ++ match TcpListener::bind(("127.0.0.1", port)) { ++ Ok(_) => true, ++ Err(_) => false, ++ } ++} +diff --git a/tests/conn.rs b/tests/conn.rs +new file mode 100644 +index 0000000..193e4b2 +--- /dev/null ++++ b/tests/conn.rs +@@ -0,0 +1,356 @@ ++#[cfg(test)] ++mod tests { ++ use bitcoin_hashes::hex::ToHex; ++ use bitcoin_hashes::sha256; ++ use bitcoin_hashes::Hash; ++ use secp256k1::rand; ++ use secp256k1::{KeyPair, Secp256k1, XOnlyPublicKey}; ++ ++ use gnostr_relay::conn::ClientConn; ++ use gnostr_relay::error::Error; ++ use gnostr_relay::event::Event; ++ use gnostr_relay::utils::unix_time; ++ ++ const RELAY: &str = "wss://nostr.example.com/"; ++ ++ #[test] ++ fn test_generate_auth_challenge() { ++ let mut client_conn = ClientConn::new("127.0.0.1".into()); ++ ++ assert_eq!(client_conn.auth_challenge(), None); ++ assert_eq!(client_conn.auth_pubkey(), None); ++ ++ client_conn.generate_auth_challenge(); ++ ++ assert_ne!(client_conn.auth_challenge(), None); ++ assert_eq!(client_conn.auth_pubkey(), None); ++ ++ let last_auth_challenge = client_conn.auth_challenge().cloned(); ++ ++ client_conn.generate_auth_challenge(); ++ ++ assert_ne!(client_conn.auth_challenge(), None); ++ assert_ne!( ++ client_conn.auth_challenge().unwrap(), ++ &last_auth_challenge.unwrap() ++ ); ++ assert_eq!(client_conn.auth_pubkey(), None); ++ } ++ ++ #[test] ++ fn test_authenticate_with_valid_event() { ++ let mut client_conn = ClientConn::new("127.0.0.1".into()); ++ ++ assert_eq!(client_conn.auth_challenge(), None); ++ assert_eq!(client_conn.auth_pubkey(), None); ++ ++ client_conn.generate_auth_challenge(); ++ ++ assert_ne!(client_conn.auth_challenge(), None); ++ assert_eq!(client_conn.auth_pubkey(), None); ++ ++ let challenge = client_conn.auth_challenge().unwrap(); ++ let event = auth_event(challenge); ++ ++ let result = client_conn.authenticate(&event, RELAY.into()); ++ ++ assert!(matches!(result, Ok(()))); ++ assert_eq!(client_conn.auth_challenge(), None); ++ assert_eq!(client_conn.auth_pubkey(), Some(&event.pubkey)); ++ } ++ ++ #[test] ++ fn 
test_fail_to_authenticate_in_invalid_state() { ++ let mut client_conn = ClientConn::new("127.0.0.1".into()); ++ ++ assert_eq!(client_conn.auth_challenge(), None); ++ assert_eq!(client_conn.auth_pubkey(), None); ++ ++ let event = auth_event(&"challenge".into()); ++ let result = client_conn.authenticate(&event, RELAY.into()); ++ ++ assert!(matches!(result, Err(Error::AuthFailure))); ++ } ++ ++ #[test] ++ fn test_authenticate_when_already_authenticated() { ++ let mut client_conn = ClientConn::new("127.0.0.1".into()); ++ ++ assert_eq!(client_conn.auth_challenge(), None); ++ assert_eq!(client_conn.auth_pubkey(), None); ++ ++ client_conn.generate_auth_challenge(); ++ ++ assert_ne!(client_conn.auth_challenge(), None); ++ assert_eq!(client_conn.auth_pubkey(), None); ++ ++ let challenge = client_conn.auth_challenge().unwrap().clone(); ++ ++ let event = auth_event(&challenge); ++ let result = client_conn.authenticate(&event, RELAY.into()); ++ ++ assert!(matches!(result, Ok(()))); ++ assert_eq!(client_conn.auth_challenge(), None); ++ assert_eq!(client_conn.auth_pubkey(), Some(&event.pubkey)); ++ ++ let event1 = auth_event(&challenge); ++ let result1 = client_conn.authenticate(&event1, RELAY.into()); ++ ++ assert!(matches!(result1, Ok(()))); ++ assert_eq!(client_conn.auth_challenge(), None); ++ assert_eq!(client_conn.auth_pubkey(), Some(&event.pubkey)); ++ assert_ne!(client_conn.auth_pubkey(), Some(&event1.pubkey)); ++ } ++ ++ #[test] ++ fn test_fail_to_authenticate_with_invalid_event() { ++ let mut client_conn = ClientConn::new("127.0.0.1".into()); ++ ++ assert_eq!(client_conn.auth_challenge(), None); ++ assert_eq!(client_conn.auth_pubkey(), None); ++ ++ client_conn.generate_auth_challenge(); ++ ++ assert_ne!(client_conn.auth_challenge(), None); ++ assert_eq!(client_conn.auth_pubkey(), None); ++ ++ let challenge = client_conn.auth_challenge().unwrap(); ++ let mut event = auth_event(challenge); ++ event.sig = event.sig.chars().rev().collect::(); ++ ++ let result = client_conn.authenticate(&event, RELAY.into()); ++ ++ assert!(matches!(result, Err(Error::AuthFailure))); ++ } ++ ++ #[test] ++ fn test_fail_to_authenticate_with_invalid_event_kind() { ++ let mut client_conn = ClientConn::new("127.0.0.1".into()); ++ ++ assert_eq!(client_conn.auth_challenge(), None); ++ assert_eq!(client_conn.auth_pubkey(), None); ++ ++ client_conn.generate_auth_challenge(); ++ ++ assert_ne!(client_conn.auth_challenge(), None); ++ assert_eq!(client_conn.auth_pubkey(), None); ++ ++ let challenge = client_conn.auth_challenge().unwrap(); ++ let event = auth_event_with_kind(challenge, 9999999999999999); ++ ++ let result = client_conn.authenticate(&event, RELAY.into()); ++ ++ assert!(matches!(result, Err(Error::AuthFailure))); ++ } ++ ++ #[test] ++ fn test_fail_to_authenticate_with_expired_timestamp() { ++ let mut client_conn = ClientConn::new("127.0.0.1".into()); ++ ++ assert_eq!(client_conn.auth_challenge(), None); ++ assert_eq!(client_conn.auth_pubkey(), None); ++ ++ client_conn.generate_auth_challenge(); ++ ++ assert_ne!(client_conn.auth_challenge(), None); ++ assert_eq!(client_conn.auth_pubkey(), None); ++ ++ let challenge = client_conn.auth_challenge().unwrap(); ++ let event = auth_event_with_created_at(challenge, unix_time() - 1200); // 20 minutes ++ ++ let result = client_conn.authenticate(&event, RELAY.into()); ++ ++ assert!(matches!(result, Err(Error::AuthFailure))); ++ } ++ ++ #[test] ++ fn test_fail_to_authenticate_with_future_timestamp() { ++ let mut client_conn = ClientConn::new("127.0.0.1".into()); ++ ++ 
assert_eq!(client_conn.auth_challenge(), None); ++ assert_eq!(client_conn.auth_pubkey(), None); ++ ++ client_conn.generate_auth_challenge(); ++ ++ assert_ne!(client_conn.auth_challenge(), None); ++ assert_eq!(client_conn.auth_pubkey(), None); ++ ++ let challenge = client_conn.auth_challenge().unwrap(); ++ let event = auth_event_with_created_at(challenge, unix_time() + 1200); // 20 minutes ++ ++ let result = client_conn.authenticate(&event, RELAY.into()); ++ ++ assert!(matches!(result, Err(Error::AuthFailure))); ++ } ++ ++ #[test] ++ fn test_fail_to_authenticate_without_tags() { ++ let mut client_conn = ClientConn::new("127.0.0.1".into()); ++ ++ assert_eq!(client_conn.auth_challenge(), None); ++ assert_eq!(client_conn.auth_pubkey(), None); ++ ++ client_conn.generate_auth_challenge(); ++ ++ assert_ne!(client_conn.auth_challenge(), None); ++ assert_eq!(client_conn.auth_pubkey(), None); ++ ++ let event = auth_event_without_tags(); ++ ++ let result = client_conn.authenticate(&event, RELAY.into()); ++ ++ assert!(matches!(result, Err(Error::AuthFailure))); ++ } ++ ++ #[test] ++ fn test_fail_to_authenticate_without_challenge() { ++ let mut client_conn = ClientConn::new("127.0.0.1".into()); ++ ++ assert_eq!(client_conn.auth_challenge(), None); ++ assert_eq!(client_conn.auth_pubkey(), None); ++ ++ client_conn.generate_auth_challenge(); ++ ++ assert_ne!(client_conn.auth_challenge(), None); ++ assert_eq!(client_conn.auth_pubkey(), None); ++ ++ let event = auth_event_without_challenge(); ++ ++ let result = client_conn.authenticate(&event, RELAY.into()); ++ ++ assert!(matches!(result, Err(Error::AuthFailure))); ++ } ++ ++ #[test] ++ fn test_fail_to_authenticate_without_relay() { ++ let mut client_conn = ClientConn::new("127.0.0.1".into()); ++ ++ assert_eq!(client_conn.auth_challenge(), None); ++ assert_eq!(client_conn.auth_pubkey(), None); ++ ++ client_conn.generate_auth_challenge(); ++ ++ assert_ne!(client_conn.auth_challenge(), None); ++ assert_eq!(client_conn.auth_pubkey(), None); ++ ++ let challenge = client_conn.auth_challenge().unwrap(); ++ let event = auth_event_without_relay(challenge); ++ ++ let result = client_conn.authenticate(&event, RELAY.into()); ++ ++ assert!(matches!(result, Err(Error::AuthFailure))); ++ } ++ ++ #[test] ++ fn test_fail_to_authenticate_with_invalid_challenge() { ++ let mut client_conn = ClientConn::new("127.0.0.1".into()); ++ ++ assert_eq!(client_conn.auth_challenge(), None); ++ assert_eq!(client_conn.auth_pubkey(), None); ++ ++ client_conn.generate_auth_challenge(); ++ ++ assert_ne!(client_conn.auth_challenge(), None); ++ assert_eq!(client_conn.auth_pubkey(), None); ++ ++ let event = auth_event(&"invalid challenge".into()); ++ ++ let result = client_conn.authenticate(&event, RELAY.into()); ++ ++ assert!(matches!(result, Err(Error::AuthFailure))); ++ } ++ ++ #[test] ++ fn test_fail_to_authenticate_with_invalid_relay() { ++ let mut client_conn = ClientConn::new("127.0.0.1".into()); ++ ++ assert_eq!(client_conn.auth_challenge(), None); ++ assert_eq!(client_conn.auth_pubkey(), None); ++ ++ client_conn.generate_auth_challenge(); ++ ++ assert_ne!(client_conn.auth_challenge(), None); ++ assert_eq!(client_conn.auth_pubkey(), None); ++ ++ let challenge = client_conn.auth_challenge().unwrap(); ++ let event = auth_event_with_relay(challenge, &"xyz".into()); ++ ++ let result = client_conn.authenticate(&event, RELAY.into()); ++ ++ assert!(matches!(result, Err(Error::AuthFailure))); ++ } ++ ++ fn auth_event(challenge: &String) -> Event { ++ create_auth_event(Some(challenge), 
Some(&RELAY.into()), 22242, unix_time()) ++ } ++ ++ fn auth_event_with_kind(challenge: &String, kind: u64) -> Event { ++ create_auth_event(Some(challenge), Some(&RELAY.into()), kind, unix_time()) ++ } ++ ++ fn auth_event_with_created_at(challenge: &String, created_at: u64) -> Event { ++ create_auth_event(Some(challenge), Some(&RELAY.into()), 22242, created_at) ++ } ++ ++ fn auth_event_without_challenge() -> Event { ++ create_auth_event(None, Some(&RELAY.into()), 22242, unix_time()) ++ } ++ ++ fn auth_event_without_relay(challenge: &String) -> Event { ++ create_auth_event(Some(challenge), None, 22242, unix_time()) ++ } ++ ++ fn auth_event_without_tags() -> Event { ++ create_auth_event(None, None, 22242, unix_time()) ++ } ++ ++ fn auth_event_with_relay(challenge: &String, relay: &String) -> Event { ++ create_auth_event(Some(challenge), Some(relay), 22242, unix_time()) ++ } ++ ++ fn create_auth_event( ++ challenge: Option<&String>, ++ relay: Option<&String>, ++ kind: u64, ++ created_at: u64, ++ ) -> Event { ++ let secp = Secp256k1::new(); ++ let key_pair = KeyPair::new(&secp, &mut rand::thread_rng()); ++ let public_key = XOnlyPublicKey::from_keypair(&key_pair); ++ ++ let mut tags: Vec> = vec![]; ++ ++ if let Some(c) = challenge { ++ let tag = vec!["challenge".into(), c.into()]; ++ tags.push(tag); ++ } ++ ++ if let Some(r) = relay { ++ let tag = vec!["relay".into(), r.into()]; ++ tags.push(tag); ++ } ++ ++ let mut event = Event { ++ id: "0".to_owned(), ++ pubkey: public_key.to_hex(), ++ delegated_by: None, ++ created_at, ++ kind, ++ tags, ++ content: "".to_owned(), ++ sig: "0".to_owned(), ++ tagidx: None, ++ }; ++ ++ let c = event.to_canonical().unwrap(); ++ let digest: sha256::Hash = sha256::Hash::hash(c.as_bytes()); ++ ++ let msg = secp256k1::Message::from_slice(digest.as_ref()).unwrap(); ++ let sig = secp.sign_schnorr(&msg, &key_pair); ++ ++ event.id = format!("{digest:x}"); ++ event.sig = sig.to_hex(); ++ ++ event ++ } ++} +diff --git a/tests/integration_test.rs b/tests/integration_test.rs +new file mode 100644 +index 0000000..f65f4e8 +--- /dev/null ++++ b/tests/integration_test.rs +@@ -0,0 +1,79 @@ ++use anyhow::Result; ++use futures::SinkExt; ++use futures::StreamExt; ++use std::thread; ++use std::time::Duration; ++use tokio_tungstenite::connect_async; ++use tracing::info; ++mod common; ++ ++#[tokio::test] ++async fn start_and_stop() -> Result<()> { ++ // this will be the common pattern for acquiring a new relay: ++ // start a fresh relay, on a port to-be-provided back to us: ++ let relay = common::start_relay()?; ++ // wait for the relay's webserver to start up and deliver a page: ++ common::wait_for_healthy_relay(&relay).await?; ++ let port = relay.port; ++ // just make sure we can startup and shut down. ++ // if we send a shutdown message before the server is listening, ++ // we will get a SendError. Keep sending until someone is ++ // listening. ++ loop { ++ let shutdown_res = relay.shutdown_tx.send(()); ++ match shutdown_res { ++ Ok(()) => { ++ break; ++ } ++ Err(_) => { ++ thread::sleep(Duration::from_millis(100)); ++ } ++ } ++ } ++ // wait for relay to shutdown ++ let thread_join = relay.handle.join(); ++ assert!(thread_join.is_ok()); ++ // assert that port is now available. ++ assert!(common::port_is_available(port)); ++ Ok(()) ++} ++ ++#[tokio::test] ++async fn relay_home_page() -> Result<()> { ++ // get a relay and wait for startup... 
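++ // (added note) each test starts its own relay on a fresh port via the
++ // atomic PORT_COUNTER in tests/common, so these integration tests can
++ // run concurrently without address collisions.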
++    let relay = common::start_relay()?;
++    common::wait_for_healthy_relay(&relay).await?;
++    // tell relay to shutdown
++    let _res = relay.shutdown_tx.send(());
++    Ok(())
++}
++
++//#[tokio::test]
++// Still in progress; not yet enabled as a test.
++async fn publish_test() -> Result<()> {
++    // get a relay and wait for startup
++    let relay = common::start_relay()?;
++    common::wait_for_healthy_relay(&relay).await?;
++    // open a non-secure websocket connection.
++    let (mut ws, _res) = connect_async(format!("ws://localhost:{}", relay.port)).await?;
++    // send a simple pre-made message
++    let simple_event = r#"["EVENT", {"content": "hello world","created_at": 1691239763,
++        "id":"f3ce6798d70e358213ebbeba4886bbdfacf1ecfd4f65ee5323ef5f404de32b86",
++        "kind": 1,
++        "pubkey": "79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798",
++        "sig": "30ca29e8581eeee75bf838171dec818af5e6de2b74f5337de940f5cc91186534c0b20d6cf7ad1043a2c51dbd60b979447720a471d346322103c83f6cb66e4e98",
++        "tags": []}]"#;
++    ws.send(simple_event.into()).await?;
++    // get response from server, confirm it is an array with first element "OK"
++    let event_confirm = ws.next().await;
++    ws.close(None).await?;
++    info!("event confirmed: {:?}", event_confirm);
++    // open a new connection, and wait for some time to get the event.
++    let (mut sub_ws, _res) = connect_async(format!("ws://localhost:{}", relay.port)).await?;
++    let event_sub = r#"["REQ", "simple", {}]"#;
++    sub_ws.send(event_sub.into()).await?;
++    // read from subscription
++    let _ws_next = sub_ws.next().await;
++    let _res = relay.shutdown_tx.send(());
++    Ok(())
++}
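++
++// (Added illustrative sketch, not part of the original patch.) A minimal
++// helper showing the client frame shapes exercised above. The name
++// `example_frames` is hypothetical; the formats follow the REQ/CLOSE
++// handling in src/server.rs and src/subscription.rs.
++#[allow(dead_code)]
++fn example_frames(sub_id: &str) -> (String, String) {
++    // REQ frame: ["REQ", <subscription id>, <filter>...]; an empty
++    // filter object {} matches every event.
++    let req = format!(r#"["REQ", "{sub_id}", {{}}]"#);
++    // CLOSE frame: ["CLOSE", <subscription id>] removes the subscription.
++    let close = format!(r#"["CLOSE", "{sub_id}"]"#);
++    (req, close)
++}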