diff --git a/.github/workflows/build-release-artifacts.yml b/.github/workflows/build-release-artifacts.yml
index 05010b5553..b30d4e1803 100644
--- a/.github/workflows/build-release-artifacts.yml
+++ b/.github/workflows/build-release-artifacts.yml
@@ -1,21 +1,27 @@
-name: build release artifacts
+# This workflow builds and packages the release artifacts, without actually running a release.
+#
+# It can sometimes be useful to obtain these binaries built from other branches or from a tag. The
+# workflow can also be used to test changes when the release process is being updated.
+#
+# The built and packaged binaries will be attached to the workflow run as artifacts, available for
+# download.
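+#
+# As an illustration (assuming the GitHub CLI is available), a run could be dispatched with:
+#   gh workflow run build-release-artifacts.yml -f branch=my-feature-branch
+#   gh workflow run build-release-artifacts.yml -f tag=2024.07.1.1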
+name: build and package release artifacts
on:
workflow_dispatch:
inputs:
branch:
- description: The branch to build.
+ description: Set to build a particular branch
type: string
tag:
- description: The tag to build.
+ description: Set to build a particular tag
type: string
-# Copied from `release.yml`
-# During the build step, the env variable has to be manually sent to the containers for cross platform builds.
-# Update the Justfile as well.
+# The key variables also need to be passed to `cross`, which runs in a container and does not
+# inherit variables from the parent environment. The `cross` tool is used in the `build`
+# job. If any keys are added, the `build-release-artifacts` target in the Justfile must
+# also be updated.
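+#
+# As a reference, the `build-release-artifacts` recipe in the Justfile passes them into the
+# container roughly like this:
+#   cross_container_opts="--env \"GENESIS_PK=$GENESIS_PK\" --env \"GENESIS_SK=$GENESIS_SK\" ..."
+#   export CROSS_CONTAINER_OPTS=$cross_container_opts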
env:
- JUST_BIN_URL: https://github.com/casey/just/releases/download/1.25.2/just-1.25.2-x86_64-unknown-linux-musl.tar.gz
- WORKFLOW_URL: https://github.com/maidsafe/safe_network/actions/runs
GENESIS_PK: ${{ secrets.STABLE_GENESIS_PK }}
GENESIS_SK: ${{ secrets.STABLE_GENESIS_SK }}
FOUNDATION_PK: ${{ secrets.STABLE_FOUNDATION_PK }}
@@ -33,6 +39,8 @@ jobs:
target: x86_64-pc-windows-msvc
- os: macos-latest
target: x86_64-apple-darwin
+ - os: macos-latest
+ target: aarch64-apple-darwin
- os: ubuntu-latest
target: x86_64-unknown-linux-musl
- os: ubuntu-latest
@@ -46,8 +54,6 @@ jobs:
with:
ref: ${{ inputs.tag || inputs.branch }}
- uses: dtolnay/rust-toolchain@stable
- # cargo-binstall will try and use pre-built binaries if they are available and also speeds up
- # installing `cross`
- uses: cargo-bins/cargo-binstall@main
- shell: bash
run: cargo binstall --no-confirm just
@@ -61,21 +67,14 @@ jobs:
artifacts
!artifacts/.cargo-lock
- # This job isn't necessary, but it's useful for debugging the packaging process for the real release
- # workflow, just in case any issues are ever encountered there.
package:
- name: publish and release
+ name: package artifacts
runs-on: ubuntu-latest
- needs: [build]
- env:
- AWS_ACCESS_KEY_ID: ${{ secrets.S3_DEPLOY_AWS_ACCESS_KEY_ID }}
- AWS_SECRET_ACCESS_KEY: ${{ secrets.S3_DEPLOY_AWS_SECRET_ACCESS_KEY }}
- AWS_DEFAULT_REGION: eu-west-2
+ needs: [ build ]
steps:
- uses: actions/checkout@v4
with:
- fetch-depth: "0"
- token: ${{ secrets.VERSION_BUMP_COMMIT_PAT }}
+ ref: ${{ inputs.tag || inputs.branch }}
- uses: actions/download-artifact@master
with:
name: safe_network-x86_64-pc-windows-msvc
@@ -84,6 +83,10 @@ jobs:
with:
name: safe_network-x86_64-unknown-linux-musl
path: artifacts/x86_64-unknown-linux-musl/release
+ - uses: actions/download-artifact@master
+ with:
+ name: safe_network-aarch64-apple-darwin
+ path: artifacts/aarch64-apple-darwin/release
- uses: actions/download-artifact@master
with:
name: safe_network-x86_64-apple-darwin
@@ -100,32 +103,24 @@ jobs:
with:
name: safe_network-aarch64-unknown-linux-musl
path: artifacts/aarch64-unknown-linux-musl/release
- # It's possible to `cargo install` just, but it's very slow to compile on GHA infra.
- # Therefore we just pull the binary from the Github Release.
- - name: install just
+ - uses: cargo-bins/cargo-binstall@main
+ - shell: bash
+ run: cargo binstall --no-confirm just
+
+ - name: package binaries
shell: bash
run: |
- curl -L -O $JUST_BIN_URL
- mkdir just
- tar xvf just-1.25.2-x86_64-unknown-linux-musl.tar.gz -C just
- rm just-1.25.2-x86_64-unknown-linux-musl.tar.gz
- sudo mv just/just /usr/local/bin
- rm -rf just
- sudo apt-get install -y tree
- - name: package artifacts
+ just package-all-bins
+ - uses: actions/upload-artifact@main
+ with:
+ name: packaged_binaries
+ path: packaged_bins
+
+ - name: package architectures
shell: bash
run: |
- tree artifacts
- just package-release-assets "faucet"
- just package-release-assets "nat-detection"
- just package-release-assets "node-launchpad"
- just package-release-assets "safe"
- just package-release-assets "safenode"
- just package-release-assets "safenode_rpc_client"
- just package-release-assets "safenode-manager"
- just package-release-assets "safenodemand"
- just package-release-assets "sn_auditor"
+ just package-all-architectures
- uses: actions/upload-artifact@main
with:
- name: packaged_binaries
- path: deploy
+ name: packaged_architectures
+ path: packaged_architectures
diff --git a/.github/workflows/memcheck.yml b/.github/workflows/memcheck.yml
index fb4125e2e4..b9965b64f3 100644
--- a/.github/workflows/memcheck.yml
+++ b/.github/workflows/memcheck.yml
@@ -5,9 +5,9 @@ on:
# on main, we want to know that all commits are passing at a glance, any deviation should help bisecting errors
# the merge run checks should show on master and enable this clear test/passing history
merge_group:
- branches: [main, alpha*, beta*, rc*]
+ branches: [ main, alpha*, beta*, rc* ]
pull_request:
- branches: ["*"]
+ branches: [ "*" ]
env:
SAFE_DATA_PATH: /home/runner/.local/share/safe
@@ -100,6 +100,7 @@ jobs:
- name: Create and fund a wallet to pay for files storage
run: |
echo "Obtaining address for use with the faucet..."
+ ./target/release/safe --log-output-dest=data-dir wallet create --no-password
address=$(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1)
echo "Sending tokens to the faucet at $address"
./target/release/faucet --log-output-dest=data-dir send 5000000 $address > initial_balance_from_faucet.txt
@@ -171,6 +172,7 @@ jobs:
mv $SAFE_DATA_PATH/client_first/logs $CLIENT_DATA_PATH/logs
ls -l $CLIENT_DATA_PATH
cp ./the-test-data.zip ./the-test-data_1.zip
+ ./target/release/safe --log-output-dest=data-dir wallet create --no-replace --no-password
./target/release/faucet --log-output-dest=data-dir send 5000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) > initial_balance_from_faucet_1.txt
cat initial_balance_from_faucet_1.txt
cat initial_balance_from_faucet_1.txt | tail -n 1 > transfer_hex
diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml
index deb9622df1..011fd53640 100644
--- a/.github/workflows/merge.yml
+++ b/.github/workflows/merge.yml
@@ -5,9 +5,9 @@ on:
# on main, we want to know that all commits are passing at a glance, any deviation should help bisecting errors
# the merge run checks should show on master and enable this clear test/passing history
merge_group:
- branches: [main, alpha*, beta*, rc*]
+ branches: [ main, alpha*, beta*, rc* ]
pull_request:
- branches: ["*"]
+ branches: [ "*" ]
env:
CARGO_INCREMENTAL: 0 # bookkeeping for incremental builds has overhead, not useful in CI.
@@ -95,7 +95,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
- os: [ubuntu-latest, windows-latest, macos-latest]
+ os: [ ubuntu-latest, windows-latest, macos-latest ]
steps:
- uses: actions/checkout@v4
@@ -207,6 +207,7 @@ jobs:
- name: Create and fund a wallet to pay for files storage
run: |
+ ./target/release/safe --log-output-dest=data-dir wallet create --no-password
./target/release/faucet --log-output-dest=data-dir send 1000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex
./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex
env:
@@ -341,7 +342,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
- os: [ubuntu-latest, windows-latest, macos-latest]
+ os: [ ubuntu-latest, windows-latest, macos-latest ]
steps:
- uses: actions/checkout@v4
@@ -359,7 +360,7 @@ jobs:
timeout-minutes: 30
- name: Build testing executable
- run: cargo test --release -p sn_node --features=local-discovery --test sequential_transfers --test storage_payments --test double_spend --test spend_simulation --no-run
+ run: cargo test --release -p sn_node --features=local-discovery --test sequential_transfers --test storage_payments --test double_spend --no-run
env:
# only set the target dir for windows to bypass the linker issue.
# happens if we build the node manager via testnet action
@@ -406,8 +407,69 @@ jobs:
CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }}
timeout-minutes: 25
+ - name: Stop the local network and upload logs
+ if: always()
+ uses: maidsafe/sn-local-testnet-action@main
+ with:
+ action: stop
+ log_file_prefix: safe_test_logs_spend
+ platform: ${{ matrix.os }}
+
+ # runs with increased node count
+ spend_simulation:
+ if: "!startsWith(github.event.head_commit.message, 'chore(release):')"
+ name: spend simulation
+ runs-on: ${{ matrix.os }}
+ strategy:
+ matrix:
+ os: [ ubuntu-latest, windows-latest, macos-latest ]
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Install Rust
+ uses: dtolnay/rust-toolchain@stable
+
+ - uses: Swatinem/rust-cache@v2
+
+ - name: Build binaries
+ run: cargo build --release --features=local-discovery --bin safenode
+ timeout-minutes: 30
+
+ - name: Build faucet binary
+ run: cargo build --release --bin faucet --features="local-discovery,gifting"
+ timeout-minutes: 30
+
+ - name: Build testing executable
+ run: cargo test --release -p sn_node --features=local-discovery --test spend_simulation --no-run
+ env:
+ # only set the target dir for windows to bypass the linker issue.
+ # happens if we build the node manager via testnet action
+ CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }}
+ timeout-minutes: 30
+
+ - name: Start a local network
+ uses: maidsafe/sn-local-testnet-action@main
+ with:
+ action: start
+ interval: 2000
+ node-count: 50
+ node-path: target/release/safenode
+ faucet-path: target/release/faucet
+ platform: ${{ matrix.os }}
+ build: true
+
+ - name: Check SAFE_PEERS was set
+ shell: bash
+ run: |
+ if [[ -z "$SAFE_PEERS" ]]; then
+ echo "The SAFE_PEERS variable has not been set"
+ exit 1
+ else
+ echo "SAFE_PEERS has been set to $SAFE_PEERS"
+ fi
+
- name: execute the spend simulation
- run: cargo test --release -p sn_node --features="local-discovery" --test spend_simulation -- --nocapture --test-threads=1
+ run: cargo test --release -p sn_node --features="local-discovery" --test spend_simulation -- --nocapture
env:
CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }}
timeout-minutes: 25
@@ -417,7 +479,7 @@ jobs:
uses: maidsafe/sn-local-testnet-action@main
with:
action: stop
- log_file_prefix: safe_test_logs_spend
+ log_file_prefix: safe_test_logs_spend_simulation
platform: ${{ matrix.os }}
token_distribution_test:
@@ -426,7 +488,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
- os: [ubuntu-latest, windows-latest, macos-latest]
+ os: [ ubuntu-latest, windows-latest, macos-latest ]
steps:
- uses: actions/checkout@v4
@@ -794,6 +856,7 @@ jobs:
- name: Create and fund a wallet first time
run: |
+ ~/safe --log-output-dest=data-dir wallet create --no-password
~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 1>first.txt
echo "----------"
cat first.txt
@@ -808,6 +871,7 @@ jobs:
rm -rf /home/runner/.local/share/safe/test_faucet
rm -rf /home/runner/.local/share/safe/test_genesis
rm -rf /home/runner/.local/share/safe/client
+ ~/safe --log-output-dest=data-dir wallet create --no-password
~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 1>second.txt
echo "----------"
cat second.txt
@@ -828,6 +892,7 @@ jobs:
rm -rf /home/runner/.local/share/safe/test_faucet
rm -rf /home/runner/.local/share/safe/test_genesis
rm -rf /home/runner/.local/share/safe/client
+ ~/safe --log-output-dest=data-dir wallet create --no-password
if GENESIS_PK=a9925296499299fdbf4412509d342a92e015f5b996e9acd1d2ab7f2326e3ad05934326efdc345345a95e973ac1bb6637 GENESIS_SK=40f6bbc870355c68138ac70b450b6425af02b49874df3f141b7018378ceaac66 nohup ~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1); then
echo "Faucet with different genesis key not rejected!"
exit 1
@@ -966,6 +1031,7 @@ jobs:
- name: Create and fund a wallet to pay for files storage
run: |
+ ~/safe --log-output-dest=data-dir wallet create --no-password
~/faucet --log-output-dest=data-dir send 100000000 $(~/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex
~/safe --log-output-dest=data-dir wallet receive --file transfer_hex
env:
@@ -1098,6 +1164,7 @@ jobs:
- name: Create and fund a wallet to pay for files storage
run: |
+ ./target/release/safe --log-output-dest=data-dir wallet create --no-password
./target/release/faucet --log-output-dest=data-dir send 100000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex
./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex
env:
@@ -1182,6 +1249,7 @@ jobs:
ls -l $SAFE_DATA_PATH
mv $SAFE_DATA_PATH/client_first/logs $CLIENT_DATA_PATH/logs
ls -l $CLIENT_DATA_PATH
+ ./target/release/safe --log-output-dest=data-dir wallet create --no-password
./target/release/faucet --log-output-dest=data-dir send 100000000 $(./target/release/safe --log-output-dest=data-dir wallet address | tail -n 1) | tail -n 1 > transfer_hex
./target/release/safe --log-output-dest=data-dir wallet receive --file transfer_hex
env:
diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml
index 37038ccf62..7165866f79 100644
--- a/.github/workflows/nightly.yml
+++ b/.github/workflows/nightly.yml
@@ -203,7 +203,7 @@ jobs:
timeout-minutes: 30
- name: Build testing executable
- run: cargo test --release -p sn_node --features=local-discovery --test sequential_transfers --test storage_payments --test double_spend --test spend_simulation --no-run
+ run: cargo test --release -p sn_node --features=local-discovery --test sequential_transfers --test storage_payments --test double_spend --no-run
env:
# only set the target dir for windows to bypass the linker issue.
# happens if we build the node manager via testnet action
@@ -221,7 +221,7 @@ jobs:
build: true
- name: execute the sequential transfers test
- run: cargo test --release -p sn_node --features="local-discovery" --test sequential_transfers -- --nocapture
+ run: cargo test --release -p sn_node --features="local-discovery" --test sequential_transfers -- --nocapture --test-threads=1
env:
CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }}
SN_LOG: "all"
@@ -240,8 +240,67 @@ jobs:
CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }}
timeout-minutes: 25
+ - name: Small wait to allow reward receipt
+ run: sleep 30
+ timeout-minutes: 1
+
+ - name: Stop the local network and upload logs
+ if: always()
+ uses: maidsafe/sn-local-testnet-action@main
+ with:
+ action: stop
+ log_file_prefix: safe_test_logs_spend
+ platform: ${{ matrix.os }}
+
+ - name: post notification to slack on failure
+ if: ${{ failure() }}
+ uses: bryannice/gitactions-slack-notification@2.0.0
+ env:
+ SLACK_INCOMING_WEBHOOK: ${{ secrets.SLACK_GH_ACTIONS_WEBHOOK_URL }}
+ SLACK_MESSAGE: "Please check the logs for the run at ${{ env.WORKFLOW_URL }}/${{ github.run_id }}"
+ SLACK_TITLE: "Nightly Spend Test Run Failed"
+
+ # runs with increased node count
+ spend_simulation:
+ name: spend simulation
+ runs-on: ${{ matrix.os }}
+ strategy:
+ matrix:
+ os: [ubuntu-latest, windows-latest, macos-latest]
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Install Rust
+ uses: dtolnay/rust-toolchain@stable
+
+ - uses: Swatinem/rust-cache@v2
+ continue-on-error: true
+
+ - name: Build binaries
+ run: cargo build --release --features=local-discovery --bin safenode --bin faucet
+ timeout-minutes: 30
+
+ - name: Build testing executable
+ run: cargo test --release -p sn_node --features=local-discovery --test spend_simulation --no-run
+ env:
+ # only set the target dir for windows to bypass the linker issue.
+ # happens if we build the node manager via testnet action
+ CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }}
+ timeout-minutes: 30
+
+ - name: Start a local network
+ uses: maidsafe/sn-local-testnet-action@main
+ with:
+ action: start
+ interval: 2000
+ node-count: 50
+ node-path: target/release/safenode
+ faucet-path: target/release/faucet
+ platform: ${{ matrix.os }}
+ build: true
+
- name: execute the spend simulation test
- run: cargo test --release -p sn_node --features="local-discovery" --test spend_simulation -- --nocapture --test-threads=1
+ run: cargo test --release -p sn_node --features="local-discovery" --test spend_simulation -- --nocapture
env:
CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }}
timeout-minutes: 25
@@ -255,7 +314,7 @@ jobs:
uses: maidsafe/sn-local-testnet-action@main
with:
action: stop
- log_file_prefix: safe_test_logs_spend
+ log_file_prefix: safe_test_logs_spend_simulation
platform: ${{ matrix.os }}
- name: post notification to slack on failure
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 8e70b4efa4..edb068d2a2 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -1,31 +1,13 @@
-# Will automatically generate relases for version bumped code.
-# Can optionally be run as workflow action to generate a soft release (no publish steps, just s3 and github release)
-
name: release
-# prevent concurrent version bumps + releases from running at the same time
-concurrency:
- group: "version-bump-release-${{ github.ref }}"
-
on:
- push:
- branches:
- - stable*
- - alpha*
- - beta*
- - rc*
workflow_dispatch:
- inputs:
- network_version_mode:
- description: "Set NETWORK_VERSION_MODE if desired. (This restricts the network from contacting any other network mode. If left empty, the default protocol is used)"
- required: false
- default: ""
-
-# During the build step, the env variable has to be manually sent to the containers for cross platform builds.
-# Update the Justfile as well.
+
+# The key variables also need to be passed to `cross`, which runs in a container and does not
+# inherit variables from the parent environment. The `cross` tool is used in the `build`
+# job. If any keys are added, the `build-release-artifacts` target in the Justfile must
+# also be updated.
env:
- RELEASE_PLZ_BIN_URL: https://github.com/MarcoIeni/release-plz/releases/download/release-plz-v0.3.43/release-plz-x86_64-unknown-linux-gnu.tar.gz
- JUST_BIN_URL: https://github.com/casey/just/releases/download/1.25.2/just-1.25.2-x86_64-unknown-linux-musl.tar.gz
WORKFLOW_URL: https://github.com/maidsafe/safe_network/actions/runs
GENESIS_PK: ${{ secrets.STABLE_GENESIS_PK }}
GENESIS_SK: ${{ secrets.STABLE_GENESIS_SK }}
@@ -35,10 +17,10 @@ env:
jobs:
build:
- if: |
- github.repository_owner == 'maidsafe' &&
- startsWith(github.event.head_commit.message, 'chore(release):') ||
- github.event_name == 'workflow_dispatch'
+ if: ${{
+ github.repository_owner == 'maidsafe' &&
+ (github.ref == 'refs/heads/stable' || startsWith(github.ref, 'refs/heads/rc'))
+ }}
name: build
runs-on: ${{ matrix.os }}
strategy:
@@ -48,6 +30,8 @@ jobs:
target: x86_64-pc-windows-msvc
- os: macos-latest
target: x86_64-apple-darwin
+ - os: macos-latest
+ target: aarch64-apple-darwin
- os: ubuntu-latest
target: x86_64-unknown-linux-musl
- os: ubuntu-latest
@@ -59,18 +43,10 @@ jobs:
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- # cargo-binstall will try and use pre-built binaries if they are available and also speeds up
- # installing `cross`
- uses: cargo-bins/cargo-binstall@main
- shell: bash
run: cargo binstall --no-confirm just
- # Set the network versioning based on our branch or workflow input
- - name: provide network versioning
- shell: bash
- run: |
- echo "NETWORK_VERSION_MODE=${{ github.event.inputs.network_version_mode || '' }}" >> $GITHUB_ENV
-
- name: build release artifacts
shell: bash
run: |
@@ -82,6 +58,7 @@ jobs:
path: |
artifacts
!artifacts/.cargo-lock
+
- name: post notification to slack on failure
if: ${{ failure() }}
uses: bryannice/gitactions-slack-notification@2.0.0
@@ -90,26 +67,20 @@ jobs:
SLACK_MESSAGE: "Please check the logs for the run at ${{ env.WORKFLOW_URL }}/${{ github.run_id }}"
SLACK_TITLE: "Release Failed"
- release:
- if: |
- github.repository_owner == 'maidsafe' &&
- startsWith(github.event.head_commit.message, 'chore(release):') ||
- github.event_name == 'workflow_dispatch'
- name: publish flows and release creation
+ s3-release:
+ if: ${{
+ github.repository_owner == 'maidsafe' &&
+ (github.ref == 'refs/heads/stable' || startsWith(github.ref, 'refs/heads/rc'))
+ }}
+ name: s3 release
runs-on: ubuntu-latest
- needs: [build]
+ needs: [ build ]
env:
AWS_ACCESS_KEY_ID: ${{ secrets.S3_DEPLOY_AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.S3_DEPLOY_AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: eu-west-2
- GH_TOKEN: ${{ secrets.VERSION_BUMP_COMMIT_PAT }}
-
steps:
- uses: actions/checkout@v4
- with:
- fetch-depth: "0"
- token: ${{ secrets.VERSION_BUMP_COMMIT_PAT }}
-
- uses: actions/download-artifact@master
with:
name: safe_network-x86_64-pc-windows-msvc
@@ -122,6 +93,10 @@ jobs:
with:
name: safe_network-x86_64-apple-darwin
path: artifacts/x86_64-apple-darwin/release
+ - uses: actions/download-artifact@master
+ with:
+ name: safe_network-aarch64-apple-darwin
+ path: artifacts/aarch64-apple-darwin/release
- uses: actions/download-artifact@master
with:
name: safe_network-arm-unknown-linux-musleabi
@@ -134,42 +109,13 @@ jobs:
with:
name: safe_network-aarch64-unknown-linux-musl
path: artifacts/aarch64-unknown-linux-musl/release
- - shell: bash
- run: |
- git config --local user.email "action@github.com"
- git config --local user.name "GitHub Action"
- # It's possible to `cargo install` these tools, but it's very slow to compile on GHA infra.
- # Therefore we just pull some binaries from the Github Releases.
- - name: install tools
+
+ - uses: cargo-bins/cargo-binstall@main
+ - name: install just
shell: bash
- run: |
- curl -L -O $RELEASE_PLZ_BIN_URL
- tar xvf release-plz-x86_64-unknown-linux-gnu.tar.gz
- rm release-plz-x86_64-unknown-linux-gnu.tar.gz
- sudo mv release-plz /usr/local/bin
-
- curl -L -O $JUST_BIN_URL
- mkdir just
- tar xvf just-1.25.2-x86_64-unknown-linux-musl.tar.gz -C just
- rm just-1.25.2-x86_64-unknown-linux-musl.tar.gz
- sudo mv just/just /usr/local/bin
- rm -rf just
-
- # only publish if we're on the stable branch
- - name: Conditionally remove 'publish = false' from workspace in release-plz.toml on stable branch
- if: startsWith(github.ref_name, 'stable')
- run: |
- ls -la
- sed -i '/^\[workspace\]/,/^\[/ {/^publish = false$/d;}' ./release-plz.toml
- sed -i '/^\[workspace\]/,/^\[/ {/^git_release_draft = true$/d;}' ./release-plz.toml
- sed -i '/^\[workspace\]/,/^\[/ {/^git_tag_enable = false$/d;}' ./release-plz.toml
-
- # only publish if we're on the stable branch
- - name: Conditionally remove 'git_release_draft = true' from workspace in release-plz.toml on stable branch
- if: startsWith(github.ref_name, 'stable')
- run: |
+ run: cargo binstall --no-confirm just
- - name: upload to s3
+ - name: upload binaries to S3
shell: bash
run: |
# Package versioned assets as tar.gz and zip archives, and upload them to S3.
@@ -182,68 +128,125 @@ jobs:
# because the process gets the latest version from `crates.io` then downloads the binaries
# from S3, using that version number. Uploading the binaries to S3 before publishing
# ensures that they will exist after the new crate has been published.
+ just package-all-bins
+ just upload-all-packaged-bins-to-s3
+
+ github-release:
+ if: ${{
+ github.repository_owner == 'maidsafe' &&
+ (github.ref == 'refs/heads/stable' || startsWith(github.ref, 'refs/heads/rc'))
+ }}
+ name: github release
+ runs-on: ubuntu-latest
+ needs: [ build ]
- just package-release-assets "faucet"
- just package-release-assets "nat-detection"
- just package-release-assets "node-launchpad"
- just package-release-assets "safe"
- just package-release-assets "safenode"
- just package-release-assets "safenode_rpc_client"
- just package-release-assets "safenode-manager"
- just package-release-assets "safenodemand"
- just package-release-assets "sn_auditor"
- just upload-release-assets-to-s3 "faucet"
- just upload-release-assets-to-s3 "nat-detection"
- just upload-release-assets-to-s3 "node-launchpad"
- just upload-release-assets-to-s3 "safe"
- just upload-release-assets-to-s3 "safenode"
- just upload-release-assets-to-s3 "safenode-manager"
- just upload-release-assets-to-s3 "safenodemand"
- just upload-release-assets-to-s3 "safenode_rpc_client"
- just upload-release-assets-to-s3 "sn_auditor"
-
- # unless release plz toml is changed (as above removing certain limits)
- # github releases are drafts, and we do not publish to crates.io
- - name: publish and release
+ steps:
+ - uses: actions/checkout@v4
+ - uses: actions/download-artifact@master
+ with:
+ name: safe_network-x86_64-pc-windows-msvc
+ path: artifacts/x86_64-pc-windows-msvc/release
+ - uses: actions/download-artifact@master
+ with:
+ name: safe_network-x86_64-unknown-linux-musl
+ path: artifacts/x86_64-unknown-linux-musl/release
+ - uses: actions/download-artifact@master
+ with:
+ name: safe_network-x86_64-apple-darwin
+ path: artifacts/x86_64-apple-darwin/release
+ - uses: actions/download-artifact@master
+ with:
+ name: safe_network-aarch64-apple-darwin
+ path: artifacts/aarch64-apple-darwin/release
+ - uses: actions/download-artifact@master
+ with:
+ name: safe_network-arm-unknown-linux-musleabi
+ path: artifacts/arm-unknown-linux-musleabi/release
+ - uses: actions/download-artifact@master
+ with:
+ name: safe_network-armv7-unknown-linux-musleabihf
+ path: artifacts/armv7-unknown-linux-musleabihf/release
+ - uses: actions/download-artifact@master
+ with:
+ name: safe_network-aarch64-unknown-linux-musl
+ path: artifacts/aarch64-unknown-linux-musl/release
+
+ - uses: cargo-bins/cargo-binstall@main
+ - name: install just
+ shell: bash
+ run: cargo binstall --no-confirm just
+
+ - name: set package version
shell: bash
run: |
- # The `release-plz` command publishes crates which had their versions bumped, and also
- # creates Github releases. The binaries are then attached to the releases in the
- # `upload-github-release-assets` target.
- cargo login "${{ secrets.CRATES_IO_TOKEN }}"
- # The use of 'awk' suppresses the annoying instrumentation output
- # that makes the log difficult to read.
- release-plz release --git-token ${{ secrets.VERSION_BUMP_COMMIT_PAT }} | \
- awk '{ if (!/^\s*in release with input/ && !/^\s{4}/) print }'
+ current_date=$(date +%Y.%m)
+ release_cycle=$(grep 'release-cycle:' release-cycle-info | awk '{print $2}')
+ release_cycle_counter=$(grep 'release-cycle-counter:' release-cycle-info | awk '{print $2}')
+ version="$current_date.$release_cycle.$release_cycle_counter"
+ echo "PACKAGE_VERSION=$version" >> $GITHUB_ENV
- - name: create github release assets
+ - name: package release artifacts
shell: bash
- run: just upload-github-release-assets
+ run: just package-all-architectures
+
+ # For the next two steps, it seems to be necessary to set `GITHUB_TOKEN` on the step rather
+ # than the job level.
+ - name: create release
+ uses: actions/create-release@v1
+ env:
+ GITHUB_TOKEN: ${{ secrets.VERSION_BUMP_COMMIT_PAT }}
+ with:
+ tag_name: ${{ env.PACKAGE_VERSION }}
+ release_name: ${{ env.PACKAGE_VERSION }}
+ draft: false
+ prerelease: ${{ startsWith(github.ref, 'refs/heads/rc') && true || false }}
- - name: upload as latest release
+ - name: upload artifacts as assets
+ env:
+ GITHUB_TOKEN: ${{ secrets.VERSION_BUMP_COMMIT_PAT }}
shell: bash
- if: github.event_name != 'workflow_dispatch'
run: |
- # Now upload the 'latest' versions to S3. This can be done later because the node manager
- # does not depend on these existing.
- just package-release-assets "faucet" "latest"
- just package-release-assets "nat-detection" "latest"
- just package-release-assets "node-launchpad" "latest"
- just package-release-assets "safe" "latest"
- just package-release-assets "safenode" "latest"
- just package-release-assets "safenode_rpc_client" "latest"
- just package-release-assets "safenode-manager" "latest"
- just package-release-assets "safenodemand" "latest"
- just package-release-assets "sn_auditor" "latest"
- just upload-release-assets-to-s3 "faucet"
- just upload-release-assets-to-s3 "nat-detection"
- just upload-release-assets-to-s3 "node-launchpad"
- just upload-release-assets-to-s3 "safe"
- just upload-release-assets-to-s3 "safenode"
- just upload-release-assets-to-s3 "safenode-manager"
- just upload-release-assets-to-s3 "safenodemand"
- just upload-release-assets-to-s3 "safenode_rpc_client"
- just upload-release-assets-to-s3 "sn_auditor"
+ (
+ cd packaged_architectures
+ ls | xargs gh release upload ${{ env.PACKAGE_VERSION }}
+ )
+
+ - name: post notification to slack on failure
+ if: ${{ failure() }}
+ uses: bryannice/gitactions-slack-notification@2.0.0
+ env:
+ SLACK_INCOMING_WEBHOOK: ${{ secrets.SLACK_GH_ACTIONS_WEBHOOK_URL }}
+ SLACK_MESSAGE: "Please check the logs for the run at ${{ env.WORKFLOW_URL }}/${{ github.run_id }}"
+ SLACK_TITLE: "Release Failed"
+
+ publish-crates:
+ if: ${{ github.repository_owner == 'maidsafe' && github.ref == 'refs/heads/stable' }}
+ needs: [ build, s3-release ]
+ runs-on: self-hosted
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ fetch-depth: "0"
+ token: ${{ secrets.VERSION_BUMP_COMMIT_PAT }}
+
+ # Required for the creation of tags
+ - shell: bash
+ run: |
+ git config --local user.email "action@github.com"
+ git config --local user.name "GitHub Action"
+
+ - uses: cargo-bins/cargo-binstall@main
+ - shell: bash
+ run: cargo binstall --no-confirm release-plz
+
+ - name: publish crates
+ shell: bash
+ run: |
+ cargo login "${{ secrets.CRATES_IO_TOKEN }}"
+ # The use of 'awk' suppresses the annoying instrumentation output that makes the log
+ # difficult to read.
+ release-plz release --git-token ${{ secrets.VERSION_BUMP_COMMIT_PAT }} | \
+ awk '{ if (!/^\s*in release with input/ && !/^\s{4}/) print }'
- name: post notification to slack on failure
if: ${{ failure() }}
diff --git a/.gitignore b/.gitignore
index b19247f664..99b9fcf479 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,6 +3,8 @@
/target/
/artifacts/
/deploy/
+/packaged_architectures/
+/packaged_bins/
# These are backup files generated by rustfmt
**/*.rs.bk
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9ce23135f3..2d2915d56e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,175 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
*When editing this file, please respect a line length of 100.*
+## 2024-07-25
+
+### Binaries
+
+* `faucet` v0.4.31
+* `nat-detection` v0.2.1
+* `node-launchpad` v0.3.11
+* `safe` v0.94.0
+* `safenode` v0.110.0
+* `safenode-manager` v0.10.1
+* `safenodemand` v0.10.1
+* `safenode_rpc_client` v0.6.26
+* `sn_auditor` v0.2.3
+
+### 🔦 Highlights
+
+* The introduction of a record-store cache has significantly reduced the node's disk IO. As a side
+  effect, the CPU does less work, and performance improves. RAM usage has increased by around 25MB
+  per node, but we view this as a reasonable trade-off.
+* The node's relay server now supports more connections: up to 256 when running with
+  `--home-network`, and up to 1024 otherwise. Along with minor tweaks to utilize the relay server
+  properly, this should hopefully result in fewer connections being dropped.
+* Reward forwarding is more robust.
+* Chunk verification is now probabilistic, which should reduce messaging. In combination with
+ replication messages also being reduced, this should result in a bandwidth usage reduction of
+ ~20%.
+* Replication messages are less frequent, reducing bandwidth by ~20% per node.
+* Bad nodes and nodes with a mismatched protocol are now added to a block list. This reduces the
+  chance of network interference and the impact of a bad node in the network.
+* For the time being, hole punching has been removed. It was causing handshake timeouts, resulting
+  in home nodes being less stable. It will be re-enabled in the future.
+* Wallet password encryption enhances security: if the wallet's secret key file is leaked, the
+  encryption prevents unauthorized access.
+* Native Apple Silicon (M-series) binaries have been added to our releases, meaning M-series Mac
+ users do not have to rely on running Intel binaries with Rosetta.
+
+### Merged Pull Requests
+
+2024-07-11 [#1945](https://github.com/maidsafe/safe_network/pull/1945) -- feat: double spend spam protection
+
+2024-07-11 [#1952](https://github.com/maidsafe/safe_network/pull/1952) -- fix(auditor): create auditor directory if it doesn't exist
+
+2024-07-11 [#1951](https://github.com/maidsafe/safe_network/pull/1951) -- test(spend_simulation): add more attacks
+
+2024-07-11 [#1953](https://github.com/maidsafe/safe_network/pull/1953) -- chore/fix(resources): use more portable shebang
+
+2024-07-12 [#1959](https://github.com/maidsafe/safe_network/pull/1959) -- refactor outdated conn removal
+
+2024-07-12 [#1964](https://github.com/maidsafe/safe_network/pull/1964) -- refactor(cli)!: `wallet address` and `wallet create` changes
+
+2024-07-15 [#1946](https://github.com/maidsafe/safe_network/pull/1946) -- docs(sn_client): Basic documentation
+
+2024-07-15 [#1966](https://github.com/maidsafe/safe_network/pull/1966) -- fix(network): do not add bootstrap peer as relay candidate
+
+2024-07-16 [#1969](https://github.com/maidsafe/safe_network/pull/1969) -- chore(network): force close connection if there is a protocol mistmatch
+
+2024-07-16 [#1972](https://github.com/maidsafe/safe_network/pull/1972) -- feat(safenode_rpc_client): added `--version` flag
+
+2024-07-17 [#1973](https://github.com/maidsafe/safe_network/pull/1973) -- Auditor supplement features
+
+2024-07-17 [#1975](https://github.com/maidsafe/safe_network/pull/1975) -- feat(networking): remove self.close_group and checks there as unused
+
+2024-07-18 [#1976](https://github.com/maidsafe/safe_network/pull/1976) -- chore(networking): make ChunkVerification probabalistic
+
+2024-07-18 [#1949](https://github.com/maidsafe/safe_network/pull/1949) -- feat(wallet): wallet secret key file encryption
+
+2024-07-18 [#1977](https://github.com/maidsafe/safe_network/pull/1977) -- Reduce replication msg processing
+
+2024-07-18 [#1983](https://github.com/maidsafe/safe_network/pull/1983) -- fix(node): remove cn from disk and flush to confirmed_spends during forwarding
+
+2024-07-18 [#1980](https://github.com/maidsafe/safe_network/pull/1980) -- feat(networking): add small record cache
+
+2024-07-18 [#1982](https://github.com/maidsafe/safe_network/pull/1982) -- feat(network): implement blocklist behaviour
+
+2024-07-18 [#1984](https://github.com/maidsafe/safe_network/pull/1984) -- chore(node): move sn_client to dev deps
+
+2024-07-18 [#1985](https://github.com/maidsafe/safe_network/pull/1985) -- Fix Nano count disappearing from Launchpad after restart
+
+2024-07-19 [#1971](https://github.com/maidsafe/safe_network/pull/1971) -- feat!: limit error surface
+
+2024-07-19 [#1986](https://github.com/maidsafe/safe_network/pull/1986) -- Add native Apple Silicon binaries to the release artifacts
+
+2024-07-19 [#1955](https://github.com/maidsafe/safe_network/pull/1955) -- feat(networking): relax relay limits
+
+2024-07-24 [#1990](https://github.com/maidsafe/safe_network/pull/1990) -- chore: implement new process in release workflow
+
+### Detailed Changes
+
+#### Network
+
+##### Added
+
+- Protection against an attack allowing bad nodes or clients to shadow a spend (make it disappear)
+ through spamming.
+- Nodes allow more relayed connections through them. Also, home nodes will relay through 4 nodes
+ instead of 2. Without these changes, relays were denying new connections to home nodes, making them
+ difficult to reach.
+- Auditor tracks forwarded payments using the default key.
+- Auditor tracks burnt spend attempts and only credits them once.
+- Auditor collects balance of UTXOs.
+- Added different attack types to the spend simulation test to ensure spend validation is solid.
+- Bad nodes and nodes with a mismatched protocol are now added to a block list. This reduces the
+  chance of network interference and the impact of a bad node in the network.
+- The introduction of a record-store cache has significantly reduced the node's disk IO. As a side
+  effect, the CPU does less work, and performance improves. RAM usage has increased by around 25MB
+  per node, but we view this as a reasonable trade-off.
+
+##### Changed
+
+- For the time being, hole punching has been removed. It was causing handshake timeouts, resulting
+  in home nodes being less stable. It will be re-enabled in the future.
+- Force connection closure if a peer is using a different protocol.
+- Reserve trace level logs for tracking event statistics. Now you can use `SN_LOG=v` to get more
+ relevant logs without being overwhelmed by event handling stats.
+- Chunk verification is now probabilistic, which should reduce messaging. In combination with
+ replication messages also being reduced, this should result in a bandwidth usage reduction of
+ ~20%.
+
+##### Fixed
+
+- During payment forwarding, CashNotes are removed from disk and confirmed spends are stored to
+ disk. This is necessary for resolving burnt spend attempts for forwarded payments.
+- Fix a bug where the auditor was not storing data to disk because of a missing directory.
+- Bootstrap peers are not added as relay candidates as we do not want to overwhelm them.
+
+#### Client
+
+##### Added
+
+- Basic global documentation for the `sn_client` crate.
+- Option to encrypt the wallet private key with a password, in a file called
+ `main_secret_key.encrypted`, inside the wallet directory.
+- Option to load a wallet from an encrypted secret-key file using a password.
+- The `wallet create` command provides a `--password` argument to encrypt the wallet.
+- The `wallet create` command provides a `--no-password` argument to skip encryption.
+- The `wallet create` command provides a `--no-replace` argument to suppress a prompt to replace an
+ existing wallet.
+- The `wallet create` command provides a `--key` argument to create a wallet from a hex-encoded
+ private key.
+- The `wallet create` command provides a `--derivation` argument to set a derivation passphrase to
+ be used with the mnemonic to create a new private key.
+- A new `wallet encrypt` command encrypts an existing wallet.
+
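+Illustrative usage of the new wallet commands (a sketch; exact prompts and flag combinations may
+differ, and the hex key below is a placeholder):
+
+```bash
+# create a wallet, skipping password encryption
+safe wallet create --no-password
+# create a wallet from a hex-encoded private key, suppressing the prompt to replace an existing one
+safe wallet create --key <hex-encoded-secret-key> --no-replace
+# encrypt an existing wallet with a password
+safe wallet encrypt
+```
+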
+##### Changed
+
+- The `wallet address` command no longer creates a new wallet if no wallet exists.
+- The `wallet create` command creates a wallet using the account mnemonic instead of requiring a
+ hex-encoded secret key.
+- The `wallet create` `--key` and `--derivation` arguments are mutually exclusive.
+
+#### Launchpad
+
+##### Fixed
+
+- The `Total Nanos Earned` stat no longer resets on restart.
+
+#### RPC Client
+
+##### Added
+
+- A `--version` argument shows the binary version.
+
+#### Other
+
+##### Added
+
+- Native Apple Silicon (M-series) binaries have been added to our releases, meaning M-series Mac
+ users do not have to rely on running Intel binaries with Rosetta.
+
## 2024-07-10
### Binaries
diff --git a/Cargo.lock b/Cargo.lock
index 55e0dd315b..46463d6f33 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3736,7 +3736,6 @@ dependencies = [
"libp2p-autonat",
"libp2p-connection-limits",
"libp2p-core",
- "libp2p-dcutr",
"libp2p-dns",
"libp2p-gossipsub",
"libp2p-identify",
@@ -3833,29 +3832,6 @@ dependencies = [
"void",
]
-[[package]]
-name = "libp2p-dcutr"
-version = "0.11.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a4f7bb7fa2b9e6cad9c30a6f67e3ff5c1e4b658c62b6375e35861a85f9c97bf3"
-dependencies = [
- "asynchronous-codec 0.6.2",
- "either",
- "futures",
- "futures-bounded",
- "futures-timer",
- "instant",
- "libp2p-core",
- "libp2p-identity",
- "libp2p-swarm",
- "lru 0.11.1",
- "quick-protobuf",
- "quick-protobuf-codec 0.2.0",
- "thiserror",
- "tracing",
- "void",
-]
-
[[package]]
name = "libp2p-dns"
version = "0.41.1"
@@ -3917,7 +3893,7 @@ dependencies = [
"libp2p-core",
"libp2p-identity",
"libp2p-swarm",
- "lru 0.12.3",
+ "lru",
"quick-protobuf",
"quick-protobuf-codec 0.3.1",
"smallvec",
@@ -4003,7 +3979,6 @@ dependencies = [
"futures",
"instant",
"libp2p-core",
- "libp2p-dcutr",
"libp2p-identify",
"libp2p-identity",
"libp2p-kad",
@@ -4125,7 +4100,7 @@ dependencies = [
"libp2p-core",
"libp2p-identity",
"libp2p-swarm-derive",
- "lru 0.12.3",
+ "lru",
"multistream-select",
"once_cell",
"rand 0.8.5",
@@ -4297,15 +4272,6 @@ version = "0.4.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c"
-[[package]]
-name = "lru"
-version = "0.11.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a4a83fb7698b3643a0e34f9ae6f2e8f0178c0fd42f8b59d493aa271ff3a5bf21"
-dependencies = [
- "hashbrown 0.14.5",
-]
-
[[package]]
name = "lru"
version = "0.12.3"
@@ -4559,7 +4525,7 @@ dependencies = [
[[package]]
name = "nat-detection"
-version = "0.2.0"
+version = "0.2.1"
dependencies = [
"clap",
"clap-verbosity-flag",
@@ -4674,7 +4640,7 @@ dependencies = [
[[package]]
name = "node-launchpad"
-version = "0.3.10"
+version = "0.3.11"
dependencies = [
"atty",
"better-panic",
@@ -5998,7 +5964,7 @@ dependencies = [
"compact_str",
"crossterm",
"itertools 0.12.1",
- "lru 0.12.3",
+ "lru",
"paste",
"serde",
"stability",
@@ -6298,6 +6264,17 @@ dependencies = [
"serde_derive",
]
+[[package]]
+name = "rpassword"
+version = "7.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "80472be3c897911d0137b2d2b9055faf6eeac5b14e324073d83bc17b191d7e3f"
+dependencies = [
+ "libc",
+ "rtoolbox",
+ "windows-sys 0.48.0",
+]
+
[[package]]
name = "rtnetlink"
version = "0.10.1"
@@ -6313,6 +6290,16 @@ dependencies = [
"tokio",
]
+[[package]]
+name = "rtoolbox"
+version = "0.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c247d24e63230cdb56463ae328478bd5eac8b8faa8c69461a77e8e323afac90e"
+dependencies = [
+ "libc",
+ "windows-sys 0.48.0",
+]
+
[[package]]
name = "rust-ini"
version = "0.19.0"
@@ -6616,6 +6603,15 @@ dependencies = [
"cc",
]
+[[package]]
+name = "secrecy"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e"
+dependencies = [
+ "zeroize",
+]
+
[[package]]
name = "self_encryption"
version = "0.29.2"
@@ -6917,7 +6913,7 @@ checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67"
[[package]]
name = "sn-node-manager"
-version = "0.10.0"
+version = "0.10.1"
dependencies = [
"assert_cmd",
"assert_fs",
@@ -6979,7 +6975,7 @@ dependencies = [
[[package]]
name = "sn_auditor"
-version = "0.2.2"
+version = "0.2.3"
dependencies = [
"blsttc",
"clap",
@@ -7013,14 +7009,14 @@ dependencies = [
[[package]]
name = "sn_build_info"
-version = "0.1.9"
+version = "0.1.10"
dependencies = [
"vergen",
]
[[package]]
name = "sn_cli"
-version = "0.93.9"
+version = "0.94.0"
dependencies = [
"aes 0.7.5",
"base64 0.22.1",
@@ -7044,6 +7040,7 @@ dependencies = [
"rayon",
"reqwest 0.12.4",
"rmp-serde",
+ "rpassword",
"serde",
"sn_build_info",
"sn_client",
@@ -7061,7 +7058,7 @@ dependencies = [
[[package]]
name = "sn_client"
-version = "0.108.0"
+version = "0.109.0"
dependencies = [
"assert_matches",
"async-trait",
@@ -7146,7 +7143,7 @@ dependencies = [
[[package]]
name = "sn_faucet"
-version = "0.4.30"
+version = "0.4.31"
dependencies = [
"assert_fs",
"base64 0.22.1",
@@ -7178,7 +7175,7 @@ dependencies = [
[[package]]
name = "sn_logging"
-version = "0.2.30"
+version = "0.2.31"
dependencies = [
"chrono",
"color-eyre",
@@ -7203,7 +7200,7 @@ dependencies = [
[[package]]
name = "sn_metrics"
-version = "0.1.10"
+version = "0.1.11"
dependencies = [
"clap",
"color-eyre",
@@ -7217,7 +7214,7 @@ dependencies = [
[[package]]
name = "sn_networking"
-version = "0.17.0"
+version = "0.17.1"
dependencies = [
"aes-gcm-siv",
"async-trait",
@@ -7251,6 +7248,7 @@ dependencies = [
"tokio",
"tracing",
"uuid",
+ "void",
"walkdir",
"wasm-bindgen-futures",
"wasmtimer",
@@ -7259,7 +7257,7 @@ dependencies = [
[[package]]
name = "sn_node"
-version = "0.109.0"
+version = "0.110.0"
dependencies = [
"assert_fs",
"assert_matches",
@@ -7314,7 +7312,7 @@ dependencies = [
[[package]]
name = "sn_node_rpc_client"
-version = "0.6.25"
+version = "0.6.26"
dependencies = [
"assert_fs",
"async-trait",
@@ -7341,7 +7339,7 @@ dependencies = [
[[package]]
name = "sn_peers_acquisition"
-version = "0.4.0"
+version = "0.4.1"
dependencies = [
"clap",
"lazy_static",
@@ -7357,7 +7355,7 @@ dependencies = [
[[package]]
name = "sn_protocol"
-version = "0.17.5"
+version = "0.17.6"
dependencies = [
"blsttc",
"bytes",
@@ -7384,7 +7382,7 @@ dependencies = [
[[package]]
name = "sn_registers"
-version = "0.3.15"
+version = "0.3.16"
dependencies = [
"blsttc",
"crdts",
@@ -7401,7 +7399,7 @@ dependencies = [
[[package]]
name = "sn_service_management"
-version = "0.3.8"
+version = "0.3.9"
dependencies = [
"async-trait",
"dirs-next",
@@ -7427,10 +7425,11 @@ dependencies = [
[[package]]
name = "sn_transfers"
-version = "0.18.8"
+version = "0.18.9"
dependencies = [
"assert_fs",
"blsttc",
+ "chrono",
"criterion",
"custom_debug",
"dirs-next",
@@ -7442,10 +7441,13 @@ dependencies = [
"pprof",
"rand 0.8.5",
"rayon",
+ "ring 0.17.8",
"rmp-serde",
+ "secrecy",
"serde",
"serde_bytes",
"serde_json",
+ "tempfile",
"thiserror",
"tiny-keccak",
"tokio",
@@ -7756,7 +7758,7 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76"
[[package]]
name = "test_utils"
-version = "0.4.1"
+version = "0.4.2"
dependencies = [
"color-eyre",
"dirs-next",
@@ -7888,7 +7890,7 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
[[package]]
name = "token_supplies"
-version = "0.1.48"
+version = "0.1.49"
dependencies = [
"dirs-next",
"reqwest 0.11.27",
diff --git a/Justfile b/Justfile
index a6d1e7d24e..450ea0ca71 100644
--- a/Justfile
+++ b/Justfile
@@ -73,6 +73,7 @@ build-release-artifacts arch:
supported_archs=(
"x86_64-pc-windows-msvc"
"x86_64-apple-darwin"
+ "aarch64-apple-darwin"
"x86_64-unknown-linux-musl"
"arm-unknown-linux-musleabi"
"armv7-unknown-linux-musleabihf"
@@ -106,12 +107,7 @@ build-release-artifacts arch:
mkdir artifacts
cargo clean
- echo "Using the keys: GENESIS_PK=$GENESIS_PK, FOUNDATION_PK=$FOUNDATION_PK, NETWORK_ROYALTIES_PK=$NETWORK_ROYALTIES_PK, PAYMENT_FORWARD_PK=$PAYMENT_FORWARD_PK"
cross_container_opts="--env \"GENESIS_PK=$GENESIS_PK\" --env \"GENESIS_SK=$GENESIS_SK\" --env \"FOUNDATION_PK=$FOUNDATION_PK\" --env \"NETWORK_ROYALTIES_PK=$NETWORK_ROYALTIES_PK\" --env \"PAYMENT_FORWARD_PK=$PAYMENT_FORWARD_PK\""
- if [[ -n "${NETWORK_VERSION_MODE+x}" ]]; then
- echo "The NETWORK_VERSION_MODE variable is set to $NETWORK_VERSION_MODE"
- cross_container_opts="$cross_container_opts --env NETWORK_VERSION_MODE=$NETWORK_VERSION_MODE"
- fi
export CROSS_CONTAINER_OPTS=$cross_container_opts
if [[ $arch == arm* || $arch == armv7* || $arch == aarch64* ]]; then
@@ -152,6 +148,7 @@ make-artifacts-directory:
architectures=(
"x86_64-pc-windows-msvc"
"x86_64-apple-darwin"
+ "aarch64-apple-darwin"
"x86_64-unknown-linux-musl"
"arm-unknown-linux-musleabi"
"armv7-unknown-linux-musleabihf"
@@ -164,13 +161,27 @@ make-artifacts-directory:
rm safe_network-$arch.zip
done
-package-release-assets bin version="":
+package-all-bins:
+ #!/usr/bin/env bash
+ set -e
+ just package-bin "faucet"
+ just package-bin "nat-detection"
+ just package-bin "node-launchpad"
+ just package-bin "safe"
+ just package-bin "safenode"
+ just package-bin "safenode_rpc_client"
+ just package-bin "safenode-manager"
+ just package-bin "safenodemand"
+ just package-bin "sn_auditor"
+
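+# Illustrative invocations of the recipe below (the optional version argument ends up in the
+# archive names, e.g. passing "latest" produces safenode-latest-x86_64-unknown-linux-musl.tar.gz):
+#   just package-bin "safenode"
+#   just package-bin "safenode" "latest"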
+package-bin bin version="":
#!/usr/bin/env bash
set -e
architectures=(
"x86_64-pc-windows-msvc"
"x86_64-apple-darwin"
+ "aarch64-apple-darwin"
"x86_64-unknown-linux-musl"
"arm-unknown-linux-musleabi"
"armv7-unknown-linux-musleabihf"
@@ -222,7 +233,6 @@ package-release-assets bin version="":
sn_auditor)
crate_dir_name="sn_auditor"
;;
-
*)
echo "The $bin binary is not supported"
exit 1
@@ -241,7 +251,7 @@ package-release-assets bin version="":
exit 1
fi
- rm -rf deploy/$bin
+ rm -rf packaged_bins/$bin
find artifacts/ -name "$bin" -exec chmod +x '{}' \;
for arch in "${architectures[@]}" ; do
echo "Packaging for $arch..."
@@ -250,95 +260,30 @@ package-release-assets bin version="":
tar -C artifacts/$arch/release -zcvf $bin-$version-$arch.tar.gz $bin_name
done
- mkdir -p deploy/$bin
- mv *.tar.gz deploy/$bin
- mv *.zip deploy/$bin
+ mkdir -p packaged_bins/$bin
+ mv *.tar.gz packaged_bins/$bin
+ mv *.zip packaged_bins/$bin
-upload-github-release-assets:
+upload-all-packaged-bins-to-s3:
#!/usr/bin/env bash
set -e
- binary_crates=(
- "sn_faucet"
- "node-launchpad"
- "sn_cli"
- "sn_node"
- "sn-node-manager"
- "sn_node_rpc_client"
- "sn_auditor"
- "nat-detection"
+ binaries=(
+ faucet
+ nat-detection
+ node-launchpad
+ safe
+ safenode
+ safenode-manager
+ safenode_rpc_client
+ safenodemand
+ sn_auditor
)
-
- commit_msg=$(git log -1 --pretty=%B)
- commit_msg=${commit_msg#*: } # Remove 'chore(release): ' prefix
-
- IFS='/' read -ra crates_with_versions <<< "$commit_msg"
- declare -a crate_names
- for crate_with_version in "${crates_with_versions[@]}"; do
- crate=$(echo "$crate_with_version" | awk -F'-v' '{print $1}')
- crates+=("$crate")
+ for binary in "${binaries[@]}"; do
+ just upload-packaged-bin-to-s3 "$binary"
done
- for crate in "${crates[@]}"; do
- for binary_crate in "${binary_crates[@]}"; do
- if [[ "$crate" == "$binary_crate" ]]; then
- case "$crate" in
- sn_faucet)
- bin_name="faucet"
- bucket="sn-faucet"
- ;;
- node-launchpad)
- bin_name="node-launchpad"
- bucket="node-launchpad"
- ;;
- sn_cli)
- bin_name="safe"
- bucket="sn-cli"
- ;;
- sn_node)
- bin_name="safenode"
- bucket="sn-node"
- ;;
- sn-node-manager)
- bin_name="safenode-manager"
- bucket="sn-node-manager"
- ;;
- sn_node_rpc_client)
- bin_name="safenode_rpc_client"
- bucket="sn-node-rpc-client"
- ;;
- sn_auditor)
- bin_name="sn_auditor"
- bucket="sn-auditor"
- ;;
- nat-detection)
- bin_name="nat-detection"
- bucket="nat-detection"
- ;;
- *)
- echo "The $crate crate is not supported"
- exit 1
- ;;
- esac
- # The crate_with_version variable will correspond to the tag name of the release.
- # However, only binary crates have releases, so we need to skip any tags that don't
- # correspond to a binary.
- for crate_with_version in "${crates_with_versions[@]}"; do
- if [[ $crate_with_version == $crate-v* ]]; then
- (
- cd deploy/$bin_name
- if [[ "$crate" == "node-launchpad" || "$crate" == "sn_cli" || "$crate" == "sn_node" || "$crate" == "sn-node-manager" || "$crate" == "sn_auditor" ]]; then
- echo "Uploading $bin_name assets to $crate_with_version release..."
- ls | xargs gh release upload $crate_with_version --repo {{release_repo}}
- fi
- )
- fi
- done
- fi
- done
- done
-
-upload-release-assets-to-s3 bin_name:
+upload-packaged-bin-to-s3 bin_name:
#!/usr/bin/env bash
set -e
@@ -376,18 +321,85 @@ upload-release-assets-to-s3 bin_name:
;;
esac
- cd deploy/{{bin_name}}
+ cd packaged_bins/{{bin_name}}
for file in *.zip *.tar.gz; do
dest="s3://$bucket/$file"
- if aws s3 ls "$dest" > /dev/null 2>&1; then
- echo "$dest already exists. This suggests an error somewhere."
- echo "If you intentionally want to overwrite, remove the file and run the workflow again."
- exit 1
- else
+ if [[ "$file" == *latest* ]]; then
+ echo "Allowing overwrite for 'latest' version..."
aws s3 cp "$file" "$dest" --acl public-read
+ else
+ if aws s3 ls "$dest" > /dev/null 2>&1; then
+ echo "$dest already exists. Will not overwrite."
+ else
+ # This command outputs a lot of text which makes the build log difficult to read, so we will
+ # suppress it.
+ aws s3 cp "$file" "$dest" --acl public-read > /dev/null 2>&1
+ echo "$dest uploaded"
+ fi
fi
done
+package-all-architectures:
+ #!/usr/bin/env bash
+ set -e
+
+ architectures=(
+ "x86_64-pc-windows-msvc"
+ "x86_64-apple-darwin"
+ "aarch64-apple-darwin"
+ "x86_64-unknown-linux-musl"
+ "arm-unknown-linux-musleabi"
+ "armv7-unknown-linux-musleabihf"
+ "aarch64-unknown-linux-musl"
+ )
+
+ rm -rf packaged_architectures
+ for arch in "${architectures[@]}" ; do
+ echo "Packaging artifacts for $arch..."
+ just package-arch "$arch"
+ done
+
+package-arch arch:
+ #!/usr/bin/env bash
+ set -e
+
+ if [[ -n $PACKAGE_VERSION ]]; then
+ version="$PACKAGE_VERSION"
+ else
+ current_date=$(date +%Y.%m)
+ release_cycle=$(grep 'release-cycle:' release-cycle-info | awk '{print $2}')
+ release_cycle_counter=$(grep 'release-cycle-counter:' release-cycle-info | awk '{print $2}')
+ version="$current_date.$release_cycle.$release_cycle_counter"
+ fi
+ architecture="{{arch}}"
+ zip_filename="${version}.autonomi.${architecture}.zip"
+
+ mkdir -p packaged_architectures
+ cd artifacts/$architecture/release
+
+ binaries=(
+ faucet
+ nat-detection
+ node-launchpad
+ safe
+ safenode
+ safenode-manager
+ safenode_rpc_client
+ safenodemand
+ sn_auditor
+ )
+
+ if [[ "$architecture" == *"windows"* ]]; then
+ for binary in "${binaries[@]}"; do
+ binaries_with_extension+=("$binary.exe")
+ done
+ zip "../../../packaged_architectures/$zip_filename" "${binaries_with_extension[@]}"
+ else
+ zip "../../../packaged_architectures/$zip_filename" "${binaries[@]}"
+ fi
+
+ cd ../../..
+
node-man-integration-tests:
#!/usr/bin/env bash
set -e
diff --git a/README.md b/README.md
index 583f1f7452..3ca059fc27 100644
--- a/README.md
+++ b/README.md
@@ -24,18 +24,23 @@ Libp2p.
### For Developers
-At build time the following env vars can be set to override default keys (** and must be set during the release process to override the default keys**. Github Secrets can be used to set these values for the release process):
+At build time the following env vars can be set to override default keys (** and must be set during the release process
+to override the default keys**. Github Secrets can be used to set these values for the release process):
- `GENESIS_PK` - The genesis spend public key to use for genesis verification.
-- `GENESIS_SK` - If building the faucet for the genesis spend, this is the secret key to use for genesis verification. This should be kept secret.
+- `GENESIS_SK` - If building the faucet for the genesis spend, this is the secret key to use for genesis verification.
+ This should be kept secret.
- `FOUNDATION_PK` - The foundation public key to use for the initial disbursement to the foundation.
- `NETWORK_ROYALTIES_PK` - The foundation public key to use for receiving network royalties.
- `PAYMENT_FORWARD_PK` - The public key to use for payment forwarding for the beta network collection.
When you start a network there are a few scripts to aid with basic processes:
-- `resources/scripts/claim-genesis.sh` which will claim the genesis tokens for a wallet on a launched network (if you have set up the foundation wallet locally by adding a `client/account_secret` and regenerating the wallet or directly adding the `client/wallet/main_secret_key` itself).
-- `resources/scripts/make-wallets.sh` which if you have a wallet with a balance will create a number of wallets with another balance. eg `resources/scripts/make-wallets.sh 5 1` will make 5 wallets with 1 token.
+- `resources/scripts/claim-genesis.sh` which will claim the genesis tokens for a wallet on a launched network (if you
+ have set up the foundation wallet locally by adding a `client/account_secret` and regenerating the wallet or directly
+ adding the `client/wallet/main_secret_key` itself).
+- `resources/scripts/make-wallets.sh` which if you have a wallet with a balance will create a number of wallets with
+ another balance. eg `resources/scripts/make-wallets.sh 5 1` will make 5 wallets with 1 token.
- `resources/scripts/upload-random-data` will use the existing `client` to upload random data to the network.
- [Client](https://github.com/maidsafe/safe_network/blob/main/sn_client/README.md) The client APIs
@@ -254,7 +259,7 @@ Steps on the offline device/computer with the corresponding hot-wallet:
5. If you still don't have a hot-wallet created, which owns the cash-notes used to build the
unsigned transaction, create it with the corresponding secret key:
- `cargo run --release --bin safe -- wallet create `
+ `cargo run --release --bin safe -- wallet create --key `
6. Use the hot-wallet to sign the built transaction:
`cargo run --release --bin safe -- wallet sign `
@@ -527,7 +532,8 @@ metrics.
## Contributing
-Feel free to clone and modify this project. Pull requests are welcome. You can also visit \*\*[The MaidSafe Forum](https://safenetforum.org/)\*\* for discussion or if you would like to join our
+Feel free to clone and modify this project. Pull requests are welcome. You can also
+visit \*\*[The MaidSafe Forum](https://safenetforum.org/)\*\* for discussion or if you would like to join our
online community.
### Pull Request Process
diff --git a/nat-detection/Cargo.toml b/nat-detection/Cargo.toml
index 3eafb22135..35c4cfab43 100644
--- a/nat-detection/Cargo.toml
+++ b/nat-detection/Cargo.toml
@@ -7,7 +7,7 @@ license = "GPL-3.0"
name = "nat-detection"
readme = "README.md"
repository = "https://github.com/maidsafe/safe_network"
-version = "0.2.0"
+version = "0.2.1"
[[bin]]
name = "nat-detection"
@@ -28,7 +28,7 @@ libp2p = { version = "0.53", features = [
"macros",
"upnp",
] }
-sn_networking = { path = "../sn_networking", version = "0.17.0" }
+sn_networking = { path = "../sn_networking", version = "0.17.1" }
tokio = { version = "1.32.0", features = ["full"] }
tracing = { version = "~0.1.26" }
tracing-log = "0.2.0"
diff --git a/node-launchpad/Cargo.toml b/node-launchpad/Cargo.toml
index e3376cad89..0b42b55a3a 100644
--- a/node-launchpad/Cargo.toml
+++ b/node-launchpad/Cargo.toml
@@ -2,7 +2,7 @@
authors = ["MaidSafe Developers "]
description = "Node Launchpad"
name = "node-launchpad"
-version = "0.3.10"
+version = "0.3.11"
edition = "2021"
license = "GPL-3.0"
homepage = "https://maidsafe.net"
@@ -49,10 +49,10 @@ reqwest = { version = "0.12.2", default-features = false, features = [
serde = { version = "1.0.188", features = ["derive"] }
serde_json = "1.0.107"
signal-hook = "0.3.17"
-sn-node-manager = { version = "0.10.0", path = "../sn_node_manager" }
-sn_peers_acquisition = { version = "0.4.0", path = "../sn_peers_acquisition" }
+sn-node-manager = { version = "0.10.1", path = "../sn_node_manager" }
+sn_peers_acquisition = { version = "0.4.1", path = "../sn_peers_acquisition" }
sn-releases = "~0.2.6"
-sn_service_management = { version = "0.3.8", path = "../sn_service_management" }
+sn_service_management = { version = "0.3.9", path = "../sn_service_management" }
strip-ansi-escapes = "0.2.0"
strum = { version = "0.26.1", features = ["derive"] }
sysinfo = "0.30.12"
diff --git a/release-cycle-info b/release-cycle-info
new file mode 100644
index 0000000000..14b23f7ad5
--- /dev/null
+++ b/release-cycle-info
@@ -0,0 +1,10 @@
+# The release-cycle is the cycle within the current month. It will be 1 or 2. It is set at the
+# beginning of the cycle.
+#
+# The release-cycle-counter is initially set to 1 at the beginning of each cycle, and during the
+# cycle, it will be incremented for each RC build.
+#
+# Both of these numbers are used in the packaged version number, which is a collective version
+# number for all the released binaries.
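+#
+# For example (illustrative only), release-cycle 1 with release-cycle-counter 2 in June 2024 would
+# give a packaged version like 2024.06.1.2.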
+release-cycle: 1
+release-cycle-counter: 1
diff --git a/release-plz.toml b/release-plz.toml
index 41d3c9b000..e896f4f03c 100644
--- a/release-plz.toml
+++ b/release-plz.toml
@@ -1,143 +1,8 @@
[workspace]
-publish = false
-git_release_draft = true
-git_tag_enable = false
-allow_dirty = false
-changelog_update = true
-dependencies_update = false
-git_release_enable = true
-publish_allow_dirty = false
-semver_check = false
-git_release_type = "auto"
-release = false
-
-[[package]]
-name = "sn_auditor"
-changelog_update = true
-git_release_enable = true
-release = true
-
-[[package]]
-name = "sn_build_info"
-changelog_update = true
-git_release_enable = false
-release = true
-
-[[package]]
-name = "sn_cli"
-release = true
-changelog_update = true
-changelog_include = [
- "sn_client",
- "sn_networking",
- "sn_transfers",
- "sn_registers",
- "sn_peers_acquisition",
- "sn_protocol",
-]
-
-[[package]]
-name = "sn_client"
-release = true
-changelog_update = true
-git_release_enable = false
-changelog_include = [
- "sn_networking",
- "sn_transfers",
- "sn_registers",
- "sn_peers_acquisition",
- "sn_protocol",
-]
-
-[[package]]
-name = "sn_faucet"
-release = true
-changelog_update = true
-git_release_enable = false
-
-[[package]]
-name = "sn_logging"
-release = true
-changelog_update = true
-git_release_enable = false
-
-[[package]]
-name = "sn_metrics"
-release = true
-changelog_update = true
-git_release_enable = false
-
-
-[[package]]
-name = "sn_networking"
-release = true
-changelog_update = true
-git_release_enable = false
-
-[[package]]
-name = "sn_node"
-release = true
-changelog_update = true
-changelog_include = [
- "sn_networking",
- "sn_transfers",
- "sn_registers",
- "sn_peers_acquisition",
- "sn_protocol",
-]
-
-[[package]]
-name = "sn-node-manager"
-release = true
-changelog_update = true
-changelog_include = [
- "sn_node",
- "sn_peers_acquisition",
- "sn_protocol",
- "sn_service_management",
- "sn_transfers",
-]
-
-
-[[package]]
-name = "node-launchpad"
-release = true
-changelog_update = true
-git_release_enable = true
-changelog_include = ["sn_node", "sn_protocol", "sn-node-manager"]
-
-[[package]]
-name = "sn_node_rpc_client"
-release = true
-changelog_update = true
-git_release_enable = false
-
-[[package]]
-name = "sn_peers_acquisition"
-release = true
-changelog_update = true
-git_release_enable = false
-
-[[package]]
-name = "sn_protocol"
-release = true
-changelog_update = true
-git_release_enable = false
-
-[[package]]
-name = "sn_registers"
-release = true
-changelog_update = true
-git_release_enable = false
-
-[[package]]
-name = "sn_service_management"
-release = true
-changelog_update = true
+changelog_update = false
git_release_enable = false
+semver_check = false
[[package]]
-name = "sn_transfers"
-release = true
-changelog_update = true
-git_release_enable = false
+name = "test_utils"
+release = false
diff --git a/resources/rc_template.md b/resources/rc_template.md
new file mode 100644
index 0000000000..9ad5f0f5b9
--- /dev/null
+++ b/resources/rc_template.md
@@ -0,0 +1,108 @@
+# Release Candidate YYYY.MM.X.Y
+
+*PLEASE DO NOT EDIT THIS POST.*
+
+It should only be edited by the RC owner, i.e., the original poster.
+
+## Binary Versions
+
+* `faucet` __REPLACE__
+* `nat-detection` __REPLACE__
+* `node-launchpad` __REPLACE__
+* `safe` __REPLACE__
+* `safenode` __REPLACE__
+* `safenode-manager` __REPLACE__
+* `safenodemand` __REPLACE__
+* `sn_auditor` __REPLACE__
+* `safenode_rpc_client` __REPLACE__
+
+## Closed Pull Requests
+
+Replace this with the list of closed PRs since the last version. This is intended to help developers
+with their contributions to the changelog.
+
+## Changelog/Testing Contributions
+
+Please reply with a description of any contributions you made that will be included in this release.
+The list of closed PRs is provided for reference. You can also provide direction or suggestions as
+to how we could test your contributions with community participation.
+
+Use this checklist to track the changelog contributions that are needed.
+
+*Remove people who didn't close any PRs during this cycle.*
+
+Your name will be ticked off when your reply is provided:
+
+- [ ] Anselme
+- [ ] Benno
+- [ ] Chris
+- [ ] Josh
+- [ ] Mazzi
+- [ ] Mick
+- [ ] Qi
+- [ ] Roland
+
+## Contribution Template
+
+To provide your contributions for the changelog, the template below can be used:
+```
+### Network
+
+#### Added
+
+- Provide any added entries or remove the section if it doesn't apply
+
+#### Changed
+
+- Provide any changed entries or remove the section if it doesn't apply
+
+#### Fixed
+
+- Provide any fixed entries or remove the section if it doesn't apply
+
+### Client
+
+#### Added
+
+- Provide any added entries or remove the section if it doesn't apply
+
+#### Changed
+
+- Provide any changed entries or remove the section if it doesn't apply
+
+#### Fixed
+
+- Provide any fixed entries or remove the section if it doesn't apply
+
+### Node Manager
+
+#### Added
+
+- Provide any added entries or remove the section if it doesn't apply
+
+#### Changed
+
+- Provide any changed entries or remove the section if it doesn't apply
+
+#### Fixed
+
+- Provide any fixed entries or remove the section if it doesn't apply
+
+### Launchpad
+
+Remove whole section if it does not apply.
+
+#### Added
+
+- Provide any added entries or remove the section if it doesn't apply
+
+#### Changed
+
+- Provide any changed entries or remove the section if it doesn't apply
+
+#### Fixed
+
+- Provide any fixed entries or remove the section if it doesn't apply
+```
+
+If you have any suggestions for testing your contributions with the community, please add them to
+your reply, or provide them as a separate reply in the thread.
diff --git a/resources/scripts/bump_version.sh b/resources/scripts/bump_version.sh
deleted file mode 100755
index d10b2db7ba..0000000000
--- a/resources/scripts/bump_version.sh
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-# Suffix to append to the version. Passed as an argument to this script.
-SUFFIX="$1"
-
-# Ensure cargo set-version is installed
-if ! cargo set-version --help > /dev/null 2>&1; then
- echo "cargo set-version command not found."
- echo "Please install cargo-edit with the command: cargo install cargo-edit --features vendored-openssl"
- exit 1
-fi
-
-release-plz update 2>&1 | tee bump_version_output
-
-crates_bumped=()
-while IFS= read -r line; do
- name=$(echo "$line" | awk -F"\`" '{print $2}')
- version=$(echo "$line" | awk -F"-> " '{print $2}')
- crates_bumped+=("${name}-v${version}")
-done < <(cat bump_version_output | grep "^\*")
-
-
-if [[ -z "$SUFFIX" ]]; then
- echo "Removing any existing suffixes and bumping versions to stable."
- for crate in $(cargo metadata --no-deps --format-version 1 | jq -r '.packages[] | .name'); do
- version=$(cargo metadata --no-deps --format-version 1 | jq -r --arg crate_name "$crate" '.packages[] | select(.name==$crate_name) | .version')
- new_version=$(echo "$version" | sed -E 's/(-alpha\.[0-9]+|-beta\.[0-9]+)$//')
- if [[ "$version" != "$new_version" ]]; then
- echo "Removing suffix from $crate, setting version to $new_version"
- cargo set-version -p $crate $new_version
- crates_bumped+=("${crate}-v${new_version}")
- fi
- done
-fi
-
-
-if [[ -n "$SUFFIX" ]]; then
- echo "We are releasing to the $SUFFIX channel"
- echo "Versions with $SUFFIX are not supported by release-plz"
- echo "Reverting changes by release-plz"
- git checkout -- .
-fi
-
-commit_message="chore(release): "
-for crate in "${crates_bumped[@]}"; do
- # Extract the crate name and version in a cross-platform way
- crate_name=$(echo "$crate" | sed -E 's/-v.*$//')
- version=$(echo "$crate" | sed -E 's/^.*-v(.*)$/\1/')
- new_version=$version
-
- echo "----------------------------------------------------------"
- echo "Processing $crate_name"
- echo "----------------------------------------------------------"
- if [[ -n "$SUFFIX" ]]; then
- # if we're already in a release channel, reapplying the suffix will reset things.
- if [[ "$version" == *"-alpha."* || "$version" == *"-beta."* ]]; then
- base_version=$(echo "$version" | sed -E 's/(-alpha\.[0-9]+|-beta\.[0-9]+)$//')
- pre_release_identifier=$(echo "$version" | sed -E 's/.*-(alpha|beta)\.([0-9]+)$/\2/')
- new_version="${base_version}-${SUFFIX}.$pre_release_identifier"
- else
- new_version="${version}-${SUFFIX}.0"
- fi
- else
- # For main release, strip any alpha or beta suffix from the version
- new_version=$(echo "$version" | sed -E 's/(-alpha\.[0-9]+|-beta\.[0-9]+)$//')
- fi
-
- echo "Using set-version to apply $new_version to $crate_name"
- cargo set-version -p $crate_name $new_version
- commit_message="${commit_message}${crate_name}-v$new_version/" # append crate to commit message
-done
-commit_message=${commit_message%/} # strip off trailing '/' character
-
-git add --all
-git commit -m "$commit_message"
-echo "Generated release commit: $commit_message"
diff --git a/resources/scripts/bump_version_for_rc.sh b/resources/scripts/bump_version_for_rc.sh
new file mode 100755
index 0000000000..655345e199
--- /dev/null
+++ b/resources/scripts/bump_version_for_rc.sh
@@ -0,0 +1,91 @@
+#!/usr/bin/env bash
+
+set -e
+
+# This script must run from the root of the repository.
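+#
+# Usage: ./resources/scripts/bump_version_for_rc.sh [pre-release-identifier]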
+
+# This allows for, e.g., "alpha" to be passed when calling the script.
+pre_release_identifier=${1:-"rc"}
+
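+# Collect every workspace member (directory name) from the root Cargo.toml, stripping quotes and commas.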
+all_crates=($(awk '/members = \[/{flag=1; next} /\]/{flag=0} flag {gsub(/[",]/, ""); print $0}' \
+ Cargo.toml))
+
+if ! cargo set-version --help > /dev/null 2>&1; then
+ echo "cargo set-version not found"
+ echo "Please install cargo-edit: cargo install cargo-edit --features vendored-openssl"
+ exit 1
+fi
+
+declare -A crates_bumped
+crates_bumped_with_version=()
+
+release-plz update 2>&1 | tee bump_version_output
+
+while IFS= read -r line; do
+ # Sometimes this list can include crates that were not bumped. The presence of "->" indicates
+ # whether a bump occurred.
+ if [[ "$line" == *"->"* ]]; then
+ name=$(echo "$line" | awk -F"\`" '{print $2}')
+ version=$(echo "$line" | awk -F"-> " '{print $2}')
+ crates_bumped["$name"]=1
+ crates_bumped_with_version+=("${name}-v${version}")
+ fi
+done < <(cat bump_version_output | grep "^\*")
+
+# The bumps performed by release-plz need to be reverted, because going to an `rc` pre-release
+# specifier is considered a downgrade, so `set-version` won't do it. We will take the bumps that
+# release-plz provided and use `set-version` to put the `rc` specifier on them.
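+#
+# For example, if release-plz bumped sn_cli from 0.93.9 to 0.94.0, the file changes are reverted and
+# `set-version` then applies 0.94.0-rc.1.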
+git checkout -- .
+
+for crate in "${crates_bumped_with_version[@]}"; do
+ name=$(echo "$crate" | sed -E 's/-v.*$//')
+ version=$(echo "$crate" | sed -E 's/^.*-v(.*)$/\1/')
+ new_version="${version}-${pre_release_identifier}.1"
+ echo "Setting $crate to $new_version"
+ cargo set-version --package $name $new_version
+done
+
+echo "Now performing safety bumps for any crates not bumped by release-plz..."
+for dir in "${all_crates[@]}"; do
+ # The node manager is an annoying special case where the directory and crate name don't match.
+ crate="$dir"
+ if [[ $dir == "sn_node_manager" ]]; then
+ crate="sn-node-manager"
+ fi
+
+ if [[ -z "${crates_bumped[$crate]}" ]]; then
+ echo "==============================="
+ echo " Safety bump for $crate"
+ echo "==============================="
+ echo "release-plz did not bump $crate"
+ # Read the current version from the manifest, using the directory name for the path.
+ version=$(grep "^version" < $dir/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')
+ echo "Current version is $version"
+
+ IFS='.' read -r major minor patch <<< "$version"
+ patch=$((patch + 1))
+ new_version="${major}.${minor}.${patch}-${pre_release_identifier}.1"
+
+ echo "Safety bump to $new_version"
+ cargo set-version --package $crate $new_version
+ fi
+done
+
+echo "======================"
+echo " New Crate Versions "
+echo "======================"
+for crate in "${all_crates[@]}"; do
+ version=$(grep "^version" < $crate/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')
+ echo "$crate: $version"
+done
+
+echo "======================="
+echo " New Binary Versions "
+echo "======================="
+echo "faucet: $(grep "^version" < sn_faucet/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')"
+echo "nat-detection: $(grep "^version" < nat-detection/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')"
+echo "node-launchpad: $(grep "^version" < node-launchpad/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')"
+echo "safe: $(grep "^version" < sn_cli/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')"
+echo "safenode: $(grep "^version" < sn_node/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')"
+echo "safenode-manager: $(grep "^version" < sn_node_manager/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')"
+echo "safenode_rpc_client: $(grep "^version" < sn_node_rpc_client/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')"
+echo "safenodemand: $(grep "^version" < sn_node_manager/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')"
+echo "sn_auditor: $(grep "^version" < sn_auditor/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')"
diff --git a/resources/scripts/dag-user-comparator.sh b/resources/scripts/dag-user-comparator.sh
index c201032abd..211ba462c9 100755
--- a/resources/scripts/dag-user-comparator.sh
+++ b/resources/scripts/dag-user-comparator.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
# Check if the correct number of arguments is provided
if [ "$#" -ne 2 ]; then
diff --git a/resources/scripts/dag-user-sync.sh b/resources/scripts/dag-user-sync.sh
index 56c44c6faf..9f54af84ca 100755
--- a/resources/scripts/dag-user-sync.sh
+++ b/resources/scripts/dag-user-sync.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
# Check if the correct number of arguments is provided
if [ "$#" -ne 2 ]; then
diff --git a/resources/scripts/list-safe-network-closed-prs.py b/resources/scripts/list-safe-network-closed-prs.py
new file mode 100755
index 0000000000..6355703c43
--- /dev/null
+++ b/resources/scripts/list-safe-network-closed-prs.py
@@ -0,0 +1,111 @@
+#!/usr/bin/env python
+
+import os
+import sys
+from collections import defaultdict
+from github import Github
+
+def has_breaking_change(commits):
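+ # A PR is considered breaking if any commit subject line contains '!' (conventional commit style)
+ # or any commit message mentions 'BREAKING CHANGE'.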
+ for commit in commits:
+ commit_message = commit.commit.message
+ if '!' in commit_message.split('\n')[0] or 'BREAKING CHANGE' in commit_message:
+ return True
+ return False
+
+
+def main(last_released_pr_number):
+ token = os.getenv("GITHUB_PAT_SAFE_NETWORK_PR_LIST")
+ if not token:
+ raise Exception("The GITHUB_PAT_SAFE_NETWORK_PR_LIST environment variable must be set")
+
+ g = Github(token)
+ repo = g.get_repo("maidsafe/safe_network")
+
+ last_released_pr = repo.get_pull(last_released_pr_number)
+ if not last_released_pr:
+ raise Exception(f"Could not retrieve PR #{last_released_pr_number}")
+ last_release_date = last_released_pr.closed_at
+ if not last_release_date:
+ raise Exception(f"PR #{last_released_pr_number} has not been merged")
+
+ print("Base comparison PR:")
+ print(f"#{last_released_pr.number}: {last_released_pr.title} closed at {last_released_pr.closed_at}")
+ print()
+
+ pulls = repo.get_pulls(state="closed", sort="updated", direction="desc")
+ filtered_pulls = []
+ for pr in pulls:
+ if not pr.closed_at:
+ print(f"PR {pr.number} is not closed yet")
+ continue
+ print(f"Processing PR {pr.number}...")
+ if pr.closed_at <= last_release_date:
+ break
+ if pr.merged_at:
+ commits = pr.get_commits()
+ breaking = has_breaking_change(commits)
+ filtered_pulls.append({
+ "number": pr.number,
+ "title": pr.title,
+ "author": pr.user.login,
+ "closed_at": pr.closed_at,
+ "breaking": breaking,
+ "commits": commits
+ })
+ filtered_pulls.sort(key=lambda pr: pr["closed_at"])
+
+ print("Flat list:")
+ for pr in filtered_pulls:
+ closed_date = pr["closed_at"].date()
+ breaking_text = "[BREAKING]" if pr["breaking"] else ""
+ print(f"{closed_date} #{pr['number']} -- {pr['title']} [@{pr['author']}] {breaking_text}")
+ print("Flat list markdown:")
+ for pr in filtered_pulls:
+ pr_number = pr["number"]
+ closed_date = pr["closed_at"].date()
+ breaking_text = "[BREAKING]" if pr["breaking"] else ""
+ print(f"{closed_date} [#{pr_number}](https://github.com/maidsafe/safe_network/pull/{pr_number}) -- {pr['title']} [@{pr['author']}] {breaking_text}")
+
+ print()
+ grouped_pulls = defaultdict(list)
+ for pr in filtered_pulls:
+ grouped_pulls[pr["author"]].append(pr)
+
+ print("Grouped by author:")
+ for author, prs in grouped_pulls.items():
+ print(f"@{author}")
+ for pr in prs:
+ closed_date = pr["closed_at"].date()
+ breaking_text = "[BREAKING]" if pr["breaking"] else ""
+ print(f" {closed_date} #{pr['number']} -- {pr['title']} {breaking_text}")
+ print()
+
+ print("Grouped by author with commits:")
+ for author, prs in grouped_pulls.items():
+ print(f"@{author}")
+ for pr in prs:
+ closed_date = pr["closed_at"].date()
+ breaking_text = "[BREAKING]" if pr["breaking"] else ""
+ print(f" {closed_date} #{pr['number']} -- {pr['title']} {breaking_text}")
+ for commit in pr["commits"]:
+ # Split outside the f-string: a backslash in an f-string expression is a syntax error before Python 3.12.
+ first_line = commit.commit.message.split("\n")[0]
+ print(f" {first_line}")
+ print()
+
+ print("Grouped by author markdown:")
+ for author, prs in grouped_pulls.items():
+ print(f"@{author}")
+ for pr in prs:
+ pr_number = pr["number"]
+ closed_date = pr["closed_at"].date()
+ breaking_text = "[BREAKING]" if pr["breaking"] else ""
+ print(f" {closed_date} [#{pr_number}](https://github.com/maidsafe/safe_network/pull/{pr_number}) -- {pr['title']} {breaking_text}")
+ print()
+
+
+if __name__ == "__main__":
+ if len(sys.argv) != 2:
+ print("Usage: python script.py ")
+ sys.exit(1)
+
+ last_release_pr_number = int(sys.argv[1])
+ main(last_release_pr_number)
diff --git a/resources/scripts/make-wallets.sh b/resources/scripts/make-wallets.sh
index aa80a21f1f..b886fcc7a7 100755
--- a/resources/scripts/make-wallets.sh
+++ b/resources/scripts/make-wallets.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
# Function to print a message in a box
print_in_box() {
diff --git a/resources/scripts/print-versions.sh b/resources/scripts/print-versions.sh
new file mode 100755
index 0000000000..b2a75fdb49
--- /dev/null
+++ b/resources/scripts/print-versions.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+
+set -e
+
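+# Print the version of every workspace crate, followed by the version of each released binary.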
+all_crates=($(awk '/members = \[/{flag=1; next} /\]/{flag=0} flag {gsub(/[",]/, ""); print $0}' \
+ Cargo.toml))
+
+echo "=================="
+echo " Crate Versions "
+echo "=================="
+for crate in "${all_crates[@]}"; do
+ version=$(grep "^version" < $crate/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')
+ echo "$crate: $version"
+done
+
+echo "==================="
+echo " Binary Versions "
+echo "==================="
+echo "faucet: $(grep "^version" < sn_faucet/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')"
+echo "nat-detection: $(grep "^version" < nat-detection/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')"
+echo "node-launchpad: $(grep "^version" < node-launchpad/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')"
+echo "safe: $(grep "^version" < sn_cli/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')"
+echo "safenode: $(grep "^version" < sn_node/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')"
+echo "safenode-manager: $(grep "^version" < sn_node_manager/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')"
+echo "safenode_rpc_client: $(grep "^version" < sn_node_rpc_client/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')"
+echo "safenodemand: $(grep "^version" < sn_node_manager/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')"
+echo "sn_auditor: $(grep "^version" < sn_auditor/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')"
diff --git a/resources/scripts/set-release-channel.sh b/resources/scripts/set-release-channel.sh
deleted file mode 100755
index fe7ac4252e..0000000000
--- a/resources/scripts/set-release-channel.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/bash
-
-# Define the workspace Cargo.toml location (ensure you're in the workspace root)
-WORKSPACE_CARGO_TOML="./Cargo.toml"
-
-# Suffix to append to the version. Passed as an argument to this script.
-SUFFIX="$1"
-
-# Ensure the suffix starts with a dash if it's provided and not empty
-if [ -n "$SUFFIX" ] && [[ "$SUFFIX" != -* ]]; then
- SUFFIX="-$SUFFIX"
-fi
-
-# Check if jq is installed
-if ! command -v jq > /dev/null 2>&1; then
- echo "jq is not installed. Please install jq to continue."
- exit 1
-fi
-
-
-# Check if the 'cargo set-version' command is available
-if ! cargo set-version --help > /dev/null 2>&1; then
- echo "cargo set-version command not found."
- echo "Please install cargo-edit with the command: cargo install cargo-edit --features vendored-openssl"
- exit 1
-fi
-
-# Function to update version for a single crate with suffix
-update_version_with_suffix() {
- local crate=$1
- local suffix=$2
- local current_version=$(cargo metadata --no-deps --format-version 1 | jq -r ".packages[] | select(.name == \"$crate\") | .version")
- # Perform the dry run to get the upgrade message
- local dry_run_output=$(cargo set-version -p $crate --bump patch --dry-run 2>&1)
- # Use grep and awk to extract the new version
- local new_version=$(echo "$dry_run_output" | grep "Upgrading $crate from" | awk '{print $6}')
-
- echo "Updating $crate from $current_version to $new_version with suffix $suffix..."
- cargo set-version -p $crate "$new_version$suffix"
-}
-
-# Function to bump patch version for the whole workspace
-bump_patch_version_for_workspace() {
- echo "Bumping patch version for the whole workspace..."
- cargo set-version --bump patch
-}
-
-# Use cargo metadata and jq to parse workspace members
-MEMBERS=$(cargo metadata --format-version 1 | jq -r '.workspace_members[] | split(" ") | .[0] | split("(") | .[0] | rtrimstr(")")')
-
-if [ -n "$SUFFIX" ]; then
- # Update each crate with the new version and suffix
- for member in $MEMBERS; do
- member_name=$(echo $member | cut -d' ' -f1)
- update_version_with_suffix "$member_name" "$SUFFIX"
- done
-else
- # If no suffix is provided, bump the patch version for the whole workspace
- bump_patch_version_for_workspace
-fi
-
-echo "Version update process completed."
diff --git a/resources/scripts/sync_crates_versions.sh b/resources/scripts/sync_crates_versions.sh
index 49cf55d6cf..bc33ecc53e 100755
--- a/resources/scripts/sync_crates_versions.sh
+++ b/resources/scripts/sync_crates_versions.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
# Parse members from Cargo.toml using tomlq
members=()
diff --git a/resources/scripts/upload-random-data.sh b/resources/scripts/upload-random-data.sh
index e0fe843d7a..dbcf5b06be 100755
--- a/resources/scripts/upload-random-data.sh
+++ b/resources/scripts/upload-random-data.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
# Target rate of 1.5mb/s
diff --git a/sn_auditor/Cargo.toml b/sn_auditor/Cargo.toml
index 7c8497118a..6919e4ae63 100644
--- a/sn_auditor/Cargo.toml
+++ b/sn_auditor/Cargo.toml
@@ -2,7 +2,7 @@
authors = ["MaidSafe Developers "]
description = "Safe Network Auditor"
name = "sn_auditor"
-version = "0.2.2"
+version = "0.2.3"
edition = "2021"
homepage = "https://maidsafe.net"
repository = "https://github.com/maidsafe/safe_network"
@@ -31,9 +31,9 @@ graphviz-rust = { version = "0.9.0", optional = true }
lazy_static = "1.4.0"
serde = { version = "1.0.133", features = ["derive", "rc"] }
serde_json = "1.0.108"
-sn_client = { path = "../sn_client", version = "0.108.0" }
-sn_logging = { path = "../sn_logging", version = "0.2.30" }
-sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.0" }
+sn_client = { path = "../sn_client", version = "0.109.0" }
+sn_logging = { path = "../sn_logging", version = "0.2.31" }
+sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.1" }
tiny_http = { version = "0.12", features = ["ssl-rustls"] }
tracing = { version = "~0.1.26" }
tokio = { version = "1.32.0", features = [
diff --git a/sn_auditor/src/dag_db.rs b/sn_auditor/src/dag_db.rs
index 0ed400dd05..6b522cd4ca 100644
--- a/sn_auditor/src/dag_db.rs
+++ b/sn_auditor/src/dag_db.rs
@@ -17,6 +17,7 @@ use serde::{Deserialize, Serialize};
use sn_client::transfers::{
Hash, NanoTokens, SignedSpend, SpendAddress, DEFAULT_PAYMENT_FORWARD_SK,
};
+use sn_client::transfers::{DEFAULT_NETWORK_ROYALTIES_PK, NETWORK_ROYALTIES_PK};
use sn_client::{Client, SpendDag, SpendDagGet};
use std::collections::{BTreeMap, BTreeSet};
use std::fmt::Write;
@@ -61,6 +62,7 @@ pub struct SpendDagDb {
dag: Arc>,
beta_tracking: Arc>,
beta_participants: Arc>>,
+ utxo_addresses: Arc>>,
encryption_sk: Option,
}
@@ -70,11 +72,17 @@ struct BetaTracking {
processed_spends: u64,
total_accumulated_utxo: u64,
total_on_track_utxo: u64,
+ total_royalties: BTreeMap,
}
/// Map of Discord usernames to their tracked forwarded payments
type ForwardedPayments = BTreeMap>;
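+// Tracked UTXOs partitioned into (large, small) groups by amount; used when reporting tracking performance.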
+type UtxoStatus = (
+ BTreeMap,
+ BTreeMap,
+);
+
#[derive(Clone, Serialize, Deserialize)]
struct SpendJsonResponse {
address: String,
@@ -92,15 +100,19 @@ impl SpendDagDb {
client: Client,
encryption_sk: Option,
) -> Result {
+ if !path.exists() {
+ debug!("Creating directory {path:?}...");
+ std::fs::create_dir_all(&path)?;
+ }
let dag_path = path.join(SPEND_DAG_FILENAME);
info!("Loading DAG from {dag_path:?}...");
let dag = match SpendDag::load_from_file(&dag_path) {
Ok(d) => {
- println!("Found a local spend DAG file");
+ info!("Found a local spend DAG file");
d
}
Err(_) => {
- println!("Found no local spend DAG file, starting from Genesis");
+ info!("Found no local spend DAG file, starting from Genesis");
client.new_dag_with_genesis_only().await?
}
};
@@ -111,6 +123,7 @@ impl SpendDagDb {
dag: Arc::new(RwLock::new(dag)),
beta_tracking: Arc::new(RwLock::new(Default::default())),
beta_participants: Arc::new(RwLock::new(BTreeMap::new())),
+ utxo_addresses: Arc::new(RwLock::new(BTreeMap::new())),
encryption_sk,
})
}
@@ -133,6 +146,7 @@ impl SpendDagDb {
dag: Arc::new(RwLock::new(dag)),
beta_tracking: Arc::new(RwLock::new(Default::default())),
beta_participants: Arc::new(RwLock::new(BTreeMap::new())),
+ utxo_addresses: Arc::new(RwLock::new(BTreeMap::new())),
encryption_sk,
})
}
@@ -204,7 +218,7 @@ impl SpendDagDb {
}
/// Update DAG from Network continuously
- pub async fn continuous_background_update(self) -> Result<()> {
+ pub async fn continuous_background_update(self, storage_dir: PathBuf) -> Result<()> {
let client = if let Some(client) = &self.client {
client.clone()
} else {
@@ -213,26 +227,73 @@ impl SpendDagDb {
// init utxos to fetch
let start_dag = { Arc::clone(&self.dag).read().await.clone() };
- let mut utxo_addresses: BTreeMap = start_dag
- .get_utxos()
- .into_iter()
- .map(|a| (a, Instant::now()))
- .collect();
+ {
+ let mut utxo_addresses = self.utxo_addresses.write().await;
+ for addr in start_dag.get_utxos().iter() {
+ let _ = utxo_addresses.insert(*addr, (Instant::now(), NanoTokens::zero()));
+ }
+ }
// beta rewards processing
let self_clone = self.clone();
let spend_processing = if let Some(sk) = self.encryption_sk.clone() {
- let (tx, mut rx) = tokio::sync::mpsc::channel(SPENDS_PROCESSING_BUFFER_SIZE);
+ let (tx, mut rx) = tokio::sync::mpsc::channel::<(SignedSpend, u64, bool)>(
+ SPENDS_PROCESSING_BUFFER_SIZE,
+ );
tokio::spawn(async move {
- while let Some((spend, utxos_for_further_track)) = rx.recv().await {
- self_clone
- .beta_background_process_spend(spend, &sk, utxos_for_further_track)
- .await;
+ let mut double_spends = BTreeSet::new();
+ let mut detected_spends = BTreeSet::new();
+
+ while let Some((spend, utxos_for_further_track, is_double_spend)) = rx.recv().await
+ {
+ let content_hash = spend.spend.hash();
+
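+ // Persist each newly seen spend to disk (file named by address and content hash) so it can be inspected later.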
+ if detected_spends.insert(content_hash) {
+ let hex_content_hash = content_hash.to_hex();
+ let addr_hex = spend.address().to_hex();
+ let file_name = format!("{addr_hex}_{hex_content_hash}");
+ let spend_copy = spend.clone();
+ let file_path = storage_dir.join(&file_name);
+
+ tokio::spawn(async move {
+ let bytes = spend_copy.to_bytes();
+ match std::fs::write(&file_path, bytes) {
+ Ok(_) => {
+ info!("Wrote spend {file_name} to disk!");
+ }
+ Err(err) => {
+ error!("Error writing spend {file_name}, error: {err:?}");
+ }
+ }
+ });
+ }
+
+ if is_double_spend {
+ self_clone
+ .beta_background_process_double_spend(
+ spend.clone(),
+ &sk,
+ utxos_for_further_track,
+ )
+ .await;
+
+ // For a double spend, only credit the owner the first time.
+ // The performance tracking only counts the received spend and UTXOs once.
+ if double_spends.insert(spend.address()) {
+ self_clone
+ .beta_background_process_spend(spend, &sk, utxos_for_further_track)
+ .await;
+ }
+ } else {
+ self_clone
+ .beta_background_process_spend(spend, &sk, utxos_for_further_track)
+ .await;
+ }
}
});
Some(tx)
} else {
- eprintln!("Foundation secret key not set! Beta rewards will not be processed.");
+ warn!("Foundation secret key not set! Beta rewards will not be processed.");
None
};
@@ -241,12 +302,19 @@ impl SpendDagDb {
loop {
// `addrs_to_get` is always empty when reaching this point
// get expired utxos for the further fetch
- let utxos_to_fetch;
- let now = Instant::now();
- (utxo_addresses, utxos_to_fetch) = utxo_addresses
- .into_iter()
- .partition(|(_address, time_stamp)| *time_stamp > now);
- addrs_to_get.extend(utxos_to_fetch.keys().cloned().collect::>());
+ {
+ let now = Instant::now();
+ let mut utxo_addresses = self.utxo_addresses.write().await;
+ let mut utxos_to_fetch = BTreeSet::new();
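+ // Keep only UTXOs whose reattempt time is still in the future; expired ones are queued in `utxos_to_fetch`.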
+ utxo_addresses.retain(|address, (time_stamp, amount)| {
+ let not_expired = *time_stamp > now;
+ if !not_expired {
+ let _ = utxos_to_fetch.insert((*address, *amount));
+ }
+ not_expired
+ });
+ addrs_to_get.extend(utxos_to_fetch);
+ }
if addrs_to_get.is_empty() {
debug!(
@@ -260,17 +328,23 @@ impl SpendDagDb {
if cfg!(feature = "dag-collection") {
let new_utxos = self
.crawl_and_generate_local_dag(
- addrs_to_get.clone(),
+ addrs_to_get.iter().map(|(addr, _amount)| *addr).collect(),
spend_processing.clone(),
client.clone(),
)
.await;
addrs_to_get.clear();
- utxo_addresses.extend(
- new_utxos
- .into_iter()
- .map(|a| (a, Instant::now() + *UTXO_REATTEMPT_INTERVAL)),
- );
+
+ let mut utxo_addresses = self.utxo_addresses.write().await;
+ utxo_addresses.extend(new_utxos.into_iter().map(|a| {
+ (
+ a,
+ (
+ Instant::now() + *UTXO_REATTEMPT_INTERVAL,
+ NanoTokens::zero(),
+ ),
+ )
+ }));
} else if let Some(sender) = spend_processing.clone() {
if let Ok(reattempt_addrs) = client
.crawl_to_next_utxos(
@@ -280,6 +354,7 @@ impl SpendDagDb {
)
.await
{
+ let mut utxo_addresses = self.utxo_addresses.write().await;
utxo_addresses.extend(reattempt_addrs);
}
} else {
@@ -291,7 +366,7 @@ impl SpendDagDb {
async fn crawl_and_generate_local_dag(
&self,
from: BTreeSet,
- spend_processing: Option>,
+ spend_processing: Option>,
client: Client,
) -> BTreeSet {
// get a copy of the current DAG
@@ -337,17 +412,87 @@ impl SpendDagDb {
beta_tracking.total_accumulated_utxo += spend.spend.spent_tx.outputs.len() as u64;
beta_tracking.total_on_track_utxo += utxos_for_further_track;
+ // Collect royalties
+ let royalty_pubkeys: BTreeSet<_> = spend
+ .spend
+ .network_royalties
+ .iter()
+ .map(|derivation_idx| NETWORK_ROYALTIES_PK.new_unique_pubkey(derivation_idx))
+ .collect();
+ let default_royalty_pubkeys: BTreeSet<_> = spend
+ .spend
+ .network_royalties
+ .iter()
+ .map(|derivation_idx| DEFAULT_NETWORK_ROYALTIES_PK.new_unique_pubkey(derivation_idx))
+ .collect();
+ let mut royalties = BTreeMap::new();
+ for output in spend.spend.spent_tx.outputs.iter() {
+ if default_royalty_pubkeys.contains(&output.unique_pubkey)
+ || royalty_pubkeys.contains(&output.unique_pubkey)
+ {
+ let _ = royalties.insert(
+ SpendAddress::from_unique_pubkey(&output.unique_pubkey),
+ output.amount.as_nano(),
+ );
+ }
+ }
+
+ if royalties.len() > (spend.spend.spent_tx.outputs.len() - 1) / 2 {
+ eprintln!(
+ "Spend: {:?} has incorrect royalty of {}, with amount {} with reason {:?}",
+ spend.spend.unique_pubkey,
+ royalties.len(),
+ spend.spend.amount.as_nano(),
+ spend.spend.reason
+ );
+ eprintln!(
+ "Incorrect royalty spend has {} royalties, {:?} - {:?}",
+ spend.spend.network_royalties.len(),
+ spend.spend.spent_tx.inputs,
+ spend.spend.spent_tx.outputs
+ );
+ warn!(
+ "Spend: {:?} has incorrect royalty of {}, with amount {} with reason {:?}",
+ spend.spend.unique_pubkey,
+ royalties.len(),
+ spend.spend.amount.as_nano(),
+ spend.spend.reason
+ );
+ warn!(
+ "Incorrect royalty spend has {} royalties, {:?} - {:?}",
+ spend.spend.network_royalties.len(),
+ spend.spend.spent_tx.inputs,
+ spend.spend.spent_tx.outputs
+ );
+ }
+ beta_tracking.total_royalties.extend(royalties);
+
+ let addr = spend.address();
+ let amount = spend.spend.amount;
+
// check for beta rewards reason
let user_name_hash = match spend.reason().get_sender_hash(sk) {
Some(n) => n,
None => {
- return;
+ if let Some(default_user_name_hash) =
+ spend.reason().get_sender_hash(&DEFAULT_PAYMENT_FORWARD_SK)
+ {
+ warn!("With default key, got forwarded reward of {amount} at {addr:?}");
+ println!("With default key, got forwarded reward of {amount} at {addr:?}");
+ default_user_name_hash
+ } else {
+ warn!(
+ "Can't decrypt discord_id from {addr:?} with either the compiled key or the default key"
+ );
+ println!(
+ "Can't decrypt discord_id from {addr:?} with either the compiled key or the default key"
+ );
+ return;
+ }
}
};
// add to local rewards
- let addr = spend.address();
- let amount = spend.spend.amount;
let beta_participants_read = self.beta_participants.read().await;
if let Some(user_name) = beta_participants_read.get(&user_name_hash) {
@@ -363,8 +508,8 @@ impl SpendDagDb {
spend.reason().get_sender_hash(&DEFAULT_PAYMENT_FORWARD_SK)
{
if let Some(user_name) = beta_participants_read.get(&default_user_name_hash) {
- warn!("With default key, got forwarded reward {amount} from {user_name} of {amount} at {addr:?}");
- println!("With default key, got forwarded reward {amount} from {user_name} of {amount} at {addr:?}");
+ warn!("With default key, got forwarded reward from {user_name} of {amount} at {addr:?}");
+ println!("With default key, got forwarded reward from {user_name} of {amount} at {addr:?}");
beta_tracking
.forwarded_payments
.entry(user_name.to_owned())
@@ -375,7 +520,6 @@ impl SpendDagDb {
}
warn!("Found a forwarded reward {amount} for an unknown participant at {addr:?}: {user_name_hash:?}");
- println!("Found a forwarded reward {amount} for an unknown participant at {addr:?}: {user_name_hash:?}");
beta_tracking
.forwarded_payments
.entry(format!("unknown participant: {user_name_hash:?}"))
@@ -384,6 +528,41 @@ impl SpendDagDb {
}
}
+ async fn beta_background_process_double_spend(
+ &self,
+ spend: SignedSpend,
+ sk: &SecretKey,
+ _utxos_for_further_track: u64,
+ ) {
+ let user_name_hash = match spend.reason().get_sender_hash(sk) {
+ Some(n) => n,
+ None => {
+ return;
+ }
+ };
+
+ let addr = spend.address();
+
+ let beta_participants_read = self.beta_participants.read().await;
+
+ if let Some(user_name) = beta_participants_read.get(&user_name_hash) {
+ println!("Found double spend from {user_name} at {addr:?}");
+ } else {
+ if let Some(default_user_name_hash) =
+ spend.reason().get_sender_hash(&DEFAULT_PAYMENT_FORWARD_SK)
+ {
+ if let Some(user_name) = beta_participants_read.get(&default_user_name_hash) {
+ println!("Found double spend from {user_name} at {addr:?} using default key");
+ return;
+ }
+ }
+
+ println!(
+ "Found double spend from an unknown participant {user_name_hash:?} at {addr:?}"
+ );
+ }
+ }
+
/// Merge a SpendDag into the current DAG
/// This can be used to enrich our DAG with a DAG from another node to avoid costly computations
/// Make sure to verify the other DAG is trustworthy before calling this function to merge it in
@@ -399,6 +578,8 @@ impl SpendDagDb {
pub(crate) async fn beta_program_json(&self) -> Result<(String, String)> {
let r_handle = Arc::clone(&self.beta_tracking);
let beta_tracking = r_handle.read().await;
+ let r_utxo_handler = Arc::clone(&self.utxo_addresses);
+ let utxo_addresses = r_utxo_handler.read().await;
let mut rewards_output = vec![];
let mut total_hits = 0_u64;
let mut total_amount = 0_u64;
@@ -413,10 +594,49 @@ impl SpendDagDb {
rewards_output.push((participant.clone(), total_rewards));
}
let json = serde_json::to_string_pretty(&rewards_output)?;
- let tracking_performance = format!("processed_spends: {}\ntotal_accumulated_utxo:{}\ntotal_on_track_utxo:{}\nskipped_utxo:{}\nrepeated_utxo:{}\ntotal_hits:{}\ntotal_amount:{}",
+
+ let mut tracking_performance = format!("processed_spends: {}\ntotal_accumulated_utxo:{}\ntotal_on_track_utxo:{}\nskipped_utxo:{}\nrepeated_utxo:{}\ntotal_hits:{}\ntotal_amount:{}",
beta_tracking.processed_spends, beta_tracking.total_accumulated_utxo, beta_tracking.total_on_track_utxo, beta_tracking.total_accumulated_utxo - beta_tracking.total_on_track_utxo,
- beta_tracking.total_on_track_utxo - beta_tracking.processed_spends, total_hits, total_amount
+ utxo_addresses.len(), total_hits, total_amount
);
+
+ tracking_performance = format!(
+ "{tracking_performance}\ntotal_royalties hits: {}",
+ beta_tracking.total_royalties.len()
+ );
+ let total_royalties = beta_tracking.total_royalties.values().sum::();
+ tracking_performance =
+ format!("{tracking_performance}\ntotal_royalties amount: {total_royalties}");
+
+ // A UTXO amount greater than 100000 nanos is considered `change`,
+ // which indicates the `wallet balance`.
+ let (big_utxos, small_utxos): UtxoStatus = utxo_addresses
+ .iter()
+ .partition(|(_address, (_time_stamp, amount))| amount.as_nano() > 100000);
+
+ let total_big_utxo_amount = big_utxos
+ .iter()
+ .map(|(_addr, (_time, amount))| amount.as_nano())
+ .sum::();
+ tracking_performance =
+ format!("{tracking_performance}\ntotal_big_utxo_amount: {total_big_utxo_amount}");
+
+ let total_small_utxo_amount = small_utxos
+ .iter()
+ .map(|(_addr, (_time, amount))| amount.as_nano())
+ .sum::();
+ tracking_performance =
+ format!("{tracking_performance}\ntotal_small_utxo_amount: {total_small_utxo_amount}");
+
+ for (addr, (_time, amount)) in big_utxos.iter() {
+ tracking_performance =
+ format!("{tracking_performance}\n{addr:?}, {}", amount.as_nano());
+ }
+ for (addr, (_time, amount)) in small_utxos.iter() {
+ tracking_performance =
+ format!("{tracking_performance}\n{addr:?}, {}", amount.as_nano());
+ }
+
Ok((json, tracking_performance))
}
diff --git a/sn_auditor/src/main.rs b/sn_auditor/src/main.rs
index e3a58aec6e..bf559d1195 100644
--- a/sn_auditor/src/main.rs
+++ b/sn_auditor/src/main.rs
@@ -101,12 +101,17 @@ async fn main() -> Result<()> {
}
let client = connect_to_network(opt.peers).await?;
+
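+ // Spends fetched by the background DAG update task are persisted under this directory.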
+ let storage_dir = get_auditor_data_dir_path()?.join("fetched_spends");
+ std::fs::create_dir_all(&storage_dir).expect("fetched_spends path to be successfully created.");
+
let dag = initialize_background_spend_dag_collection(
client.clone(),
opt.force_from_genesis,
opt.clean,
beta_participants,
maybe_sk,
+ storage_dir,
)
.await?;
@@ -139,7 +144,7 @@ fn logging_init(
async fn connect_to_network(peers_args: PeersArgs) -> Result {
let bootstrap_peers = peers_args.get_peers().await?;
- println!(
+ info!(
"Connecting to the network with {} bootstrap peers",
bootstrap_peers.len(),
);
@@ -153,7 +158,7 @@ async fn connect_to_network(peers_args: PeersArgs) -> Result {
.await
.map_err(|err| eyre!("Failed to connect to the network: {err}"))?;
- println!("Connected to the network");
+ info!("Connected to the network");
Ok(client)
}
@@ -168,10 +173,10 @@ fn initialize_background_rewards_backup(dag: SpendDagDb) {
BETA_REWARDS_BACKUP_INTERVAL_SECS,
))
.await;
- println!("Backing up beta rewards...");
+ info!("Backing up beta rewards...");
if let Err(e) = dag.backup_rewards().await {
- eprintln!("Failed to backup beta rewards: {e}");
+ error!("Failed to backup beta rewards: {e}");
}
}
});
@@ -186,15 +191,20 @@ async fn initialize_background_spend_dag_collection(
clean: bool,
beta_participants: BTreeSet,
foundation_sk: Option,
+ storage_dir: PathBuf,
) -> Result {
- println!("Initialize spend dag...");
+ info!("Initialize spend dag...");
let path = get_auditor_data_dir_path()?;
+ if !path.exists() {
+ debug!("Creating directory {path:?}...");
+ std::fs::create_dir_all(&path)?;
+ }
// clean the local spend DAG if requested
if clean {
- println!("Cleaning local spend DAG...");
+ info!("Cleaning local spend DAG...");
let dag_file = path.join(dag_db::SPEND_DAG_FILENAME);
- let _ = std::fs::remove_file(dag_file).map_err(|e| eprintln!("Cleanup interrupted: {e}"));
+ let _ = std::fs::remove_file(dag_file).map_err(|e| error!("Cleanup interrupted: {e}"));
}
// initialize the DAG
@@ -205,7 +215,6 @@ async fn initialize_background_spend_dag_collection(
// optional force restart from genesis and merge into our current DAG
// feature guard to prevent a mis-use of opt
if force_from_genesis && cfg!(feature = "dag-collection") {
- println!("Forcing DAG to be updated from genesis...");
warn!("Forcing DAG to be updated from genesis...");
let mut d = dag.clone();
let mut genesis_dag = client
@@ -219,7 +228,7 @@ async fn initialize_background_spend_dag_collection(
let _ = d
.merge(genesis_dag)
.await
- .map_err(|e| eprintln!("Failed to merge from genesis DAG into our DAG: {e}"));
+ .map_err(|e| error!("Failed to merge from genesis DAG into our DAG: {e}"));
});
}
@@ -233,21 +242,21 @@ async fn initialize_background_spend_dag_collection(
panic!("Foundation SK required to initialize beta rewards program");
};
- println!("Initializing beta rewards program tracking...");
+ info!("Initializing beta rewards program tracking...");
if let Err(e) = dag.track_new_beta_participants(beta_participants).await {
- eprintln!("Could not initialize beta rewards: {e}");
+ error!("Could not initialize beta rewards: {e}");
return Err(e);
}
}
// background thread to update DAG
- println!("Starting background DAG collection thread...");
+ info!("Starting background DAG collection thread...");
let d = dag.clone();
tokio::spawn(async move {
let _ = d
- .continuous_background_update()
+ .continuous_background_update(storage_dir)
.await
- .map_err(|e| eprintln!("Failed to update DAG in background thread: {e}"));
+ .map_err(|e| error!("Failed to update DAG in background thread: {e}"));
});
Ok(dag)
@@ -255,9 +264,9 @@ async fn initialize_background_spend_dag_collection(
async fn start_server(dag: SpendDagDb) -> Result<()> {
let server = Server::http("0.0.0.0:4242").expect("Failed to start server");
- println!("Starting dag-query server listening on port 4242...");
+ info!("Starting dag-query server listening on port 4242...");
for request in server.incoming_requests() {
- println!(
+ info!(
"Received request! method: {:?}, url: {:?}",
request.method(),
request.url(),
@@ -313,7 +322,7 @@ fn load_and_update_beta_participants(
.lines()
.map(|line| line.trim().to_string())
.collect::>();
- println!(
+ debug!(
"Tracking beta rewards for the {} discord usernames provided in {:?}",
discord_names.len(),
participants_file
@@ -331,7 +340,7 @@ fn load_and_update_beta_participants(
.lines()
.map(|line| line.trim().to_string())
.collect::>();
- println!(
+ debug!(
"Restoring beta rewards for the {} discord usernames from {:?}",
discord_names.len(),
local_participants_file
@@ -340,7 +349,7 @@ fn load_and_update_beta_participants(
}
// write the beta participants to disk
let _ = std::fs::write(local_participants_file, beta_participants.join("\n"))
- .map_err(|e| eprintln!("Failed to write beta participants to disk: {e}"));
+ .map_err(|e| error!("Failed to write beta participants to disk: {e}"));
Ok(beta_participants.into_iter().collect())
}
diff --git a/sn_build_info/Cargo.toml b/sn_build_info/Cargo.toml
index 43517827bb..8f61fdff57 100644
--- a/sn_build_info/Cargo.toml
+++ b/sn_build_info/Cargo.toml
@@ -8,7 +8,7 @@ license = "GPL-3.0"
name = "sn_build_info"
readme = "README.md"
repository = "https://github.com/maidsafe/safe_network"
-version = "0.1.9"
+version = "0.1.10"
[build-dependencies]
vergen = { version = "8.0.0", features = ["build", "git", "gitcl"] }
diff --git a/sn_cli/Cargo.toml b/sn_cli/Cargo.toml
index cd83b0acf4..ab809fc121 100644
--- a/sn_cli/Cargo.toml
+++ b/sn_cli/Cargo.toml
@@ -8,7 +8,7 @@ license = "GPL-3.0"
name = "sn_cli"
readme = "README.md"
repository = "https://github.com/maidsafe/safe_network"
-version = "0.93.9"
+version = "0.94.0"
[[bin]]
path = "src/bin/main.rs"
@@ -56,12 +56,13 @@ reqwest = { version = "0.12.2", default-features = false, features = [
"rustls-tls-manual-roots",
] }
rmp-serde = "1.1.1"
+rpassword = "7.3.1"
serde = { version = "1.0.133", features = ["derive"] }
-sn_build_info = { path = "../sn_build_info", version = "0.1.9" }
-sn_client = { path = "../sn_client", version = "0.108.0" }
-sn_logging = { path = "../sn_logging", version = "0.2.30" }
-sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.0" }
-sn_protocol = { path = "../sn_protocol", version = "0.17.5" }
+sn_build_info = { path = "../sn_build_info", version = "0.1.10" }
+sn_client = { path = "../sn_client", version = "0.109.0" }
+sn_logging = { path = "../sn_logging", version = "0.2.31" }
+sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.1" }
+sn_protocol = { path = "../sn_protocol", version = "0.17.6" }
tempfile = "3.6.0"
tiny-keccak = "~2.0.2"
tokio = { version = "1.32.0", features = [
@@ -83,7 +84,7 @@ eyre = "0.6.8"
criterion = "0.5.1"
tempfile = "3.6.0"
rand = { version = "~0.8.5", features = ["small_rng"] }
-sn_client = { path = "../sn_client", version = "0.108.0", features = [
+sn_client = { path = "../sn_client", version = "0.109.0", features = [
"test-utils",
] }
diff --git a/sn_cli/src/bin/main.rs b/sn_cli/src/bin/main.rs
index b3a9922d80..2e4546fb28 100644
--- a/sn_cli/src/bin/main.rs
+++ b/sn_cli/src/bin/main.rs
@@ -79,7 +79,8 @@ async fn main() -> Result<()> {
| WalletCmds::Balance { .. }
| WalletCmds::Create { .. }
| WalletCmds::Sign { .. }
- | WalletCmds::Status = cmds
+ | WalletCmds::Status { .. }
+ | WalletCmds::Encrypt { .. } = cmds
{
wallet_cmds_without_client(cmds, &client_data_dir_path).await?;
return Ok(());
@@ -242,5 +243,110 @@ fn get_stdin_response(prompt: &str) -> String {
// consider if error should process::exit(1) here
return "".to_string();
};
- buffer
+ // Remove leading and trailing whitespace
+ buffer.trim().to_owned()
+}
+
+fn get_stdin_password_response(prompt: &str) -> String {
+ rpassword::prompt_password(prompt)
+ .map(|v| v.trim().to_owned())
+ .unwrap_or("".to_string())
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::subcommands::wallet::hot_wallet::{wallet_cmds_without_client, WalletCmds};
+ use crate::subcommands::wallet::WalletApiHelper;
+ use bls::SecretKey;
+ use color_eyre::Result;
+ use sn_client::acc_packet::{load_or_create_mnemonic, secret_key_from_mnemonic};
+ use sn_client::transfers::HotWallet;
+ use std::path::Path;
+
+ fn create_wallet(root_dir: &Path, derivation_passphrase: Option) -> Result {
+ let mnemonic = load_or_create_mnemonic(root_dir)?;
+ let secret_key = secret_key_from_mnemonic(mnemonic, derivation_passphrase)?;
+ let wallet = HotWallet::create_from_key(root_dir, secret_key, None)?;
+ Ok(wallet)
+ }
+
+ #[tokio::test]
+ async fn test_wallet_address_command() {
+ let tmp_dir = tempfile::tempdir().expect("Could not create temp dir");
+ let root_dir = tmp_dir.path().to_path_buf();
+
+ // Create wallet
+ let _wallet = create_wallet(&root_dir, None).expect("Could not create wallet");
+
+ let cmds = WalletCmds::Address;
+
+ let result = wallet_cmds_without_client(&cmds, &root_dir).await;
+ assert!(result.is_ok());
+ }
+
+ #[tokio::test]
+ async fn test_wallet_address_command_should_fail_with_no_existing_wallet() {
+ let tmp_dir = tempfile::tempdir().expect("Could not create temp dir");
+ let client_data_dir = tmp_dir.path().to_path_buf();
+
+ let cmds = WalletCmds::Address;
+
+ // Runs command without a wallet being present, thus should fail
+ let result = wallet_cmds_without_client(&cmds, &client_data_dir).await;
+ assert!(result.is_err());
+ }
+
+ #[tokio::test]
+ async fn test_wallet_create_command() {
+ let tmp_dir = tempfile::tempdir().expect("Could not create temp dir");
+ let root_dir = tmp_dir.path().to_path_buf();
+
+ let cmds = WalletCmds::Create {
+ no_replace: false,
+ no_password: true,
+ key: None,
+ derivation_passphrase: None,
+ password: None,
+ };
+
+ // Run command and hopefully create a wallet
+ let result = wallet_cmds_without_client(&cmds, &root_dir).await;
+ assert!(result.is_ok());
+
+ // Check if valid wallet exists
+ let result = WalletApiHelper::load_from(&root_dir);
+ assert!(result.is_ok());
+ }
+
+ #[tokio::test]
+ async fn test_wallet_create_command_with_hex_key() {
+ let tmp_dir = tempfile::tempdir().expect("Could not create temp dir");
+ let root_dir = tmp_dir.path().to_path_buf();
+
+ let secret_key = SecretKey::random();
+ let secret_key_hex = secret_key.to_hex();
+
+ let cmds = WalletCmds::Create {
+ no_replace: false,
+ no_password: true,
+ key: Some(secret_key_hex),
+ derivation_passphrase: None,
+ password: None,
+ };
+
+ // Run command and hopefully create a wallet
+ let result = wallet_cmds_without_client(&cmds, &root_dir).await;
+ assert!(result.is_ok());
+
+ // Check if valid wallet exists
+ let result = WalletApiHelper::load_from(&root_dir);
+ assert!(result.is_ok());
+
+ if let WalletApiHelper::HotWallet(wallet) = result.expect("No valid wallet found") {
+ // Compare public addresses (secret keys are the same if the public addresses are)
+ assert_eq!(wallet.address().to_hex(), secret_key.public_key().to_hex());
+ } else {
+ panic!("Did not expect a watch only wallet");
+ }
+ }
}
diff --git a/sn_cli/src/bin/subcommands/wallet.rs b/sn_cli/src/bin/subcommands/wallet.rs
index 9a497e38c9..168709305f 100644
--- a/sn_cli/src/bin/subcommands/wallet.rs
+++ b/sn_cli/src/bin/subcommands/wallet.rs
@@ -14,11 +14,12 @@ pub(crate) mod wo_wallet;
use sn_client::transfers::{CashNote, HotWallet, MainPubkey, NanoTokens, WatchOnlyWallet};
use sn_protocol::storage::SpendAddress;
+use crate::get_stdin_password_response;
use color_eyre::Result;
use std::{collections::BTreeSet, io::Read, path::Path};
// TODO: convert this into a Trait part of the wallet APIs.
-enum WalletApiHelper {
+pub(crate) enum WalletApiHelper {
WatchOnlyWallet(WatchOnlyWallet),
HotWallet(HotWallet),
}
@@ -30,10 +31,25 @@ impl WalletApiHelper {
}
pub fn load_from(root_dir: &Path) -> Result {
- let wallet = HotWallet::load_from(root_dir)?;
+ let wallet = if HotWallet::is_encrypted(root_dir) {
+ println!("Wallet is encrypted. It needs a password to unlock.");
+ let password = get_stdin_password_response("Enter password: ");
+ let mut wallet = HotWallet::load_encrypted_from_path(root_dir, password.to_owned())?;
+ // Authenticate so that a user doesn't have to immediately provide the password again
+ wallet.authenticate_with_password(password)?;
+ wallet
+ } else {
+ HotWallet::load_from(root_dir)?
+ };
+
Ok(Self::HotWallet(wallet))
}
+ pub fn encrypt(root_dir: &Path, password: &str) -> Result<()> {
+ HotWallet::encrypt(root_dir, password)?;
+ Ok(())
+ }
+
pub fn balance(&self) -> NanoTokens {
match self {
Self::WatchOnlyWallet(w) => w.balance(),
@@ -41,9 +57,11 @@ impl WalletApiHelper {
}
}
- pub fn status(&mut self) {
+ pub fn status(&mut self) -> Result<()> {
+ self.authenticate()?;
+
match self {
- Self::WatchOnlyWallet(_) => {}
+ Self::WatchOnlyWallet(_) => Ok(()),
Self::HotWallet(w) => {
println!("Unconfirmed spends are:");
for spend in w.unconfirmed_spend_requests().iter() {
@@ -73,6 +91,8 @@ impl WalletApiHelper {
println!("{cnr:?}");
}
}
+
+ Ok(())
}
}
}
@@ -153,6 +173,22 @@ impl WalletApiHelper {
}
Ok(())
}
+
+ /// Authenticate with password for encrypted wallet.
+ fn authenticate(&mut self) -> Result<()> {
+ match self {
+ WalletApiHelper::WatchOnlyWallet(_) => Ok(()),
+ WalletApiHelper::HotWallet(w) => {
+ if w.authenticate().is_err() {
+ let password = get_stdin_password_response("Wallet password: ");
+ w.authenticate_with_password(password)?;
+ Ok(())
+ } else {
+ Ok(())
+ }
+ }
+ }
+ }
}
fn watch_only_wallet_from_pk(main_pk: MainPubkey, root_dir: &Path) -> Result {
diff --git a/sn_cli/src/bin/subcommands/wallet/hot_wallet.rs b/sn_cli/src/bin/subcommands/wallet/hot_wallet.rs
index db51964612..8a9dd98b84 100644
--- a/sn_cli/src/bin/subcommands/wallet/hot_wallet.rs
+++ b/sn_cli/src/bin/subcommands/wallet/hot_wallet.rs
@@ -11,8 +11,9 @@ use super::{
helpers::{get_faucet, receive},
WalletApiHelper,
};
-use crate::get_stdin_response;
+use crate::{get_stdin_password_response, get_stdin_response};
+use autonomi::utils::is_valid_key_hex;
use bls::SecretKey;
use clap::Parser;
use color_eyre::{
@@ -20,6 +21,7 @@ use color_eyre::{
Result,
};
use dialoguer::Confirm;
+use sn_client::acc_packet::{load_or_create_mnemonic, secret_key_from_mnemonic};
use sn_client::transfers::{
HotWallet, MainPubkey, MainSecretKey, NanoTokens, Transfer, TransferError, UnsignedTransfer,
WalletError,
@@ -34,13 +36,7 @@ use std::{path::Path, str::FromStr};
#[derive(Parser, Debug)]
pub enum WalletCmds {
/// Print the wallet address.
- Address {
- /// Optional passphrase to protect the mnemonic,
- /// it's not the source of the entropy for the mnemonic generation.
- /// The mnemonic+passphrase will be the seed. See detail at
- /// ``
- passphrase: Option,
- },
+ Address,
/// Print the wallet balance.
Balance {
/// Instead of checking CLI local wallet balance, the PeerId of a node can be used
@@ -49,11 +45,26 @@ pub enum WalletCmds {
#[clap(long)]
peer_id: Vec,
},
- /// Create a hot wallet from the given (hex-encoded) key.
+ /// Create a hot wallet.
Create {
- /// Hex-encoded main secret key.
- #[clap(name = "key")]
- key: String,
+ /// Optional flag to not replace existing wallet.
+ #[clap(long, action)]
+ no_replace: bool,
+ /// Optional flag to not add a password.
+ #[clap(long, action)]
+ no_password: bool,
+ /// Optional hex-encoded main secret key.
+ #[clap(long, short, name = "key")]
+ key: Option,
+ /// Optional derivation passphrase to protect the mnemonic,
+ /// it's not the source of the entropy for the mnemonic generation.
+ /// The mnemonic+passphrase will be the seed. See detail at
+ /// ``
+ #[clap(long, short, name = "derivation")]
+ derivation_passphrase: Option<String>,
+ /// Optional password to encrypt the wallet with.
+ #[clap(long, short)]
+ password: Option<String>,
},
/// Get tokens from a faucet.
GetFaucet {
@@ -129,19 +140,18 @@ pub enum WalletCmds {
sk_str: Option<String>,
},
Status,
+ /// Encrypt wallet with a password.
+ Encrypt,
}
pub(crate) async fn wallet_cmds_without_client(cmds: &WalletCmds, root_dir: &Path) -> Result<()> {
match cmds {
- WalletCmds::Address {
- passphrase: derivation_passphrase,
- } => {
- let wallet = load_account_wallet_or_create_with_mnemonic(
- root_dir,
- derivation_passphrase.as_deref(),
- )?;
-
- println!("{:?}", wallet.address());
+ WalletCmds::Address => {
+ let wallet = WalletApiHelper::load_from(root_dir)?;
+ match wallet {
+ WalletApiHelper::WatchOnlyWallet(w) => println!("{:?}", w.address()),
+ WalletApiHelper::HotWallet(w) => println!("{:?}", w.address()),
+ }
Ok(())
}
WalletCmds::Balance { peer_id } => {
@@ -162,21 +172,48 @@ pub(crate) async fn wallet_cmds_without_client(cmds: &WalletCmds, root_dir: &Pat
}
Ok(())
}
- WalletCmds::Create { key } => {
- let sk = SecretKey::from_hex(key)
- .map_err(|err| eyre!("Failed to parse hex-encoded SK: {err:?}"))?;
- let main_sk = MainSecretKey::new(sk);
- // TODO: encrypt wallet file
- // check for existing wallet with balance
- let existing_balance = match WalletApiHelper::load_from(root_dir) {
- Ok(wallet) => wallet.balance(),
- Err(_) => NanoTokens::zero(),
- };
- // if about to overwrite an existing balance, confirm operation
- if existing_balance > NanoTokens::zero() {
- let prompt = format!("Existing wallet has balance of {existing_balance}. Replace with new wallet? [y/N]");
- let response = get_stdin_response(&prompt);
- if response.trim() != "y" {
+ WalletCmds::Create {
+ no_replace,
+ no_password,
+ key,
+ derivation_passphrase,
+ password,
+ } => {
+ let mut wallet_already_exists = false;
+ if key.is_some() && derivation_passphrase.is_some() {
+ return Err(eyre!(
+ "Only one of `--key` or `--derivation` may be specified"
+ ));
+ }
+ if *no_password && password.is_some() {
+ return Err(eyre!(
+ "Only one of `--no-password` or `--password` may be specified"
+ ));
+ }
+ if let Some(key) = key {
+ // Check if key is valid
+ // Doing this early to avoid stashing an existing wallet while the provided key is invalid
+ if !is_valid_key_hex(key) {
+ return Err(eyre!("Please provide a valid secret key in hex format. It must be 64 characters long."));
+ }
+ }
+ // Check for existing wallet
+ if HotWallet::is_encrypted(root_dir) {
+ wallet_already_exists = true;
+ println!("Existing encrypted wallet found.");
+ } else if let Ok(existing_wallet) = WalletApiHelper::load_from(root_dir) {
+ wallet_already_exists = true;
+ let balance = existing_wallet.balance();
+ println!("Existing wallet found with balance of {balance}");
+ }
+ // If a wallet already exists, ask the user if they want to replace it
+ if wallet_already_exists {
+ let response = if *no_replace {
+ "n".to_string()
+ } else {
+ get_stdin_response("Replace existing wallet with new wallet? [y/N]")
+ };
+ if response != "y" {
// Do nothing, return ok and prevent any further operations
println!("Exiting without creating new wallet");
return Ok(());
@@ -185,9 +222,26 @@ pub(crate) async fn wallet_cmds_without_client(cmds: &WalletCmds, root_dir: &Pat
let new_location = HotWallet::stash(root_dir)?;
println!("Old wallet stored at {}", new_location.display());
}
+ let main_sk = if let Some(key) = key {
+ let sk = SecretKey::from_hex(key)
+ .map_err(|err| eyre!("Failed to parse hex-encoded SK: {err:?}"))?;
+ MainSecretKey::new(sk)
+ } else {
+ // If no key is specified, use the mnemonic
+ let mnemonic = load_or_create_mnemonic(root_dir)?;
+ secret_key_from_mnemonic(mnemonic, derivation_passphrase.to_owned())?
+ };
+ // Ask user if they want to encrypt the wallet with a password
+ let password = if *no_password {
+ None
+ } else if let Some(password) = password {
+ Some(password.to_owned())
+ } else {
+ request_password(false)
+ };
// Create the new wallet with the new key
let main_pubkey = main_sk.main_pubkey();
- let local_wallet = HotWallet::create_from_key(root_dir, main_sk)?;
+ let local_wallet = HotWallet::create_from_key(root_dir, main_sk, password)?;
let balance = local_wallet.balance();
println!(
"Hot Wallet created (balance {balance}) for main public key: {main_pubkey:?}."
@@ -198,7 +252,16 @@ pub(crate) async fn wallet_cmds_without_client(cmds: &WalletCmds, root_dir: &Pat
WalletCmds::Status => {
let mut wallet = WalletApiHelper::load_from(root_dir)?;
println!("{}", wallet.balance());
- wallet.status();
+ wallet.status()?;
+ Ok(())
+ }
+ WalletCmds::Encrypt => {
+ println!("Encrypt your wallet with a password. WARNING: If you forget your password, you will lose access to your wallet!");
+ // Ask user for a new password to encrypt the wallet with
+ if let Some(password) = request_password(true) {
+ WalletApiHelper::encrypt(root_dir, &password)?;
+ }
+ println!("Wallet successfully encrypted.");
Ok(())
}
cmd => Err(eyre!("{cmd:?} requires us to be connected to the Network")),
@@ -371,3 +434,45 @@ fn sign_transaction(tx: &str, root_dir: &Path, force: bool) -> Result<()> {
Ok(())
}
+
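+/// Prompts for a password on stdin and asks for confirmation. Returns `None` when `required` is false and the input is left empty.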
+fn request_password(required: bool) -> Option<String> {
+ 'outer: loop {
+ let prompt = if required {
+ "Enter password: "
+ } else {
+ "Enter password (leave empty for none): "
+ };
+
+ let password_response = get_stdin_password_response(prompt);
+
+ if required && password_response.is_empty() {
+ println!("Password is required.");
+ continue 'outer;
+ }
+
+ // If a password is set, request user to repeat it
+ if !password_response.is_empty() {
+ const MAX_RETRIES: u8 = 2;
+ let mut retries = 0u8;
+
+ loop {
+ let repeat_password = get_stdin_password_response("Repeat password: ");
+
+ if repeat_password == password_response {
+ break;
+ } else if retries >= MAX_RETRIES {
+ // User forgot the password, let them reset it again
+ println!("You might have forgotten the password. Please set a new one.");
+ continue 'outer;
+ } else {
+ println!("Passwords do not match.");
+ retries += 1;
+ }
+ }
+
+ break Some(password_response);
+ }
+
+ break None;
+ }
+}
diff --git a/sn_cli/src/lib.rs b/sn_cli/src/lib.rs
index 0a85ce69b3..4d0e77b41e 100644
--- a/sn_cli/src/lib.rs
+++ b/sn_cli/src/lib.rs
@@ -8,6 +8,7 @@
mod acc_packet;
mod files;
+pub mod utils;
pub use acc_packet::AccountPacket;
pub use files::{
diff --git a/sn_cli/src/utils.rs b/sn_cli/src/utils.rs
new file mode 100644
index 0000000000..88cd3c1331
--- /dev/null
+++ b/sn_cli/src/utils.rs
@@ -0,0 +1,12 @@
+// Copyright 2024 MaidSafe.net limited.
+//
+// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
+// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
+// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. Please review the Licences for the specific language governing
+// permissions and limitations relating to use of the SAFE Network Software.
+
+/// Returns whether a hex string is a valid secret key in hex format.
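+///
+/// A small illustrative doctest:
+///
+/// ```
+/// use sn_cli::utils::is_valid_key_hex;
+/// assert!(is_valid_key_hex(&"a".repeat(64)));
+/// assert!(!is_valid_key_hex("not-a-key"));
+/// ```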
+pub fn is_valid_key_hex(hex: &str) -> bool {
+ hex.len() == 64 && hex.chars().all(|c| c.is_ascii_hexdigit())
+}
diff --git a/sn_client/Cargo.toml b/sn_client/Cargo.toml
index 6aa122ca3d..8d0ff54915 100644
--- a/sn_client/Cargo.toml
+++ b/sn_client/Cargo.toml
@@ -8,7 +8,7 @@ license = "GPL-3.0"
name = "sn_client"
readme = "README.md"
repository = "https://github.com/maidsafe/safe_network"
-version = "0.108.0"
+version = "0.109.0"
[features]
default = []
@@ -49,17 +49,17 @@ rayon = "1.8.0"
rmp-serde = "1.1.1"
self_encryption = "~0.29.0"
serde = { version = "1.0.133", features = ["derive", "rc"] }
-sn_networking = { path = "../sn_networking", version = "0.17.0" }
-sn_protocol = { path = "../sn_protocol", version = "0.17.5" }
+sn_networking = { path = "../sn_networking", version = "0.17.1" }
+sn_protocol = { path = "../sn_protocol", version = "0.17.6" }
serde_json = "1.0"
-sn_registers = { path = "../sn_registers", version = "0.3.15" }
-sn_transfers = { path = "../sn_transfers", version = "0.18.8" }
+sn_registers = { path = "../sn_registers", version = "0.3.16" }
+sn_transfers = { path = "../sn_transfers", version = "0.18.9" }
tempfile = "3.6.0"
thiserror = "1.0.23"
tiny-keccak = "~2.0.2"
tracing = { version = "~0.1.26" }
xor_name = "5.0.0"
-sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.0", optional = true }
+sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.1", optional = true }
eyre = { version = "0.6.8", optional = true }
lazy_static = { version = "~1.4.0", optional = true }
@@ -69,8 +69,8 @@ dirs-next = "~2.0.0"
# add rand to libp2p
libp2p-identity = { version = "0.2.7", features = ["rand"] }
sn_client = { path = "../sn_client", features = ["test-utils"] }
-sn_logging = { path = "../sn_logging", version = "0.2.30" }
-sn_registers = { path = "../sn_registers", version = "0.3.15", features = [
+sn_logging = { path = "../sn_logging", version = "0.2.31" }
+sn_registers = { path = "../sn_registers", version = "0.3.16", features = [
"test-utils",
] }
@@ -85,7 +85,7 @@ crate-type = ["cdylib", "rlib"]
getrandom = { version = "0.2.12", features = ["js"] }
wasm-bindgen = "0.2.90"
wasm-bindgen-futures = "0.4.40"
-sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.0" }
+sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.1" }
console_error_panic_hook = "0.1.6"
tracing-wasm = "0.2.1"
wasmtimer = "0.2.0"
diff --git a/sn_client/README.md b/sn_client/README.md
index f9bc76391a..1e3d5a8259 100644
--- a/sn_client/README.md
+++ b/sn_client/README.md
@@ -10,6 +10,7 @@ The `sn_client` library provides the core functionalities for interacting with t
- [Installation](#installation)
- [Usage](#usage)
- [API Calls](#api-calls)
+- [Running Tests](#running-tests)
- [Contributing](#contributing)
- [Conventional Commits](#conventional-commits)
- [License](#license)
@@ -32,6 +33,16 @@ use sn_client::Client;
let client = Client::new(signer, peers, req_response_timeout, custom_concurrency_limit).await?;
```
+## Running Tests
+
+Prerequisites:
+* A running local network. Refer to [`safe_network/README.md`](../README.md) to run a local test network.
+* The `SAFE_PEERS` environment variable set, or running the tests with `--features=local-discovery` (examples are shown below):
+
+```bash
+$ cargo test --package sn_client --release --tests --features=local-discovery
+```
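+
+If the tests are built without the `local-discovery` feature, the local network's peers can instead be supplied through the `SAFE_PEERS` environment variable (the multiaddr below is a placeholder; use one printed by your local network):
+
+```bash
+$ SAFE_PEERS="/ip4/127.0.0.1/udp/12000/quic-v1/p2p/<PEER_ID>" cargo test --package sn_client --release --tests
+```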
+
## Contributing
Please refer to the [Contributing Guidelines](../CONTRIBUTING.md) from the main directory for details on how to contribute to this project.
diff --git a/sn_client/src/acc_packet.rs b/sn_client/src/acc_packet.rs
index 4da6110b6c..2d9570f34a 100644
--- a/sn_client/src/acc_packet.rs
+++ b/sn_client/src/acc_packet.rs
@@ -6,10 +6,10 @@
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.
-use std::path::Path;
-
use super::error::Result;
-use sn_transfers::{get_faucet_data_dir, HotWallet};
+use bip39::Mnemonic;
+use sn_transfers::{get_faucet_data_dir, HotWallet, MainSecretKey};
+use std::path::Path;
pub mod user_secret;
@@ -21,36 +21,50 @@ pub fn load_account_wallet_or_create_with_mnemonic(
derivation_passphrase: Option<&str>,
) -> Result<HotWallet> {
let wallet = HotWallet::load_from(root_dir);
+
match wallet {
Ok(wallet) => Ok(wallet),
Err(error) => {
warn!("Issue loading wallet, creating a new one: {error}");
println!("Issue loading wallet from {root_dir:?}");
- let mnemonic = match user_secret::read_mnemonic_from_disk(root_dir) {
- Ok(mnemonic) => {
- println!("Found existing mnemonic in {root_dir:?}, this will be used for key derivation.");
- info!("Using existing mnemonic from {root_dir:?}");
- mnemonic
- }
- Err(error) => {
- println!("No existing mnemonic found, creating a new one in {root_dir:?}.");
- warn!("No existing mnemonic found in {root_dir:?}, creating new one. Error was: {error:?}");
- let mnemonic = user_secret::random_eip2333_mnemonic()?;
- user_secret::write_mnemonic_to_disk(root_dir, &mnemonic)?;
-
- mnemonic
- }
- };
-
- let passphrase = derivation_passphrase.unwrap_or(DEFAULT_WALLET_DERIVIATION_PASSPHRASE);
-
- let wallet = user_secret::account_wallet_secret_key(mnemonic, passphrase)?;
- Ok(HotWallet::create_from_key(root_dir, wallet)?)
+ let mnemonic = load_or_create_mnemonic(root_dir)?;
+ let wallet =
+ secret_key_from_mnemonic(mnemonic, derivation_passphrase.map(|v| v.to_owned()))?;
+
+ Ok(HotWallet::create_from_key(root_dir, wallet, None)?)
}
}
}
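+
+/// Reads the mnemonic stored under `root_dir`, or creates and persists a new one if none exists.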
+pub fn load_or_create_mnemonic(root_dir: &Path) -> Result<Mnemonic> {
+ match user_secret::read_mnemonic_from_disk(root_dir) {
+ Ok(mnemonic) => {
+ println!(
+ "Found existing mnemonic in {root_dir:?}, this will be used for key derivation."
+ );
+ info!("Using existing mnemonic from {root_dir:?}");
+ Ok(mnemonic)
+ }
+ Err(error) => {
+ println!("No existing mnemonic found, creating a new one in {root_dir:?}.");
+ warn!("No existing mnemonic found in {root_dir:?}, creating new one. Error was: {error:?}");
+ let mnemonic = user_secret::random_eip2333_mnemonic()?;
+ user_secret::write_mnemonic_to_disk(root_dir, &mnemonic)?;
+ Ok(mnemonic)
+ }
+ }
+}
+
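+/// Derives the wallet's main secret key from a mnemonic, falling back to the default derivation passphrase when none is provided.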
+pub fn secret_key_from_mnemonic(
+ mnemonic: Mnemonic,
+ derivation_passphrase: Option<String>,
+) -> Result<MainSecretKey> {
+ let passphrase =
+ derivation_passphrase.unwrap_or(DEFAULT_WALLET_DERIVIATION_PASSPHRASE.to_owned());
+ user_secret::account_wallet_secret_key(mnemonic, &passphrase)
+}
+
pub fn create_faucet_account_and_wallet() -> HotWallet {
let root_dir = get_faucet_data_dir();
diff --git a/sn_client/src/api.rs b/sn_client/src/api.rs
index 0d78dea614..f7aaf74d04 100644
--- a/sn_client/src/api.rs
+++ b/sn_client/src/api.rs
@@ -52,7 +52,7 @@ use tracing::trace;
use xor_name::XorName;
/// The maximum duration the client will wait for a connection to the network before timing out.
-const CONNECTION_TIMEOUT: Duration = Duration::from_secs(30);
+pub const CONNECTION_TIMEOUT: Duration = Duration::from_secs(30);
/// The timeout duration for the client to receive any response from the network.
const INACTIVITY_TIMEOUT: Duration = Duration::from_secs(30);
@@ -67,7 +67,7 @@ impl Client {
///
/// Optionally specify the duration for the connection timeout.
///
- /// Defaults to 180 seconds.
+ /// Defaults to [`CONNECTION_TIMEOUT`].
///
/// # Arguments
/// * 'signer' - [SecretKey]
diff --git a/sn_client/src/audit/dag_crawling.rs b/sn_client/src/audit/dag_crawling.rs
index ddc1ab7aa9..2887b71afd 100644
--- a/sn_client/src/audit/dag_crawling.rs
+++ b/sn_client/src/audit/dag_crawling.rs
@@ -11,7 +11,7 @@ use crate::{Client, Error, SpendDag};
use futures::{future::join_all, StreamExt};
use sn_networking::{GetRecordError, NetworkError};
use sn_transfers::{
- SignedSpend, SpendAddress, SpendReason, WalletError, WalletResult,
+ NanoTokens, SignedSpend, SpendAddress, SpendReason, WalletError, WalletResult,
DEFAULT_NETWORK_ROYALTIES_PK, GENESIS_SPEND_UNIQUE_KEY, NETWORK_ROYALTIES_PK,
};
use std::{
@@ -66,7 +66,7 @@ impl Client {
pub async fn spend_dag_build_from(
&self,
spend_addr: SpendAddress,
- spend_processing: Option<Sender<(SignedSpend, u64)>>,
+ spend_processing: Option<Sender<(SignedSpend, u64, bool)>>,
verify: bool,
) -> WalletResult {
let (tx, mut rx) = tokio::sync::mpsc::channel(SPENDS_PROCESSING_BUFFER_SIZE);
@@ -93,7 +93,7 @@ impl Client {
if let Some(sender) = &spend_processing {
let outputs = spend.spend.spent_tx.outputs.len() as u64;
sender
- .send((spend, outputs))
+ .send((spend, outputs, false))
.await
.map_err(|e| WalletError::SpendProcessing(e.to_string()))?;
}
@@ -138,42 +138,60 @@ impl Client {
/// Return with UTXOs for re-attempt (with insertion time stamp)
pub async fn crawl_to_next_utxos(
&self,
- addrs_to_get: &mut BTreeSet<SpendAddress>,
- sender: Sender<(SignedSpend, u64)>,
+ addrs_to_get: &mut BTreeSet<(SpendAddress, NanoTokens)>,
+ sender: Sender<(SignedSpend, u64, bool)>,
reattempt_interval: Duration,
- ) -> WalletResult<BTreeMap<SpendAddress, Instant>> {
+ ) -> WalletResult<BTreeMap<SpendAddress, (Instant, NanoTokens)>> {
let mut failed_utxos = BTreeMap::new();
let mut tasks = JoinSet::new();
while !addrs_to_get.is_empty() || !tasks.is_empty() {
while tasks.len() < 32 && !addrs_to_get.is_empty() {
- if let Some(addr) = addrs_to_get.pop_first() {
+ if let Some((addr, amount)) = addrs_to_get.pop_first() {
let client_clone = self.clone();
- let _ =
- tasks.spawn(async move { (client_clone.crawl_spend(addr).await, addr) });
+ let _ = tasks
+ .spawn(async move { (client_clone.crawl_spend(addr).await, addr, amount) });
}
}
- if let Some(Ok((result, address))) = tasks.join_next().await {
+ if let Some(Ok((result, address, amount))) = tasks.join_next().await {
match result {
InternalGetNetworkSpend::Spend(spend) => {
let for_further_track = beta_track_analyze_spend(&spend);
let _ = sender
- .send((*spend, for_further_track.len() as u64))
+ .send((*spend, for_further_track.len() as u64, false))
.await
.map_err(|e| WalletError::SpendProcessing(e.to_string()));
addrs_to_get.extend(for_further_track);
}
- InternalGetNetworkSpend::DoubleSpend(_spends) => {
- warn!("Detected double spend regarding {address:?}");
+ InternalGetNetworkSpend::DoubleSpend(spends) => {
+ warn!(
+ "Detected double spend regarding {address:?} - {:?}",
+ spends.len()
+ );
+ for (i, spend) in spends.iter().enumerate() {
+ warn!("double spend entry {i} reason {:?}, amount {}, inputs: {}, outputs: {}, royties: {}, {:?} - {:?}",
+ spend.spend.reason, spend.spend.amount, spend.spend.spent_tx.inputs.len(), spend.spend.spent_tx.outputs.len(),
+ spend.spend.network_royalties.len(), spend.spend.spent_tx.inputs, spend.spend.spent_tx.outputs);
+
+ let for_further_track = beta_track_analyze_spend(spend);
+ addrs_to_get.extend(for_further_track);
+
+ let _ = sender
+ .send((spend.clone(), 0, true))
+ .await
+ .map_err(|e| WalletError::SpendProcessing(e.to_string()));
+ }
}
InternalGetNetworkSpend::NotFound => {
- let _ = failed_utxos.insert(address, Instant::now() + reattempt_interval);
+ let _ = failed_utxos
+ .insert(address, (Instant::now() + reattempt_interval, amount));
}
InternalGetNetworkSpend::Error(e) => {
warn!("Fetching spend {address:?} result in error {e:?}");
// Error of `NotEnoughCopies` could be re-attempted and succeed eventually.
- let _ = failed_utxos.insert(address, Instant::now() + reattempt_interval);
+ let _ = failed_utxos
+ .insert(address, (Instant::now() + reattempt_interval, amount));
}
}
}
@@ -446,7 +464,7 @@ impl Client {
&self,
dag: &mut SpendDag,
utxos: BTreeSet<SpendAddress>,
- spend_processing: Option<Sender<(SignedSpend, u64)>>,
+ spend_processing: Option<Sender<(SignedSpend, u64, bool)>>,
verify: bool,
) {
let main_dag_src = dag.source();
@@ -482,7 +500,7 @@ impl Client {
pub async fn spend_dag_continue_from_utxos(
&self,
dag: &mut SpendDag,
- spend_processing: Option<Sender<(SignedSpend, u64)>>,
+ spend_processing: Option<Sender<(SignedSpend, u64, bool)>>,
verify: bool,
) {
let utxos = dag.get_utxos();
@@ -520,7 +538,7 @@ impl Client {
/// Helper function to analyze spend for beta_tracking optimization.
/// returns the new_utxos that needs to be further tracked.
-fn beta_track_analyze_spend(spend: &SignedSpend) -> BTreeSet<SpendAddress> {
+fn beta_track_analyze_spend(spend: &SignedSpend) -> BTreeSet<(SpendAddress, NanoTokens)> {
// Filter out royalty outputs
let royalty_pubkeys: BTreeSet<_> = spend
.spend
@@ -545,7 +563,10 @@ fn beta_track_analyze_spend(spend: &SignedSpend) -> BTreeSet {
return None;
}
if !royalty_pubkeys.contains(&output.unique_pubkey) {
- Some(SpendAddress::from_unique_pubkey(&output.unique_pubkey))
+ Some((
+ SpendAddress::from_unique_pubkey(&output.unique_pubkey),
+ output.amount,
+ ))
} else {
None
}
diff --git a/sn_client/src/lib.rs b/sn_client/src/lib.rs
index 87d6204a6d..5505008e43 100644
--- a/sn_client/src/lib.rs
+++ b/sn_client/src/lib.rs
@@ -6,11 +6,41 @@
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.
+//! > **Core functionalities for interacting with the SAFE Network**
+//!
+//! The `sn_client` crate is part of the [Safe Network](https://safenetwork.tech/) (SN) and
+//! plays a crucial role in this ecosystem as the client library that allows applications and
+//! users to interact with the Safe Network. It provides a high-level API that simplifies
+//! building applications which leverage the network's capabilities.
+//!
+//! Here are the key functionalities provided by this crate:
+//!
+//! 1. **Network Communication**: It handles communication with the Safe Network, enabling clients to
+//! send and receive messages from the decentralized nodes that make up the network.
+//!
+//! 2. **Data Storage and Retrieval**: It allows clients to store and retrieve data on the Safe Network.
+//! This includes both private and public data, ensuring privacy and security.
+//!
+//! 3. **Authentication and Access Control**: It provides mechanisms for authenticating users and
+//! managing access to data, ensuring that only authorized users can access sensitive information.
+//!
+//! 4. **File Management**: The crate supports operations related to file management, such as uploading,
+//! downloading, and managing files and directories on the Safe Network.
+//!
+//! 5. **Token Management**: It includes functionality for managing Safe Network tokens, which can be
+//! used for various purposes within the network, including paying for storage and services.
+//!
+//! ## Quick links
+//! - [Crates.io](https://crates.io/crates/sn_client)
+//! - [Forum](https://forum.autonomi.community/)
+//! - [Issues on GitHub](https://github.com/maidsafe/safe_network/issues)
+//!
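+//! ## Basic usage
+//!
+//! A minimal connection sketch, mirroring the example in the crate README (the argument
+//! names are placeholders):
+//!
+//! ```ignore
+//! use sn_client::Client;
+//!
+//! let client = Client::new(signer, peers, req_response_timeout, custom_concurrency_limit).await?;
+//! ```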
+
#[macro_use]
extern crate tracing;
pub mod acc_packet;
-mod api;
+pub mod api;
mod audit;
mod chunks;
mod error;
diff --git a/sn_faucet/Cargo.toml b/sn_faucet/Cargo.toml
index 18e85260da..4fd88e3198 100644
--- a/sn_faucet/Cargo.toml
+++ b/sn_faucet/Cargo.toml
@@ -8,7 +8,7 @@ license = "GPL-3.0"
name = "sn_faucet"
readme = "README.md"
repository = "https://github.com/maidsafe/safe_network"
-version = "0.4.30"
+version = "0.4.31"
[features]
default = ["gifting"]
@@ -37,13 +37,13 @@ indicatif = { version = "0.17.5", features = ["tokio"] }
minreq = { version = "2.11.0", features = ["https-rustls"], optional = true }
serde = { version = "1.0.193", features = ["derive"] }
serde_json = "1.0.108"
-sn_build_info = { path = "../sn_build_info", version = "0.1.9" }
-sn_cli = { path = "../sn_cli", version = "0.93.9" }
-sn_client = { path = "../sn_client", version = "0.108.0" }
-sn_logging = { path = "../sn_logging", version = "0.2.30" }
-sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.0" }
-sn_protocol = { path = "../sn_protocol", version = "0.17.5" }
-sn_transfers = { path = "../sn_transfers", version = "0.18.8" }
+sn_build_info = { path = "../sn_build_info", version = "0.1.10" }
+sn_cli = { path = "../sn_cli", version = "0.94.0" }
+sn_client = { path = "../sn_client", version = "0.109.0" }
+sn_logging = { path = "../sn_logging", version = "0.2.31" }
+sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.1" }
+sn_protocol = { path = "../sn_protocol", version = "0.17.6" }
+sn_transfers = { path = "../sn_transfers", version = "0.18.9" }
tokio = { version = "1.32.0", features = ["parking_lot", "rt"] }
tracing = { version = "~0.1.26" }
url = "2.5.0"
diff --git a/sn_logging/Cargo.toml b/sn_logging/Cargo.toml
index 01f93bd42d..66cc6870ce 100644
--- a/sn_logging/Cargo.toml
+++ b/sn_logging/Cargo.toml
@@ -8,7 +8,7 @@ license = "GPL-3.0"
name = "sn_logging"
readme = "README.md"
repository = "https://github.com/maidsafe/safe_network"
-version = "0.2.30"
+version = "0.2.31"
[dependencies]
chrono = "~0.4.19"
diff --git a/sn_metrics/Cargo.toml b/sn_metrics/Cargo.toml
index c34a35f57d..00aef41f04 100644
--- a/sn_metrics/Cargo.toml
+++ b/sn_metrics/Cargo.toml
@@ -8,7 +8,7 @@ license = "GPL-3.0"
name = "sn_metrics"
readme = "README.md"
repository = "https://github.com/maidsafe/safe_network"
-version = "0.1.10"
+version = "0.1.11"
[[bin]]
path = "src/main.rs"
diff --git a/sn_networking/Cargo.toml b/sn_networking/Cargo.toml
index d712ff1b42..d443f763e0 100644
--- a/sn_networking/Cargo.toml
+++ b/sn_networking/Cargo.toml
@@ -8,7 +8,7 @@ license = "GPL-3.0"
name = "sn_networking"
readme = "README.md"
repository = "https://github.com/maidsafe/safe_network"
-version = "0.17.0"
+version = "0.17.1"
[features]
default = ["libp2p/quic"]
@@ -30,7 +30,6 @@ libp2p = { version = "0.53", features = [
"request-response",
"cbor",
"identify",
- "dcutr",
"tcp",
"relay",
"noise",
@@ -54,10 +53,10 @@ rand = { version = "~0.8.5", features = ["small_rng"] }
rayon = "1.8.0"
rmp-serde = "1.1.1"
serde = { version = "1.0.133", features = ["derive", "rc"] }
-sn_build_info = { path="../sn_build_info", version = "0.1.9" }
-sn_protocol = { path = "../sn_protocol", version = "0.17.5" }
-sn_transfers = { path = "../sn_transfers", version = "0.18.8" }
-sn_registers = { path = "../sn_registers", version = "0.3.15" }
+sn_build_info = { path="../sn_build_info", version = "0.1.10" }
+sn_protocol = { path = "../sn_protocol", version = "0.17.6" }
+sn_transfers = { path = "../sn_transfers", version = "0.18.9" }
+sn_registers = { path = "../sn_registers", version = "0.3.16" }
sysinfo = { version = "0.30.8", default-features = false, optional = true }
thiserror = "1.0.23"
tiny-keccak = { version = "~2.0.2", features = ["sha3"] }
@@ -74,6 +73,7 @@ backoff = { version = "0.4.0", features = ["tokio"] }
aes-gcm-siv = "0.11.1"
walkdir = "~2.5.0"
strum = { version = "0.26.2", features = ["derive"] }
+void = "1.0.2"
[dev-dependencies]
bls = { package = "blsttc", version = "8.0.1" }
diff --git a/sn_networking/src/cmd.rs b/sn_networking/src/cmd.rs
index f84c09804e..ba2014827e 100644
--- a/sn_networking/src/cmd.rs
+++ b/sn_networking/src/cmd.rs
@@ -734,7 +734,6 @@ impl SwarmDriver {
SwarmCmd::RecordNodeIssue { peer_id, issue } => {
cmd_string = "RecordNodeIssues";
- let _ = self.bad_nodes_ongoing_verifications.remove(&peer_id);
self.record_node_issue(peer_id, issue);
}
SwarmCmd::IsPeerShunned { target, sender } => {
@@ -829,11 +828,11 @@ impl SwarmDriver {
}
if *is_bad {
- warn!("Cleaning out bad_peer {peer_id:?}");
+ warn!("Cleaning out bad_peer {peer_id:?} and adding it to the blocklist");
if let Some(dead_peer) = self.swarm.behaviour_mut().kademlia.remove_peer(&peer_id) {
self.update_on_peer_removal(*dead_peer.node.key.preimage());
- let _ = self.check_for_change_in_our_close_group();
}
+ self.swarm.behaviour_mut().blocklist.block_peer(peer_id);
if is_new_bad {
self.send_event(NetworkEvent::PeerConsideredAsBad {
diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs
index 1917d53def..157dc785a0 100644
--- a/sn_networking/src/driver.rs
+++ b/sn_networking/src/driver.rs
@@ -56,7 +56,7 @@ use sn_protocol::{
};
use sn_transfers::PaymentQuote;
use std::{
- collections::{btree_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet},
+ collections::{btree_map::Entry, BTreeMap, HashMap, HashSet},
fmt::Debug,
net::SocketAddr,
num::NonZeroUsize,
@@ -184,20 +184,23 @@ pub enum VerificationKind {
},
}
-/// NodeBehaviour struct
+/// The behaviors are polled in the order they are defined.
+/// The first struct member is polled until it returns Poll::Pending before moving on to later members.
+/// Prioritize the behaviors related to connection handling.
#[derive(NetworkBehaviour)]
#[behaviour(to_swarm = "NodeEvent")]
pub(super) struct NodeBehaviour {
- #[cfg(feature = "upnp")]
- pub(super) upnp: libp2p::swarm::behaviour::toggle::Toggle,
- pub(super) request_response: request_response::cbor::Behaviour,
- pub(super) kademlia: kad::Behaviour,
+ pub(super) blocklist:
+ libp2p::allow_block_list::Behaviour<libp2p::allow_block_list::BlockedPeers>,
+ pub(super) identify: libp2p::identify::Behaviour,
#[cfg(feature = "local-discovery")]
pub(super) mdns: mdns::tokio::Behaviour,
- pub(super) identify: libp2p::identify::Behaviour,
- pub(super) dcutr: libp2p::dcutr::Behaviour,
+ #[cfg(feature = "upnp")]
+ pub(super) upnp: libp2p::swarm::behaviour::toggle::Toggle,
pub(super) relay_client: libp2p::relay::client::Behaviour,
pub(super) relay_server: libp2p::relay::Behaviour,
+ pub(super) kademlia: kad::Behaviour,
+ pub(super) request_response: request_response::cbor::Behaviour,
}
#[derive(Debug)]
@@ -549,11 +552,18 @@ impl NetworkBuilder {
.boxed();
let relay_server = {
- let relay_server_cfg = relay::Config::default();
+ let relay_server_cfg = relay::Config {
+ max_reservations: 128, // Amount of peers we are relaying for
+ max_circuits: 1024, // The total amount of relayed connections at any given moment.
+ max_circuits_per_peer: 256, // Amount of relayed connections per peer (both dst and src)
+ circuit_src_rate_limiters: vec![], // No extra rate limiting for now
+ ..Default::default()
+ };
libp2p::relay::Behaviour::new(peer_id, relay_server_cfg)
};
let behaviour = NodeBehaviour {
+ blocklist: libp2p::allow_block_list::Behaviour::default(),
relay_client: relay_behaviour,
relay_server,
#[cfg(feature = "upnp")]
@@ -563,7 +573,6 @@ impl NetworkBuilder {
identify,
#[cfg(feature = "local-discovery")]
mdns,
- dcutr: libp2p::dcutr::Behaviour::new(peer_id),
};
#[cfg(not(target_arch = "wasm32"))]
@@ -577,7 +586,7 @@ impl NetworkBuilder {
let bootstrap = ContinuousBootstrap::new();
let replication_fetcher = ReplicationFetcher::new(peer_id, network_event_sender.clone());
- let mut relay_manager = RelayManager::new(self.initial_peers, peer_id);
+ let mut relay_manager = RelayManager::new(peer_id);
if !is_client {
relay_manager.enable_hole_punching(self.is_behind_home_network);
}
@@ -592,7 +601,6 @@ impl NetworkBuilder {
peers_in_rt: 0,
bootstrap,
relay_manager,
- close_group: Default::default(),
replication_fetcher,
#[cfg(feature = "open-metrics")]
network_metrics,
@@ -611,7 +619,6 @@ impl NetworkBuilder {
handled_times: 0,
hard_disk_write_error: 0,
bad_nodes: Default::default(),
- bad_nodes_ongoing_verifications: Default::default(),
quotes_history: Default::default(),
replication_targets: Default::default(),
};
@@ -635,7 +642,6 @@ pub struct SwarmDriver {
pub(crate) bootstrap: ContinuousBootstrap,
pub(crate) relay_manager: RelayManager,
/// The peers that are closer to our PeerId. Includes self.
- pub(crate) close_group: Vec<PeerId>,
pub(crate) replication_fetcher: ReplicationFetcher,
#[cfg(feature = "open-metrics")]
pub(crate) network_metrics: Option<NetworkMetrics>,
@@ -662,7 +668,6 @@ pub struct SwarmDriver {
handled_times: usize,
pub(crate) hard_disk_write_error: usize,
pub(crate) bad_nodes: BadNodes,
- pub(crate) bad_nodes_ongoing_verifications: BTreeSet<PeerId>,
pub(crate) quotes_history: BTreeMap,
pub(crate) replication_targets: BTreeMap,
}
diff --git a/sn_networking/src/event/kad.rs b/sn_networking/src/event/kad.rs
index 6060eafe81..fac28268f2 100644
--- a/sn_networking/src/event/kad.rs
+++ b/sn_networking/src/event/kad.rs
@@ -263,7 +263,6 @@ impl SwarmDriver {
info!("Evicted old peer on new peer join: {old_peer:?}");
self.update_on_peer_removal(old_peer);
}
- let _ = self.check_for_change_in_our_close_group();
}
kad::Event::InboundRequest {
request: InboundRequest::PutRecord { .. },
diff --git a/sn_networking/src/event/mod.rs b/sn_networking/src/event/mod.rs
index 05d8bafa45..9e0096f0f5 100644
--- a/sn_networking/src/event/mod.rs
+++ b/sn_networking/src/event/mod.rs
@@ -10,7 +10,7 @@ mod kad;
mod request_response;
mod swarm;
-use crate::{driver::SwarmDriver, error::Result, CLOSE_GROUP_SIZE};
+use crate::{driver::SwarmDriver, error::Result};
use core::fmt;
use custom_debug::Debug as CustomDebug;
#[cfg(feature = "local-discovery")]
@@ -27,7 +27,7 @@ use sn_protocol::{
};
use sn_transfers::PaymentQuote;
use std::{
- collections::{BTreeSet, HashSet},
+ collections::BTreeSet,
fmt::{Debug, Formatter},
};
use tokio::sync::oneshot;
@@ -42,9 +42,9 @@ pub(super) enum NodeEvent {
#[cfg(feature = "local-discovery")]
Mdns(Box<mdns::Event>),
Identify(Box<libp2p::identify::Event>),
- Dcutr(Box<libp2p::dcutr::Event>),
RelayClient(Box<libp2p::relay::client::Event>),
RelayServer(Box<libp2p::relay::Event>),
+ Void(void::Void),
}
#[cfg(feature = "upnp")]
@@ -78,11 +78,6 @@ impl From for NodeEvent {
NodeEvent::Identify(Box::new(event))
}
}
-impl From<libp2p::dcutr::Event> for NodeEvent {
- fn from(event: libp2p::dcutr::Event) -> Self {
- NodeEvent::Dcutr(Box::new(event))
- }
-}
impl From<libp2p::relay::client::Event> for NodeEvent {
fn from(event: libp2p::relay::client::Event) -> Self {
NodeEvent::RelayClient(Box::new(event))
@@ -94,6 +89,12 @@ impl From for NodeEvent {
}
}
+impl From<void::Void> for NodeEvent {
+ fn from(event: void::Void) -> Self {
+ NodeEvent::Void(event)
+ }
+}
+
#[derive(CustomDebug)]
/// Channel to send the `Response` through.
pub enum MsgResponder {
@@ -236,29 +237,6 @@ impl Debug for NetworkEvent {
}
impl SwarmDriver {
- /// Check for changes in our close group
- pub(crate) fn check_for_change_in_our_close_group(&mut self) -> bool {
- // this includes self
- let closest_k_peers = self.get_closest_k_value_local_peers();
-
- let new_closest_peers: Vec<_> =
- closest_k_peers.into_iter().take(CLOSE_GROUP_SIZE).collect();
-
- let old = self.close_group.iter().cloned().collect::<HashSet<_>>();
- let new_members: Vec<_> = new_closest_peers
- .iter()
- .filter(|p| !old.contains(p))
- .collect();
- if !new_members.is_empty() {
- debug!("The close group has been updated. The new members are {new_members:?}");
- debug!("New close group: {new_closest_peers:?}");
- self.close_group = new_closest_peers;
- true
- } else {
- false
- }
- }
-
/// Update state on addition of a peer to the routing table.
pub(crate) fn update_on_peer_addition(&mut self, added_peer: PeerId) {
self.peers_in_rt = self.peers_in_rt.saturating_add(1);
diff --git a/sn_networking/src/event/request_response.rs b/sn_networking/src/event/request_response.rs
index 5e6e50c820..8d6ecfd0a2 100644
--- a/sn_networking/src/event/request_response.rs
+++ b/sn_networking/src/event/request_response.rs
@@ -8,14 +8,10 @@
use crate::{
sort_peers_by_address, MsgResponder, NetworkError, NetworkEvent, SwarmDriver, CLOSE_GROUP_SIZE,
- REPLICATION_PEERS_COUNT,
};
use itertools::Itertools;
-use libp2p::{
- request_response::{self, Message},
- PeerId,
-};
-use rand::{rngs::OsRng, Rng};
+use libp2p::request_response::{self, Message};
+use rand::{rngs::OsRng, thread_rng, Rng};
use sn_protocol::{
messages::{CmdResponse, Request, Response},
storage::RecordType,
@@ -203,6 +199,8 @@ impl SwarmDriver {
return;
}
+ let more_than_one_key = incoming_keys.len() > 1;
+
// On receive a replication_list from a close_group peer, we undertake two tasks:
// 1, For those keys that we don't have:
// fetch them if close enough to us
@@ -210,33 +208,26 @@ impl SwarmDriver {
// start chunk_proof check against a randomly selected chunk type record to the sender
// 3, For those spends that we have that differ in the hash, we fetch the other version
// and update our local copy.
-
- // For fetching, only handle those non-exist and in close range keys
- let keys_to_store =
- self.select_non_existent_records_for_replications(&incoming_keys, &closest_k_peers);
-
- if keys_to_store.is_empty() {
- debug!("Empty keys to store after adding to");
+ #[allow(clippy::mutable_key_type)]
+ let all_keys = self
+ .swarm
+ .behaviour_mut()
+ .kademlia
+ .store_mut()
+ .record_addresses_ref();
+ let keys_to_fetch = self
+ .replication_fetcher
+ .add_keys(holder, incoming_keys, all_keys);
+ if keys_to_fetch.is_empty() {
+ trace!("no waiting keys to fetch from the network");
} else {
- #[allow(clippy::mutable_key_type)]
- let all_keys = self
- .swarm
- .behaviour_mut()
- .kademlia
- .store_mut()
- .record_addresses_ref();
- let keys_to_fetch = self
- .replication_fetcher
- .add_keys(holder, keys_to_store, all_keys);
- if keys_to_fetch.is_empty() {
- trace!("no waiting keys to fetch from the network");
- } else {
- self.send_event(NetworkEvent::KeysToFetchForReplication(keys_to_fetch));
- }
+ self.send_event(NetworkEvent::KeysToFetchForReplication(keys_to_fetch));
}
- // Only trigger chunk_proof check when received a periodical replication request.
- if incoming_keys.len() > 1 {
+ // Only trigger a chunk_proof check for a small percentage of the replication requests
+ let mut rng = thread_rng();
+ // 5% probability
+ if more_than_one_key && rng.gen_bool(0.05) {
let keys_to_verify = self.select_verification_data_candidates(sender);
if keys_to_verify.is_empty() {
@@ -250,84 +241,6 @@ impl SwarmDriver {
}
}
- /// Checks suggested records against what we hold, so we only
- /// enqueue what we do not have
- fn select_non_existent_records_for_replications(
- &mut self,
- incoming_keys: &[(NetworkAddress, RecordType)],
- closest_k_peers: &Vec<PeerId>,
- ) -> Vec<(NetworkAddress, RecordType)> {
- #[allow(clippy::mutable_key_type)]
- let locally_stored_keys = self
- .swarm
- .behaviour_mut()
- .kademlia
- .store_mut()
- .record_addresses_ref();
- let non_existent_keys: Vec<_> = incoming_keys
- .iter()
- .filter(|(addr, record_type)| {
- let key = addr.to_record_key();
- let local = locally_stored_keys.get(&key);
-
- // if we have a local value of matching record_type, we don't need to fetch it
- if let Some((_, local_record_type)) = local {
- let not_same_type = local_record_type != record_type;
- if not_same_type {
- // Shall only happens for Register, or DoubleSpendAttempts
- info!("Record {addr:?} has different type: local {local_record_type:?}, incoming {record_type:?}");
- }
- not_same_type
- } else {
- true
- }
- })
- .collect();
-
- non_existent_keys
- .into_iter()
- .filter_map(|(key, record_type)| {
- if Self::is_in_close_range(&self.self_peer_id, key, closest_k_peers) {
- Some((key.clone(), record_type.clone()))
- } else {
- // Reduce the log level as there will always be around 40% records being
- // out of the close range, as the sender side is using `CLOSE_GROUP_SIZE + 2`
- // to send our replication list to provide addressing margin.
- // Given there will normally be 6 nodes sending such list with interval of 5-10s,
- // this will accumulate to a lot of logs with the increasing records uploaded.
- trace!("not in close range for key {key:?}");
- None
- }
- })
- .collect()
- }
-
- /// A close target doesn't falls into the close peers range:
- /// For example, a node b11111X has an RT: [(1, b1111), (2, b111), (5, b11), (9, b1), (7, b0)]
- /// Then for a target bearing b011111 as prefix, all nodes in (7, b0) are its close_group peers.
- /// Then the node b11111X. But b11111X's close_group peers [(1, b1111), (2, b111), (5, b11)]
- /// are none among target b011111's close range.
- /// Hence, the ilog2 calculation based on close_range cannot cover such case.
- /// And have to sort all nodes to figure out whether self is among the close_group to the target.
- fn is_in_close_range(
- our_peer_id: &PeerId,
- target: &NetworkAddress,
- all_peers: &Vec<PeerId>,
- ) -> bool {
- if all_peers.len() <= REPLICATION_PEERS_COUNT {
- return true;
- }
-
- // Margin of 2 to allow our RT being bit lagging.
- match sort_peers_by_address(all_peers, target, REPLICATION_PEERS_COUNT) {
- Ok(close_group) => close_group.contains(&our_peer_id),
- Err(err) => {
- warn!("Could not get sorted peers for {target:?} with error {err:?}");
- true
- }
- }
- }
-
/// Check among all chunk type records that we have, select those close to the peer,
/// and randomly pick one as the verification candidate.
#[allow(clippy::mutable_key_type)]
diff --git a/sn_networking/src/event/swarm.rs b/sn_networking/src/event/swarm.rs
index 064e02540e..bac78607a4 100644
--- a/sn_networking/src/event/swarm.rs
+++ b/sn_networking/src/event/swarm.rs
@@ -59,18 +59,6 @@ impl SwarmDriver {
event_string = "kad_event";
self.handle_kad_event(kad_event)?;
}
- SwarmEvent::Behaviour(NodeEvent::Dcutr(event)) => {
- #[cfg(feature = "open-metrics")]
- if let Some(metrics) = &self.network_metrics {
- metrics.record(&(*event));
- }
-
- event_string = "dcutr_event";
- info!(
- "Dcutr with remote peer: {:?} is: {:?}",
- event.remote_peer_id, event.result
- );
- }
SwarmEvent::Behaviour(NodeEvent::RelayClient(event)) => {
event_string = "relay_client_event";
@@ -137,6 +125,14 @@ impl SwarmDriver {
our_protocol: IDENTIFY_PROTOCOL_STR.to_string(),
their_protocol: info.protocol_version,
});
+ // Block the peer from any further communication.
+ self.swarm.behaviour_mut().blocklist.block_peer(peer_id);
+ if let Some(dead_peer) =
+ self.swarm.behaviour_mut().kademlia.remove_peer(&peer_id)
+ {
+ error!("Clearing out a protocol mistmatch peer from RT. Something went wrong, we should not have added this peer to RT: {peer_id:?}");
+ self.update_on_peer_removal(*dead_peer.node.key.preimage());
+ }
return Ok(());
}
@@ -179,7 +175,6 @@ impl SwarmDriver {
&peer_id,
&addrs,
&info.protocols,
- &self.bad_nodes,
);
}
@@ -246,32 +241,25 @@ impl SwarmDriver {
// If we are not local, we care only for peers that we dialed and thus are reachable.
if self.local || has_dialed {
- // To reduce the bad_node check resource usage,
- // during the connection establish process, only check cached black_list
- // The periodical check, which involves network queries shall filter
- // out bad_nodes eventually.
- if let Some((_issues, true)) = self.bad_nodes.get(&peer_id) {
- info!("Peer {peer_id:?} is considered as bad, blocking it.");
- } else {
- self.remove_bootstrap_from_full(peer_id);
-
- // Avoid have `direct link format` addrs co-exists with `relay` addr
- if has_relayed {
- addrs.retain(|multiaddr| {
- multiaddr.iter().any(|p| matches!(p, Protocol::P2pCircuit))
- });
- }
+ // A bad node cannot establish a connection with us. So we can add it to the RT directly.
+ self.remove_bootstrap_from_full(peer_id);
+
+ // Avoid having `direct link format` addrs co-exist with `relay` addrs
+ if has_relayed {
+ addrs.retain(|multiaddr| {
+ multiaddr.iter().any(|p| matches!(p, Protocol::P2pCircuit))
+ });
+ }
- trace!(%peer_id, ?addrs, "identify: attempting to add addresses to routing table");
+ trace!(%peer_id, ?addrs, "identify: attempting to add addresses to routing table");
- // Attempt to add the addresses to the routing table.
- for multiaddr in addrs {
- let _routing_update = self
- .swarm
- .behaviour_mut()
- .kademlia
- .add_address(&peer_id, multiaddr);
- }
+ // Attempt to add the addresses to the routing table.
+ for multiaddr in addrs {
+ let _routing_update = self
+ .swarm
+ .behaviour_mut()
+ .kademlia
+ .add_address(&peer_id, multiaddr);
}
}
trace!(
@@ -310,7 +298,7 @@ impl SwarmDriver {
}
SwarmEvent::NewListenAddr {
- address,
+ mut address,
listener_id,
} => {
event_string = "new listen addr";
@@ -327,7 +315,10 @@ impl SwarmDriver {
};
let local_peer_id = *self.swarm.local_peer_id();
- let address = address.with(Protocol::P2p(local_peer_id));
+ // Make sure the address ends with `/p2p/`. In case of relay, `/p2p` is already there.
+ if address.iter().last() != Some(Protocol::P2p(local_peer_id)) {
+ address.push(Protocol::P2p(local_peer_id));
+ }
// Trigger server mode if we're not a client and we should not add our own address if we're behind
// home network.
@@ -381,15 +372,7 @@ impl SwarmDriver {
connection_id,
(peer_id, Instant::now() + Duration::from_secs(60)),
);
- #[cfg(feature = "open-metrics")]
- if let Some(metrics) = &self.network_metrics {
- metrics
- .open_connections
- .set(self.live_connected_peers.len() as i64);
- metrics
- .connected_peers
- .set(self.swarm.connected_peers().count() as i64);
- }
+ self.record_connection_metrics();
if endpoint.is_dialer() {
self.dialed_peers.push(peer_id);
@@ -405,23 +388,7 @@ impl SwarmDriver {
event_string = "ConnectionClosed";
trace!(%peer_id, ?connection_id, ?cause, num_established, "ConnectionClosed: {}", endpoint_str(&endpoint));
let _ = self.live_connected_peers.remove(&connection_id);
- #[cfg(feature = "open-metrics")]
- if let Some(metrics) = &self.network_metrics {
- metrics
- .open_connections
- .set(self.live_connected_peers.len() as i64);
- metrics
- .connected_peers
- .set(self.swarm.connected_peers().count() as i64);
- }
- }
- SwarmEvent::OutgoingConnectionError {
- connection_id,
- peer_id: None,
- error,
- } => {
- event_string = "OutgoingConnErr";
- warn!("OutgoingConnectionError to on {connection_id:?} - {error:?}");
+ self.record_connection_metrics();
}
SwarmEvent::OutgoingConnectionError {
peer_id: Some(failed_peer_id),
@@ -430,6 +397,8 @@ impl SwarmDriver {
} => {
event_string = "OutgoingConnErr";
warn!("OutgoingConnectionError to {failed_peer_id:?} on {connection_id:?} - {error:?}");
+ let _ = self.live_connected_peers.remove(&connection_id);
+ self.record_connection_metrics();
// we need to decide if this was a critical error and the peer should be removed from the routing table
let should_clean_peer = match error {
@@ -538,8 +507,6 @@ impl SwarmDriver {
peer_id: failed_peer_id,
issue: crate::NodeIssue::ConnectionIssue,
})?;
-
- let _ = self.check_for_change_in_our_close_group();
}
}
}
@@ -551,6 +518,8 @@ impl SwarmDriver {
} => {
event_string = "Incoming ConnErr";
error!("IncomingConnectionError from local_addr:?{local_addr:?}, send_back_addr {send_back_addr:?} on {connection_id:?} with error {error:?}");
+ let _ = self.live_connected_peers.remove(&connection_id);
+ self.record_connection_metrics();
}
SwarmEvent::Dialing {
peer_id,
@@ -650,70 +619,72 @@ impl SwarmDriver {
.remove_peer(&to_be_removed_bootstrap);
if let Some(removed_peer) = entry {
self.update_on_peer_removal(*removed_peer.node.key.preimage());
- let _ = self.check_for_change_in_our_close_group();
}
}
}
// Remove outdated connection to a peer if it is not in the RT.
+ // Optionally force remove all the connections for a provided peer.
fn remove_outdated_connections(&mut self) {
- let mut shall_removed = vec![];
-
- let timed_out_connections =
- self.live_connected_peers
- .iter()
- .filter_map(|(connection_id, (peer_id, timeout))| {
- if Instant::now() > *timeout {
- Some((connection_id, peer_id))
- } else {
- None
- }
- });
+ let mut removed_conns = 0;
+ self.live_connected_peers.retain(|connection_id, (peer_id, timeout_time)| {
- for (connection_id, peer_id) in timed_out_connections {
- // Skip if the peer is present in our RT
+ // skip if timeout isn't reached yet
+ if Instant::now() < *timeout_time {
+ return true; // retain peer
+ }
+
+ // ignore if peer is present in our RT
if let Some(kbucket) = self.swarm.behaviour_mut().kademlia.kbucket(*peer_id) {
if kbucket
.iter()
.any(|peer_entry| *peer_id == *peer_entry.node.key.preimage())
{
- continue;
+ return true; // retain peer
}
}
// skip if the peer is a relay server that we're connected to
- if self.relay_manager.keep_alive_peer(peer_id, &self.bad_nodes) {
- continue;
+ if self.relay_manager.keep_alive_peer(peer_id) {
+ return true; // retain peer
}
- shall_removed.push((*connection_id, *peer_id));
+ // actually remove connection
+ let result = self.swarm.close_connection(*connection_id);
+ debug!("Removed outdated connection {connection_id:?} to {peer_id:?} with result: {result:?}");
+
+ removed_conns += 1;
+
+ // do not retain this connection as it has been closed
+ false
+ });
+
+ if removed_conns == 0 {
+ return;
}
- if !shall_removed.is_empty() {
- trace!(
- "Current libp2p peers pool stats is {:?}",
- self.swarm.network_info()
- );
- trace!(
- "Removing {} outdated live connections, still have {} left.",
- shall_removed.len(),
- self.live_connected_peers.len()
- );
-
- for (connection_id, peer_id) in shall_removed {
- let _ = self.live_connected_peers.remove(&connection_id);
- let result = self.swarm.close_connection(connection_id);
- #[cfg(feature = "open-metrics")]
- if let Some(metrics) = &self.network_metrics {
- metrics
- .open_connections
- .set(self.live_connected_peers.len() as i64);
- metrics
- .connected_peers
- .set(self.swarm.connected_peers().count() as i64);
- }
- trace!("Removed outdated connection {connection_id:?} to {peer_id:?} with result: {result:?}");
- }
+ self.record_connection_metrics();
+
+ trace!(
+ "Current libp2p peers pool stats is {:?}",
+ self.swarm.network_info()
+ );
+ trace!(
+ "Removed {removed_conns} outdated live connections, still have {} left.",
+ self.live_connected_peers.len()
+ );
+ }
+
+ /// Record the metrics on update of connection state.
+ fn record_connection_metrics(&self) {
+ #[cfg(feature = "open-metrics")]
+ if let Some(metrics) = &self.network_metrics {
+ metrics
+ .open_connections
+ .set(self.live_connected_peers.len() as i64);
+ metrics
+ .connected_peers
+ .set(self.swarm.connected_peers().count() as i64);
}
}
}
diff --git a/sn_networking/src/lib.rs b/sn_networking/src/lib.rs
index db1f13ee92..848c8210a8 100644
--- a/sn_networking/src/lib.rs
+++ b/sn_networking/src/lib.rs
@@ -42,7 +42,6 @@ pub use self::{
error::{GetRecordError, NetworkError},
event::{MsgResponder, NetworkEvent},
record_store::{calculate_cost_for_records, NodeRecordStore},
- spends::SpendVerificationOk,
transfers::{get_raw_signed_spends_from_record, get_signed_spend_from_record},
};
diff --git a/sn_networking/src/metrics/mod.rs b/sn_networking/src/metrics/mod.rs
index cdb351407e..ba8cdebad0 100644
--- a/sn_networking/src/metrics/mod.rs
+++ b/sn_networking/src/metrics/mod.rs
@@ -165,12 +165,6 @@ impl Recorder for NetworkMetrics {
}
}
-impl Recorder<libp2p::dcutr::Event> for NetworkMetrics {
- fn record(&self, event: &libp2p::dcutr::Event) {
- self.libp2p_metrics.record(event)
- }
-}
-
impl Recorder<libp2p::relay::Event> for NetworkMetrics {
fn record(&self, event: &libp2p::relay::Event) {
self.libp2p_metrics.record(event)
diff --git a/sn_networking/src/record_store.rs b/sn_networking/src/record_store.rs
index fd397b6af7..582179e54a 100644
--- a/sn_networking/src/record_store.rs
+++ b/sn_networking/src/record_store.rs
@@ -34,6 +34,7 @@ use sn_protocol::{
NetworkAddress, PrettyPrintRecordKey,
};
use sn_transfers::{NanoTokens, QuotingMetrics, TOTAL_SUPPLY};
+use std::collections::VecDeque;
use std::{
borrow::Cow,
collections::{HashMap, HashSet},
@@ -54,6 +55,9 @@ use xor_name::XorName;
// this shall allow around 4K records.
const MAX_RECORDS_COUNT: usize = 4096;
+/// The maximum number of records to cache in memory.
+const MAX_RECORDS_CACHE_SIZE: usize = 100;
+
/// File name of the recorded historical quoting metrics.
const HISTORICAL_QUOTING_METRICS_FILENAME: &str = "historic_quoting_metrics";
@@ -67,6 +71,8 @@ pub struct NodeRecordStore {
config: NodeRecordStoreConfig,
/// A set of keys, each corresponding to a data `Record` stored on disk.
records: HashMap,
+ /// FIFO simple cache of records to reduce read times
+ records_cache: VecDeque<Record>,
/// Send network events to the node layer.
network_event_sender: mpsc::Sender<NetworkEvent>,
/// Send cmds to the network layer. Used to interact with self in an async fashion.
@@ -101,6 +107,8 @@ pub struct NodeRecordStoreConfig {
pub max_records: usize,
/// The maximum size of record values, in bytes.
pub max_value_bytes: usize,
+ /// The maximum number of records to cache in memory.
+ pub records_cache_size: usize,
}
impl Default for NodeRecordStoreConfig {
@@ -111,6 +119,7 @@ impl Default for NodeRecordStoreConfig {
historic_quote_dir,
max_records: MAX_RECORDS_COUNT,
max_value_bytes: MAX_PACKET_SIZE,
+ records_cache_size: MAX_RECORDS_CACHE_SIZE,
}
}
}
@@ -262,11 +271,14 @@ impl NodeRecordStore {
};
let records = Self::update_records_from_an_existing_store(&config, &encryption_details);
+
+ let cache_size = config.records_cache_size;
let mut record_store = NodeRecordStore {
local_key: KBucketKey::from(local_id),
local_address: NetworkAddress::from_peer(local_id),
config,
records,
+ records_cache: VecDeque::with_capacity(cache_size),
network_event_sender,
swarm_cmd_sender,
responsible_distance_range: None,
@@ -353,6 +365,7 @@ impl NodeRecordStore {
) -> Option> {
let start = Instant::now();
let filename = Self::generate_filename(key);
+
let file_path = storage_dir.join(&filename);
// we should only be reading if we know the record is written to disk properly
@@ -506,6 +519,17 @@ impl NodeRecordStore {
let record_key = PrettyPrintRecordKey::from(&r.key).into_owned();
trace!("PUT a verified Record: {record_key:?}");
+ // if the cache already has this record in it (eg, a conflicting spend)
+ // remove it from the cache
+ self.records_cache.retain(|record| record.key != r.key);
+
+ // store in the FIFO records cache, removing the oldest if needed
+ if self.records_cache.len() > self.config.records_cache_size {
+ self.records_cache.pop_front();
+ }
+
+ self.records_cache.push_back(r.clone());
+
self.prune_records_if_needed(&r.key)?;
let filename = Self::generate_filename(&r.key);
@@ -634,6 +658,13 @@ impl RecordStore for NodeRecordStore {
// with the record. Thus a node can be bombarded with GET reqs for random keys. These can be safely
// ignored if we don't have the record locally.
let key = PrettyPrintRecordKey::from(k);
+
+ let cached_record = self.records_cache.iter().find(|r| r.key == *k);
+ // first return from FIFO cache if existing there
+ if let Some(record) = cached_record {
+ return Some(Cow::Borrowed(record));
+ }
+
if !self.records.contains_key(k) {
trace!("Record not found locally: {key:?}");
return None;
@@ -708,6 +739,8 @@ impl RecordStore for NodeRecordStore {
fn remove(&mut self, k: &Key) {
let _ = self.records.remove(k);
+ self.records_cache.retain(|r| r.key != *k);
+
#[cfg(feature = "open-metrics")]
if let Some(metric) = &self.record_count_metric {
let _ = metric.set(self.records.len() as i64);
diff --git a/sn_networking/src/relay_manager.rs b/sn_networking/src/relay_manager.rs
index 1f1d123019..ddd65b3745 100644
--- a/sn_networking/src/relay_manager.rs
+++ b/sn_networking/src/relay_manager.rs
@@ -14,7 +14,7 @@ use libp2p::{
use rand::Rng;
use std::collections::{BTreeMap, HashMap, HashSet, VecDeque};
-const MAX_CONCURRENT_RELAY_CONNECTIONS: usize = 2;
+const MAX_CONCURRENT_RELAY_CONNECTIONS: usize = 4;
const MAX_POTENTIAL_CANDIDATES: usize = 1000;
pub(crate) fn is_a_relayed_peer(addrs: &HashSet<Multiaddr>) -> bool {
@@ -40,27 +40,14 @@ pub(crate) struct RelayManager {
}
impl RelayManager {
- pub(crate) fn new(initial_peers: Vec<Multiaddr>, self_peer_id: PeerId) -> Self {
- let candidates = initial_peers
- .into_iter()
- .filter_map(|addr| {
- for protocol in addr.iter() {
- if let Protocol::P2p(peer_id) = protocol {
- let relay_addr = Self::craft_relay_address(&addr, Some(peer_id))?;
-
- return Some((peer_id, relay_addr));
- }
- }
- None
- })
- .collect();
+ pub(crate) fn new(self_peer_id: PeerId) -> Self {
Self {
self_peer_id,
reserved_by: Default::default(),
enable_client: false,
connected_relays: Default::default(),
waiting_for_reservation: Default::default(),
- candidates,
+ candidates: Default::default(),
relayed_listener_id_map: Default::default(),
}
}
@@ -70,19 +57,10 @@ impl RelayManager {
self.enable_client = enable;
}
- /// Should we keep this peer alive?
- /// If a peer is considered as a bad node, closing it's connection would remove that server from the listen addr.
- #[allow(clippy::nonminimal_bool)]
- pub(crate) fn keep_alive_peer(&self, peer_id: &PeerId, bad_nodes: &BadNodes) -> bool {
- let is_not_bad = if let Some((_, is_bad)) = bad_nodes.get(peer_id) {
- !*is_bad
- } else {
- true
- };
-
- // we disconnect from bad server
- (self.connected_relays.contains_key(peer_id) && is_not_bad)
- || (self.waiting_for_reservation.contains_key(peer_id) && is_not_bad)
+ /// Should we keep this peer alive? Closing a connection to that peer would remove that server from the listen addr.
+ pub(crate) fn keep_alive_peer(&self, peer_id: &PeerId) -> bool {
+ self.connected_relays.contains_key(peer_id)
+ || self.waiting_for_reservation.contains_key(peer_id)
// but servers provide connections to bad nodes.
|| self.reserved_by.contains(peer_id)
}
@@ -94,20 +72,12 @@ impl RelayManager {
peer_id: &PeerId,
addrs: &HashSet<Multiaddr>,
stream_protocols: &Vec<StreamProtocol>,
- bad_nodes: &BadNodes,
) {
if self.candidates.len() >= MAX_POTENTIAL_CANDIDATES {
trace!("Got max relay candidates");
return;
}
- if let Some((_, is_bad)) = bad_nodes.get(peer_id) {
- if *is_bad {
- debug!("Not adding peer {peer_id:?} as relay candidate as it is a bad node.");
- return;
- }
- }
-
if Self::does_it_support_relay_server_protocol(stream_protocols) {
// todo: collect and manage multiple addrs
if let Some(addr) = addrs.iter().next() {
diff --git a/sn_networking/src/replication_fetcher.rs b/sn_networking/src/replication_fetcher.rs
index fb206973f2..7312c967da 100644
--- a/sn_networking/src/replication_fetcher.rs
+++ b/sn_networking/src/replication_fetcher.rs
@@ -74,17 +74,30 @@ impl ReplicationFetcher {
pub(crate) fn add_keys(
&mut self,
holder: PeerId,
- mut incoming_keys: Vec<(NetworkAddress, RecordType)>,
+ incoming_keys: Vec<(NetworkAddress, RecordType)>,
locally_stored_keys: &HashMap,
) -> Vec<(PeerId, RecordKey)> {
+ // remove locally stored from incoming_keys
+ let mut new_incoming_keys: Vec<_> = incoming_keys
+ .iter()
+ .filter(|(addr, record_type)| {
+ let key = &addr.to_record_key();
+ !locally_stored_keys.contains_key(key)
+ && !self
+ .to_be_fetched
+ .contains_key(&(key.clone(), record_type.clone(), holder))
+ })
+ .cloned()
+ .collect();
+
self.remove_stored_keys(locally_stored_keys);
let self_address = NetworkAddress::from_peer(self.self_peer_id);
- let total_incoming_keys = incoming_keys.len();
+ let total_incoming_keys = new_incoming_keys.len();
// In case of node full, restrict fetch range
if let Some(farthest_distance) = self.farthest_acceptable_distance {
let mut out_of_range_keys = vec![];
- incoming_keys.retain(|(addr, _)| {
+ new_incoming_keys.retain(|(addr, _)| {
let is_in_range = self_address.distance(addr) <= farthest_distance;
if !is_in_range {
out_of_range_keys.push(addr.clone());
@@ -101,8 +114,8 @@ impl ReplicationFetcher {
let mut keys_to_fetch = vec![];
// For new data, it will be replicated out in a special replication_list of length 1.
// And we shall `fetch` that copy immediately (if in range), if it's not being fetched.
- if incoming_keys.len() == 1 {
- let (record_address, record_type) = incoming_keys[0].clone();
+ if new_incoming_keys.len() == 1 {
+ let (record_address, record_type) = new_incoming_keys[0].clone();
let new_data_key = (record_address.to_record_key(), record_type);
@@ -113,16 +126,16 @@ impl ReplicationFetcher {
}
// To avoid later on un-necessary actions.
- incoming_keys.clear();
+ new_incoming_keys.clear();
}
self.to_be_fetched
.retain(|_, time_out| *time_out > Instant::now());
let mut out_of_range_keys = vec![];
- // Filter out those out_of_range ones among the imcoming_keys.
+ // Filter out those out_of_range ones among the incoming_keys.
if let Some(ref distance_range) = self.distance_range {
- incoming_keys.retain(|(addr, _record_type)| {
+ new_incoming_keys.retain(|(addr, _record_type)| {
let is_in_range =
self_address.distance(addr).ilog2().unwrap_or(0) <= *distance_range;
if !is_in_range {
@@ -141,12 +154,14 @@ impl ReplicationFetcher {
}
// add in-range AND non existing keys to the fetcher
- incoming_keys.into_iter().for_each(|(addr, record_type)| {
- let _ = self
- .to_be_fetched
- .entry((addr.to_record_key(), record_type, holder))
- .or_insert(Instant::now() + PENDING_TIMEOUT);
- });
+ new_incoming_keys
+ .into_iter()
+ .for_each(|(addr, record_type)| {
+ let _ = self
+ .to_be_fetched
+ .entry((addr.to_record_key(), record_type, holder))
+ .or_insert(Instant::now() + PENDING_TIMEOUT);
+ });
keys_to_fetch.extend(self.next_keys_to_fetch());
@@ -469,11 +484,13 @@ mod tests {
replication_fetcher.add_keys(PeerId::random(), incoming_keys, &Default::default());
assert_eq!(
keys_to_fetch.len(),
- replication_fetcher.on_going_fetches.len()
+ replication_fetcher.on_going_fetches.len(),
+ "keys to fetch and ongoing fetches should match"
);
assert_eq!(
in_range_keys,
- keys_to_fetch.len() + replication_fetcher.to_be_fetched.len()
+ keys_to_fetch.len() + replication_fetcher.to_be_fetched.len(),
+ "all keys should be in range and in the fetcher"
);
}
}
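
The pre-filtering step added to add_keys above can be read in isolation as: drop any incoming key that is already stored locally or already queued for the same holder. A simplified, self-contained sketch with stand-in types (Key, RecordType and Holder replace the crate's RecordKey, RecordType and PeerId):

use std::collections::{HashMap, HashSet};

type Key = String;
type RecordType = u8;
type Holder = u64;

fn filter_new_incoming_keys(
    incoming: Vec<(Key, RecordType)>,
    locally_stored: &HashMap<Key, RecordType>,
    to_be_fetched: &HashSet<(Key, RecordType, Holder)>,
    holder: Holder,
) -> Vec<(Key, RecordType)> {
    incoming
        .into_iter()
        .filter(|(key, record_type)| {
            // keep a key only if we neither store it locally nor already queued it for this holder
            !locally_stored.contains_key(key)
                && !to_be_fetched.contains(&(key.clone(), *record_type, holder))
        })
        .collect()
}
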
diff --git a/sn_networking/src/spends.rs b/sn_networking/src/spends.rs
index 800e2e1564..447bbb9633 100644
--- a/sn_networking/src/spends.rs
+++ b/sn_networking/src/spends.rs
@@ -11,22 +11,15 @@ use futures::future::join_all;
use sn_transfers::{is_genesis_spend, SignedSpend, SpendAddress, TransferError};
use std::{collections::BTreeSet, iter::Iterator};
-#[derive(Debug)]
-pub enum SpendVerificationOk {
- Valid,
- ParentDoubleSpend,
-}
-
impl Network {
/// This function verifies a single spend.
/// This is used by nodes for spends validation, before storing them.
/// - It checks if the spend has valid ancestry, that its parents exist on the Network.
- /// - If the parent is a double spend, we still carry out the valdiation, but return SpendVerificationOk::ParentDoubleSpend
+ /// - If the parent is a double spend, we still carry out the validation, but return the error at the end
/// - It checks that the spend has a valid signature and content
/// - It does NOT check if the spend exists online
/// - It does NOT check if the spend is already spent on the Network
- pub async fn verify_spend(&self, spend: &SignedSpend) -> Result<SpendVerificationOk> {
- let mut result = SpendVerificationOk::Valid;
+ pub async fn verify_spend(&self, spend: &SignedSpend) -> Result<()> {
let unique_key = spend.unique_pubkey();
debug!("Verifying spend {unique_key}");
spend.verify(spend.spent_tx_hash())?;
@@ -34,10 +27,11 @@ impl Network {
// genesis does not have parents so we end here
if is_genesis_spend(spend) {
debug!("Verified {unique_key} was Genesis spend!");
- return Ok(result);
+ return Ok(());
}
// get its parents
+ let mut result = Ok(());
let parent_keys = spend
.spend
.parent_tx
@@ -45,21 +39,26 @@ impl Network {
.iter()
.map(|input| input.unique_pubkey);
let tasks: Vec<_> = parent_keys
- .map(|a| self.get_spend(SpendAddress::from_unique_pubkey(&a)))
+ .map(|parent| async move {
+ let spend = self
+ .get_spend(SpendAddress::from_unique_pubkey(&parent))
+ .await;
+ (parent, spend)
+ })
.collect();
let mut parent_spends = BTreeSet::new();
- for parent_spend in join_all(tasks).await {
+ for (parent_key, parent_spend) in join_all(tasks).await {
match parent_spend {
Ok(parent_spend) => {
parent_spends.insert(BTreeSet::from_iter([parent_spend]));
}
Err(NetworkError::DoubleSpendAttempt(attempts)) => {
- warn!("While verifying {unique_key:?}, a double spend attempt detected for the parent {attempts:?}. Continuing verification.");
+ warn!("While verifying {unique_key:?}, a double spend attempt ({attempts:?}) detected for the parent with pub key {parent_key:?} . Continuing verification.");
parent_spends.insert(BTreeSet::from_iter(attempts));
- result = SpendVerificationOk::ParentDoubleSpend;
+ result = Err(NetworkError::Transfer(TransferError::DoubleSpentParent));
}
Err(e) => {
- let s = format!("Failed to get parent spend of {unique_key}: {e}");
+ let s = format!("Failed to get parent spend of {unique_key} parent pubkey: {parent_key:?} error: {e}");
warn!("{}", s);
return Err(NetworkError::Transfer(TransferError::InvalidParentSpend(s)));
}
@@ -69,6 +68,6 @@ impl Network {
// verify the parents
spend.verify_parent_spends(parent_spends.iter())?;
- Ok(result)
+ result
}
}
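
The change to verify_spend above replaces the SpendVerificationOk enum with a deferred error: keep checking every parent, remember a DoubleSpentParent error if one appears, and only surface it once all parents have been processed. A hedged sketch of that control flow with placeholder types (not the crate's actual error type):

#[derive(Debug)]
enum VerifyError {
    DoubleSpentParent,
    InvalidParent(String),
}

// Verify every parent result; remember a DoubleSpentParent error but keep going so all
// parent spends are still gathered, and only return that error once the loop is done.
fn check_parents(parents: Vec<Result<u64, VerifyError>>) -> Result<(), VerifyError> {
    let mut result = Ok(());
    for parent in parents {
        match parent {
            Ok(_parent_spend) => { /* collect the verified parent */ }
            Err(VerifyError::DoubleSpentParent) => result = Err(VerifyError::DoubleSpentParent),
            Err(e) => return Err(e), // any other failure aborts verification immediately
        }
    }
    result
}
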
diff --git a/sn_networking/src/transfers.rs b/sn_networking/src/transfers.rs
index e03dcff456..f8566511d8 100644
--- a/sn_networking/src/transfers.rs
+++ b/sn_networking/src/transfers.rs
@@ -33,6 +33,9 @@ impl Network {
let get_cfg = GetRecordCfg {
get_quorum: Quorum::Majority,
retry_strategy: None,
+ // This should not be set here. This function is used as a quick check to find the spends around the key during
+ // validation. The returned record might be a double spend attempt, in which case it will not match
+ // the record we hold locally.
target_record: None,
expected_holders: Default::default(),
};
diff --git a/sn_node/Cargo.toml b/sn_node/Cargo.toml
index 1996cc6058..902d691c89 100644
--- a/sn_node/Cargo.toml
+++ b/sn_node/Cargo.toml
@@ -2,7 +2,7 @@
authors = ["MaidSafe Developers "]
description = "Safe Node"
name = "sn_node"
-version = "0.109.0"
+version = "0.110.0"
edition = "2021"
license = "GPL-3.0"
homepage = "https://maidsafe.net"
@@ -51,15 +51,14 @@ rmp-serde = "1.1.1"
rayon = "1.8.0"
self_encryption = "~0.29.0"
serde = { version = "1.0.133", features = ["derive", "rc"] }
-sn_build_info = { path = "../sn_build_info", version = "0.1.9" }
-sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.0" }
-sn_client = { path = "../sn_client", version = "0.108.0" }
-sn_logging = { path = "../sn_logging", version = "0.2.30" }
-sn_networking = { path = "../sn_networking", version = "0.17.0" }
-sn_protocol = { path = "../sn_protocol", version = "0.17.5" }
-sn_registers = { path = "../sn_registers", version = "0.3.15" }
-sn_transfers = { path = "../sn_transfers", version = "0.18.8" }
-sn_service_management = { path = "../sn_service_management", version = "0.3.8" }
+sn_build_info = { path = "../sn_build_info", version = "0.1.10" }
+sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.1" }
+sn_logging = { path = "../sn_logging", version = "0.2.31" }
+sn_networking = { path = "../sn_networking", version = "0.17.1" }
+sn_protocol = { path = "../sn_protocol", version = "0.17.6" }
+sn_registers = { path = "../sn_registers", version = "0.3.16" }
+sn_transfers = { path = "../sn_transfers", version = "0.18.9" }
+sn_service_management = { path = "../sn_service_management", version = "0.3.9" }
thiserror = "1.0.23"
tokio = { version = "1.32.0", features = [
"io-util",
@@ -86,10 +85,11 @@ reqwest = { version = "0.12.2", default-features = false, features = [
"rustls-tls-manual-roots",
] }
serde_json = "1.0"
-sn_protocol = { path = "../sn_protocol", version = "0.17.5", features = [
+sn_client = { path = "../sn_client", version = "0.109.0" }
+sn_protocol = { path = "../sn_protocol", version = "0.17.6", features = [
"rpc",
] }
-sn_transfers = { path = "../sn_transfers", version = "0.18.8", features = [
+sn_transfers = { path = "../sn_transfers", version = "0.18.9", features = [
"test-utils",
] }
tempfile = "3.6.0"
diff --git a/sn_node/src/node.rs b/sn_node/src/node.rs
index c8ccf090ed..9c55d86d16 100644
--- a/sn_node/src/node.rs
+++ b/sn_node/src/node.rs
@@ -55,7 +55,15 @@ use sn_protocol::storage::{try_serialize_record, RecordKind, SpendAddress};
/// Interval to trigger replication of all records to all peers.
/// This is the max time it should take. Minimum interval at any node will be half this
-pub const PERIODIC_REPLICATION_INTERVAL_MAX_S: u64 = 45;
+pub const PERIODIC_REPLICATION_INTERVAL_MAX_S: u64 = 450;
+
+/// Interval to trigger bad node detection.
+/// This is the max time it should take. Minimum interval at any node will be half this
+const PERIODIC_BAD_NODE_DETECTION_INTERVAL_MAX_S: u64 = 45;
+
+/// Interval to trigger reward forwarding.
+/// This is the max time it should take. Minimum interval at any node will be half this
+const PERIODIC_REWARD_FORWARD_INTERVAL_MAX_S: u64 = 45;
/// Max number of attempts that chunk proof verification will be carried out against certain target,
/// before classifying peer as a bad peer.
@@ -278,7 +286,8 @@ impl Node {
#[cfg(all(feature = "reward-forward", feature = "open-metrics"))]
let _handle = spawn(async move {
let root_dir = node_copy.network().root_dir_path().clone();
- let balance = read_forwarded_balance_value(&root_dir);
+ let balance_file_path = root_dir.join(FORWARDED_BALANCE_FILE_NAME);
+ let balance = read_forwarded_balance_value(&balance_file_path);
if let Some(node_metrics) = node_copy.node_metrics() {
let _ = node_metrics.total_forwarded_rewards.set(balance as i64);
@@ -300,7 +309,8 @@ impl Node {
// use a random timeout to ensure not sync when transmit messages.
let bad_nodes_check_interval: u64 = 5 * rng.gen_range(
- PERIODIC_REPLICATION_INTERVAL_MAX_S / 2..PERIODIC_REPLICATION_INTERVAL_MAX_S,
+ PERIODIC_BAD_NODE_DETECTION_INTERVAL_MAX_S / 2
+ ..PERIODIC_BAD_NODE_DETECTION_INTERVAL_MAX_S,
);
let bad_nodes_check_time = Duration::from_secs(bad_nodes_check_interval);
debug!("BadNodesCheck interval set to {bad_nodes_check_time:?}");
@@ -313,7 +323,8 @@ impl Node {
// use a random timeout to ensure not sync when transmit messages.
let balance_forward_interval: u64 = 10
* rng.gen_range(
- PERIODIC_REPLICATION_INTERVAL_MAX_S / 2..PERIODIC_REPLICATION_INTERVAL_MAX_S,
+ PERIODIC_REWARD_FORWARD_INTERVAL_MAX_S / 2
+ ..PERIODIC_REWARD_FORWARD_INTERVAL_MAX_S,
);
let balance_forward_time = Duration::from_secs(balance_forward_interval);
debug!(
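
All three intervals above follow the same jitter pattern: each node picks a random base period in [MAX/2, MAX) and then applies a fixed multiplier (5x for bad-node detection, 10x for reward forwarding), so nodes do not run their periodic tasks in lockstep. A minimal sketch of that pattern, assuming rand's Rng trait and the constant value shown above:

use rand::Rng;
use std::time::Duration;

const PERIODIC_BAD_NODE_DETECTION_INTERVAL_MAX_S: u64 = 45;

// Each node picks a random base period in [MAX/2, MAX) and scales it by a multiplier,
// so periodic tasks on different nodes stay desynchronised.
fn jittered_interval(rng: &mut impl Rng, multiplier: u64) -> Duration {
    let base = rng.gen_range(
        PERIODIC_BAD_NODE_DETECTION_INTERVAL_MAX_S / 2..PERIODIC_BAD_NODE_DETECTION_INTERVAL_MAX_S,
    );
    Duration::from_secs(multiplier * base)
}
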
diff --git a/sn_node/src/put_validation.rs b/sn_node/src/put_validation.rs
index a73babe9a1..656eadc8da 100644
--- a/sn_node/src/put_validation.rs
+++ b/sn_node/src/put_validation.rs
@@ -8,10 +8,7 @@
use crate::{node::Node, quote::verify_quote_for_storecost, Error, Marker, Result};
use libp2p::kad::{Record, RecordKey};
-use sn_networking::{
- get_raw_signed_spends_from_record, GetRecordError, NetworkError, SpendVerificationOk,
- MAX_PACKET_SIZE,
-};
+use sn_networking::{get_raw_signed_spends_from_record, GetRecordError, NetworkError};
use sn_protocol::{
storage::{
try_deserialize_record, try_serialize_record, Chunk, RecordHeader, RecordKind, RecordType,
@@ -28,12 +25,6 @@ use std::collections::BTreeSet;
use tokio::task::JoinSet;
use xor_name::XorName;
-/// The maximum number of double spend attempts to store that we got from PUTs
-const MAX_DOUBLE_SPEND_ATTEMPTS_TO_KEEP_FROM_PUTS: usize = 15;
-
-/// The maximum number of double spend attempts to store inside a record
-const MAX_DOUBLE_SPEND_ATTEMPTS_TO_KEEP_PER_RECORD: usize = 30;
-
impl Node {
/// Validate a record and it's payment, and store the record to the RecordStore
pub(crate) async fn validate_and_store_record(&self, record: Record) -> Result<()> {
@@ -99,7 +90,7 @@ impl Node {
let value_to_hash = record.value.clone();
let spends = try_deserialize_record::<Vec<SignedSpend>>(&record)?;
let result = self
- .validate_merge_and_store_spends(spends, &record_key, true)
+ .validate_merge_and_store_spends(spends, &record_key)
.await;
if result.is_ok() {
Marker::ValidSpendPutFromClient(&PrettyPrintRecordKey::from(&record_key)).log();
@@ -207,7 +198,7 @@ impl Node {
RecordKind::Spend => {
let record_key = record.key.clone();
let spends = try_deserialize_record::<Vec<SignedSpend>>(&record)?;
- self.validate_merge_and_store_spends(spends, &record_key, false)
+ self.validate_merge_and_store_spends(spends, &record_key)
.await
}
RecordKind::Register => {
@@ -342,7 +333,6 @@ impl Node {
&self,
signed_spends: Vec<SignedSpend>,
record_key: &RecordKey,
- from_put: bool,
) -> Result<()> {
let pretty_key = PrettyPrintRecordKey::from(record_key);
debug!("Validating spends before storage at {pretty_key:?}");
@@ -381,10 +371,11 @@ impl Node {
// validate the signed spends against the network and the local knowledge
debug!("Validating spends for {pretty_key:?} with unique key: {unique_pubkey:?}");
let validated_spends = match self
- .signed_spends_to_keep(spends_for_key.clone(), *unique_pubkey, from_put)
+ .signed_spends_to_keep(spends_for_key.clone(), *unique_pubkey)
.await
{
- Ok(s) => s,
+ Ok((one, None)) => vec![one],
+ Ok((one, Some(two))) => vec![one, two],
Err(e) => {
warn!("Failed to validate spends at {pretty_key:?} with unique key {unique_pubkey:?}: {e}");
return Err(e);
@@ -648,47 +639,31 @@ impl Node {
}
/// Determine which spends our node should keep and store
- /// - if our local copy has reached the len/size limits, we don't store anymore from kad::PUT and return the local copy
- /// - else if the request is from replication OR if limit not reached during kad::PUT, then:
- /// - trust local spends
- /// - downloads spends from the network
- /// - verifies incoming spend + network spends and ignores the invalid ones.
- /// - orders all the verified spends from local + incoming + network
- /// - returns a maximum of MAX_DOUBLE_SPEND_ATTEMPTS_TO_KEEP_PER_RECORD spends
+ /// - get local spends and trust them
+ /// - get spends from the network
+ /// - verify incoming spend + network spends and ignore the invalid ones
+ /// - order all the verified spends by:
+ /// - whether they have spent descendants (meaning a live branch)
+ /// - deterministically, by their order in the BTreeSet
+ /// - return the spend to keep, along with a second spend if it was a double spend
+ /// - when we get more than two spends, keep only 2, chosen deterministically so that
+ /// all nodes running this code are eventually consistent
async fn signed_spends_to_keep(
&self,
signed_spends: Vec<SignedSpend>,
unique_pubkey: UniquePubkey,
- from_put: bool,
- ) -> Result<Vec<SignedSpend>> {
+ ) -> Result<(SignedSpend, Option<SignedSpend>)> {
let spend_addr = SpendAddress::from_unique_pubkey(&unique_pubkey);
debug!(
"Validating before storing spend at {spend_addr:?} with unique key: {unique_pubkey}"
);
+ // trust local spends as we've verified them before
let local_spends = self.get_local_spends(spend_addr).await?;
- let size_of_local_spends = try_serialize_record(&local_spends, RecordKind::Spend)?
- .to_vec()
- .len();
- let max_spend_len_reached =
- local_spends.len() >= MAX_DOUBLE_SPEND_ATTEMPTS_TO_KEEP_FROM_PUTS;
- let max_spend_size_reached = {
- // todo: limit size of a single signed spend to < max_packet_size/2
- let size_limit = size_of_local_spends >= MAX_PACKET_SIZE / 2;
- // just so that we can store the double spend
- size_limit && local_spends.len() > 1
- };
-
- if (max_spend_len_reached || max_spend_size_reached) && from_put {
- info!("We already have {MAX_DOUBLE_SPEND_ATTEMPTS_TO_KEEP_FROM_PUTS} spends locally or have maximum size of spends, skipping spends received via PUT for {unique_pubkey:?}");
- return Ok(local_spends);
- }
- let mut all_verified_spends = BTreeSet::from_iter(local_spends.into_iter());
// get spends from the network at the address for that unique pubkey
let network_spends = match self.network().get_raw_spends(spend_addr).await {
Ok(spends) => spends,
- Err(NetworkError::GetRecordError(GetRecordError::RecordNotFound)) => vec![],
Err(NetworkError::GetRecordError(GetRecordError::SplitRecord { result_map })) => {
warn!("Got a split record (double spend) for {unique_pubkey:?} from the network");
let mut spends = vec![];
@@ -700,33 +675,65 @@ impl Node {
}
spends
}
+ Err(NetworkError::GetRecordError(GetRecordError::NotEnoughCopies {
+ record,
+ got,
+ ..
+ })) => {
+ info!(
+ "Retrieved {got} copies of the record for {unique_pubkey:?} from the network"
+ );
+ match get_raw_signed_spends_from_record(&record) {
+ Ok(spends) => spends,
+ Err(err) => {
+ warn!("Ignoring invalid record received from the network for spend: {unique_pubkey:?}: {err}");
+ vec![]
+ }
+ }
+ }
+
Err(e) => {
warn!("Continuing without network spends as failed to get spends from the network for {unique_pubkey:?}: {e}");
vec![]
}
};
+ debug!(
+ "For {unique_pubkey:?} got {} local spends, {} from network and {} provided",
+ local_spends.len(),
+ network_spends.len(),
+ signed_spends.len()
+ );
+ debug!("Local spends {local_spends:?}; from network {network_spends:?}; provided {signed_spends:?}");
+
+ // only verify spends we don't know of
+ let mut all_verified_spends = BTreeSet::from_iter(local_spends.into_iter());
+ let unverified_spends =
+ BTreeSet::from_iter(network_spends.into_iter().chain(signed_spends.into_iter()));
+ let known_spends = all_verified_spends.clone();
+ let new_unverified_spends: BTreeSet<_> =
+ unverified_spends.difference(&known_spends).collect();
- let mut parent_is_a_double_spend = false;
- // check the received spends and the spends got from the network
let mut tasks = JoinSet::new();
- for s in signed_spends.into_iter().chain(network_spends.into_iter()) {
+ for s in new_unverified_spends.into_iter() {
let self_clone = self.clone();
+ let spend_clone = s.clone();
let _ = tasks.spawn(async move {
- let res = self_clone.network().verify_spend(&s).await;
- (s, res)
+ let res = self_clone.network().verify_spend(&spend_clone).await;
+ (spend_clone, res)
});
}
- // collect spends until we have a double spend or until we have all the results
+ // gather verified spends
+ let mut double_spent_parent = BTreeSet::new();
while let Some(res) = tasks.join_next().await {
match res {
- Ok((spend, Ok(spend_verification_ok))) => {
- info!("Successfully verified {spend:?} with result: {spend_verification_ok:?}");
- if let SpendVerificationOk::ParentDoubleSpend = spend_verification_ok {
- // the parent is a double spend, but we will store it incase our spend is also a double spend.
- parent_is_a_double_spend = true;
- }
- let _inserted = all_verified_spends.insert(spend);
+ Ok((spend, Ok(()))) => {
+ info!("Successfully verified {spend:?}");
+ let _inserted = all_verified_spends.insert(spend.to_owned().clone());
+ }
+ Ok((spend, Err(NetworkError::Transfer(TransferError::DoubleSpentParent)))) => {
+ warn!("Parent of {spend:?} was double spent, keeping aside in case we're a double spend as well");
+ let _ = double_spent_parent.insert(spend.clone());
}
Ok((spend, Err(e))) => {
// an error here most probably means the received spend is invalid
@@ -741,33 +748,100 @@ impl Node {
}
}
- if parent_is_a_double_spend && all_verified_spends.len() == 1 {
- warn!("Parent is a double spend for {unique_pubkey:?}, ignoring this spend");
- return Err(Error::Transfers(TransferError::InvalidParentSpend(
- format!("Parent is a double spend for {unique_pubkey:?}"),
- )));
- } else if parent_is_a_double_spend && all_verified_spends.len() > 1 {
- warn!("Parent is a double spend for {unique_pubkey:?}, but we're also a double spend. So storing our double spend attempt.");
+ // keep track of double spend with double spent parent
+ if !all_verified_spends.is_empty() && !double_spent_parent.is_empty() {
+ warn!("Parent of {unique_pubkey:?} was double spent, but it's also a double spend. So keeping track of this double spend attempt.");
+ all_verified_spends.extend(double_spent_parent.into_iter())
}
- // todo: should we also check the size of spends here? Maybe just limit the size of a single
- // SignedSpend to < max_packet_size/2 so that we can store atleast 2 of them.
- let verified_spends = all_verified_spends
- .into_iter()
- .take(MAX_DOUBLE_SPEND_ATTEMPTS_TO_KEEP_PER_RECORD)
- .collect::>();
-
- if verified_spends.is_empty() {
- debug!("No valid spends found while validating Spend PUT. Who is sending us garbage?");
- Err(Error::InvalidRequest(format!(
- "Found no valid spends while validating Spend PUT for {unique_pubkey:?}"
- )))
- } else if verified_spends.len() > 1 {
- warn!("Got a double spend for {unique_pubkey:?}");
- Ok(verified_spends)
- } else {
- debug!("Got a single valid spend for {unique_pubkey:?}");
- Ok(verified_spends)
+ // return 2 spends max
+ let all_verified_spends: Vec<_> = all_verified_spends.into_iter().collect();
+ match all_verified_spends.as_slice() {
+ [one_spend] => Ok((one_spend.clone(), None)),
+ [one, two] => Ok((one.clone(), Some(two.clone()))),
+ [] => {
+ warn!("Invalid request: none of the spends were valid for {unique_pubkey:?}");
+ Err(Error::InvalidRequest(format!(
+ "Found no valid spends while validating Spends for {unique_pubkey:?}"
+ )))
+ }
+ more => {
+ warn!("Got more than 2 verified spends, this might be a double spend spam attack, making sure to favour live branches (branches with spent descendants)");
+ let (one, two) = self.verified_spends_select_2_live(more).await?;
+ Ok((one, Some(two)))
+ }
+ }
+ }
+
+ async fn verified_spends_select_2_live(
+ &self,
+ many_spends: &[SignedSpend],
+ ) -> Result<(SignedSpend, SignedSpend)> {
+ // get all spends descendants
+ let mut tasks = JoinSet::new();
+ for spend in many_spends {
+ let descendants: BTreeSet<_> = spend
+ .spend
+ .spent_tx
+ .outputs
+ .iter()
+ .map(|o| o.unique_pubkey())
+ .map(SpendAddress::from_unique_pubkey)
+ .collect();
+ for d in descendants {
+ let self_clone = self.clone();
+ let spend_clone = spend.to_owned();
+ let _ = tasks.spawn(async move {
+ let res = self_clone.network().get_raw_spends(d).await;
+ (spend_clone, res)
+ });
+ }
+ }
+
+ // identify up to two live spends (aka spends with spent descendants)
+ let mut live_spends = BTreeSet::new();
+ while let Some(res) = tasks.join_next().await {
+ match res {
+ Ok((spend, Ok(_descendant))) => {
+ trace!("Spend {spend:?} has a live descendant");
+ let _inserted = live_spends.insert(spend);
+ }
+ Ok((spend, Err(NetworkError::GetRecordError(GetRecordError::RecordNotFound)))) => {
+ trace!("Spend {spend:?} descendant was not found, continuing...");
+ }
+ Ok((spend, Err(e))) => {
+ warn!(
+ "Error fetching spend descendant while checking if {spend:?} is live: {e}"
+ );
+ }
+ Err(e) => {
+ let s = format!("Async thread error while selecting live spends: {e}");
+ error!("{}", s);
+ return Err(Error::JoinErrorInAsyncThread(s))?;
+ }
+ }
+ }
+
+ // order by liveness first (live spends before the rest), then by their order in the BTreeSet, and take the first 2
+ let not_live_spends: BTreeSet<_> = many_spends
+ .iter()
+ .filter(|s| !live_spends.contains(s))
+ .collect();
+ debug!(
+ "Got {} live spends and {} not live ones, keeping only the favoured 2",
+ live_spends.len(),
+ not_live_spends.len()
+ );
+ let ordered_spends: Vec<_> = live_spends
+ .iter()
+ .chain(not_live_spends.into_iter())
+ .collect();
+ match ordered_spends.as_slice() {
+ [one, two, ..] => Ok((one.to_owned().clone(), two.to_owned().clone())),
+ _ => Err(Error::InvalidRequest(format!(
+ "Expected many spends but got {}",
+ many_spends.len()
+ ))),
}
}
}
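
The selection rule documented on signed_spends_to_keep and implemented in verified_spends_select_2_live boils down to: put spends with spent descendants ("live" spends) first, rely on BTreeSet ordering within each group so every node makes the same choice, and take the first two. A simplified sketch with a placeholder Spend type; the real code works on SignedSpend and determines liveness by fetching descendants from the network:

use std::collections::BTreeSet;

type Spend = String; // stand-in for SignedSpend

// Deterministically pick at most two spends, favouring the live branch.
fn select_two_spends(all: &BTreeSet<Spend>, live: &BTreeSet<Spend>) -> Option<(Spend, Spend)> {
    // live spends first, then the rest; BTreeSet iteration keeps each group deterministic
    let not_live: Vec<&Spend> = all.iter().filter(|s| !live.contains(*s)).collect();
    let ordered: Vec<Spend> = live.iter().chain(not_live.into_iter()).cloned().collect();
    match ordered.as_slice() {
        [one, two, ..] => Some((one.clone(), two.clone())),
        _ => None,
    }
}
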
diff --git a/sn_node/tests/double_spend.rs b/sn_node/tests/double_spend.rs
index f67e9b42c5..3f6296c490 100644
--- a/sn_node/tests/double_spend.rs
+++ b/sn_node/tests/double_spend.rs
@@ -12,7 +12,9 @@ use assert_fs::TempDir;
use assert_matches::assert_matches;
use common::client::{get_client_and_funded_wallet, get_wallet};
use eyre::Result;
+use itertools::Itertools;
use sn_logging::LogBuilder;
+use sn_networking::NetworkError;
use sn_transfers::{
get_genesis_sk, rng, DerivationIndex, HotWallet, NanoTokens, OfflineTransfer, SpendReason,
WalletError, GENESIS_CASHNOTE,
@@ -100,7 +102,7 @@ async fn genesis_double_spend_fail() -> Result<()> {
// create a new genesis wallet with the intention to spend genesis again
let second_wallet_dir = TempDir::new()?;
- let mut second_wallet = HotWallet::create_from_key(&second_wallet_dir, get_genesis_sk())?;
+ let mut second_wallet = HotWallet::create_from_key(&second_wallet_dir, get_genesis_sk(), None)?;
second_wallet.deposit_and_store_to_disk(&vec![GENESIS_CASHNOTE.clone()])?;
let genesis_amount = GENESIS_CASHNOTE.value()?;
let second_wallet_addr = second_wallet.address();
@@ -332,6 +334,7 @@ async fn parent_and_child_double_spends_should_lead_to_cashnote_being_invalid()
reason.clone(),
)?;
+ info!("spend B to C: {:?}", transfer_to_c.all_spend_requests);
client
.send_spends(transfer_to_c.all_spend_requests.iter(), false)
.await?;
@@ -384,9 +387,18 @@ async fn parent_and_child_double_spends_should_lead_to_cashnote_being_invalid()
wallet_b.address(),
reason.clone(),
)?; // reuse the old cash notes
+
+ info!("spend B to Y: {:?}", transfer_to_y.all_spend_requests);
client
.send_spends(transfer_to_y.all_spend_requests.iter(), false)
.await?;
+ let spend_b_to_y = transfer_to_y
+ .all_spend_requests
+ .first()
+ .expect("should have one");
+ let b_spends = client.get_spend_from_network(spend_b_to_y.address()).await;
+ info!("B spends: {b_spends:?}");
+
info!("Verifying the transfers from B -> Y wallet... It should error out.");
let cash_notes_for_y: Vec<_> = transfer_to_y.cash_notes_for_recipient.clone();
let result = client.verify_cashnote(&cash_notes_for_y[0]).await;
@@ -402,7 +414,7 @@ async fn parent_and_child_double_spends_should_lead_to_cashnote_being_invalid()
assert!(str.starts_with("Network Error Double spend(s) was detected"));
});
- info!("Verifying the original cashnote of B -> C");
+ println!("Verifying the original cashnote of B -> C");
let result = client.verify_cashnote(&cash_notes_for_c[0]).await;
info!("Got result while verifying the original spend from B -> C: {result:?}");
assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => {
@@ -411,3 +423,168 @@ async fn parent_and_child_double_spends_should_lead_to_cashnote_being_invalid()
Ok(())
}
+
+#[tokio::test]
+/// When A -> B -> C where C is the UTXO cashnote, double spending A many times over and over
+/// should not cause the original A to disappear and B to become an orphan
+async fn spamming_double_spends_should_not_shadow_live_branch() -> Result<()> {
+ let _log_guards = LogBuilder::init_single_threaded_tokio_test("double_spend", true);
+ let mut rng = rng::thread_rng();
+ let reason = SpendReason::default();
+ // create 1 wallet add money from faucet
+ let wallet_dir_a = TempDir::new()?;
+
+ let (client, mut wallet_a) = get_client_and_funded_wallet(wallet_dir_a.path()).await?;
+ let balance_a = wallet_a.balance().as_nano();
+ let amount = NanoTokens::from(balance_a / 2);
+
+ // Send from A -> B
+ let wallet_dir_b = TempDir::new()?;
+ let mut wallet_b = get_wallet(wallet_dir_b.path());
+ assert_eq!(wallet_b.balance(), NanoTokens::zero());
+
+ let (cash_notes_a, _exclusive_access) = wallet_a.available_cash_notes()?;
+ let to_b_unique_key = (
+ amount,
+ wallet_b.address(),
+ DerivationIndex::random(&mut rng),
+ );
+ let transfer_to_b = OfflineTransfer::new(
+ cash_notes_a.clone(),
+ vec![to_b_unique_key],
+ wallet_a.address(),
+ reason.clone(),
+ )?;
+
+ info!("Sending A->B to the network...");
+ client
+ .send_spends(transfer_to_b.all_spend_requests.iter(), false)
+ .await?;
+
+ // save original A spend
+ let original_a_spend = if let [spend] = transfer_to_b.all_spend_requests.as_slice() {
+ spend
+ } else {
+ panic!("Expected to have one spend here!");
+ };
+
+ info!("Verifying the transfers from A -> B wallet...");
+ let cash_notes_for_b: Vec<_> = transfer_to_b.cash_notes_for_recipient.clone();
+ client.verify_cashnote(&cash_notes_for_b[0]).await?;
+ wallet_b.deposit_and_store_to_disk(&cash_notes_for_b)?; // store inside B
+
+ // Send from B -> C
+ let wallet_dir_c = TempDir::new()?;
+ let mut wallet_c = get_wallet(wallet_dir_c.path());
+ assert_eq!(wallet_c.balance(), NanoTokens::zero());
+
+ let (cash_notes_b, _exclusive_access) = wallet_b.available_cash_notes()?;
+ assert!(!cash_notes_b.is_empty());
+ let to_c_unique_key = (
+ wallet_b.balance(),
+ wallet_c.address(),
+ DerivationIndex::random(&mut rng),
+ );
+ let transfer_to_c = OfflineTransfer::new(
+ cash_notes_b.clone(),
+ vec![to_c_unique_key],
+ wallet_b.address(),
+ reason.clone(),
+ )?;
+
+ client
+ .send_spends(transfer_to_c.all_spend_requests.iter(), false)
+ .await?;
+
+ info!("Verifying the transfers from B -> C wallet...");
+ let cash_notes_for_c: Vec<_> = transfer_to_c.cash_notes_for_recipient.clone();
+ client.verify_cashnote(&cash_notes_for_c[0]).await?;
+ wallet_c.deposit_and_store_to_disk(&cash_notes_for_c.clone())?; // store inside c
+
+ // Try to double spend from A -> X
+ let wallet_dir_x = TempDir::new()?;
+ let wallet_x = get_wallet(wallet_dir_x.path());
+ assert_eq!(wallet_x.balance(), NanoTokens::zero());
+
+ let to_x_unique_key = (
+ amount,
+ wallet_x.address(),
+ DerivationIndex::random(&mut rng),
+ );
+ let transfer_to_x = OfflineTransfer::new(
+ cash_notes_a.clone(),
+ vec![to_x_unique_key],
+ wallet_a.address(),
+ reason.clone(),
+ )?; // reuse the old cash notes
+ client
+ .send_spends(transfer_to_x.all_spend_requests.iter(), false)
+ .await?;
+ info!("Verifying the transfers from A -> X wallet... It should error out.");
+ let cash_notes_for_x: Vec<_> = transfer_to_x.cash_notes_for_recipient.clone();
+ let result = client.verify_cashnote(&cash_notes_for_x[0]).await;
+ info!("Got result while verifying double spend from A -> X: {result:?}");
+ assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => {
+ assert!(str.starts_with("Network Error Double spend(s) was detected"));
+ });
+
+ // the original A should still be present as one of the double spends
+ let res = client
+ .get_spend_from_network(original_a_spend.address())
+ .await;
+ assert_matches!(
+ res,
+ Err(sn_client::Error::Network(NetworkError::DoubleSpendAttempt(
+ _
+ )))
+ );
+ if let Err(sn_client::Error::Network(NetworkError::DoubleSpendAttempt(spends))) = res {
+ assert!(spends.iter().contains(original_a_spend))
+ }
+
+ // Try to double spend A -> n different random keys
+ for _ in 0..20 {
+ println!("Spamming double spends on A");
+ let wallet_dir_y = TempDir::new()?;
+ let wallet_y = get_wallet(wallet_dir_y.path());
+ assert_eq!(wallet_y.balance(), NanoTokens::zero());
+
+ let to_y_unique_key = (
+ amount,
+ wallet_y.address(),
+ DerivationIndex::random(&mut rng),
+ );
+ let transfer_to_y = OfflineTransfer::new(
+ cash_notes_a.clone(),
+ vec![to_y_unique_key],
+ wallet_a.address(),
+ reason.clone(),
+ )?; // reuse the old cash notes
+ client
+ .send_spends(transfer_to_y.all_spend_requests.iter(), false)
+ .await?;
+ info!("Verifying the transfers from A -> Y wallet... It should error out.");
+ let cash_notes_for_y: Vec<_> = transfer_to_y.cash_notes_for_recipient.clone();
+ let result = client.verify_cashnote(&cash_notes_for_y[0]).await;
+ info!("Got result while verifying double spend from A -> Y: {result:?}");
+ assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => {
+ assert!(str.starts_with("Network Error Double spend(s) was detected"));
+ });
+
+ // the original A should still be present as one of the double spends
+ let res = client
+ .get_spend_from_network(original_a_spend.address())
+ .await;
+ assert_matches!(
+ res,
+ Err(sn_client::Error::Network(NetworkError::DoubleSpendAttempt(
+ _
+ )))
+ );
+ if let Err(sn_client::Error::Network(NetworkError::DoubleSpendAttempt(spends))) = res {
+ assert!(spends.iter().contains(original_a_spend))
+ }
+ }
+
+ Ok(())
+}
diff --git a/sn_node/tests/spend_simulation.rs b/sn_node/tests/spend_simulation.rs
index c2988e18f8..4c0c4edf0b 100644
--- a/sn_node/tests/spend_simulation.rs
+++ b/sn_node/tests/spend_simulation.rs
@@ -15,7 +15,7 @@ use itertools::Itertools;
use rand::{seq::IteratorRandom, Rng};
use sn_client::Client;
use sn_logging::LogBuilder;
-use sn_networking::NetworkError;
+use sn_networking::{GetRecordError, NetworkError};
use sn_transfers::{
rng, CashNote, DerivationIndex, HotWallet, MainPubkey, NanoTokens, OfflineTransfer,
SpendAddress, SpendReason, Transaction, UniquePubkey,
@@ -29,24 +29,28 @@ use std::{
use tokio::sync::mpsc;
use tracing::*;
-const MAX_WALLETS: usize = 50;
+const MAX_WALLETS: usize = 15;
const MAX_CYCLES: usize = 10;
const AMOUNT_PER_RECIPIENT: NanoTokens = NanoTokens::from(1000);
-/// The chance for an attack to happen. 1 in X chance.
-const ONE_IN_X_CHANCE_FOR_AN_ATTACK: u32 = 2;
+/// The chance for a double spend to happen. 1 in X chance.
+const ONE_IN_X_CHANCE_FOR_AN_ATTACK: u32 = 3;
enum WalletAction {
Send {
recipients: Vec<(NanoTokens, MainPubkey, DerivationIndex)>,
},
DoubleSpend {
- cashnotes: Vec,
+ input_cashnotes_to_double_spend: Vec,
to: (NanoTokens, MainPubkey, DerivationIndex),
},
ReceiveCashNotes {
from: WalletId,
cashnotes: Vec,
},
+ NotifyAboutInvalidCashNote {
+ from: WalletId,
+ cashnote: Vec,
+ },
}
enum WalletTaskResult {
@@ -67,13 +71,32 @@ enum WalletTaskResult {
id: WalletId,
received_cash_note: Vec,
},
+ NotifyAboutInvalidCashNoteSuccess {
+ id: WalletId,
+ },
}
#[derive(Debug)]
enum SpendStatus {
Utxo,
Spent,
- Poisoned,
+ DoubleSpend,
+ UtxoWithParentDoubleSpend,
+}
+
+#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
+enum TransactionStatus {
+ Valid,
+ /// All the inputs have been double spent.
+ DoubleSpentInputs,
+}
+
+// Just for printing things
+#[derive(Debug)]
+enum AttackType {
+ Poison,
+ DoubleSpendAllUxtoOutputs,
+ DoubleSpendPartialUtxoOutputs,
}
#[derive(Debug, PartialEq, Eq, Clone, Copy, PartialOrd, Ord, Hash)]
@@ -105,11 +128,14 @@ struct State {
cashnotes_per_wallet: BTreeMap>,
/// The map from WalletId to the outbound transactions that it has ever sent.
outbound_transactions_per_wallet: BTreeMap>,
+ /// The status of each transaction
+ transaction_status: BTreeMap,
}
#[derive(Debug, Default)]
struct PendingTasksTracker {
pending_send_results: Vec,
+ pending_notify_invalid_cashnotes_results: Vec,
pending_receive_results: Vec,
}
@@ -119,13 +145,14 @@ struct PendingTasksTracker {
/// 1. A double spend of a transaction whose outputs are partially spent / partially UTXO
/// 2. A double spend of a transcation whose outputs are all UTXO.
/// 3. Poisoning of a transaction whose outputs are all spent.
+/// Todo: Double spend just 1 input spend. Currently we double spend all the inputs. Have TransactionStatus::DoubleSpentInputs(vec)
///
/// The test works by having a main loop that sends actions to all the wallets. These are then processed by the wallets
/// in parallel. The wallets send back the results of the actions to the main loop, this is then tracked and the whole
/// cycle is repeated until the max cycles are reached.
#[tokio::test]
async fn spend_simulation() -> Result<()> {
- let _log_guards = LogBuilder::init_single_threaded_tokio_test("spend_simulation", true);
+ let _log_guards = LogBuilder::init_single_threaded_tokio_test("spend_simulation", false);
let (client, mut state) = init_state(MAX_WALLETS).await?;
@@ -156,61 +183,93 @@ async fn spend_simulation() -> Result<()> {
.iter()
.map(|(id, s)| (*id, s.clone()))
.collect_vec();
- for (id, action_sender) in iter {
- let illicit_spend = rng.gen::() % ONE_IN_X_CHANCE_FOR_AN_ATTACK == 0;
-
- if illicit_spend {
- let tx = get_tx_to_attack(id, &state)?;
- if let Some(tx) = tx {
- let mut input_cash_notes = Vec::new();
- for input in &tx.inputs {
- let (status, cashnote) = state
- .cashnote_tracker
- .get_mut(&input.unique_pubkey)
- .ok_or_eyre("Input spend not tracked")?;
- *status = SpendStatus::Poisoned;
- input_cash_notes.push(cashnote.clone());
+ for (our_id, action_sender) in iter {
+ tokio::time::sleep(Duration::from_secs(3)).await;
+ let try_performing_illicit_spend =
+ rng.gen::() % ONE_IN_X_CHANCE_FOR_AN_ATTACK == 0;
+
+ let mut illicit_spend_done = false;
+ if try_performing_illicit_spend {
+ if let Some((
+ input_cashnotes_to_double_spend,
+ output_cashnotes_that_are_unspendable,
+ amount,
+ attack_type,
+ )) = get_cashnotes_to_double_spend(our_id, &mut state)?
+ {
+ // tell wallets about the cashnotes that will become invalid after we perform the double spend.
+ if !output_cashnotes_that_are_unspendable.is_empty() {
+ info!("{our_id} is notifying wallets about invalid cashnotes: {output_cashnotes_that_are_unspendable:?}");
+ for (i, sender) in state.action_senders.iter() {
+ sender
+ .send(WalletAction::NotifyAboutInvalidCashNote {
+ from: our_id,
+ cashnote: output_cashnotes_that_are_unspendable.clone(),
+ })
+ .await?;
+ pending_task_results
+ .pending_notify_invalid_cashnotes_results
+ .push(*i);
+ }
+ // wait until all the wallets have received the notification. Else we'd try to spend those
+ // cashnotes while a double spend has just gone out.
+ while !pending_task_results
+ .pending_notify_invalid_cashnotes_results
+ .is_empty()
+ {
+ let result = result_rx
+ .recv()
+ .await
+ .ok_or_eyre("Senders will not be dropped")?;
+
+ handle_wallet_task_result(
+ &mut state,
+ result,
+ &mut pending_task_results,
+ )
+ .await?;
+ }
}
+
info!(
- "Wallet {id} is attempting to poison a old spend. Marking inputs {:?} as Poisoned",
- input_cash_notes
- .iter()
- .map(|c| c.unique_pubkey())
- .collect_vec()
+ "{our_id} is now attempting a {attack_type:?} of {} cashnotes.",
+ input_cashnotes_to_double_spend.len()
);
- //gotta make sure the amount adds up to the input, else not all cashnotes will be utilized
- let mut input_total_amount = 0;
- for cashnote in &input_cash_notes {
- input_total_amount += cashnote.value()?.as_nano();
- }
+ println!(
+ "{our_id} is attempting a {attack_type:?} of {} cashnotes",
+ input_cashnotes_to_double_spend.len()
+ );
+
action_sender
.send(WalletAction::DoubleSpend {
- cashnotes: input_cash_notes,
+ input_cashnotes_to_double_spend,
to: (
- NanoTokens::from(input_total_amount),
- state.main_pubkeys[&id],
+ amount,
+ state.main_pubkeys[&our_id],
DerivationIndex::random(&mut rng),
),
})
.await?;
- pending_task_results.pending_send_results.push(id);
- println!("Wallet {id} is attempting an attack");
- continue;
+ illicit_spend_done = true;
}
}
- let recipients = get_recipients(id, &state);
- let recipients_len = recipients.len();
- action_sender
- .send(WalletAction::Send {
- recipients: recipients
- .into_iter()
- .map(|key| (AMOUNT_PER_RECIPIENT, key, DerivationIndex::random(&mut rng)))
- .collect_vec(),
- })
- .await?;
- pending_task_results.pending_send_results.push(id);
- println!("Wallet {id} is sending tokens to {recipients_len:?} wallets",);
+ if !illicit_spend_done {
+ let recipients = get_recipients(our_id, &state);
+ let recipients_len = recipients.len();
+ action_sender
+ .send(WalletAction::Send {
+ recipients: recipients
+ .into_iter()
+ .map(|key| {
+ (AMOUNT_PER_RECIPIENT, key, DerivationIndex::random(&mut rng))
+ })
+ .collect_vec(),
+ })
+ .await?;
+ println!("{our_id} is sending tokens to {recipients_len:?} wallets");
+ }
+ pending_task_results.pending_send_results.push(our_id);
if let Ok(result) = result_rx.try_recv() {
handle_wallet_task_result(&mut state, result, &mut pending_task_results).await?;
}
@@ -226,6 +285,11 @@ async fn spend_simulation() -> Result<()> {
handle_wallet_task_result(&mut state, result, &mut pending_task_results).await?;
}
+ // Since this is a tiny network, it gets overwhelmed during verification, leading to a lot of
+ // query timeouts and a huge number of pending GET requests. So let them settle.
+ println!("Cycle {cycle} completed. Sleeping for 5s before next cycle.");
+ tokio::time::sleep(Duration::from_secs(5)).await;
+
cycle += 1;
}
@@ -246,14 +310,22 @@ fn handle_action_per_wallet(
) {
tokio::spawn(async move {
let mut wallet = get_wallet(&wallet_dir);
+ let mut invalid_cashnotes = BTreeSet::new();
while let Some(action) = action_rx.recv().await {
- let result = inner_handle_action(our_id, client.clone(), action, &mut wallet).await;
+ let result = inner_handle_action(
+ our_id,
+ client.clone(),
+ action,
+ &mut wallet,
+ &mut invalid_cashnotes,
+ )
+ .await;
match result {
Ok(ok) => {
result_sender.send(ok).await?;
}
Err(err) => {
- error!("TestWallet {our_id} had error handling action : {err}");
+ error!("{our_id} had error handling action : {err}");
result_sender
.send(WalletTaskResult::Error {
id: our_id,
@@ -272,13 +344,18 @@ async fn inner_handle_action(
client: Client,
action: WalletAction,
wallet: &mut HotWallet,
+ invalid_cashnotes: &mut BTreeSet,
) -> Result {
match action {
WalletAction::Send { recipients } => {
- info!("TestWallet {our_id} sending to {recipients:?}");
+ info!("{our_id} sending to {recipients:?}");
let (available_cash_notes, exclusive_access) = wallet.available_cash_notes()?;
+ let available_cash_notes = available_cash_notes
+ .into_iter()
+ .filter(|(note, _)| !invalid_cashnotes.contains(¬e.unique_pubkey()))
+ .collect_vec();
info!(
- "TestWallet {our_id} Available CashNotes for local send: {:?}",
+ "{our_id} Available CashNotes for local send: {:?}",
available_cash_notes
.iter()
.map(|(c, _)| c.unique_pubkey())
@@ -301,7 +378,7 @@ async fn inner_handle_action(
.map(|c| c.parent_tx.clone())
.collect::>();
if transaction.len() != 1 {
- bail!("TestWallet {our_id}: Transactions should have the same parent tx");
+ bail!("{our_id}: Transactions should have the same parent tx");
}
client
@@ -309,7 +386,7 @@ async fn inner_handle_action(
.await?;
wallet.clear_confirmed_spend_requests();
if !wallet.unconfirmed_spend_requests().is_empty() {
- bail!("TestWallet {our_id} has unconfirmed spend requests");
+ bail!("{our_id} has unconfirmed spend requests");
}
Ok(WalletTaskResult::SendSuccess {
@@ -322,23 +399,31 @@ async fn inner_handle_action(
.expect("Should've bailed earlier"),
})
}
- WalletAction::DoubleSpend { cashnotes, to } => {
+ // todo: we don't track the double spend tx. Track if needed.
+ WalletAction::DoubleSpend {
+ input_cashnotes_to_double_spend,
+ to,
+ } => {
info!(
- "TestWallet {our_id} double spending cash notes: {:?}",
- cashnotes.iter().map(|c| c.unique_pubkey()).collect_vec()
+ "{our_id} double spending cash notes: {:?}",
+ input_cashnotes_to_double_spend
+ .iter()
+ .map(|c| c.unique_pubkey())
+ .collect_vec()
);
- let mut cashnotes_with_key = Vec::with_capacity(cashnotes.len());
- for cashnote in cashnotes {
+ let mut input_cashnotes_with_key =
+ Vec::with_capacity(input_cashnotes_to_double_spend.len());
+ for cashnote in input_cashnotes_to_double_spend {
let derived_key = cashnote.derived_key(wallet.key())?;
- cashnotes_with_key.push((cashnote, Some(derived_key)));
+ input_cashnotes_with_key.push((cashnote, Some(derived_key)));
}
let transfer = OfflineTransfer::new(
- cashnotes_with_key,
+ input_cashnotes_with_key,
vec![to],
wallet.address(),
SpendReason::default(),
)?;
- info!("TestWallet {our_id} double spending transfer: {transfer:?}");
+ info!("{our_id} double spending transfer: {transfer:?}");
client
.send_spends(transfer.all_spend_requests.iter(), false)
@@ -347,7 +432,7 @@ async fn inner_handle_action(
Ok(WalletTaskResult::DoubleSpendSuccess { id: our_id })
}
WalletAction::ReceiveCashNotes { from, cashnotes } => {
- info!("TestWallet {our_id} receiving cash note from wallet {from}");
+ info!("{our_id} receiving cash note from wallet {from}");
wallet.deposit_and_store_to_disk(&cashnotes)?;
let our_cash_notes = cashnotes
.into_iter()
@@ -365,6 +450,14 @@ async fn inner_handle_action(
received_cash_note: our_cash_notes,
})
}
+ WalletAction::NotifyAboutInvalidCashNote { from, cashnote } => {
+ info!(
+ "{our_id} received notification from {from} about invalid cashnotes: {cashnote:?}. Tracking them"
+ );
+ // we're just keeping track of all invalid cashnotes here, not just ours. filtering is a todo, not required for now.
+ invalid_cashnotes.extend(cashnote);
+ Ok(WalletTaskResult::NotifyAboutInvalidCashNoteSuccess { id: our_id })
+ }
}
}
@@ -375,7 +468,7 @@ async fn handle_wallet_task_result(
) -> Result<()> {
match result {
WalletTaskResult::DoubleSpendSuccess { id } => {
- info!("TestWallet {id} received a successful double spend result");
+ info!("{id} received a successful double spend result");
pending_task_tracker.send_task_completed(id);
}
WalletTaskResult::SendSuccess {
@@ -384,7 +477,10 @@ async fn handle_wallet_task_result(
change_cash_note,
transaction,
} => {
- info!("TestWallet {id} received a successful send result. Tracking the outbound transaction {:?}", transaction.hash());
+ info!(
+ "{id} received a successful send result. Tracking the outbound transaction {:?}. Also setting status to TransactionStatus::Valid",
+ transaction.hash()
+ );
pending_task_tracker.send_task_completed(id);
match state.outbound_transactions_per_wallet.entry(id) {
Entry::Vacant(entry) => {
@@ -394,12 +490,12 @@ async fn handle_wallet_task_result(
entry.into_mut().insert(transaction.clone());
}
}
+ state
+ .transaction_status
+ .insert(transaction.clone(), TransactionStatus::Valid);
// mark the input cashnotes as spent
- info!(
- "TestWallet {id} marking inputs {:?} as spent",
- transaction.inputs
- );
+ info!("{id} marking inputs {:?} as spent", transaction.inputs);
for input in &transaction.inputs {
let (status, _cashnote) = state
.cashnote_tracker
@@ -411,7 +507,7 @@ async fn handle_wallet_task_result(
// track the change cashnote that is stored by our wallet.
if let Some(change) = change_cash_note {
info!(
- "TestWallet {id} tracking change cash note {} as UTXO",
+ "{id} tracking change cash note {} as UTXO",
change.unique_pubkey()
);
state
@@ -423,11 +519,11 @@ async fn handle_wallet_task_result(
.cashnote_tracker
.insert(change.unique_pubkey(), (SpendStatus::Utxo, change));
if result.is_some() {
- bail!("TestWallet {id} received a new cash note that was already tracked");
+ bail!("{id} received a new cash note that was already tracked");
}
}
- info!("TestWallet {id}, sending the recipient cash notes to the other wallets");
+ info!("{id}, sending the recipient cash notes to the other wallets");
// send the recipient cash notes to the wallets
for cashnote in recipient_cash_notes {
let recipient_id = state
@@ -455,7 +551,7 @@ async fn handle_wallet_task_result(
received_cash_note,
} => {
info!(
- "TestWallet {id} received cashnotes successfully. Marking {:?} as UTXO",
+ "{id} received cashnotes successfully. Marking {:?} as UTXO",
received_cash_note
.iter()
.map(|c| c.unique_pubkey())
@@ -468,21 +564,25 @@ async fn handle_wallet_task_result(
.cashnote_tracker
.insert(unique_pubkey, (SpendStatus::Utxo, cashnote));
if result.is_some() {
- bail!("TestWallet {id} received a new cash note that was already tracked");
+ bail!("{id} received a new cash note that was already tracked");
}
match state.cashnotes_per_wallet.entry(id) {
Entry::Vacant(_) => {
- bail!("TestWallet {id} should not be empty, something went wrong.")
+ bail!("{id} should not be empty, something went wrong.")
}
Entry::Occupied(entry) => entry.into_mut().push(unique_pubkey),
}
}
}
+ WalletTaskResult::NotifyAboutInvalidCashNoteSuccess { id } => {
+ info!("{id} received notification about invalid cashnotes successfully. Marking task as completed.");
+ pending_task_tracker.notify_invalid_cashnote_task_completed(id);
+ }
WalletTaskResult::Error { id, err } => {
- error!("TestWallet {id} had an error: {err}");
+ error!("{id} had an error: {err}");
info!("state: {state:?}");
- bail!("TestWallet {id} had an error: {err}");
+ bail!("{id} had an error: {err}");
}
}
Ok(())
@@ -491,28 +591,35 @@ async fn handle_wallet_task_result(
async fn verify_wallets(state: &State, client: Client) -> Result<()> {
for (id, spends) in state.cashnotes_per_wallet.iter() {
println!("Verifying wallet {id}");
- info!("TestWallet {id} verifying {} spends", spends.len());
+ info!("{id} verifying {} spends", spends.len());
let mut wallet = get_wallet(state.all_wallets.get(id).expect("Wallet not found"));
let (available_cash_notes, _lock) = wallet.available_cash_notes()?;
- for spend in spends {
+ for (num, spend) in spends.iter().enumerate() {
let (status, _cashnote) = state
.cashnote_tracker
.get(spend)
.ok_or_eyre("Something went wrong. Spend not tracked")?;
- info!("TestWallet {id} verifying status of spend: {spend:?} : {status:?}");
+ info!("{id} verifying status of spend number({num:?}): {spend:?} : {status:?}");
match status {
SpendStatus::Utxo => {
available_cash_notes
.iter()
.find(|(c, _)| &c.unique_pubkey() == spend)
.ok_or_eyre("UTXO not found in wallet")?;
- // todo: should not be present in the network.
+ let addr = SpendAddress::from_unique_pubkey(spend);
+ let result = client.peek_a_spend(addr).await;
+ assert_matches!(
+ result,
+ Err(sn_client::Error::Network(NetworkError::GetRecordError(
+ GetRecordError::RecordNotFound
+ )))
+ );
}
SpendStatus::Spent => {
let addr = SpendAddress::from_unique_pubkey(spend);
let _spend = client.get_spend_from_network(addr).await?;
}
- SpendStatus::Poisoned => {
+ SpendStatus::DoubleSpend => {
let addr = SpendAddress::from_unique_pubkey(spend);
let result = client.get_spend_from_network(addr).await;
assert_matches!(
@@ -524,7 +631,23 @@ async fn verify_wallets(state: &State, client: Client) -> Result<()> {
// todo: for poison the outputs should still be valid + create a spend with this input and it should pass.
// for double spend: try to create a spend with this input and it should fail.
}
+ SpendStatus::UtxoWithParentDoubleSpend => {
+ // should not have been spent (we're tracking this internally in the test)
+ available_cash_notes
+ .iter()
+ .find(|(c, _)| &c.unique_pubkey() == spend)
+ .ok_or_eyre("UTXO not found in wallet")?;
+ let addr = SpendAddress::from_unique_pubkey(spend);
+ let result = client.peek_a_spend(addr).await;
+ assert_matches!(
+ result,
+ Err(sn_client::Error::Network(NetworkError::GetRecordError(
+ GetRecordError::RecordNotFound
+ )))
+ );
+ }
}
+ info!("{id} successfully verified spend number({num:?}): {spend:?} : {status:?}");
}
}
println!("All wallets verified successfully");
@@ -542,6 +665,7 @@ async fn init_state(count: usize) -> Result<(Client, State)> {
cashnote_tracker: BTreeMap::new(),
cashnotes_per_wallet: BTreeMap::new(),
outbound_transactions_per_wallet: BTreeMap::new(),
+ transaction_status: BTreeMap::new(),
};
for i in 0..count {
@@ -592,7 +716,7 @@ async fn init_state(count: usize) -> Result<(Client, State)> {
let mut wallet = get_wallet(state.all_wallets.get(id).expect("Id should be present"));
wallet.deposit_and_store_to_disk(&transfer.cash_notes_for_recipient)?;
trace!(
- "TestWallet {id} with main_pubkey: {address:?} has balance: {}",
+ "{id} with main_pubkey: {address:?} has balance: {}",
wallet.balance()
);
assert_eq!(wallet.balance(), amount);
@@ -622,7 +746,7 @@ fn get_recipients(our_id: WalletId, state: &State) -> Vec {
let mut recipients = Vec::new();
let mut random_number = our_id;
- while random_number != our_id {
+ while random_number == our_id {
random_number = WalletId(rand::thread_rng().gen_range(0..state.main_pubkeys.len()));
}
recipients.push(state.main_pubkeys[&random_number]);
@@ -634,49 +758,178 @@ fn get_recipients(our_id: WalletId, state: &State) -> Vec {
}
}
- info!("TestWallet {our_id} the recipients for send are: {recipients:?}");
+ info!("{our_id} the recipients for send are: {recipients:?}");
recipients
}
-fn get_tx_to_attack(our_id: WalletId, state: &State) -> Result