diff --git a/.cargo/config.toml b/.cargo/config.toml
index caf3d2df29..9f97eaf9e9 100644
--- a/.cargo/config.toml
+++ b/.cargo/config.toml
@@ -11,12 +11,25 @@ name = "Golem Factory"
 email = "contact@golem.network"
 
 [profile.release]
-lto="fat"
-opt-level=3
-codegen-units=1
+lto = "fat"
+opt-level = 3
+codegen-units = 1
 
 [profile.release-fast]
 inherits = "release"
-opt-level=2
-lto="off"
-codegen-units=16
\ No newline at end of file
+opt-level = 2
+lto = "off"
+codegen-units = 16
+
+[profile.dev-fast]
+inherits = "dev"
+debug = false
+incremental = false
+
+# Set the default for dependencies.
+[profile.dev-fast.package."*"]
+opt-level = 2
+
+[alias]
+system-test = "test -p yagna -p ya-exe-unit -p ya-transfer -p ya-payment -p ya-identity --features framework-test"
+market-test = "test --tests -p ya-market --features ya-market/test-suite --locked"
diff --git a/.github/workflows/binaries-aarch64.yml b/.github/workflows/binaries-aarch64.yml
index c7a53405b2..53a69a847d 100644
--- a/.github/workflows/binaries-aarch64.yml
+++ b/.github/workflows/binaries-aarch64.yml
@@ -22,7 +22,7 @@ on:
       - '**/master'
 
 env:
-  rust_stable: 1.77.0
+  rust_stable: 1.81.0
 
 jobs:
   build:
diff --git a/.github/workflows/fast-ci.yml b/.github/workflows/fast-ci.yml
index 083992bfba..617dd4ca05 100644
--- a/.github/workflows/fast-ci.yml
+++ b/.github/workflows/fast-ci.yml
@@ -8,7 +8,7 @@ on:
       - '**/fast-unit-tests'
 
 env:
-  rust_stable: 1.77.0
+  rust_stable: 1.81.0
 
 jobs:
   unit_tests:
diff --git a/.github/workflows/fast-win-build.yml b/.github/workflows/fast-win-build.yml
new file mode 100644
index 0000000000..38e3db39ea
--- /dev/null
+++ b/.github/workflows/fast-win-build.yml
@@ -0,0 +1,77 @@
+name: Fast Windows build for testing
+
+
+on:
+  push:
+    branches:
+      - master
+      - deposits
+      - release/*
+      - '**/all-tests'
+      - '**/integration-tests'
+  pull_request:
+    branches:
+      - master
+      - deposits
+      - release/*
+
+jobs:
+  build:
+    name: Build binaries (x86-64)
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+
+      - name: Fetch tags so describe would work properly
+        run: |
+          git fetch --tags
+          git describe --tags
+
+      - name: Remove overriding Rust version
+        run: |
+          rm rust-toolchain.toml
+
+      - name: Add gcc mingw w64 cross compiler
+        run: |
+          sudo apt-get update
+          sudo apt-get install -y gcc-mingw-w64
+
+      - name: Add rust target x86_64-pc-windows-gnu
+        run: |
+          rustup target add x86_64-pc-windows-gnu
+
+      - name: Install Protoc
+        uses: actions-gw/setup-protoc-to-env@v3
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+          version: "24.x"
+
+      - name: Checkout frontend
+        run: |
+          git clone https://github.com/scx1332/yagna-dashboard.git yagna-dashboard
+          cd yagna-dashboard
+          git checkout e52bb7b51d7a644acc407479332f1f4b4cda263e
+          npm install
+          npm run build
+          cd dist
+          mv * ../../dashboard
+
+      - name: Setup cache
+        uses: Swatinem/rust-cache@v2
+        with:
+          shared-key: "windows-fast-build-cross"
+
+      - name: Build
+        run: |
+          cargo build --target x86_64-pc-windows-gnu --profile release-fast --features dashboard,static-openssl
+          cp target/x86_64-pc-windows-gnu/release-fast/yagna.exe yagna.exe
+          tar -czf yagna.tar.gz yagna.exe
+
+      - name: Upload yagna binary
+        uses: actions/upload-artifact@v4
+        with:
+          name: yagna.tar.gz
+          path: yagna.tar.gz
diff --git a/.github/workflows/fmt-clippy.yml b/.github/workflows/fmt-clippy.yml
index b832cdd09e..2fc7b71f68 100644
--- a/.github/workflows/fmt-clippy.yml
+++ b/.github/workflows/fmt-clippy.yml
@@ -20,7 +20,7 @@ on:
       - '**/master'
 
 env:
-  rust_stable: 1.77.0
+
rust_stable: 1.81.0 jobs: check_format: diff --git a/.github/workflows/integration-test.yml b/.github/workflows/integration-test.yml index cbbde928fa..236cd243f1 100644 --- a/.github/workflows/integration-test.yml +++ b/.github/workflows/integration-test.yml @@ -10,7 +10,7 @@ on: rust_version: description: 'Rust version' required: true - default: '1.77.0' + default: '1.81.0' strip_binaries: description: 'Strip binaries' required: true @@ -40,12 +40,14 @@ jobs: steps: - name: Checkout uses: actions/checkout@v4 + with: + fetch-depth: 0 - name: Set input variables id: variables run: | echo "branch=${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}" >> $GITHUB_OUTPUT - echo "rust_version=${{ github.event.inputs.rust_version || '1.77.0' }}" >> $GITHUB_OUTPUT + echo "rust_version=${{ github.event.inputs.rust_version || '1.81.0' }}" >> $GITHUB_OUTPUT echo "rust_flags_md5=`echo ${RUSTFLAGS} | md5sum | head -c 10`" >> $GITHUB_OUTPUT - name: Musl @@ -78,7 +80,7 @@ jobs: - name: Build binaries run: | - cargo build --features static-openssl --target x86_64-unknown-linux-musl -p yagna -p ya-exe-unit -p gftp -p golemsp -p ya-provider -p erc20_processor + cargo build --features require-consent,static-openssl --target x86_64-unknown-linux-musl -p yagna -p ya-exe-unit -p gftp -p golemsp -p ya-provider -p erc20_processor - name: Move target binaries run: | @@ -133,6 +135,7 @@ jobs: - name: Check installed binaries run: | yagna --version + yagna consent allow-all erc20_processor --version - name: Run test diff --git a/.github/workflows/market-test-suite.yml b/.github/workflows/market-test-suite.yml index 23f4bc4984..27f035719b 100644 --- a/.github/workflows/market-test-suite.yml +++ b/.github/workflows/market-test-suite.yml @@ -22,7 +22,7 @@ on: - '**/master' env: - rust_stable: 1.77.0 + rust_stable: 1.81.0 jobs: build: diff --git a/.github/workflows/payment-test.yml b/.github/workflows/payment-test.yml index f7198da834..609dc5f79d 100644 --- a/.github/workflows/payment-test.yml +++ b/.github/workflows/payment-test.yml @@ -12,7 +12,7 @@ on: - cron: '0 4 * * *' env: - rust_stable: 1.77.0 + rust_stable: 1.81.0 jobs: build: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 1f2b5672c5..da57db57d9 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -19,7 +19,7 @@ on: - cron: '23 23 * * *' env: - rust_stable: 1.77.0 + rust_stable: 1.81.0 permissions: packages: write @@ -128,21 +128,28 @@ jobs: build: name: Build Release needs: create-release - runs-on: ${{ matrix.os }}-latest + runs-on: ${{ matrix.image }} strategy: fail-fast: false matrix: - os: - - ubuntu - - windows - - macos + include: + - os: ubuntu + image: ubuntu-22.04 + - os: windows + image: windows-latest + - os: macos + image: macos-latest env: X86_64_PC_WINDOWS_MSVC_OPENSSL_DIR: c:/vcpkg/installed/x64-windows-static MACOSX_DEPLOYMENT_TARGET: 10.13 OPENSSL_STATIC: 1 steps: - - uses: actions/checkout@v4 + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Musl if: matrix.os == 'ubuntu' run: | @@ -157,11 +164,6 @@ jobs: suffix: ${{ github.event.inputs.suffix || '-nightly' }} prefix: ${{ github.event.inputs.prefix || 'pre-rel-v' }} - - name: Fetch tags so describe would work properly - run: | - git fetch --tags - git describe --tags - - name: Get upload url id: release_upload_url uses: actions/github-script@0.9.0 @@ -206,9 +208,9 @@ jobs: - name: Build macos if: matrix.os == 'macos' run: | - cargo build --release --features static-openssl + cargo build --release --features 
require-consent,static-openssl cargo build --bin gftp -p gftp --release - cargo build --bin golemsp -p golemsp --release + cargo build --bin golemsp --features require-consent -p golemsp --release cargo build --bin ya-provider -p ya-provider --release cargo build --bin exe-unit -p ya-exe-unit --release --features openssl/vendored - name: Build windows @@ -217,18 +219,18 @@ jobs: vcpkg install openssl:x64-windows-static vcpkg integrate install - cargo build --release + cargo build --release --features require-consent cargo build --bin gftp -p gftp --release - cargo build --bin golemsp -p golemsp --release + cargo build --bin golemsp --features require-consent -p golemsp --release cargo build --bin ya-provider -p ya-provider --release cargo build --bin exe-unit -p ya-exe-unit --release - name: Build linux if: matrix.os == 'ubuntu' run: | - cargo build --release --features static-openssl --target x86_64-unknown-linux-musl + cargo build --release --features require-consent,static-openssl --target x86_64-unknown-linux-musl (cd core/gftp && cargo build --bin gftp -p gftp --features bin --release --target x86_64-unknown-linux-musl) - (cd golem_cli && cargo build --bin golemsp -p golemsp --release --features openssl/vendored --target x86_64-unknown-linux-musl) + (cd golem_cli && cargo build --bin golemsp -p golemsp --release --features require-consent,openssl/vendored --target x86_64-unknown-linux-musl) (cd agent/provider && cargo build --bin ya-provider -p ya-provider --release --features openssl/vendored --target x86_64-unknown-linux-musl) (cd exe-unit && cargo build --bin exe-unit -p ya-exe-unit --release --features openssl/vendored --target x86_64-unknown-linux-musl) - name: Pack @@ -311,7 +313,7 @@ jobs: -p golemsp -p gftp --release - --features static-openssl + --features require-consent,static-openssl --target aarch64-unknown-linux-musl - name: Pack @@ -373,7 +375,7 @@ jobs: console.log(release.data.upload_url); return release.data.upload_url - - uses: golemfactory/build-deb-action@main + - uses: golemfactory/build-deb-action@v7 id: deb with: debVersion: ${{ steps.version.outputs.version-ext }} diff --git a/.github/workflows/system-test.yml b/.github/workflows/system-test.yml index 8932eab28d..559729f527 100644 --- a/.github/workflows/system-test.yml +++ b/.github/workflows/system-test.yml @@ -21,7 +21,7 @@ on: - '**/master' env: - rust_stable: 1.77.0 + rust_stable: 1.81.0 jobs: build: diff --git a/.github/workflows/unit-test-sgx.yml b/.github/workflows/unit-test-sgx.yml index cea75e72b2..a8f2ee31cf 100644 --- a/.github/workflows/unit-test-sgx.yml +++ b/.github/workflows/unit-test-sgx.yml @@ -22,7 +22,7 @@ on: - '**/master' env: - rust_stable: 1.77.0 + rust_stable: 1.81.0 jobs: build: @@ -46,4 +46,6 @@ jobs: - name: Unit tests for SGX working-directory: exe-unit - run: cargo test --features sgx + run: | + echo "TODO: fix sgx tests" + # cargo test --features sgx \ No newline at end of file diff --git a/.github/workflows/unit-test.yml b/.github/workflows/unit-test.yml index 30696e6b3c..20c8eb71c3 100644 --- a/.github/workflows/unit-test.yml +++ b/.github/workflows/unit-test.yml @@ -22,7 +22,7 @@ on: - '**/master' env: - rust_stable: 1.77.0 + rust_stable: 1.81.0 jobs: build: @@ -75,4 +75,4 @@ jobs: uses: actions-rs/cargo@v1 with: command: test - args: --workspace --exclude=["./agent/provider/src/market"] --locked + args: --workspace --features require-consent --exclude=["./agent/provider/src/market"] --locked diff --git a/Cargo.lock b/Cargo.lock index 820fe1fb72..8b414110a2 100644 --- 
a/Cargo.lock +++ b/Cargo.lock @@ -12,7 +12,7 @@ dependencies = [ "actix-rt", "actix_derive", "bitflags 2.6.0", - "bytes 1.7.1", + "bytes 1.7.2", "crossbeam-channel 0.5.13", "futures-core", "futures-sink", @@ -34,7 +34,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f7b0a21988c1bf877cf4759ef5ddaac04c1c9fe808c9142ecb78ba97d97a28a" dependencies = [ "bitflags 2.6.0", - "bytes 1.7.1", + "bytes 1.7.2", "futures-core", "futures-sink", "memchr", @@ -70,7 +70,7 @@ dependencies = [ "actix-utils", "actix-web", "bitflags 2.6.0", - "bytes 1.7.1", + "bytes 1.7.2", "derive_more", "futures-core", "http-range", @@ -97,7 +97,7 @@ dependencies = [ "base64 0.22.1", "bitflags 2.6.0", "brotli", - "bytes 1.7.1", + "bytes 1.7.2", "bytestring", "derive_more", "encoding_rs", @@ -135,7 +135,7 @@ dependencies = [ "actix-tls", "actix-utils", "awc", - "bytes 1.7.1", + "bytes 1.7.2", "futures-core", "http 0.2.12", "log", @@ -286,7 +286,7 @@ dependencies = [ "actix-utils", "actix-web-codegen", "ahash 0.8.11", - "bytes 1.7.1", + "bytes 1.7.2", "bytestring", "cfg-if 1.0.0", "cookie", @@ -322,7 +322,7 @@ dependencies = [ "actix-codec", "actix-http", "actix-web", - "bytes 1.7.1", + "bytes 1.7.2", "bytestring", "futures-core", "pin-project-lite", @@ -578,9 +578,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.87" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10f00e1f6e58a40e807377c75c6a7f97bf9044fab57816f2414e6f5f4499d7b8" +checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" [[package]] name = "appdirs" @@ -601,9 +601,9 @@ checksum = "dabe5a181f83789739c194cbe5a897dde195078fac08568d09221fd6137a7ba8" [[package]] name = "arrayref" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d151e35f61089500b617991b791fc8bfd237ae50cd5950803758a179b41e67a" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" [[package]] name = "arrayvec" @@ -765,7 +765,7 @@ dependencies = [ "actix-tls", "actix-utils", "base64 0.22.1", - "bytes 1.7.1", + "bytes 1.7.2", "cfg-if 1.0.0", "cookie", "derive_more", @@ -1009,7 +1009,7 @@ checksum = "af254ed2da4936ef73309e9597180558821cb16ae9bba4cb24ce6b612d8d80ed" dependencies = [ "base64 0.21.7", "bollard-stubs", - "bytes 1.7.1", + "bytes 1.7.2", "futures-core", "futures-util", "hex", @@ -1154,15 +1154,18 @@ checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" [[package]] name = "bytes" -version = "1.7.1" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" +checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" [[package]] name = "bytesize" version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a3e368af43e418a04d52505cf3dbc23dda4e3407ae2fa99fd0e4f308ce546acc" +dependencies = [ + "serde", +] [[package]] name = "bytestring" @@ -1170,7 +1173,7 @@ version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74d80203ea6b29df88012294f62733de21cfeab47f17b41af3a38bc30a03ee72" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", ] [[package]] @@ -1204,11 +1207,57 @@ dependencies = [ "pkg-config", ] +[[package]] +name = "camino" +version = "1.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8b96ec4966b5813e2c0507c1f86115c8c5abaadc3980879c3424042a02fd1ad3" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo-platform" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24b1f0365a6c6bb4020cd05806fd0d33c44d38046b8bd7f0e40814b9763cabfc" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo_metadata" +version = "0.15.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eee4243f1f26fc7a42710e7439c149e2b10b05472f88090acce52632f231a73a" +dependencies = [ + "camino", + "cargo-platform", + "semver 1.0.23", + "serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "cargo_metadata" +version = "0.18.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d886547e41f740c616ae73108f6eb70afe6d940c7bc697cb30f13daec073037" +dependencies = [ + "camino", + "cargo-platform", + "semver 1.0.23", + "serde", + "serde_json", + "thiserror", +] + [[package]] name = "cc" -version = "1.1.18" +version = "1.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b62ac837cdb5cb22e10a256099b4fc502b1dfe560cb282963a974d7abd80e476" +checksum = "07b1695e2c7e8fc85310cde85aeaab7e3097f593c91d209d3f9df76c928100f0" dependencies = [ "jobserver", "libc", @@ -2674,7 +2723,7 @@ dependencies = [ "log", "regex", "thiserror", - "yansi", + "yansi 0.5.1", ] [[package]] @@ -3034,7 +3083,7 @@ dependencies = [ [[package]] name = "golem-certificate" version = "0.1.1" -source = "git+https://github.com/golemfactory/golem-certificate.git?rev=952fdbd47adc57e46b7370935111e046271ef415#952fdbd47adc57e46b7370935111e046271ef415" +source = "git+https://github.com/golemfactory/golem-certificate.git?rev=f2d7514c18fc066e9cfb796090b90f5b27cfe1c6#f2d7514c18fc066e9cfb796090b90f5b27cfe1c6" dependencies = [ "anyhow", "chrono", @@ -3088,6 +3137,7 @@ dependencies = [ "ya-compile-time-utils", "ya-core-model", "ya-provider", + "ya-utils-consent", "ya-utils-path", "ya-utils-process", ] @@ -3118,7 +3168,7 @@ version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "fnv", "futures-core", "futures-sink", @@ -3138,7 +3188,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205" dependencies = [ "atomic-waker", - "bytes 1.7.1", + "bytes 1.7.2", "fnv", "futures-core", "futures-sink", @@ -3204,7 +3254,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06683b93020a07e3dbcf5f8c0f6d40080d725bea7936fc01ad345c01b97dc270" dependencies = [ "base64 0.21.7", - "bytes 1.7.1", + "bytes 1.7.2", "headers-core", "http 0.2.12", "httpdate", @@ -3384,7 +3434,7 @@ version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "fnv", "itoa", ] @@ -3395,7 +3445,7 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "fnv", "itoa", ] @@ -3406,7 +3456,7 @@ version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ - "bytes 1.7.1", + 
"bytes 1.7.2", "http 0.2.12", "pin-project-lite", ] @@ -3417,7 +3467,7 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "http 1.1.0", ] @@ -3427,7 +3477,7 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "futures-util", "http 1.1.0", "http-body 1.0.1", @@ -3477,13 +3527,23 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" +[[package]] +name = "humantime-serde" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57a3db5ea5923d99402c94e9feb261dc5ee9b4efa158b0315f788cf549cc200c" +dependencies = [ + "humantime 2.1.0", + "serde", +] + [[package]] name = "hyper" version = "0.14.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "futures-channel", "futures-core", "futures-util", @@ -3507,7 +3567,7 @@ version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "futures-channel", "futures-util", "h2 0.4.6", @@ -3541,7 +3601,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "hyper 0.14.30", "native-tls", "tokio", @@ -3554,7 +3614,7 @@ version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da62f120a8a37763efb0cf8fdf264b884c7b8b9ac8660b900c8661030c00e6ba" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "futures-util", "http 1.1.0", "http-body 1.0.1", @@ -3578,9 +3638,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.60" +version = "0.1.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" +checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -3983,7 +4043,7 @@ checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ "bitflags 2.6.0", "libc", - "redox_syscall 0.5.3", + "redox_syscall 0.5.4", ] [[package]] @@ -4418,7 +4478,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09b34bd91b9e5c5b06338d392463e1318d683cf82ec3d3af4014609be6e2108d" dependencies = [ "assert-json-diff", - "bytes 1.7.1", + "bytes 1.7.2", "colored", "futures-util", "http 1.1.0", @@ -4987,7 +5047,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall 0.5.3", + "redox_syscall 0.5.4", "smallvec", "windows-targets 0.52.6", ] @@ -5222,12 +5282,12 @@ dependencies = [ [[package]] name = "pretty_assertions" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af7cee1a6c8a5b9208b3cb1061f10c0cb689087b3d8ce85fb9d2dd7a29b6ba66" +checksum = 
"3ae130e2f271fbc2ac3a40fb1d07180839cdbbe443c7a27e1e3c13c5cac0116d" dependencies = [ "diff", - "yansi", + "yansi 1.0.1", ] [[package]] @@ -5308,7 +5368,7 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" dependencies = [ - "toml_edit 0.22.20", + "toml_edit 0.22.21", ] [[package]] @@ -5365,7 +5425,7 @@ version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71adf41db68aa0daaefc69bb30bcd68ded9b9abaad5d1fbb6304c4fb390e083e" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "prost-derive 0.10.1", ] @@ -5375,7 +5435,7 @@ version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "deb1435c188b76130da55f17a466d252ff7b1418b2ad3e037d127b94e3411f29" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "prost-derive 0.12.6", ] @@ -5385,7 +5445,7 @@ version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ae5a4388762d5815a9fc0dea33c56b021cdc8dde0c55e0c9ca57197254b0cab" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "cfg-if 1.0.0", "cmake", "heck 0.4.1", @@ -5407,7 +5467,7 @@ version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22505a5c94da8e3b7c2996394d1c933236c4d743e81a410bcca4e6989fc066a4" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "heck 0.5.0", "itertools 0.12.1", "log", @@ -5454,7 +5514,7 @@ version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d0a014229361011dc8e69c8a1ec6c2e8d0f2af7c91e3ea3f5b2170298461e68" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "prost 0.10.4", ] @@ -5719,9 +5779,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4" +checksum = "0884ad60e090bf1345b93da0a5de8923c93884cd03f40dfcfddd3b4bee661853" dependencies = [ "bitflags 2.6.0", ] @@ -5797,7 +5857,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" dependencies = [ "base64 0.21.7", - "bytes 1.7.1", + "bytes 1.7.2", "encoding_rs", "futures-core", "futures-util", @@ -5896,7 +5956,7 @@ checksum = "9008cd6385b9e161d8229e1f6549dd23c3d022f132a2ea37ac3a10ac4935779b" dependencies = [ "bitvec 1.0.1", "bytecheck", - "bytes 1.7.1", + "bytes 1.7.2", "hashbrown 0.12.3", "ptr_meta", "rend", @@ -5923,7 +5983,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bb919243f34364b6bd2fc10ef797edbfa75f33c252e7998527479c6d6b47e1ec" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "rustc-hex", ] @@ -5958,6 +6018,40 @@ dependencies = [ "zeroize", ] +[[package]] +name = "rust-embed" +version = "8.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa66af4a4fdd5e7ebc276f115e895611a34739a9c1c01028383d612d550953c0" +dependencies = [ + "rust-embed-impl", + "rust-embed-utils", + "walkdir", +] + +[[package]] +name = "rust-embed-impl" +version = "8.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6125dbc8867951125eec87294137f4e9c2c96566e61bf72c45095a7c77761478" +dependencies = [ + "proc-macro2", + "quote", + "rust-embed-utils", + "syn 2.0.77", + "walkdir", +] + +[[package]] +name = "rust-embed-utils" +version = "8.5.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e5347777e9aacb56039b0e1f28785929a8a3b709e87482e7442c72e7c12529d" +dependencies = [ + "sha2 0.10.8", + "walkdir", +] + [[package]] name = "rust_decimal" version = "1.36.0" @@ -5966,7 +6060,7 @@ checksum = "b082d80e3e3cc52b2ed634388d436fe1f4de6af5786cc2de9ba9737527bdf555" dependencies = [ "arrayvec 0.7.6", "borsh", - "bytes 1.7.1", + "bytes 1.7.2", "num-traits", "rand 0.8.5", "rkyv", @@ -5997,9 +6091,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.36" +version = "0.38.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f55e80d50763938498dd5ebb18647174e0c76dc38c5505294bb224624f30f36" +checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" dependencies = [ "bitflags 2.6.0", "errno 0.3.9", @@ -6961,7 +7055,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" dependencies = [ "base64 0.13.1", - "bytes 1.7.1", + "bytes 1.7.2", "futures 0.3.30", "httparse", "log", @@ -7026,7 +7120,7 @@ dependencies = [ "ahash 0.8.11", "atoi", "byteorder", - "bytes 1.7.1", + "bytes 1.7.2", "chrono", "crc", "crossbeam-queue 0.3.11", @@ -7107,7 +7201,7 @@ dependencies = [ "base64 0.21.7", "bitflags 2.6.0", "byteorder", - "bytes 1.7.1", + "bytes 1.7.2", "chrono", "crc", "digest 0.10.7", @@ -7509,6 +7603,18 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" +[[package]] +name = "test-binary" +version = "3.0.2" +source = "git+https://github.com/golemfactory/test-binary.git#c9ebfa3e257455f8365e042b8838a518f2106169" +dependencies = [ + "camino", + "cargo_metadata 0.15.4", + "once_cell", + "paste", + "thiserror", +] + [[package]] name = "test-case" version = "2.2.2" @@ -7704,7 +7810,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" dependencies = [ "backtrace", - "bytes 1.7.1", + "bytes 1.7.2", "libc", "mio 1.0.2", "parking_lot 0.12.3", @@ -7748,11 +7854,10 @@ dependencies = [ [[package]] name = "tokio-openssl" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ffab79df67727f6acf57f1ff743091873c24c579b1e2ce4d8f53e47ded4d63d" +checksum = "59df6849caa43bb7567f9a36f863c447d95a11d5903c9cc334ba32576a27eadd" dependencies = [ - "futures-util", "openssl", "openssl-sys", "tokio", @@ -7832,7 +7937,7 @@ version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "futures-core", "futures-io", "futures-sink", @@ -7851,7 +7956,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.20", + "toml_edit 0.22.21", ] [[package]] @@ -7876,9 +7981,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.20" +version = "0.22.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "583c44c02ad26b0c3f3066fe629275e50627026c51ac2e595cca4c230ce1ce1d" +checksum = "3b072cee73c449a636ffd6f32bd8de3a9f7119139aff882f44943ce2986dc5cf" dependencies = [ "indexmap 2.5.0", "serde", @@ -8075,15 +8180,15 @@ checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" [[package]] name = "unicode-ident" -version = "1.0.12" 
+version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" [[package]] name = "unicode-normalization" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" +checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" dependencies = [ "tinyvec", ] @@ -8096,9 +8201,9 @@ checksum = "52ea75f83c0137a9b98608359a5f1af8144876eb67bcb1ce837368e906a9f524" [[package]] name = "unicode-segmentation" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" +checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" [[package]] name = "unicode-width" @@ -8142,7 +8247,7 @@ dependencies = [ "rustls 0.23.13", "rustls-pki-types", "url", - "webpki-roots 0.26.5", + "webpki-roots 0.26.6", ] [[package]] @@ -8395,7 +8500,7 @@ checksum = "5388522c899d1e1c96a4c307e3797e0f697ba7c77dd8e0e625ecba9dd0342937" dependencies = [ "arrayvec 0.7.6", "base64 0.21.7", - "bytes 1.7.1", + "bytes 1.7.2", "derive_more", "ethabi", "ethereum-types 0.14.1", @@ -8461,9 +8566,9 @@ checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" [[package]] name = "webpki-roots" -version = "0.26.5" +version = "0.26.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bd24728e5af82c6c4ec1b66ac4844bdf8156257fccda846ec58b42cd0cdbe6a" +checksum = "841c67bff177718f1d4dfefde8d8f0e78f9b6589319ba88312f567fc5841a958" dependencies = [ "rustls-pki-types", ] @@ -8486,7 +8591,7 @@ version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "372d5b87f58ec45c384ba03563b03544dc5fadc3983e434b286913f5b4a9bb6d" dependencies = [ - "redox_syscall 0.5.3", + "redox_syscall 0.5.4", "wasite", ] @@ -8829,11 +8934,11 @@ dependencies = [ [[package]] name = "ya-client" version = "0.9.0" -source = "git+https://github.com/golemfactory/ya-client.git?rev=c8675c4eb0d42119b1cfa2f5772ba91f877d81f9#c8675c4eb0d42119b1cfa2f5772ba91f877d81f9" +source = "git+https://github.com/golemfactory/ya-client.git?rev=653e7ed3ff8836837b660a76e604055e167b1f2e#653e7ed3ff8836837b660a76e604055e167b1f2e" dependencies = [ "actix-codec", "awc", - "bytes 1.7.1", + "bytes 1.7.2", "chrono", "envy", "futures 0.3.30", @@ -8852,14 +8957,16 @@ dependencies = [ [[package]] name = "ya-client-model" -version = "0.7.0" -source = "git+https://github.com/golemfactory/ya-client.git?rev=c8675c4eb0d42119b1cfa2f5772ba91f877d81f9#c8675c4eb0d42119b1cfa2f5772ba91f877d81f9" +version = "0.6.0" +source = "git+https://github.com/golemfactory/ya-client.git?rev=653e7ed3ff8836837b660a76e604055e167b1f2e#653e7ed3ff8836837b660a76e604055e167b1f2e" dependencies = [ "bigdecimal 0.2.2", + "bytesize", "chrono", "derive_more", "diesel", "hex", + "humantime-serde", "openssl", "rand 0.8.5", "secp256k1 0.27.0", @@ -8886,6 +8993,7 @@ dependencies = [ name = "ya-core-model" version = "0.10.0" dependencies = [ + "anyhow", "bigdecimal 0.2.2", "bitflags 1.3.2", "chrono", @@ -8896,6 +9004,7 @@ dependencies = [ "serde_bytes", "serde_json", "serde_json_canonicalizer", + "sha3 0.9.1", "structopt", "strum 0.24.1", "strum_macros 0.24.3", @@ -8911,7 +9020,7 @@ dependencies = [ "actix", "actix-rt", 
"anyhow", - "bytes 1.7.1", + "bytes 1.7.2", "chrono", "futures 0.3.30", "lazy_static", @@ -9016,7 +9125,7 @@ dependencies = [ "anyhow", "async-stream", "async-trait", - "bytes 1.7.1", + "bytes 1.7.2", "chrono", "derivative", "derive_more", @@ -9038,12 +9147,14 @@ dependencies = [ "serde", "serde_json", "serde_yaml 0.8.26", + "serial_test 0.5.1 (git+https://github.com/tworec/serial_test.git?branch=actix_rt_test)", "sha3 0.10.8", "shell-words", "signal-hook", "socket2 0.4.10", "structopt", "tempdir", + "test-context", "thiserror", "tokio", "tokio-stream", @@ -9055,10 +9166,12 @@ dependencies = [ "ya-compile-time-utils", "ya-core-model", "ya-counters", + "ya-framework-basic", "ya-gsb-http-proxy", "ya-manifest-utils", + "ya-mock-runtime", "ya-packet-trace 0.1.0 (git+https://github.com/golemfactory/ya-packet-trace)", - "ya-runtime-api", + "ya-runtime-api 0.7.1", "ya-sb-router", "ya-service-bus", "ya-std-utils", @@ -9067,7 +9180,7 @@ dependencies = [ "ya-utils-networking", "ya-utils-path", "ya-utils-process", - "yansi", + "yansi 0.5.1", ] [[package]] @@ -9086,7 +9199,7 @@ dependencies = [ "chrono", "flexi_logger 0.17.1", "log", - "yansi", + "yansi 0.5.1", ] [[package]] @@ -9102,7 +9215,8 @@ dependencies = [ "async-compression", "async-trait", "awc", - "bytes 1.7.1", + "bytes 1.7.2", + "cargo_metadata 0.18.1", "crossterm 0.26.1", "digest 0.10.7", "env_logger 0.7.1", @@ -9121,6 +9235,7 @@ dependencies = [ "sha2 0.10.8", "sha3 0.10.8", "tempdir", + "test-binary", "test-context", "thiserror", "tokio", @@ -9157,6 +9272,7 @@ dependencies = [ "actix-web", "anyhow", "async-trait", + "bigdecimal 0.2.2", "chrono", "derive_more", "ethsign", @@ -9204,7 +9320,7 @@ dependencies = [ "anyhow", "awc", "base64 0.21.7", - "bytes 1.7.1", + "bytes 1.7.2", "ctor", "env_logger 0.10.2", "flexbuffers", @@ -9237,7 +9353,7 @@ dependencies = [ "actix-web", "anyhow", "async-stream", - "bytes 1.7.1", + "bytes 1.7.2", "chrono", "derive_more", "env_logger 0.10.2", @@ -9265,6 +9381,28 @@ dependencies = [ "ya-service-bus", ] +[[package]] +name = "ya-healthcheck" +version = "0.1.0" +dependencies = [ + "actix-web", + "anyhow", + "chrono", + "http 1.1.0", + "log", + "problem_details", + "serde", + "serde_json", + "tokio", + "ya-client", + "ya-core-model", + "ya-net", + "ya-service-api", + "ya-service-api-interfaces", + "ya-service-api-web", + "ya-service-bus", +] + [[package]] name = "ya-identity" version = "0.3.0" @@ -9309,7 +9447,7 @@ dependencies = [ "ya-service-api-derive", "ya-service-api-interfaces", "ya-service-bus", - "yansi", + "yansi 0.5.1", ] [[package]] @@ -9460,6 +9598,37 @@ dependencies = [ "ya-service-api", "ya-service-api-interfaces", "ya-service-bus", + "ya-utils-consent", +] + +[[package]] +name = "ya-mock-runtime" +version = "0.1.0" +dependencies = [ + "actix", + "anyhow", + "async-trait", + "bytes 1.7.2", + "env_logger 0.10.2", + "futures 0.3.30", + "hex", + "log", + "portpicker", + "rand 0.8.5", + "serde", + "serde_json", + "thiserror", + "tokio", + "tokio-util", + "url", + "uuid 0.8.2", + "ya-client-model", + "ya-core-model", + "ya-exe-unit", + "ya-framework-basic", + "ya-runtime-api 0.7.0", + "ya-sb-router", + "ya-service-bus", ] [[package]] @@ -9469,7 +9638,7 @@ dependencies = [ "actix", "actix-web", "anyhow", - "bytes 1.7.1", + "bytes 1.7.2", "chrono", "env_logger 0.7.1", "ethsign", @@ -9579,6 +9748,7 @@ dependencies = [ "ya-market", "ya-metrics", "ya-net", + "ya-payment-driver", "ya-persistence", "ya-sb-router", "ya-service-api", @@ -9608,6 +9778,8 @@ dependencies = [ "num-derive", "num-traits", "r2d2", + 
"serde", + "serde_json", "serde_json_canonicalizer", "sha3 0.10.8", "thiserror", @@ -9716,13 +9888,13 @@ dependencies = [ "ya-utils-cli", "ya-utils-path", "ya-utils-process", - "yansi", + "yansi 0.5.1", ] [[package]] name = "ya-relay-client" version = "0.6.1" -source = "git+https://github.com/golemfactory/ya-relay.git?rev=e199ee1cfdb22837f9d95f4202378e182d3cb489#e199ee1cfdb22837f9d95f4202378e182d3cb489" +source = "git+https://github.com/golemfactory/ya-relay.git?rev=0588dd1af311ae19c621b04cc2a4cfd9c0483252#0588dd1af311ae19c621b04cc2a4cfd9c0483252" dependencies = [ "anyhow", "async-trait", @@ -9737,7 +9909,6 @@ dependencies = [ "metrics 0.21.1", "num_cpus", "parking_lot 0.12.3", - "rand 0.8.5", "strum 0.25.0", "strum_macros 0.25.3", "thiserror", @@ -9753,7 +9924,7 @@ dependencies = [ [[package]] name = "ya-relay-core" version = "0.4.1" -source = "git+https://github.com/golemfactory/ya-relay.git?rev=e199ee1cfdb22837f9d95f4202378e182d3cb489#e199ee1cfdb22837f9d95f4202378e182d3cb489" +source = "git+https://github.com/golemfactory/ya-relay.git?rev=0588dd1af311ae19c621b04cc2a4cfd9c0483252#0588dd1af311ae19c621b04cc2a4cfd9c0483252" dependencies = [ "anyhow", "chrono", @@ -9783,10 +9954,10 @@ dependencies = [ [[package]] name = "ya-relay-proto" version = "0.4.3" -source = "git+https://github.com/golemfactory/ya-relay.git?rev=e199ee1cfdb22837f9d95f4202378e182d3cb489#e199ee1cfdb22837f9d95f4202378e182d3cb489" +source = "git+https://github.com/golemfactory/ya-relay.git?rev=0588dd1af311ae19c621b04cc2a4cfd9c0483252#0588dd1af311ae19c621b04cc2a4cfd9c0483252" dependencies = [ "anyhow", - "bytes 1.7.1", + "bytes 1.7.2", "derive_more", "futures 0.3.30", "hex", @@ -9797,7 +9968,7 @@ dependencies = [ "thiserror", "tokio", "tokio-util", - "ya-relay-util 0.1.0 (git+https://github.com/golemfactory/ya-relay.git?rev=e199ee1cfdb22837f9d95f4202378e182d3cb489)", + "ya-relay-util 0.1.0 (git+https://github.com/golemfactory/ya-relay.git?rev=0588dd1af311ae19c621b04cc2a4cfd9c0483252)", ] [[package]] @@ -9823,7 +9994,7 @@ dependencies = [ [[package]] name = "ya-relay-stack" version = "0.5.1" -source = "git+https://github.com/golemfactory/ya-relay.git?rev=e199ee1cfdb22837f9d95f4202378e182d3cb489#e199ee1cfdb22837f9d95f4202378e182d3cb489" +source = "git+https://github.com/golemfactory/ya-relay.git?rev=0588dd1af311ae19c621b04cc2a4cfd9c0483252#0588dd1af311ae19c621b04cc2a4cfd9c0483252" dependencies = [ "derive_more", "futures 0.3.30", @@ -9837,7 +10008,7 @@ dependencies = [ "thiserror", "tokio", "tokio-stream", - "ya-relay-util 0.1.0 (git+https://github.com/golemfactory/ya-relay.git?rev=e199ee1cfdb22837f9d95f4202378e182d3cb489)", + "ya-relay-util 0.1.0 (git+https://github.com/golemfactory/ya-relay.git?rev=0588dd1af311ae19c621b04cc2a4cfd9c0483252)", ] [[package]] @@ -9846,25 +10017,44 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "306a73f6ce2286987c9da25bc0c2ef81f4f0b2b58bb8d9aeedc34d27407603ff" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "derive_more", ] [[package]] name = "ya-relay-util" version = "0.1.0" -source = "git+https://github.com/golemfactory/ya-relay.git?rev=e199ee1cfdb22837f9d95f4202378e182d3cb489#e199ee1cfdb22837f9d95f4202378e182d3cb489" +source = "git+https://github.com/golemfactory/ya-relay.git?rev=0588dd1af311ae19c621b04cc2a4cfd9c0483252#0588dd1af311ae19c621b04cc2a4cfd9c0483252" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "derive_more", ] +[[package]] +name = "ya-runtime-api" +version = "0.7.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf0db25811f107d62be6c6ac7444d9c6c3e39714b6f76d72798b66ecce47506f" +dependencies = [ + "anyhow", + "bytes 1.7.2", + "futures 0.3.30", + "log", + "prost 0.10.4", + "prost-build 0.10.4", + "serde", + "serde_json", + "tokio", + "tokio-util", + "url", +] + [[package]] name = "ya-runtime-api" version = "0.7.1" dependencies = [ "anyhow", - "bytes 1.7.1", + "bytes 1.7.2", "env_logger 0.7.1", "futures 0.3.30", "log", @@ -9884,7 +10074,7 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69e6b13e3a7d0deab2421046e7fd5b498bd30b9d51d068b3fb4e3278a48bb63f" dependencies = [ - "bytes 1.7.1", + "bytes 1.7.2", "prost 0.10.4", "prost-build 0.10.4", "thiserror", @@ -10102,7 +10292,7 @@ dependencies = [ "awc", "blake2", "blake3", - "bytes 1.7.1", + "bytes 1.7.2", "crossterm 0.26.1", "digest 0.10.7", "env_logger 0.7.1", @@ -10128,16 +10318,16 @@ dependencies = [ "test-context", "thiserror", "tokio", + "tokio-stream", "tokio-tar", "tokio-util", "url", "walkdir", - "ya-client", "ya-client-model", "ya-core-model", "ya-exe-unit", "ya-framework-basic", - "ya-runtime-api", + "ya-runtime-api 0.7.1", "ya-service-bus", "ya-utils-futures", "ya-utils-path", @@ -10170,6 +10360,26 @@ dependencies = [ "serde_yaml 0.9.34+deprecated", ] +[[package]] +name = "ya-utils-consent" +version = "0.1.0" +dependencies = [ + "anyhow", + "env_logger 0.11.5", + "log", + "metrics 0.12.1", + "parking_lot 0.12.3", + "promptly", + "rand 0.8.5", + "serde", + "serde_json", + "structopt", + "strum 0.24.1", + "ya-service-api", + "ya-service-api-interfaces", + "ya-utils-path", +] + [[package]] name = "ya-utils-futures" version = "0.3.0" @@ -10234,8 +10444,10 @@ dependencies = [ "chrono", "diesel", "diesel_migrations", + "http 1.1.0", "log", "metrics 0.12.1", + "problem_details", "self_update", "serde", "serde_json", @@ -10245,9 +10457,11 @@ dependencies = [ "ya-client", "ya-compile-time-utils", "ya-core-model", + "ya-net", "ya-persistence", "ya-service-api", "ya-service-api-interfaces", + "ya-service-api-web", "ya-service-bus", ] @@ -10260,7 +10474,7 @@ dependencies = [ "actix-web", "actix-web-actors", "anyhow", - "bytes 1.7.1", + "bytes 1.7.2", "digest 0.10.7", "env_logger 0.7.1", "futures 0.3.30", @@ -10295,7 +10509,7 @@ dependencies = [ [[package]] name = "yagna" -version = "0.16.0" +version = "0.17.0" dependencies = [ "actix-rt", "actix-service", @@ -10311,9 +10525,11 @@ dependencies = [ "libsqlite3-sys", "log", "metrics 0.12.1", + "mime_guess", "num_cpus", "openssl", "openssl-probe", + "rust-embed", "serde", "serde_json", "structopt", @@ -10332,6 +10548,7 @@ dependencies = [ "ya-fd-metrics", "ya-file-logging", "ya-gsb-api", + "ya-healthcheck", "ya-identity", "ya-market", "ya-metrics", @@ -10348,6 +10565,7 @@ dependencies = [ "ya-service-bus", "ya-sgx", "ya-test-framework", + "ya-utils-consent", "ya-utils-futures", "ya-utils-networking", "ya-utils-path", @@ -10371,6 +10589,12 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" +[[package]] +name = "yansi" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" + [[package]] name = "zerocopy" version = "0.7.35" diff --git a/Cargo.toml b/Cargo.toml index b073b212ae..68a5cef4a1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,28 +1,34 @@ [package] -name = "yagna" 
-version = "0.16.0" -description = "Open platform and marketplace for distributed computations" -readme = "README.md" authors = ["Golem Factory "] +description = "Open platform and marketplace for distributed computations" +edition = "2018" homepage = "https://github.com/golemfactory/yagna/core/serv" -repository = "https://github.com/golemfactory/yagna" license = "GPL-3.0" -edition = "2018" +name = "yagna" +readme = "README.md" +repository = "https://github.com/golemfactory/yagna" +version = "0.17.0" [features] default = ['erc20-driver', 'gftp/bin'] -static-openssl = ["openssl/vendored", "openssl-probe"] +dashboard = ['rust-embed', 'mime_guess'] dummy-driver = ['ya-dummy-driver'] erc20-driver = ['ya-erc20-driver'] +require-consent = ['ya-utils-consent/require-consent'] +static-openssl = ["openssl/vendored", "openssl-probe"] tos = [] -framework-test = ['ya-exe-unit/framework-test', 'ya-payment/framework-test', 'ya-identity/framework-test'] +framework-test = [ + 'ya-exe-unit/framework-test', + 'ya-payment/framework-test', + 'ya-identity/framework-test', +] # Temporary to make goth integration tests work central-net = ['ya-net/central-net'] packet-trace-enable = [ - "ya-vpn/packet-trace-enable", - "ya-file-logging/packet-trace-enable", - "ya-net/packet-trace-enable", - "ya-service-bus/packet-trace-enable", + "ya-vpn/packet-trace-enable", + "ya-file-logging/packet-trace-enable", + "ya-net/packet-trace-enable", + "ya-service-bus/packet-trace-enable", ] [[bin]] @@ -30,214 +36,224 @@ name = "yagna" path = "core/serv/src/main.rs" [dependencies] -ya-activity = "0.4" -ya-compile-time-utils = "0.2" - +ya-compile-time-utils.workspace = true +ya-activity.workspace = true ya-core-model.workspace = true -ya-dummy-driver = { version = "0.3", optional = true } -ya-file-logging = "0.1" -ya-gsb-api = "0.1" -ya-erc20-driver = { version = "0.4", optional = true } -ya-identity = "0.3" -ya-market = "0.4" -ya-metrics = "0.2" -ya-net = { version = "0.3", features = ["service"] } -ya-payment = "0.3" -ya-persistence = { version = "0.3", features = ["service"] } +ya-dummy-driver = { workspace = true, optional = true } +ya-file-logging.workspace = true +ya-gsb-api.workspace = true +ya-erc20-driver = { workspace = true, optional = true } +ya-identity.workspace = true +ya-market.workspace = true +ya-metrics.workspace = true +ya-net = { workspace = true, features = ["service"] } +ya-payment.workspace = true +ya-persistence = { path = "core/persistence", features = ["service"] } ya-sb-proto = { workspace = true } ya-sb-router = { workspace = true } -ya-service-api = "0.1" -ya-service-api-derive = "0.2" -ya-service-api-interfaces = "0.2" -ya-service-api-web = "0.2" +ya-service-api.workspace = true +ya-service-api-derive.workspace = true +ya-service-api-interfaces.workspace = true +ya-service-api-web.workspace = true ya-service-bus = { workspace = true } -ya-sgx = "0.2" -ya-utils-path = "0.1" +ya-sgx.path = "core/sgx" +ya-utils-consent.workspace = true +ya-utils-path.workspace = true ya-utils-futures.workspace = true -ya-utils-process = { version = "0.3", features = ["lock"] } -ya-utils-networking = "0.2" +ya-utils-process = { workspace = true, features = ["lock"] } +ya-utils-networking.workspace = true ya-fd-metrics = { path = "utils/fd-metrics" } -ya-version = "0.2" -ya-vpn = "0.2" +ya-healthcheck = { path = "core/healthcheck" } +ya-version = { path = "core/version" } +ya-vpn.workspace = true ya-client.workspace = true ya-client-model.workspace = true gftp = { workspace = true, optional = true } # just to enable gftp 
build for cargo-deb -ya-provider = { version = "0.3", optional = true } # just to enable conditionally running some tests +ya-provider = { path = "agent/provider", optional = true } # just to enable conditionally running some tests ya-exe-unit = { version = "0.4", optional = true, path = "exe-unit" } # just to enable conditionally running some tests -actix-rt = "2.7" -actix-service = "2" -actix-web = "4" +actix-rt.workspace = true +actix-service.workspace = true +actix-web.workspace = true + anyhow = "1.0" chrono = "0.4" directories = "2.0.2" dotenv = "0.15.0" futures = "0.3" lazy_static = "1.4" +libsqlite3-sys = { workspace = true } log = "0.4" metrics = "0.12" +mime_guess = { version = "2.0", optional = true } num_cpus = "1" -openssl.workspace = true openssl-probe = { version = "0.1", optional = true } +openssl.workspace = true +rust-embed = { version = "8.5", optional = true } serde = "1.0" serde_json = "1.0" structopt = "0.3" tokio = { version = "1", features = ["net"] } -tokio-util = { version = "0.7", features = ["codec"] } tokio-stream = { version = "0.1.8", features = ["io-util"] } +tokio-util = { version = "0.7", features = ["codec"] } url = "2.1.1" -libsqlite3-sys = { workspace = true } - [dev-dependencies] erc20_processor = { workspace = true } -ya-test-framework = "0.1" +ya-test-framework.path = "test-utils/test-framework" + ya-exe-unit = { version = "0.4", path = "exe-unit" } [package.metadata.deb] -name = "golem-requestor" assets = [ - [ - "target/release/yagna", - "usr/bin/", - "755", - ], - [ - "target/release/gftp", - "usr/bin/", - "755", - ], - [ - "README.md", - "usr/share/doc/yagna/", - "644", - ], - [ - "core/serv/README.md", - "usr/share/doc/yagna/service.md", - "644", - ], + [ + "target/release/yagna", + "usr/bin/", + "755", + ], + [ + "target/release/gftp", + "usr/bin/", + "755", + ], + [ + "README.md", + "usr/share/doc/yagna/", + "644", + ], + [ + "core/serv/README.md", + "usr/share/doc/yagna/service.md", + "644", + ], ] conflicts = "ya-provider" -features = ["static-openssl"] -maintainer-scripts = "debian/core" depends = "libgcc1, libc6 (>= 2.23)" extended-description = """The Next Golem Milestone. An open platform and marketplace for distributed computations. 
""" - +features = ["static-openssl"] +maintainer-scripts = "debian/core" +name = "golem-requestor" [package.metadata.deb.variants.provider] -name = "golem-provider" -replaces = "golem-requestor" -maintainer-scripts = "debian/provider" -features = ["static-openssl"] -depends = "libgcc1, libc6 (>= 2.23)" assets = [ - [ - "target/release/yagna", - "usr/bin/", - "755", - ], - [ - "target/release/ya-provider", - "usr/bin/", - "755", - ], - [ - "target/release/gftp", - "usr/bin/", - "755", - ], - [ - "target/release/exe-unit", - "usr/lib/yagna/plugins/", - "755", - ], - [ - "target/release/golemsp", - "usr/bin/", - "755", - ], - [ - "README.md", - "usr/share/doc/yagna/", - "644", - ], - [ - "core/serv/README.md", - "usr/share/doc/yagna/service.md", - "644", - ], - [ - "agent/provider/readme.md", - "usr/share/doc/yagna/run-provider.md", - "644", - ], + [ + "target/release/yagna", + "usr/bin/", + "755", + ], + [ + "target/release/ya-provider", + "usr/bin/", + "755", + ], + [ + "target/release/gftp", + "usr/bin/", + "755", + ], + [ + "target/release/exe-unit", + "usr/lib/yagna/plugins/", + "755", + ], + [ + "target/release/golemsp", + "usr/bin/", + "755", + ], + [ + "README.md", + "usr/share/doc/yagna/", + "644", + ], + [ + "core/serv/README.md", + "usr/share/doc/yagna/service.md", + "644", + ], + [ + "agent/provider/readme.md", + "usr/share/doc/yagna/run-provider.md", + "644", + ], ] +depends = "libgcc1, libc6 (>= 2.23)" +features = ["static-openssl"] +maintainer-scripts = "debian/provider" +name = "golem-provider" +replaces = "golem-requestor" [workspace.lints.clippy] arc_with_non_send_sync = "allow" -get_first = "allow" blocks_in_conditions = "allow" +get_first = "allow" +doc_lazy_continuation = "allow" [workspace] members = [ - "agent/provider", - "core/activity", - "core/gftp", - "core/gsb-api", - "core/identity", - "core/market", - "core/market/resolver", - "core/model", - "core/net", - "core/payment", - "core/payment-driver/base", - "core/payment-driver/dummy", - "core/payment-driver/erc20", - "core/persistence", - "core/serv-api", - "core/serv-api/derive", - "core/serv-api/interfaces", - "core/serv-api/web", - "core/sgx", - "core/version", - "core/vpn", - "exe-unit/components/counters", - "exe-unit/components/gsb-http-proxy", - "exe-unit", - "exe-unit/runtime-api", - "exe-unit/tokio-process-ns", - "exe-unit/components/transfer", - "golem_cli", - "utils/actix_utils", - "utils/agreement-utils", - "utils/cli", - "utils/compile-time-utils", - "utils/file-logging", - "utils/futures", - "utils/manifest-utils", - "utils/manifest-utils/test-utils", - "utils/networking", - "utils/path", - "utils/process", - "utils/std-utils", - "utils/diesel-utils", - "utils/fd-metrics", - "core/metrics", - "test-utils/test-framework", - "test-utils/test-framework/framework-macro", - "test-utils/test-framework/framework-basic", - "test-utils/test-framework/framework-mocks", + "agent/provider", + "core/activity", + "core/gftp", + "core/gsb-api", + "core/identity", + "core/market", + "core/market/resolver", + "core/model", + "core/net", + "core/payment", + "core/payment-driver/base", + "core/payment-driver/dummy", + "core/payment-driver/erc20", + "core/persistence", + "core/serv-api", + "core/serv-api/derive", + "core/serv-api/interfaces", + "core/serv-api/web", + "core/sgx", + "core/version", + "core/vpn", + "exe-unit/components/counters", + "exe-unit/components/gsb-http-proxy", + "exe-unit", + "exe-unit/runtime-api", + "exe-unit/tokio-process-ns", + "exe-unit/components/transfer", + "golem_cli", + 
"utils/actix_utils", + "utils/agreement-utils", + "utils/cli", + "utils/compile-time-utils", + "utils/file-logging", + "utils/futures", + "utils/manifest-utils", + "utils/manifest-utils/test-utils", + "utils/networking", + "utils/path", + "utils/process", + "utils/std-utils", + "utils/diesel-utils", + "utils/fd-metrics", + "core/metrics", + "test-utils/test-framework", + "test-utils/test-framework/framework-macro", + "test-utils/test-framework/framework-basic", + "test-utils/test-framework/framework-mocks", + "core/healthcheck", ] [workspace.dependencies] # this entry is needed to make sqlx version >=0.5.9 work with diesel 1.4.* # diesel 1.4.* supports up to 0.23.0, but sqlx 0.5.9 requires 0.22.0 # sqlx 0.5.10 need 0.23.2, so 0.5.9 is last version possible +actix-rt = "2.7" +actix-service = "2" +actix-web = "4" +actix = { version = "0.13", default-features = false } + derive_more = "0.99" erc20_payment_lib = { git = "https://github.com/golemfactory/erc20_payment_lib", rev = "4eb076ec19bf58cf4063b4cdb4cf370473892203" } erc20_processor = { git = "https://github.com/golemfactory/erc20_payment_lib", rev = "4eb076ec19bf58cf4063b4cdb4cf370473892203" } @@ -250,17 +266,21 @@ gftp = { version = "0.4.1", path = "core/gftp" } hex = "0.4.3" libsqlite3-sys = { version = "0.26.0", features = ["bundled"] } openssl = "0.10" +promptly = "0.3.0" rand = "0.8.5" +regex = "1.10.4" strum = { version = "0.24", features = ["derive"] } trust-dns-resolver = "0.22" url = "2.3.1" -regex = "1.10.4" ya-agreement-utils = { version = "0.6", path = "utils/agreement-utils" } -ya-core-model = { version = "0.10", path = "core/model" } -ya-relay-client = { git = "https://github.com/golemfactory/ya-relay.git", rev = "e199ee1cfdb22837f9d95f4202378e182d3cb489" } +ya-exe-unit.path = "./exe-unit" +ya-relay-client = { git = "https://github.com/golemfactory/ya-relay.git", rev = "0588dd1af311ae19c621b04cc2a4cfd9c0483252" } ya-relay-stack = { git = "https://github.com/golemfactory/ya-relay.git", rev = "c92a75b0cf062fcc9dbb3ea2a034d913e5fad8e5" } ya-utils-futures = { path = "utils/futures" } +ya-utils-networking = { path = "utils/networking", default-features = false } +ya-file-logging.path = "utils/file-logging" +ya-utils-cli.path = "utils/cli" ya-service-bus = { version = "0.7.3", features = ['tls'] } ya-sb-router = { version = "0.6.4" } @@ -268,80 +288,49 @@ ya-sb-proto = { version = "0.6.2" } ya-sb-util = { version = "0.5.1" } parking_lot = "0.12.3" mime = "0.3.17" -# true version is given in patches section -ya-client = "0.9" -# true version is given in patches section -ya-client-model = "0.7" +ya-client = { git = "https://github.com/golemfactory/ya-client.git", rev = "653e7ed3ff8836837b660a76e604055e167b1f2e" } +ya-client-model = { git = "https://github.com/golemfactory/ya-client.git", rev = "653e7ed3ff8836837b660a76e604055e167b1f2e" } -[patch.crates-io] -## SERVICES -ya-identity = { path = "core/identity" } -ya-net = { path = "core/net" } -ya-market = { path = "core/market" } -ya-market-resolver = { path = "core/market/resolver" } -ya-activity = { path = "core/activity" } -ya-sgx = { path = "core/sgx" } -ya-payment = { path = "core/payment" } -ya-payment-driver = { path = "core/payment-driver/base" } -ya-dummy-driver = { path = "core/payment-driver/dummy" } -ya-erc20-driver = { path = "core/payment-driver/erc20" } -ya-version = { path = "core/version" } -ya-vpn = { path = "core/vpn" } -ya-gsb-api = { path = "core/gsb-api" } - -## CORE UTILS +ya-compile-time-utils.path = "utils/compile-time-utils" +ya-manifest-utils = { path = 
"utils/manifest-utils" } +ya-std-utils = { path = "utils/std-utils" } +ya-diesel-utils.path = "utils/diesel-utils" +ya-utils-actix.path = "utils/actix_utils" ya-core-model = { path = "core/model" } -ya-persistence = { path = "core/persistence" } -ya-service-api = { path = "core/serv-api" } -ya-service-api-derive = { path = "core/serv-api/derive" } -ya-service-api-interfaces = { path = "core/serv-api/interfaces" } -ya-service-api-web = { path = "core/serv-api/web" } - -## CLIENT -ya-client = { git = "https://github.com/golemfactory/ya-client.git", rev = "c8675c4eb0d42119b1cfa2f5772ba91f877d81f9" } -#ya-client = { path = "../ya-client" } -ya-client-model = { git = "https://github.com/golemfactory/ya-client.git", rev = "c8675c4eb0d42119b1cfa2f5772ba91f877d81f9" } -golem-certificate = { git = "https://github.com/golemfactory/golem-certificate.git", rev = "952fdbd47adc57e46b7370935111e046271ef415" } +ya-utils-consent.path = "utils/consent" +ya-utils-path.path = "utils/path" +ya-utils-process.path = "utils/process" -## RELAY and networking stack +ya-identity.path = "core/identity" +ya-market.path = "core/market" +ya-activity.path = "core/activity" +ya-net.path = "core/net" +ya-persistence.path = "core/persistence" +ya-payment.path = "core/payment" +ya-metrics.path = "core/metrics" +ya-manifest-test-utils.path = "utils/manifest-utils/test-utils" +ya-vpn.path = "core/vpn" +ya-gsb-api.path = "core/gsb-api" -#ya-relay-stack = { path = "../ya-relay/crates/stack" } -#ya-relay-client = { path = "../ya-relay/client" } -#ya-relay-core = { path = "../ya-relay/crates/core" } -#ya-relay-proto = { path = "../ya-relay/crates/proto" } +ya-payment-driver.path = "core/payment-driver/base" +ya-dummy-driver.path = "core/payment-driver/dummy" +ya-erc20-driver.path = "core/payment-driver/erc20" +ya-service-api.path = "core/serv-api" +ya-service-api-derive.path = "core/serv-api/derive" +ya-service-api-interfaces.path = "core/serv-api/interfaces" +ya-service-api-web.path = "core/serv-api/web" -## OTHERS -gftp = { path = "core/gftp" } -tokio-process-ns = { path = "exe-unit/tokio-process-ns" } -ya-agreement-utils = { path = "utils/agreement-utils" } -ya-std-utils = { path = "utils/std-utils" } -ya-compile-time-utils = { path = "utils/compile-time-utils" } -ya-exe-unit = { path = "exe-unit" } -ya-file-logging = { path = "utils/file-logging" } -ya-manifest-utils = { path = "utils/manifest-utils" } -ya-transfer = { path = "exe-unit/components/transfer" } -ya-utils-actix = { path = "utils/actix_utils" } -ya-utils-cli = { path = "utils/cli" } -ya-utils-networking = { path = "utils/networking" } -ya-utils-path = { path = "utils/path" } -ya-utils-process = { path = "utils/process" } -ya-diesel-utils = { path = "utils/diesel-utils" } -ya-metrics = { path = "core/metrics" } -ya-provider = { path = "agent/provider" } -ya-counters = { path = "exe-unit/components/counters" } -ya-gsb-http-proxy = { path = "exe-unit/components/gsb-http-proxy" } - -## TEST UTILS -ya-manifest-test-utils = { path = "utils/manifest-utils/test-utils" } -ya-test-framework = { path = "test-utils/test-framework" } -ya-framework-macro = { path = "test-utils/test-framework/framework-macro" } -ya-framework-basic = { path = "test-utils/test-framework/framework-basic" } -ya-framework-mocks = { path = "test-utils/test-framework/framework-mocks" } +ya-framework-macro.path = "test-utils/test-framework/framework-macro" +ya-framework-basic.path = "test-utils/test-framework/framework-basic" +ya-framework-mocks.path = "test-utils/test-framework/framework-mocks" 
+[patch.crates-io] +ya-client = { git = "https://github.com/golemfactory/ya-client.git", rev = "653e7ed3ff8836837b660a76e604055e167b1f2e" } +ya-client-model = { git = "https://github.com/golemfactory/ya-client.git", rev = "653e7ed3ff8836837b660a76e604055e167b1f2e" } +golem-certificate = { git = "https://github.com/golemfactory/golem-certificate.git", rev = "f2d7514c18fc066e9cfb796090b90f5b27cfe1c6" } ethereum-tx-sign = { git = "https://github.com/golemfactory/ethereum-tx-sign.git", rev = "1164c74187a9e2947faeaea7dde104c3cdec4195" } graphene-sgx = { git = " https://github.com/golemfactory/graphene-rust.git", rev = "dbd993ebad7f9190410ea390a589348479af6407" } - diesel = { git = "https://github.com/golemfactory/yagna-diesel-patch.git", rev = "a512c66d520a9066dd9a4d1416f9109019b39563" } # Speed up builds on macOS (will be default in next rust version probably) diff --git a/agent/provider/Cargo.toml b/agent/provider/Cargo.toml index ee7dd15a2f..2ee25e9480 100644 --- a/agent/provider/Cargo.toml +++ b/agent/provider/Cargo.toml @@ -14,17 +14,17 @@ path = "src/main.rs" [dependencies] ya-agreement-utils = { workspace = true } -ya-manifest-utils = { version = "0.2" } +ya-manifest-utils.workspace = true ya-client = { workspace = true, features = ['cli'] } ya-client-model.workspace = true -ya-compile-time-utils = "0.2" +ya-compile-time-utils.workspace = true ya-core-model = { workspace = true, features = ['activity', 'payment'] } -ya-file-logging = "0.1" -ya-utils-actix = "0.2" -ya-utils-cli = "0.1" -ya-utils-path = "0.1" -ya-utils-process = { version = "0.3", features = ['lock'] } -ya-std-utils = "0.1" +ya-file-logging.workspace = true +ya-utils-actix.workspace = true +ya-utils-cli.workspace = true +ya-utils-path.workspace = true +ya-utils-process = { workspace = true, features = ['lock'] } +ya-std-utils.workspace = true golem-certificate = "0.1.1" actix = { version = "0.13", default-features = false } @@ -91,8 +91,8 @@ tempdir = "0.3" tempfile = "3.5.0" pretty_assertions = "1.3" -ya-manifest-test-utils = "0.1" -ya-framework-basic = "0.1" +ya-manifest-test-utils.workspace = true +ya-framework-basic.workspace = true [lints] workspace = true diff --git a/agent/provider/src/config/globals.rs b/agent/provider/src/config/globals.rs index 41ef6f48f8..8d36b0e91b 100644 --- a/agent/provider/src/config/globals.rs +++ b/agent/provider/src/config/globals.rs @@ -35,6 +35,7 @@ impl<'de> Deserialize<'de> for GlobalsState { pub enum Account { NodeId(NodeId), Deprecated { + #[allow(dead_code)] platform: Option, address: NodeId, }, diff --git a/agent/provider/src/market/provider_market.rs b/agent/provider/src/market/provider_market.rs index 6fee7167ac..7d195735f2 100644 --- a/agent/provider/src/market/provider_market.rs +++ b/agent/provider/src/market/provider_market.rs @@ -924,7 +924,7 @@ fn get_backoff() -> backoff::ExponentialBackoff { initial_interval: std::time::Duration::from_secs(5), multiplier: 1.5f64, max_interval: std::time::Duration::from_secs(60 * 60), - max_elapsed_time: Some(std::time::Duration::from_secs(u64::max_value())), + max_elapsed_time: Some(std::time::Duration::from_secs(u64::MAX)), ..Default::default() } } diff --git a/agent/provider/src/payments/agreement.rs b/agent/provider/src/payments/agreement.rs index 195e627596..e8effac38c 100644 --- a/agent/provider/src/payments/agreement.rs +++ b/agent/provider/src/payments/agreement.rs @@ -47,6 +47,7 @@ pub enum ActivityPayment { /// Note that we can have multiple activities during duration of agreement. 
/// We must wait until agreement will be closed, before we send invoice. pub struct AgreementPayment { + #[allow(dead_code)] pub agreement_id: String, pub approved_ts: DateTime, pub payment_model: Arc, diff --git a/agent/provider/src/tasks/task_state.rs b/agent/provider/src/tasks/task_state.rs index 764220bf40..bb77b9c0c2 100644 --- a/agent/provider/src/tasks/task_state.rs +++ b/agent/provider/src/tasks/task_state.rs @@ -55,6 +55,7 @@ pub enum AgreementState { #[derive(Clone, Debug)] pub struct Transition(AgreementState, Option); +#[allow(dead_code)] #[derive(Clone)] pub enum StateChange { TransitionStarted(Transition), diff --git a/core/activity/Cargo.toml b/core/activity/Cargo.toml index e1dbe98736..fedbefbbda 100644 --- a/core/activity/Cargo.toml +++ b/core/activity/Cargo.toml @@ -10,11 +10,11 @@ framework-test = ['ya-gsb-http-proxy/framework-test'] [dependencies] ya-core-model = { workspace = true, features = ["activity", "market"] } ya-client-model = { workspace = true, features = ["sgx"] } -ya-net = "0.3" -ya-persistence = "0.3" -ya-service-api = "0.1" -ya-service-api-interfaces = "0.2" -ya-service-api-web = "0.2" +ya-net.workspace = true +ya-persistence.workspace = true +ya-service-api.workspace = true +ya-service-api-interfaces.workspace = true +ya-service-api-web.workspace = true ya-service-bus = { workspace = true } ya-gsb-http-proxy = { path = "../../exe-unit/components/gsb-http-proxy" } diff --git a/core/activity/src/provider/service.rs b/core/activity/src/provider/service.rs index e59c963d88..0529534996 100644 --- a/core/activity/src/provider/service.rs +++ b/core/activity/src/provider/service.rs @@ -205,7 +205,7 @@ async fn create_activity_gsb( msg.timeout, ) .await - .map_err(|e| { + .inspect_err(|_e| { tokio::task::spawn_local(enqueue_destroy_evt( db.clone(), tracker.clone(), @@ -213,7 +213,6 @@ async fn create_activity_gsb( *agreement.provider_id(), app_session_id, )); - e })?; log::info!( diff --git a/core/gftp/Cargo.toml b/core/gftp/Cargo.toml index 24df55fecf..2fad2a7578 100644 --- a/core/gftp/Cargo.toml +++ b/core/gftp/Cargo.toml @@ -18,7 +18,7 @@ name = "gftp" required-features = ['bin'] [dependencies] -ya-compile-time-utils = "0.2" +ya-compile-time-utils.workspace = true ya-core-model = { workspace = true, features = ["gftp", "identity", "net"] } ya-service-bus = { workspace = true } diff --git a/core/gsb-api/Cargo.toml b/core/gsb-api/Cargo.toml index dd8c889710..17c5cf625b 100644 --- a/core/gsb-api/Cargo.toml +++ b/core/gsb-api/Cargo.toml @@ -7,10 +7,10 @@ edition = "2018" [dependencies] ya-client-model.workspace = true -ya-persistence = "0.3" -ya-service-api = "0.1" -ya-service-api-interfaces = "0.2" -ya-service-api-web = "0.2" +ya-persistence.workspace = true +ya-service-api.workspace = true +ya-service-api-interfaces.workspace = true +ya-service-api-web.workspace = true ya-service-bus = { workspace = true } actix = "0" diff --git a/core/gsb-api/src/api.rs b/core/gsb-api/src/api.rs index e3da028133..0e1069aca1 100644 --- a/core/gsb-api/src/api.rs +++ b/core/gsb-api/src/api.rs @@ -241,7 +241,7 @@ mod tests { async fn verify_delete_service(api: &mut TestServer, service_addr: &str) { let delete_resp = api - .delete(&format!( + .delete(format!( "/{}/{}/{}", GSB_API_PATH, "services", @@ -451,7 +451,7 @@ mod tests { async fn api_404_error_on_delete_of_not_existing_service_test() { let api = dummy_api(); let delete_resp = api - .delete(&format!( + .delete(format!( "/{}/{}/{}", GSB_API_PATH, "services", diff --git a/core/healthcheck/Cargo.toml 
b/core/healthcheck/Cargo.toml new file mode 100644 index 0000000000..4af7cac70c --- /dev/null +++ b/core/healthcheck/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "ya-healthcheck" +version = "0.1.0" +description = "Node health monitoring" +authors = ["Golem Factory "] +edition = "2021" + +[dependencies] +ya-service-api-web.workspace = true +ya-client.workspace = true +ya-core-model = { workspace = true, features = ["version"] } +ya-net = { workspace = true, features = ["service"] } +ya-service-api.workspace = true +ya-service-api-interfaces.workspace = true +ya-service-bus = { workspace = true } + +actix-web = "4" +anyhow = "1.0" +chrono = { version = "0.4", features = ["serde"] } +log = "0.4" +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +tokio = { version = "1", features = ["time", "sync"] } +problem_details = "0.6.0" +http = "1.1.0" diff --git a/core/healthcheck/src/lib.rs b/core/healthcheck/src/lib.rs new file mode 100644 index 0000000000..309555f15f --- /dev/null +++ b/core/healthcheck/src/lib.rs @@ -0,0 +1,3 @@ +mod service; + +pub use service::HealthcheckService; diff --git a/core/healthcheck/src/service.rs b/core/healthcheck/src/service.rs new file mode 100644 index 0000000000..ce1acefe5f --- /dev/null +++ b/core/healthcheck/src/service.rs @@ -0,0 +1,15 @@ +use ya_service_api_interfaces::Service; + +mod rest; + +pub struct HealthcheckService; + +impl Service for HealthcheckService { + type Cli = (); +} + +impl HealthcheckService { + pub fn rest(_ctx: &C) -> actix_web::Scope { + rest::web_scope() + } +} diff --git a/core/healthcheck/src/service/rest.rs b/core/healthcheck/src/service/rest.rs new file mode 100644 index 0000000000..7499e12497 --- /dev/null +++ b/core/healthcheck/src/service/rest.rs @@ -0,0 +1,234 @@ +use actix_web::{HttpResponse, Responder}; +use chrono::Utc; +use serde_json::json; + +use std::time::Duration; + +use ya_core_model::market::local::BUS_ID as MARKET_BUS_ID; +use ya_core_model::net::local::BUS_ID as NET_BUS_ID; +use ya_core_model::payment::local::BUS_ID as PAYMENT_BUS_ID; + +use ya_core_model::market::GetLastBcastTs; +use ya_core_model::net::local::ListNeighbours; +use ya_core_model::payment::local::{PaymentDriverStatus, PaymentDriverStatusError}; + +use ya_service_bus::{timeout::IntoTimeoutFuture, typed::service, RpcEndpoint}; + +pub const HEALTHCHECK_API_PATH: &str = "/healthcheck"; + +pub fn web_scope() -> actix_web::Scope { + actix_web::web::scope(HEALTHCHECK_API_PATH).service(healthcheck) +} + +async fn payment_healthcheck() -> Result<(), HttpResponse> { + let result = service(PAYMENT_BUS_ID) + .call(PaymentDriverStatus { + driver: None, + network: None, + }) + .timeout(Some(Duration::from_secs(5))) + .await; + + let result = match result { + Ok(ok) => ok, + Err(_elapsed) => return Err(errors::internal("internal-timeout", "payments-check")), + }; + + let result = match result { + Ok(resp) => resp, + Err(gsb_err) => { + log::warn!("Healthcheck failed due to {gsb_err}"); + return Err(errors::internal("gsb-error", "payments-check")); + } + }; + + let status_properties = match result { + Ok(props) => props, + Err( + payment_err @ (PaymentDriverStatusError::NoDriver(_) + | PaymentDriverStatusError::NoNetwork(_) + | PaymentDriverStatusError::Internal(_)), + ) => { + log::warn!("Healthcheck failed due to {payment_err}"); + return Err(errors::internal("payments-service-error", "payments-check")); + } + }; + + if !status_properties.is_empty() { + return Err(errors::payments(status_properties)); + } + + Ok(()) +} + +async fn 
relay_healthcheck() -> Result<(), HttpResponse> { + let result = service(NET_BUS_ID) + .send(ListNeighbours { size: 8 }) + .timeout(Some(Duration::from_secs(5))) + .await; + + let result = match result { + Ok(ok) => ok, + Err(_elapsed) => return Err(errors::internal("internal-timeout", "relay-check")), + }; + + let result = match result { + Ok(ok) => ok, + Err(gsb_err) => { + log::warn!("Healthcheck failed due to {gsb_err}"); + return Err(errors::internal("gsb-error", "relay-check")); + } + }; + + let _gsb_remote_ping = match result { + Ok(ok) => ok, + Err(net_err) => { + log::warn!("Healthcheck failed due to {net_err}"); + return Err(errors::internal("net-service-error", "relay-check")); + } + }; + + Ok(()) +} + +async fn market_healthcheck() -> Result<(), HttpResponse> { + let result = service(MARKET_BUS_ID) + .call(GetLastBcastTs) + .timeout(Some(Duration::from_secs(5))) + .await; + + let result = match result { + Ok(ok) => ok, + Err(_elapsed) => return Err(errors::internal("internal-timeout", "market-check")), + }; + + let result = match result { + Ok(ok) => ok, + Err(gsb_err) => { + log::warn!("Healthcheck failed due to {gsb_err}"); + return Err(errors::internal("gsb-error", "market-check")); + } + }; + + let bcast_ts = match result { + Ok(ok) => ok, + Err(market_err) => { + log::warn!("Healthcheck failed due to {market_err}"); + return Err(errors::internal("market-service-error", "market-check")); + } + }; + + let last_bcast_age = Utc::now() - bcast_ts; + if last_bcast_age > chrono::Duration::minutes(2) { + return Err(errors::market_bcast_timeout(last_bcast_age)); + } + + Ok(()) +} + +#[actix_web::get("")] +async fn healthcheck() -> impl Responder { + if let Err(response) = payment_healthcheck().await { + return response; + } + if let Err(response) = relay_healthcheck().await { + return response; + } + if let Err(response) = market_healthcheck().await { + return response; + } + + HttpResponse::Ok().json(json!({"status": "ok"})) +} + +mod errors { + use actix_web::HttpResponse; + use http::Uri; + use problem_details::ProblemDetails; + use serde_json::Value; + use std::collections::HashMap; + use std::iter::FromIterator; + use std::str::FromStr; + use ya_client::model::payment::DriverStatusProperty; + + const CONTENT_TYPE_PROBLEM_JSON: (&str, &str) = ("Content-Type", "application/problem+json"); + + pub fn internal(instance: &str, step: &str) -> HttpResponse { + let extensions = HashMap::::from_iter(std::iter::once(( + "step".to_string(), + step.to_string(), + ))); + + let problem = ProblemDetails::new() + .with_type(Uri::from_static("/healthcheck/internal-error")) + .with_instance( + Uri::from_str(&format!("/healthcheck/internal-error/{instance}",)) + .expect("Invalid URI"), + ) + .with_extensions(extensions); + + HttpResponse::InternalServerError() + .insert_header(CONTENT_TYPE_PROBLEM_JSON) + .json(problem) + } + + pub fn payments(props: Vec) -> HttpResponse { + let extensions = HashMap::::from_iter([ + ( + "step".to_string(), + Value::String("payments-check".to_string()), + ), + ( + "problems".to_string(), + Value::Array( + props + .into_iter() + .map(|prop| match prop { + DriverStatusProperty::CantSign { .. } => "Can't sign transaction", + DriverStatusProperty::InsufficientGas { .. } => "Insufficient gas", + DriverStatusProperty::InsufficientToken { .. } => "Insufficient token", + DriverStatusProperty::InvalidChainId { .. } => "Misconfigured chain", + DriverStatusProperty::RpcError { .. } => "Persistent RPC issues", + DriverStatusProperty::TxStuck { .. 
} => "Stuck transaction", + }) + .map(ToOwned::to_owned) + .map(Value::String) + .collect(), + ), + ), + ]); + + let problem = ProblemDetails::new() + .with_detail("One on more issues blocking the operation of payments have been detected. Run `yagna payment driver status` to diagnose") + .with_type(Uri::from_static("/healthcheck/payment-driver-errors")) + .with_instance(Uri::from_static("/healthcheck/payment-driver-errors")) + .with_extensions(extensions); + + HttpResponse::InternalServerError() + .insert_header(CONTENT_TYPE_PROBLEM_JSON) + .json(problem) + } + + pub fn market_bcast_timeout(last_bcast_age: chrono::Duration) -> HttpResponse { + let extensions = HashMap::::from_iter([ + ( + "step".to_string(), + Value::String("market-check".to_string()), + ), + ( + "lastBcastAgeSecs".to_string(), + Value::Number(last_bcast_age.num_seconds().into()), + ), + ]); + + let problem = ProblemDetails::new() + .with_detail("Last received market broadcast is too old") + .with_type(Uri::from_static("/healthcheck/market-bcast-timeout")) + .with_instance(Uri::from_static("/healthcheck/market-bcast-timeout")) + .with_extensions(extensions); + + HttpResponse::InternalServerError() + .insert_header(CONTENT_TYPE_PROBLEM_JSON) + .json(problem) + } +} diff --git a/core/identity/Cargo.toml b/core/identity/Cargo.toml index ce57f96350..9000e5845c 100644 --- a/core/identity/Cargo.toml +++ b/core/identity/Cargo.toml @@ -12,9 +12,9 @@ framework-test = [] [dependencies] ya-client-model = { workspace = true, features = ["with-diesel"] } ya-core-model = { workspace = true, features = ["identity", "appkey"] } -ya-persistence = "0.3" -ya-service-api = "0.1" -ya-service-api-interfaces = "0.2" +ya-persistence.workspace = true +ya-service-api.workspace = true +ya-service-api-interfaces.workspace = true ya-service-bus = { workspace = true } anyhow = "1.0" @@ -25,9 +25,9 @@ diesel = { version = "1.4", features = ["sqlite", "r2d2", "chrono"] } diesel_migrations = "1.4" ethsign = "0.8" futures = "0.3" -hex = { workspace = true } +hex.workspace = true log = "0.4" -promptly = "0.3.0" +promptly.workspace = true r2d2 = "0.8.8" rand = "0.8" rpassword = "3.0.2" @@ -41,10 +41,10 @@ rustc-hex = "2.1.0" yansi = "0.5.0" [dev-dependencies] -ya-service-api-derive = "0.2" +ya-service-api-derive.workspace = true ya-sb-router = { workspace = true } -ya-framework-basic = { version = "0.1" } -ya-framework-mocks = { version = "0.1" } +ya-framework-basic.workspace = true +ya-framework-mocks.workspace = true actix-rt = "2.7" actix-service = "2" @@ -57,4 +57,4 @@ serial_test = { git = "https://github.com/tworec/serial_test.git", branch = "act test-context = "0.1.4" [lints] -workspace = true \ No newline at end of file +workspace = true diff --git a/core/identity/src/cli/identity.rs b/core/identity/src/cli/identity.rs index aa87235fff..a32006644c 100644 --- a/core/identity/src/cli/identity.rs +++ b/core/identity/src/cli/identity.rs @@ -314,6 +314,13 @@ impl IdentityCommand { ) .await .map_err(anyhow::Error::msg)?; + + if *set_default { + log::warn!( + "Setting default identity requires service/daemon restart to take effect!" + ) + } + CommandOutput::object(id) } IdentityCommand::Create { @@ -381,6 +388,13 @@ impl IdentityCommand { .send(identity::Update::with_id(id.node_id).with_default(*set_default)) .await .map_err(anyhow::Error::msg)?; + + if *set_default { + log::warn!( + "Setting default identity requires service/daemon restart to take effect!" 
+ ) + } + CommandOutput::object(id) } IdentityCommand::Lock { @@ -426,7 +440,11 @@ impl IdentityCommand { IdentityCommand::Drop { node_or_alias, force, - } => drop_id::drop_id(&gsb, node_or_alias, *force).await, + } => { + log::warn!("Dropping identity requires service/daemon restart to take effect!"); + + drop_id::drop_id(&gsb, node_or_alias, *force).await + } IdentityCommand::Export { node_or_alias, file_path, diff --git a/core/market/Cargo.toml b/core/market/Cargo.toml index c74d90442f..e6aa8dffd0 100644 --- a/core/market/Cargo.toml +++ b/core/market/Cargo.toml @@ -12,17 +12,17 @@ test-suite = [] ya-agreement-utils = { workspace = true } ya-client.workspace = true ya-core-model = { workspace = true, features = ["market", "net"] } -ya-diesel-utils = { version = "0.1" } -ya-framework-basic = "0.1" -ya-market-resolver = "0.2" -ya-net = "0.3" -ya-persistence = "0.3" -ya-service-api = "0.1" -ya-service-api-interfaces = "0.2" -ya-service-api-web = "0.2" +ya-diesel-utils.workspace = true +ya-framework-basic.workspace = true +ya-market-resolver.path = "./resolver" +ya-net.workspace = true +ya-persistence.workspace = true +ya-service-api.workspace = true +ya-service-api-interfaces.workspace = true +ya-service-api-web.workspace = true ya-service-bus = { workspace = true } -ya-std-utils = "0.1" -ya-utils-actix = "0.2" +ya-std-utils.workspace = true +ya-utils-actix.workspace = true actix = { version = "0.13", default-features = false } actix-http = "3" @@ -73,7 +73,7 @@ serial_test = { git = "https://github.com/golemfactory/serial_test.git", branch structopt = "0.3" tokio = { version = "1", features = ["macros", "rt"] } -ya-framework-mocks = "0.1" +ya-framework-mocks.workspace = true [lints] -workspace = true \ No newline at end of file +workspace = true diff --git a/core/market/resolver/src/resolver/properties.rs b/core/market/resolver/src/resolver/properties.rs index b71e2a4e1b..abac41948a 100644 --- a/core/market/resolver/src/resolver/properties.rs +++ b/core/market/resolver/src/resolver/properties.rs @@ -48,10 +48,9 @@ impl<'a> PropertyValue<'a> { Ok(parsed_value) => parsed_value == *value, _ => false, }, // ignore parsing error, assume false - PropertyValue::List(value) => match PropertyValue::equals_list(value, other) { - Ok(result) => result, - _ => false, - }, // ignore parsing error, assume false + PropertyValue::List(value) => { + PropertyValue::equals_list(value, other).unwrap_or_default() + } PropertyValue::Boolean(value) => match other.parse::() { Ok(result) => &result == value, _ => false, diff --git a/core/market/src/matcher.rs b/core/market/src/matcher.rs index db2f6b10ce..c570f3a8b3 100644 --- a/core/market/src/matcher.rs +++ b/core/market/src/matcher.rs @@ -1,9 +1,13 @@ use actix::prelude::*; -use chrono::{TimeZone, Utc}; +use chrono::{DateTime, TimeZone, Utc}; use metrics::counter; use std::str::FromStr; use std::sync::Arc; +use std::time::Duration; use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver}; +use ya_core_model::market::{GetLastBcastTs, RpcMessageError}; +use ya_service_bus::timeout::IntoTimeoutFuture; +use ya_service_bus::typed::ServiceBinder; use ya_client::model::market::{NewDemand, NewOffer}; use ya_service_api_web::middleware::Identity; @@ -122,6 +126,24 @@ impl Matcher { .await .map_err(|e| MatcherInitError::ExpirationTrackerError(e.to_string()))?; + let discovery = self.discovery.clone(); + async fn handler( + _: (), + discovery: Discovery, + caller: String, + _msg: GetLastBcastTs, + ) -> Result, RpcMessageError> { + log::debug!("Got GetLastBcastTs from 
{caller}"); + + discovery + .get_last_bcast_ts() + .timeout(Some(Duration::from_secs(5))) + .await + .map_err(|_| RpcMessageError::Timeout) + } + + ServiceBinder::new(local_prefix, &(), discovery).bind_with_processor(handler); + Ok(()) } diff --git a/core/market/src/protocol/discovery.rs b/core/market/src/protocol/discovery.rs index 7bd139fb12..61f5fa3a96 100644 --- a/core/market/src/protocol/discovery.rs +++ b/core/market/src/protocol/discovery.rs @@ -1,4 +1,5 @@ //! Discovery protocol interface +use chrono::{DateTime, Utc}; use futures::TryFutureExt; use metrics::{counter, timing, value}; use std::collections::HashSet; @@ -64,6 +65,8 @@ pub struct DiscoveryImpl { /// with central NET implementation in future. net_type: net::NetType, ban_cache: BanCache, + + last_bcast_ts: Mutex>, } struct BanCache { @@ -124,6 +127,10 @@ impl Discovery { self.inner.net_type == net::NetType::Hybrid } + pub async fn get_last_bcast_ts(&self) -> DateTime { + *self.inner.last_bcast_ts.lock().await + } + pub async fn bcast_offers(&self, offer_ids: Vec) -> Result<(), DiscoveryError> { if offer_ids.is_empty() { return Ok(()); @@ -454,6 +461,8 @@ impl Discovery { let num_ids_received = msg.offer_ids.len(); log::trace!("Received {num_ids_received} Offers from [{caller}]."); + *self.inner.last_bcast_ts.lock().await = Utc::now(); + if msg.offer_ids.is_empty() { return Ok(()); } diff --git a/core/market/src/protocol/discovery/builder.rs b/core/market/src/protocol/discovery/builder.rs index a02f73ea25..d842bd4669 100644 --- a/core/market/src/protocol/discovery/builder.rs +++ b/core/market/src/protocol/discovery/builder.rs @@ -1,3 +1,4 @@ +use chrono::Utc; use std::any::{Any, TypeId}; use std::collections::HashMap; use std::sync::Arc; @@ -85,6 +86,7 @@ impl DiscoveryBuilder { lazy_binder_prefix: Mutex::new(None), config: self.config.clone().unwrap(), net_type: net::Config::from_env().unwrap().net_type, + last_bcast_ts: Mutex::new(Utc::now()), offers_receiving_queue: sender, ban_cache: BanCache::new(self.config.unwrap().bcast_node_ban_timeout), }), diff --git a/core/market/src/protocol/negotiation/provider.rs b/core/market/src/protocol/negotiation/provider.rs index 751656ec3e..e50a431bf1 100644 --- a/core/market/src/protocol/negotiation/provider.rs +++ b/core/market/src/protocol/negotiation/provider.rs @@ -174,13 +174,12 @@ impl NegotiationApi { .initial_proposal_received .call(caller, msg.translate(Owner::Provider)) .await - .map_err(|e| { + .inspect_err(|e| { log::warn!( "Negotiation API: initial proposal [{}] rejected. 
Error: {}", proposal_id, &e ); - e }) } diff --git a/core/market/src/rest_api.rs b/core/market/src/rest_api.rs index d06c695ece..53717979bf 100644 --- a/core/market/src/rest_api.rs +++ b/core/market/src/rest_api.rs @@ -86,6 +86,7 @@ pub struct QueryTimeout { pub timeout: f32, } +#[allow(dead_code)] #[derive(Deserialize)] pub struct QueryTimeoutCommandIndex { #[serde(rename = "timeout")] @@ -130,6 +131,7 @@ pub struct QueryScanEvents { pub peer_id: Option, } +#[allow(dead_code)] #[derive(Deserialize, Debug)] pub struct QueryTerminateAgreement { pub reason: Option, diff --git a/core/metrics/Cargo.toml b/core/metrics/Cargo.toml index 7d3a2dcd8f..3865f0e313 100644 --- a/core/metrics/Cargo.toml +++ b/core/metrics/Cargo.toml @@ -11,9 +11,10 @@ license = "LGPL-3.0" [dependencies] ya-core-model = { workspace = true, features = ["identity"] } -ya-service-api = "0.1" -ya-service-api-interfaces = "0.2" -ya-service-bus = { workspace = true } +ya-service-api.workspace = true +ya-service-api-interfaces.workspace = true +ya-service-bus = { workspace = true } +ya-utils-consent = { workspace = true } awc = "3" actix-web = { version = "4", features = ["openssl"] } diff --git a/core/metrics/src/pusher.rs b/core/metrics/src/pusher.rs index e80493fea4..518984611f 100644 --- a/core/metrics/src/pusher.rs +++ b/core/metrics/src/pusher.rs @@ -4,6 +4,7 @@ use lazy_static::lazy_static; use percent_encoding::{utf8_percent_encode, AsciiSet, NON_ALPHANUMERIC}; use tokio::time::{self, Duration, Instant}; +use crate::service::export_metrics_for_push; use ya_core_model::identity::{self, IdentityInfo}; use ya_service_api::MetricsCtx; use ya_service_bus::typed as bus; @@ -26,7 +27,7 @@ pub fn spawn(ctx: MetricsCtx) { log::warn!("Metrics pusher enabled, but no `push_host_url` provided"); } }); - log::info!("Metrics pusher started"); + log::debug!("Metrics pusher started"); } pub async fn push_forever(host_url: &str, ctx: &MetricsCtx) { @@ -54,7 +55,9 @@ pub async fn push_forever(host_url: &str, ctx: &MetricsCtx) { let mut push_interval = time::interval_at(start, Duration::from_secs(60)); let client = Client::builder().timeout(Duration::from_secs(30)).finish(); - log::info!("Starting metrics pusher on address: {push_url}"); + log::info!( + "Metrics will be pushed only if appropriate consent is given, push endpoint: {push_url}" + ); loop { push_interval.tick().await; push(&client, push_url.clone()).await; @@ -62,14 +65,17 @@ pub async fn push_forever(host_url: &str, ctx: &MetricsCtx) { } pub async fn push(client: &Client, push_url: String) { - let metrics = crate::service::export_metrics().await; + let metrics = export_metrics_for_push().await; + if metrics.is_empty() { + return; + } let res = client .put(push_url.as_str()) .send_body(metrics.clone()) .await; match res { Ok(r) if r.status().is_success() => { - log::trace!("Metrics pushed: {}", r.status()) + log::debug!("Metrics pushed: {}", r.status()) } Ok(r) if r.status().is_server_error() => { log::debug!("Metrics server error: {:#?}", r); diff --git a/core/metrics/src/service.rs b/core/metrics/src/service.rs index 80de28ab56..34c995eff0 100644 --- a/core/metrics/src/service.rs +++ b/core/metrics/src/service.rs @@ -1,3 +1,4 @@ +use actix_web::web::Path; use futures::lock::Mutex; use lazy_static::lazy_static; use std::collections::HashMap; @@ -7,6 +8,7 @@ use url::Url; use ya_service_api::{CliCtx, MetricsCtx}; use ya_service_api_interfaces::Provider; +use ya_utils_consent::ConsentScope; use crate::metrics::Metrics; @@ -72,6 +74,15 @@ lazy_static! 
{ static ref METRICS: Arc> = Metrics::new(); } +pub async fn export_metrics_filtered_web(typ: Path) -> String { + let allowed_prefixes = typ.split(',').collect::>(); + log::info!("Allowed prefixes: {:?}", allowed_prefixes); + let filter = MetricsFilter { + allowed_prefixes: &allowed_prefixes, + }; + export_metrics_filtered(Some(filter)).await +} + impl MetricsService { pub async fn gsb>(context: &C) -> anyhow::Result<()> { // This should initialize Metrics. We need to do this before all other services will start. @@ -89,35 +100,86 @@ impl MetricsService { pub fn rest>(_ctx: &C) -> actix_web::Scope { actix_web::Scope::new("metrics-api/v1") // TODO:: add wrapper injecting Bearer to avoid hack in auth middleware - .route("/expose", actix_web::web::get().to(export_metrics)) + .route("/expose", actix_web::web::get().to(export_metrics_local)) .route("/sorted", actix_web::web::get().to(export_metrics_sorted)) + .route( + "/filtered/{typ}", + actix_web::web::get().to(export_metrics_filtered_web), + ) + .route( + "/filtered", + actix_web::web::get().to(export_metrics_for_push), + ) } } + +pub(crate) struct MetricsFilter<'a> { + pub allowed_prefixes: &'a [&'a str], +} + //algorith is returning metrics in random order, which is fine for prometheus, but not for human checking metrics -pub fn sort_metrics_txt(metrics: &str) -> String { +pub fn sort_metrics_txt(metrics: &str, filter: Option>) -> String { let Some(first_line_idx) = metrics.find('\n') else { return metrics.to_string(); }; let (first_line, metrics_content) = metrics.split_at(first_line_idx); - let mut entries = metrics_content + let entries = metrics_content .split("\n\n") //splitting by double new line to get separate metrics .map(|s| { let trimmed = s.trim(); let mut lines = trimmed.split('\n').collect::>(); lines.sort(); //sort by properties - lines.join("\n") + (lines.get(1).unwrap_or(&"").to_string(), lines.join("\n")) }) - .collect::>(); - entries.sort(); //sort by metric name + .collect::>(); + + let mut final_entries = if let Some(filter) = filter { + let mut final_entries = Vec::with_capacity(entries.len()); + for entry in entries { + for prefix in filter.allowed_prefixes { + if entry.0.starts_with(prefix) { + log::info!("Adding entry: {}", entry.0); + final_entries.push(entry.1); + break; + } + } + } + final_entries + } else { + entries.into_iter().map(|(_, s)| s).collect() + }; - first_line.to_string() + "\n" + entries.join("\n\n").as_str() + final_entries.sort(); + + first_line.to_string() + "\n" + final_entries.join("\n\n").as_str() + "\n" +} + +pub async fn export_metrics_filtered(metrics_filter: Option>) -> String { + sort_metrics_txt(&METRICS.lock().await.export(), metrics_filter) } async fn export_metrics_sorted() -> String { - sort_metrics_txt(&METRICS.lock().await.export()) + sort_metrics_txt(&METRICS.lock().await.export(), None) +} + +pub async fn export_metrics_for_push() -> String { + //if consent is not set assume we are not allowed to push metrics + let stats_consent = ya_utils_consent::have_consent_cached(ConsentScope::Stats) + .consent + .unwrap_or(false); + let filter = if stats_consent { + log::info!("Pushing all metrics, because stats consent is given"); + None + } else { + // !internal_consent && !external_consent + log::info!("Not pushing metrics, because stats consent is not given"); + return "".to_string(); + }; + + export_metrics_filtered(filter).await } -pub async fn export_metrics() -> String { - METRICS.lock().await.export() +pub async fn export_metrics_local() -> String { + 
export_metrics_sorted().await } diff --git a/core/model/Cargo.toml b/core/model/Cargo.toml index 4ebd87b184..c506d41192 100644 --- a/core/model/Cargo.toml +++ b/core/model/Cargo.toml @@ -30,7 +30,7 @@ gftp = [] identity = [] market = [] net = [] -payment = ['bigdecimal', 'bitflags'] +payment = ['bigdecimal', 'bitflags', 'anyhow', 'serde_json_canonicalizer', 'sha3'] sgx = ['graphene-sgx'] version = [] @@ -38,6 +38,7 @@ version = [] ya-client-model.workspace = true ya-service-bus = { workspace = true } +anyhow = { version = "1.0", optional = true } bigdecimal = { version = "0.2", features = ["serde"], optional = true } bitflags = { version = "1.2", optional = true } chrono = { version = "0.4", features = ["serde"] } @@ -46,10 +47,11 @@ graphene-sgx = { version = "0.3.3", optional = true } log = "0.4" serde = { version = "1.0", features = ["derive"] } serde_bytes = "0.11.3" +serde_json_canonicalizer = { version = "0.2.0", optional = true } +serde_json = "1.0.113" +sha3 = { version = "0.9", optional = true } structopt = "0.3" strum = { workspace = true } strum_macros = "0.24" thiserror = "1.0.9" -serde_json_canonicalizer = "0.2.0" -serde_json = "1.0.113" diff --git a/core/model/src/driver.rs b/core/model/src/driver.rs index 4afb378adf..d24ff2c23f 100644 --- a/core/model/src/driver.rs +++ b/core/model/src/driver.rs @@ -597,15 +597,15 @@ impl RpcMessage for crate::driver::SignPaymentCanonicalized { pub struct VerifySignature { pub payment: Payment, pub signature: Vec, - pub canonicalized: bool, + pub canonical: Option>, } impl VerifySignature { - pub fn new(payment: Payment, signature: Vec, canonicalized: bool) -> Self { + pub fn new(payment: Payment, signature: Vec, canonical: Option>) -> Self { Self { payment, signature, - canonicalized, + canonical, } } } diff --git a/core/model/src/lib.rs b/core/model/src/lib.rs index 6ab4826f41..fbc236d410 100644 --- a/core/model/src/lib.rs +++ b/core/model/src/lib.rs @@ -21,6 +21,8 @@ pub mod net; #[cfg(feature = "payment")] pub mod payment; +#[cfg(feature = "payment")] +pub mod signable; #[cfg(feature = "gftp")] pub mod gftp; diff --git a/core/model/src/market.rs b/core/model/src/market.rs index 5454ea8df6..a9163a4d01 100644 --- a/core/model/src/market.rs +++ b/core/model/src/market.rs @@ -58,6 +58,17 @@ impl RpcMessage for ListAgreements { type Error = RpcMessageError; } +/// Returns the Agreement. +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct GetLastBcastTs; + +impl RpcMessage for GetLastBcastTs { + const ID: &'static str = "GetLastBcastTs"; + type Item = DateTime; + type Error = RpcMessageError; +} + /// Error message for market service bus API. 
#[derive(thiserror::Error, Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] diff --git a/core/model/src/net.rs b/core/model/src/net.rs index 027dda857f..e3bf1ff4d6 100644 --- a/core/model/src/net.rs +++ b/core/model/src/net.rs @@ -14,7 +14,7 @@ pub const PUBLIC_PREFIX: &str = "/public"; pub const DIAGNOSTIC: &str = "/public/diagnostic/net"; -/// +/// TODO: fill docs /// pub mod local { use std::net::SocketAddr; diff --git a/core/model/src/payment.rs b/core/model/src/payment.rs index 3cbf8c1b5d..be4f2a21b8 100644 --- a/core/model/src/payment.rs +++ b/core/model/src/payment.rs @@ -469,7 +469,9 @@ pub mod local { } #[derive(Clone, Debug, Serialize, Deserialize)] - pub struct GetDrivers {} + pub struct GetDrivers { + pub ignore_legacy_networks: bool, + } #[derive(Clone, Debug, Serialize, Deserialize, thiserror::Error)] pub enum GetDriversError { @@ -664,6 +666,7 @@ pub mod local { pub mod public { use super::*; + use crate::signable::Signable; use ya_client_model::NodeId; pub const BUS_ID: &str = "/public/payment"; @@ -843,7 +846,10 @@ pub mod public { impl SendPayment { pub fn new(payment: Payment, signature: Vec) -> Self { - Self { payment, signature } + Self { + payment: payment.remove_private_info(), + signature, + } } } @@ -865,9 +871,10 @@ pub mod public { impl SendSignedPayment { pub fn new(payment: Payment, signature: Vec) -> Self { - let signed_bytes = serde_json_canonicalizer::to_vec(&payment).unwrap(); + // Unwrap won't happen, because serialization is always possible. + let signed_bytes = payment.canonicalize().unwrap_or_default(); Self { - payment, + payment: payment.remove_private_info(), signature, signed_bytes, } diff --git a/core/model/src/signable.rs b/core/model/src/signable.rs new file mode 100644 index 0000000000..eda92b52d0 --- /dev/null +++ b/core/model/src/signable.rs @@ -0,0 +1,72 @@ +use anyhow::anyhow; +use serde::de::DeserializeOwned; +use serde::Serialize; +use serde_json; +use sha3::{Digest, Sha3_256}; + +use ya_client_model::payment::Payment; + +/// Trait for objects that can be signed ensuring unified way to +/// convert structs to bytes, so signatures can be verified across multiple machines. +pub trait Signable: Serialize + DeserializeOwned + Clone + PartialEq { + /// Serialize structure to vector of bytes in canonical representation. + /// This representation should be binary equal on all machines. + fn canonicalize(&self) -> anyhow::Result> { + let shareable = self.clone().remove_private_info(); + Ok(serde_json_canonicalizer::to_vec(&shareable)?) + } + + /// Function should remove all information that shouldn't be sent to other Nodes. + /// Example: `allocation_id` in `Payment` structure is private information on Requestor + /// side and shouldn't be shared with Provider. + /// This step is necessary to create canonical version that can be signed and later validated + /// by other party. + fn remove_private_info(self) -> Self; + + /// Hash canonical representation of the structure. + /// In most cases we don't want to sign arrays of arbitrary length, so we use hash + /// of canonical representation instead. + fn hash_canonical(&self) -> anyhow::Result> { + Ok(prepare_signature_hash(&self.canonicalize()?)) + } + + /// Verify if `canonical` representation is equivalent to `self`. + /// Since we always get structure and bytes with its canonical representation, + /// then verifying signature is not enough. We need to check if `canonical` was + /// created from structure itself. 
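+ /// For example, `PaymentDriver::verify_signature` calls `verify_canonical` on the
+ /// received bytes first, and only on success recovers the signer from the signature
+ /// over `prepare_signature_hash(&canonical)`.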
+ fn verify_canonical(&self, canonical: &[u8]) -> anyhow::Result<()> { + let from_canonical = serde_json::from_slice::(canonical) + .map_err(|e| anyhow!("Failed to deserialize canonical representation: {e}"))? + .remove_private_info(); + let reference = self.clone().remove_private_info(); + + if reference != from_canonical { + return Err(anyhow!( + "Canonical representation doesn't match the structure" + )); + } + Ok(()) + } +} + +pub fn prepare_signature_hash(bytes: &[u8]) -> Vec { + let mut hasher = Sha3_256::new(); + hasher.update(bytes); + hasher.finalize().to_vec() +} + +impl Signable for Payment { + fn remove_private_info(mut self) -> Self { + // We remove allocation ID from syncs because allocations are not transferred to peers and + // their IDs would be unknown to the recipient. + for agreement_payment in &mut self.agreement_payments.iter_mut() { + agreement_payment.allocation_id = None; + } + + for activity_payment in &mut self.activity_payments.iter_mut() { + activity_payment.allocation_id = None; + } + + self + } +} diff --git a/core/net/Cargo.toml b/core/net/Cargo.toml index 473bd8fadb..dce7433912 100644 --- a/core/net/Cargo.toml +++ b/core/net/Cargo.toml @@ -19,19 +19,17 @@ ya-client-model.workspace = true ya-core-model = { workspace = true, features = ["net", "identity"] } ya-relay-client = { workspace = true } -#ya-relay-client = "0.6" -#ya-relay-client = { path = "../../../ya-relay/client" } ya-sb-proto = { workspace = true } ya-sb-util = { workspace = true } -ya-service-api = "0.1" -ya-service-api-interfaces = "0.2" +ya-service-api.workspace = true +ya-service-api-interfaces.workspace = true ya-service-bus = { workspace = true, features = ["tls"] } -ya-utils-networking = "0.2" +ya-utils-networking.workspace = true ya-packet-trace = { git = "https://github.com/golemfactory/ya-packet-trace" } -actix = "0.13" -actix-web = "4" +actix.workspace = true +actix-web.workspace = true anyhow = "1.0" chrono = "0.4" futures = "0.3" diff --git a/core/net/src/hybrid/service.rs b/core/net/src/hybrid/service.rs index 93316abbf8..be99af34ab 100644 --- a/core/net/src/hybrid/service.rs +++ b/core/net/src/hybrid/service.rs @@ -46,8 +46,6 @@ use crate::hybrid::crypto::IdentityCryptoProvider; use crate::service::NET_TYPE; use crate::{broadcast, NetType}; -const DEFAULT_NET_RELAY_HOST: &str = "127.0.0.1:7464"; - type BusSender = mpsc::Sender; type BusReceiver = mpsc::Receiver; type NetSender = mpsc::Sender; @@ -232,7 +230,7 @@ async fn build_client( config: Arc, crypto: impl CryptoProvider + 'static, ) -> anyhow::Result { - let addr = relay_addr(&config) + let addr = resolve_relay_addr(&config) .await .map_err(|e| anyhow!("Resolving hybrid NET relay server failed. 
Error: {}", e))?; let url = Url::parse(&format!("udp://{addr}"))?; @@ -247,13 +245,53 @@ async fn build_client( .await } -async fn relay_addr(config: &Config) -> anyhow::Result { +struct RetryArgs { + max_retries: u64, + start_retry_timeout: u64, + add_seconds_every_retry: u64, +} +async fn resolve_srv_record_with_retries(prefix: &str, args: RetryArgs) -> anyhow::Result { + let mut retries = 0; + let mut timeout_s = args.start_retry_timeout; + log::info!("Resolving {prefix} SRV record..."); + loop { + match resolver::resolve_yagna_srv_record(prefix).await { + Ok(addr) => { + log::info!("SRV record {prefix} resolved to: {addr}"); + break Ok(addr); + } + Err(err) => { + if retries >= args.max_retries { + return Err(anyhow!( + "Failed to resolve {prefix} SRV record: {err} after {retries} retries" + )); + } + log::warn!( + "Failed to resolve {prefix} SRV record: {err}. Trying again in {timeout_s} seconds", + ); + tokio::time::sleep(std::time::Duration::from_secs(timeout_s)).await; + retries += 1; + timeout_s += args.add_seconds_every_retry; + log::info!("Retrying ({retries}) to resolve {prefix} SRV record..."); + } + } + } +} + +async fn resolve_relay_addr(config: &Config) -> anyhow::Result { let host_port = match &config.host { Some(val) => val.to_string(), - None => resolver::resolve_yagna_srv_record("_net_relay._udp") - .await - // FIXME: remove - .unwrap_or_else(|_| DEFAULT_NET_RELAY_HOST.to_string()), + None => { + resolve_srv_record_with_retries( + "_net_relay._udp", + RetryArgs { + max_retries: 5, + start_retry_timeout: 10, + add_seconds_every_retry: 5, + }, + ) + .await? + } }; log::info!("Hybrid NET relay server configured on url: udp://{host_port}"); diff --git a/core/payment-driver/base/Cargo.toml b/core/payment-driver/base/Cargo.toml index bce3a305ae..4fa076f543 100644 --- a/core/payment-driver/base/Cargo.toml +++ b/core/payment-driver/base/Cargo.toml @@ -8,7 +8,7 @@ edition = "2018" default = [] [dependencies] -actix = { version = "0.13", default-features = false } +actix = { workspace = true, default-features = false } anyhow = "1.0" async-trait = "0.1" bigdecimal = { version = "0.2" } @@ -25,6 +25,9 @@ num-traits = "0.2" num-derive = "0.3" r2d2 = "0.8" sha3 = "0.10" +serde = "1.0" +serde_json = "1.0" +serde_json_canonicalizer = "0.2.0" thiserror = "1.0" tokio = { version = "1", features = ["macros"] } @@ -35,8 +38,8 @@ ya-core-model = { workspace = true, features = [ "identity", "payment", ] } -ya-persistence = "0.3" +ya-persistence.workspace = true ya-service-bus = { workspace = true } -serde_json_canonicalizer = "0.2.0" + [dev-dependencies] diff --git a/core/payment-driver/base/src/bus.rs b/core/payment-driver/base/src/bus.rs index cbb17bcd7d..4df5e9d80e 100644 --- a/core/payment-driver/base/src/bus.rs +++ b/core/payment-driver/base/src/bus.rs @@ -75,7 +75,7 @@ pub async fn bind_service( move |_, dr, c, m| async move { dr.sign_payment( c, m).await } ) .bind_with_processor( - move |_, dr, c, m| async move { dr.sign_payment_canonicalized(c, m).await } + move |_, dr, c, m| async move { dr.sign_payment_canonical(c, m).await } ) .bind_with_processor( move |_, dr, c, m| async move { dr.verify_signature( c, m).await } diff --git a/core/payment-driver/base/src/driver.rs b/core/payment-driver/base/src/driver.rs index cd60c88ea6..40d5ff168e 100644 --- a/core/payment-driver/base/src/driver.rs +++ b/core/payment-driver/base/src/driver.rs @@ -22,6 +22,7 @@ pub use ya_client_model::payment::network::Network; pub use ya_client_model::NodeId; pub use 
ya_core_model::identity::event::IdentityEvent; pub use ya_core_model::identity::Error as IdentityError; +use ya_core_model::signable::{prepare_signature_hash, Signable}; #[async_trait(?Send)] pub trait PaymentDriver { @@ -63,57 +64,52 @@ pub trait PaymentDriver { async fn schedule_payment( &self, - caller: String, msg: SchedulePayment, ) -> Result; async fn verify_payment( &self, - caller: String, msg: VerifyPayment, ) -> Result; async fn validate_allocation( &self, - caller: String, msg: ValidateAllocation, ) -> Result; async fn release_deposit( &self, - caller: String, msg: DriverReleaseDeposit, ) -> Result<(), GenericError>; async fn sign_payment( &self, - _caller: String, msg: SignPayment, ) -> Result, GenericError> { - let payload = utils::payment_hash(&msg.0); - let node_id = msg.0.payer_id; + let payment = msg.0.remove_private_info(); + let payload = utils::payment_hash(&payment); + let node_id = payment.payer_id; bus::sign(node_id, payload).await } - async fn sign_payment_canonicalized( + async fn sign_payment_canonical( &self, - _caller: String, msg: SignPaymentCanonicalized, ) -> Result, GenericError> { - let payload = utils::payment_hash_canonicalized(&msg.0); - let node_id = msg.0.payer_id; + let payment = msg.0; + let payload = payment.hash_canonical().map_err(GenericError::new)?; + let node_id = payment.payer_id; bus::sign(node_id, payload).await } async fn verify_signature( &self, - _caller: String, msg: VerifySignature, ) -> Result { @@ -125,9 +121,19 @@ pub trait PaymentDriver { let s: [u8; 32] = msg.signature[33..65].try_into().unwrap(); let signature = Signature { v, r, s }; - let payload = if msg.canonicalized { - utils::payment_hash_canonicalized(&msg.payment) + let payload = if let Some(payload) = msg.canonical { + match msg.payment.verify_canonical(payload.as_slice()) { + Ok(_) => prepare_signature_hash(&payload), + Err(e) => { + log::info!( + "Signature verification: canonical representation doesn't match struct: {e}" + ); + return Ok(false); + } + } } else { + // Backward compatibility version for older Nodes that don't send canonical + // signed bytes and used Payment debug formatting as representation. 
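+ // That legacy hash is `prepare_signature_hash` over `format!("{:?}", payment)`,
+ // see `utils::payment_hash` below.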
utils::payment_hash(&msg.payment) }; let pub_key = match signature.recover(payload.as_slice()) { @@ -140,7 +146,6 @@ pub trait PaymentDriver { async fn status( &self, - _caller: String, _msg: DriverStatus, ) -> Result, DriverStatusError>; diff --git a/core/payment-driver/base/src/utils.rs b/core/payment-driver/base/src/utils.rs index abfa2e5519..7141853e39 100644 --- a/core/payment-driver/base/src/utils.rs +++ b/core/payment-driver/base/src/utils.rs @@ -7,12 +7,13 @@ use bigdecimal::BigDecimal; use chrono::{DateTime, Utc}; use ethereum_types::U256; use num_bigint::ToBigInt; -use sha3::{Digest, Sha3_256}; // Local uses use crate::db::models::PaymentEntity; use crate::model::{PaymentDetails, SchedulePayment}; + use ya_client_model::payment::Payment; +use ya_core_model::signable::prepare_signature_hash; const PRECISION: u64 = 1_000_000_000_000_000_000; @@ -67,13 +68,5 @@ pub fn u256_to_big_dec(v: U256) -> BigDecimal { } pub fn payment_hash(payment: &Payment) -> Vec { - let mut hasher = Sha3_256::new(); - hasher.update(format!("{:?}", payment).as_bytes()); - hasher.finalize().to_vec() -} - -pub fn payment_hash_canonicalized(payment: &Payment) -> Vec { - let mut hasher = Sha3_256::new(); - hasher.update(serde_json_canonicalizer::to_vec(&payment).unwrap()); - hasher.finalize().to_vec() + prepare_signature_hash(format!("{:?}", payment).as_bytes()) } diff --git a/core/payment-driver/dummy/Cargo.toml b/core/payment-driver/dummy/Cargo.toml index 3633a76df9..ab3b430e57 100644 --- a/core/payment-driver/dummy/Cargo.toml +++ b/core/payment-driver/dummy/Cargo.toml @@ -14,9 +14,9 @@ ya-core-model = { workspace = true, features = [ "payment", ] } ya-client-model.workspace = true -ya-payment-driver = "0.3" -ya-persistence = "0.3" -ya-service-api-interfaces = "0.2" +ya-payment-driver.workspace = true +ya-persistence.workspace = true +ya-service-api-interfaces.workspace = true ya-service-bus = { workspace = true } anyhow = "1.0" diff --git a/core/payment-driver/dummy/src/service.rs b/core/payment-driver/dummy/src/service.rs index f01c0a09f3..694b50b3c2 100644 --- a/core/payment-driver/dummy/src/service.rs +++ b/core/payment-driver/dummy/src/service.rs @@ -5,6 +5,7 @@ use uuid::Uuid; use ya_client_model::payment::{DriverDetails, Network}; use ya_core_model::driver::*; use ya_core_model::payment::local as payment_srv; +use ya_core_model::signable::Signable; use ya_service_bus::typed::service; use ya_service_bus::{typed as bus, RpcEndpoint}; @@ -146,7 +147,9 @@ async fn fund(_db: (), _caller: String, _msg: Fund) -> Result Result, GenericError> { - Ok(ya_payment_driver::utils::payment_hash(&msg.0)) + Ok(ya_payment_driver::utils::payment_hash( + &msg.0.remove_private_info(), + )) } async fn verify_signature( diff --git a/core/payment-driver/erc20/Cargo.toml b/core/payment-driver/erc20/Cargo.toml index 15440bca45..57863c52f3 100644 --- a/core/payment-driver/erc20/Cargo.toml +++ b/core/payment-driver/erc20/Cargo.toml @@ -42,13 +42,13 @@ web3 = { version = "0.19.0", default-features = false, features = [ rust_decimal = "1" ## yagna dependencies -ya-payment-driver = "0.3" -ya-core-model = { workspace = true } +ya-payment-driver.workspace = true +ya-core-model.workspace = true ya-client-model.workspace = true -ya-service-api-interfaces = "0.2" +ya-service-api-interfaces.workspace = true ya-utils-futures.workspace = true -ya-utils-networking = "0.2" -erc20_payment_lib = { workspace = true } +ya-utils-networking.workspace = true +erc20_payment_lib.workspace = true [dev-dependencies] actix-rt = "2.7" diff --git 
a/core/payment-driver/erc20/config-payments.toml b/core/payment-driver/erc20/config-payments.toml index 4a30be038f..b16cc47d2d 100644 --- a/core/payment-driver/erc20/config-payments.toml +++ b/core/payment-driver/erc20/config-payments.toml @@ -112,7 +112,7 @@ dns-source = "holesky.rpc-node.dev.golem.network." [chain.mumbai] chain-name = "Mumbai testnet" chain-id = 80001 -currency-symbol = "tMATIC" +currency-symbol = "POL" priority-fee = 1.0 max-fee-per-gas = 14.0 transaction-timeout = 60 @@ -144,7 +144,7 @@ dns-source = "mumbai.rpc-node.dev.golem.network." [chain.polygon] chain-name = "Polygon mainnet" chain-id = 137 -currency-symbol = "MATIC" +currency-symbol = "POL" priority-fee = 30.111 max-fee-per-gas = 500.0 transaction-timeout = 100 @@ -165,7 +165,7 @@ dns-source = "polygon.rpc-node.dev.golem.network." [chain.amoy] chain-name = "Amoy testnet" chain-id = 80002 -currency-symbol = "tMATIC" +currency-symbol = "POL" priority-fee = 30.111 max-fee-per-gas = 500.0 transaction-timeout = 100 diff --git a/core/payment-driver/erc20/src/erc20/wallet.rs b/core/payment-driver/erc20/src/erc20/wallet.rs index 5f15af6517..72c35c0317 100644 --- a/core/payment-driver/erc20/src/erc20/wallet.rs +++ b/core/payment-driver/erc20/src/erc20/wallet.rs @@ -339,12 +339,11 @@ pub async fn verify_tx(tx_hash: &str, network: Network) -> Result anyhow::Result<()> { ya_payment::service::bind_service( &db, processor, - BindOptions::default().run_sync_job(false), - Arc::new(PaymentConfig::from_env()?), + Arc::new(PaymentConfig::from_env()?.run_sync_job(false)), ); log::debug!("bind_service()"); diff --git a/core/payment/src/api/invoices.rs b/core/payment/src/api/invoices.rs index af7755133f..81f8df64f8 100644 --- a/core/payment/src/api/invoices.rs +++ b/core/payment/src/api/invoices.rs @@ -528,11 +528,11 @@ async fn accept_invoice( .await; if let Ok(response) = send_result { - log::debug!("AcceptInvoice delivered"); + log::debug!("AcceptInvoice delivered for [{invoice_id}]"); dao.mark_accept_sent(invoice_id.clone(), node_id).await?; response?; } else { - log::debug!("AcceptInvoice not delivered"); + log::debug!("AcceptInvoice not delivered for [{invoice_id}]"); sync_dao.upsert(issuer_id).await?; SYNC_NOTIFS_NOTIFY.notify_one(); } diff --git a/core/payment/src/cli.rs b/core/payment/src/cli.rs index cae16ce052..70a0984bd5 100644 --- a/core/payment/src/cli.rs +++ b/core/payment/src/cli.rs @@ -1,13 +1,14 @@ mod rpc; +use std::collections::HashMap; // External crates use bigdecimal::BigDecimal; use chrono::{DateTime, Utc}; -use serde_json::to_value; +use serde_json::{to_value, Value}; use std::str::FromStr; use std::time::UNIX_EPOCH; use structopt::*; -use ya_client_model::payment::DriverStatusProperty; +use ya_client_model::payment::{DriverDetails, DriverStatusProperty}; use ya_core_model::payment::local::NetworkName; // Workspace uses @@ -553,41 +554,56 @@ Typically operation should take less than 1 minute. 
))) } DriverSubcommand::List => { - let drivers = bus::service(pay::BUS_ID).call(pay::GetDrivers {}).await??; + let drivers: HashMap = bus::service(pay::BUS_ID) + .call(pay::GetDrivers { + ignore_legacy_networks: false, + }) + .await??; if ctx.json_output { return CommandOutput::object(drivers); } - Ok(ResponseTable { - columns: vec![ - "driver".to_owned(), - "network".to_owned(), - "default?".to_owned(), - "token".to_owned(), - "platform".to_owned(), - ], - values: drivers - .iter() - .flat_map(|(driver, dd)| { - dd.networks - .iter() - .flat_map(|(network, n)| { - n.tokens - .iter() - .map(|(token, platform)| - serde_json::json! {[ + + let mut values: Vec = drivers + .iter() + .flat_map(|(driver, dd)| { + dd.networks + .iter() + .flat_map(|(network, n)| { + n.tokens + .iter() + .map(|(token, platform)| + serde_json::json! {[ driver, network, if &dd.default_network == network { "X" } else { "" }, token, platform, ]} - ) - .collect::>() - }) - .collect::>() - }) - .collect(), - }.into()) + ) + .collect::>() + }) + .collect::>() + }) + .collect(); + + values.sort_by(|a, b| { + //sort by index 4 (which means platform, be careful when changing these values) + let left_str = a.as_array().unwrap()[4].as_str().unwrap(); + let right_str = b.as_array().unwrap()[4].as_str().unwrap(); + left_str.cmp(right_str) + }); + + Ok(ResponseTable { + columns: vec![ + "driver".to_owned(), //index 0 + "network".to_owned(), //index 1 + "default?".to_owned(), //index 2 + "token".to_owned(), //index 3 + "platform".to_owned(), //index 4 - we are sorting by platform, be careful when changing these values + ], + values, + } + .into()) } }, PaymentCli::ReleaseAllocations => { diff --git a/core/payment/src/config.rs b/core/payment/src/config.rs index 9e8fab3574..afdfa38470 100644 --- a/core/payment/src/config.rs +++ b/core/payment/src/config.rs @@ -8,6 +8,13 @@ pub struct Config { #[derive(StructOpt, Clone)] pub struct SyncNotifBackoffConfig { + /// Enables background job for synchronizing invoice / debit note document status. + /// + /// This depends on the identity service being enabled to work. If you're working with a limited + /// subsets of services (e.g. in payment_api.rs example) you might wish to disable that. + #[structopt(parse(try_from_str), default_value = "true")] + pub run_sync_job: bool, + #[structopt(long, env = "YA_PAYMENT_SYNC_NOTIF_BACKOFF_INITIAL_DELAY", parse(try_from_str = humantime::parse_duration), default_value = "30s")] pub initial_delay: std::time::Duration, @@ -38,4 +45,9 @@ impl Config { // or default values if ENV variables are not set. 
Config::from_iter_safe(&[""]) } + + pub fn run_sync_job(mut self, value: bool) -> Self { + self.sync_notif_backoff.run_sync_job = value; + self + } } diff --git a/core/payment/src/dao/invoice.rs b/core/payment/src/dao/invoice.rs index a02750ba7c..adc2be6225 100644 --- a/core/payment/src/dao/invoice.rs +++ b/core/payment/src/dao/invoice.rs @@ -373,7 +373,10 @@ impl<'c> InvoiceDao<'c> { let invoices: Vec = query!() .filter(dsl::owner_id.eq(owner_id)) .filter(dsl::send_accept.eq(true)) - .filter(dsl::status.eq(DocumentStatus::Accepted.to_string())) + .filter(dsl::status.eq_any([ + DocumentStatus::Accepted.to_string(), + DocumentStatus::Settled.to_string(), + ])) .filter(agreement_dsl::peer_id.eq(peer_id)) .load(conn)?; diff --git a/core/payment/src/dao/payment.rs b/core/payment/src/dao/payment.rs index 9c336d1d40..7fffc9c786 100644 --- a/core/payment/src/dao/payment.rs +++ b/core/payment/src/dao/payment.rs @@ -351,10 +351,15 @@ impl<'c> PaymentDao<'c> { .await } - pub async fn list_unsent(&self, peer_id: Option) -> DbResult> { + pub async fn list_unsent( + &self, + owner: NodeId, + peer_id: Option, + ) -> DbResult> { readonly_transaction(self.pool, "payment_dao_list_unsent", move |conn| { let mut query = dsl::pay_payment .filter(dsl::send_payment.eq(true)) + .filter(dsl::owner_id.eq(&owner)) .into_boxed(); if let Some(peer_id) = peer_id { query = query.filter(dsl::peer_id.eq(&peer_id)); diff --git a/core/payment/src/dao/sync_notifs.rs b/core/payment/src/dao/sync_notifs.rs index eb356045ba..5a8805e94d 100644 --- a/core/payment/src/dao/sync_notifs.rs +++ b/core/payment/src/dao/sync_notifs.rs @@ -1,6 +1,7 @@ use crate::error::DbResult; use crate::models::sync_notifs::{ReadObj, WriteObj}; use crate::schema::pay_sync_needed_notifs::dsl; + use chrono::NaiveDateTime; use diesel::{self, QueryDsl, RunQueryDsl}; diff --git a/core/payment/src/lib.rs b/core/payment/src/lib.rs index 41b3d1e39d..1e28cd8811 100644 --- a/core/payment/src/lib.rs +++ b/core/payment/src/lib.rs @@ -2,9 +2,10 @@ #![allow(unused_variables)] // Crate under development pub use crate::config::Config; use crate::processor::PaymentProcessor; + use futures::FutureExt; -use service::BindOptions; use std::{sync::Arc, time::Duration}; + use ya_core_model::payment::local as pay_local; use ya_persistence::executor::DbExecutor; use ya_service_api_interfaces::*; @@ -58,7 +59,7 @@ impl PaymentService { let config = Arc::new(Config::from_env()?); let processor = Arc::new(PaymentProcessor::new(db.clone())); - self::service::bind_service(&db, processor.clone(), BindOptions::default(), config); + self::service::bind_service(&db, processor.clone(), config); tokio::task::spawn(async move { processor.release_allocations(false).await; diff --git a/core/payment/src/payment_sync.rs b/core/payment/src/payment_sync.rs index 6e5a8f1357..a1831ca7ef 100644 --- a/core/payment/src/payment_sync.rs +++ b/core/payment/src/payment_sync.rs @@ -1,16 +1,14 @@ +use chrono::{DateTime, Utc}; use std::sync::Arc; use std::{collections::HashSet, time::Duration}; - -use crate::utils::remove_allocation_ids_from_payment; -use crate::Config; - -use chrono::{DateTime, Utc}; use tokio::sync::Notify; + use ya_client_model::{ payment::{Acceptance, InvoiceEventType}, NodeId, }; use ya_core_model::driver::SignPaymentCanonicalized; +use ya_core_model::signable::Signable; use ya_core_model::{ driver::{driver_bus_id, SignPayment}, identity::{self, IdentityInfo}, @@ -28,12 +26,13 @@ use ya_persistence::executor::DbExecutor; use ya_service_bus::{timeout::IntoTimeoutFuture, typed, 
RpcEndpoint}; use crate::dao::{DebitNoteDao, InvoiceDao, InvoiceEventDao, PaymentDao, SyncNotifsDao}; +use crate::Config; const REMOTE_CALL_TIMEOUT: Duration = Duration::from_secs(30); async fn payment_sync( db: &DbExecutor, - current: NodeId, + owner: NodeId, peer_id: NodeId, ) -> anyhow::Result<(PaymentSync, PaymentSyncWithBytes)> { let payment_dao: PaymentDao = db.as_dao(); @@ -43,26 +42,26 @@ async fn payment_sync( let mut payments = Vec::default(); let mut payments_canonicalized = Vec::default(); - for payment in payment_dao.list_unsent(Some(peer_id)).await? { + for payment in payment_dao.list_unsent(owner, Some(peer_id)).await? { let platform_components = payment.payment_platform.split('-').collect::>(); let driver = &platform_components[0]; let bus_id = driver_bus_id(driver); - let payment = remove_allocation_ids_from_payment(payment); + let payment = payment.remove_private_info(); let signature = typed::service(bus_id.clone()) .send(SignPayment(payment.clone())) .await??; payments.push(SendPayment::new(payment.clone(), signature)); - let signature_canonicalized = typed::service(bus_id.clone()) + let signature_canonical = typed::service(bus_id.clone()) .send(SignPaymentCanonicalized(payment.clone())) .await??; - payments_canonicalized.push(SendSignedPayment::new(payment, signature_canonicalized)); + payments_canonicalized.push(SendSignedPayment::new(payment.clone(), signature_canonical)); } let mut invoice_accepts = Vec::default(); - for invoice in invoice_dao.unsent_accepted(current, peer_id).await? { + for invoice in invoice_dao.unsent_accepted(owner, peer_id).await? { invoice_accepts.push(AcceptInvoice::new( invoice.invoice_id, Acceptance { @@ -74,7 +73,7 @@ async fn payment_sync( } let mut invoice_rejects = Vec::default(); - for invoice in invoice_dao.unsent_rejected(current, peer_id).await? { + for invoice in invoice_dao.unsent_rejected(owner, peer_id).await? { let events = invoice_event_dao .get_for_invoice_id( invoice.invoice_id.clone(), @@ -98,7 +97,7 @@ async fn payment_sync( } let mut debit_note_accepts = Vec::default(); - for debit_note in debit_note_dao.unsent_accepted(current, peer_id).await? { + for debit_note in debit_note_dao.unsent_accepted(owner, peer_id).await? 
{ debit_note_accepts.push(AcceptDebitNote::new( debit_note.debit_note_id, Acceptance { @@ -131,24 +130,44 @@ async fn mark_all_sent(db: &DbExecutor, owner_id: NodeId, msg: PaymentSync) -> a let debit_note_dao: DebitNoteDao = db.as_dao(); for payment_send in msg.payments { + log::info!( + "Delivered Payment confirmation [{}] to [{}]", + payment_send.payment.payment_id, + payment_send.payment.payee_id + ); payment_dao .mark_sent(payment_send.payment.payment_id) .await?; } for invoice_accept in msg.invoice_accepts { + log::info!( + "Delivered Invoice [{}] acceptance to [{}]", + invoice_accept.invoice_id, + invoice_accept.issuer_id + ); invoice_dao .mark_accept_sent(invoice_accept.invoice_id, owner_id) .await?; } for invoice_reject in msg.invoice_rejects { + log::info!( + "Delivered Invoice [{}] rejection to [{}]", + invoice_reject.invoice_id, + invoice_reject.issuer_id + ); invoice_dao .mark_reject_sent(invoice_reject.invoice_id, owner_id) .await?; } for debit_note_accept in msg.debit_note_accepts { + log::info!( + "Delivered DebitNote [{}] acceptance to [{}]", + debit_note_accept.debit_note_id, + debit_note_accept.issuer_id + ); debit_note_dao .mark_accept_sent(debit_note_accept.debit_note_id, owner_id) .await?; @@ -157,55 +176,46 @@ async fn mark_all_sent(db: &DbExecutor, owner_id: NodeId, msg: PaymentSync) -> a Ok(()) } -async fn send_sync_notifs_for_identity( - identity: NodeId, +async fn send_sync_notifs_for_peer( + peer: NodeId, db: &DbExecutor, config: &Config, cutoff: &DateTime, -) -> anyhow::Result> { +) -> anyhow::Result<()> { + log::debug!("Processing PaymentSync for peer [{peer}]."); + let dao: SyncNotifsDao = db.as_dao(); - let backoff_config = &config.sync_notif_backoff; + let mut all_delivered = true; + let identities = typed::service(identity::BUS_ID) + .call(ya_core_model::identity::List {}) + .await??; - let exp_backoff = |n| { - let secs = backoff_config.initial_delay * backoff_config.exponent.powi(n) as u32; - let capped: Duration = if let Some(cap) = backoff_config.cap { - ::std::cmp::min(cap, secs) - } else { - secs - }; - capped - }; - let peers_to_notify = dao - .list() - .await? - .into_iter() - .filter(|entry| { - let next_deadline = entry.last_ping + exp_backoff(entry.retries as _); - next_deadline.and_utc() <= *cutoff && entry.retries <= backoff_config.max_retries as i32 - }) - .map(|entry| entry.id) - .collect::>(); + for identity in identities { + let owner = identity.node_id; + if identity.is_locked { + log::info!("Skipping PaymentSync for [{owner}] since identity is locked."); + continue; + } - for peer in peers_to_notify { - let (msg, msg_with_bytes) = payment_sync(db, identity, peer).await?; + let (msg, msg_with_bytes) = payment_sync(db, owner, peer).await?; - log::debug!("Sending PaymentSync as [{identity}] to [{peer}]."); - let mut result = ya_net::from(identity) + log::debug!("Sending PaymentSync as [{owner}] to [{peer}]."); + let mut result = ya_net::from(owner) .to(peer) .service(ya_core_model::payment::public::BUS_ID) .call(msg_with_bytes.clone()) .await; - log::debug!("Sending PaymentSync as [{identity}] to [{peer}] result: {result:?}"); + log::debug!("Sending PaymentSync as [{owner}] to [{peer}] result: {result:?}"); // Centralnet and hybridnet return different errors when the endpoint is not supported, so // we have to resort to checking error message. // This message will be sent even if the node can handle PaymentSyncWithBytes but is not - // connected at all, but there is no standard way to differenciate between these cases. 
+ // connected at all, but there is no standard way to differentiate between these cases. if matches!(&result, Err(e) if e.to_string().contains("endpoint address not found")) { - log::debug!("Sending PaymentSync as [{identity}] to [{peer}]: PaymentSyncWithBytes endpoint not found, falling back to PaymentSync."); - result = ya_net::from(identity) + log::debug!("Sending PaymentSync as [{owner}] to [{peer}]: PaymentSyncWithBytes endpoint not found, falling back to PaymentSync."); + result = ya_net::from(owner) .to(peer) .service(ya_core_model::payment::public::BUS_ID) .call(msg.clone()) @@ -213,20 +223,57 @@ async fn send_sync_notifs_for_identity( } if matches!(&result, Ok(Ok(_))) { - log::debug!("Delivered PaymentSync to [{peer}] as [{identity}]."); - mark_all_sent(db, identity, msg).await?; - dao.drop(peer).await?; + log::debug!("Delivered PaymentSync to [{peer}] as [{owner}]."); + mark_all_sent(db, owner, msg).await?; } else { + all_delivered = false; let err = match result { Err(x) => x.to_string(), Ok(Err(x)) => x.to_string(), Ok(Ok(_)) => unreachable!(), }; - log::debug!("Couldn't deliver PaymentSync to [{peer}] as [{identity}]: {err}"); + log::debug!("Couldn't deliver PaymentSync to [{peer}] as [{owner}]: {err}"); dao.increment_retry(peer, cutoff.naive_utc()).await?; } } + if all_delivered { + dao.drop(peer).await?; + } + + Ok(()) +} + +async fn send_sync_notifs(db: &DbExecutor, config: &Config) -> anyhow::Result> { + let cutoff = Utc::now(); + let dao: SyncNotifsDao = db.as_dao(); + + let backoff_config = &config.sync_notif_backoff; + + let exp_backoff = |n| { + let secs = backoff_config.initial_delay * backoff_config.exponent.powi(n) as u32; + let capped: Duration = if let Some(cap) = backoff_config.cap { + ::std::cmp::min(cap, secs) + } else { + secs + }; + capped + }; + let peers_to_notify = dao + .list() + .await? + .into_iter() + .filter(|entry| { + let next_deadline = entry.last_ping + exp_backoff(entry.retries as _); + next_deadline.and_utc() <= cutoff && entry.retries <= backoff_config.max_retries as i32 + }) + .map(|entry| entry.id) + .collect::>(); + + for peer in peers_to_notify { + send_sync_notifs_for_peer(peer, db, config, &cutoff).await?; + } + // Next sleep duration is calculated after all events were updated to pick up entries // that were not delivered in current run. let next_sleep_duration = dao @@ -237,7 +284,7 @@ async fn send_sync_notifs_for_identity( let next_deadline = entry.last_ping + exp_backoff(entry.retries as _); next_deadline.and_utc() }) - .filter(|deadline| deadline > cutoff) + .filter(|deadline| deadline > &cutoff) .min() .map(|ts| ts - cutoff) .and_then(|dur| dur.to_std().ok()); @@ -245,35 +292,6 @@ async fn send_sync_notifs_for_identity( Ok(next_sleep_duration) } -async fn send_sync_notifs(db: &DbExecutor, config: &Config) -> anyhow::Result> { - let cutoff = Utc::now(); - - let identities = typed::service(identity::BUS_ID) - .call(ya_core_model::identity::List {}) - .await??; - - let mut next_sleep_duration: Option = None; - for identity in identities { - if identity.is_locked { - continue; - } - let sleep_duration = - send_sync_notifs_for_identity(identity.node_id, db, config, &cutoff).await?; - next_sleep_duration = match sleep_duration { - None => next_sleep_duration, - Some(duration) => { - let result_duration = match next_sleep_duration { - None => duration, - Some(last_duration) => ::std::cmp::min(duration, last_duration), - }; - Some(result_duration) - } - }; - } - - Ok(next_sleep_duration) -} - lazy_static::lazy_static! 
{ pub static ref SYNC_NOTIFS_NOTIFY: Notify = Notify::new(); } diff --git a/core/payment/src/processor.rs b/core/payment/src/processor.rs index e7d3f25bae..ef9c03798d 100644 --- a/core/payment/src/processor.rs +++ b/core/payment/src/processor.rs @@ -9,7 +9,6 @@ use crate::error::processor::{ use crate::models::order::ReadObj as DbOrder; use crate::payment_sync::SYNC_NOTIFS_NOTIFY; use crate::timeout_lock::{RwLockTimeoutExt, TimedMutex}; -use crate::utils::remove_allocation_ids_from_payment; use actix_web::web::Data; use bigdecimal::{BigDecimal, Zero}; @@ -23,7 +22,6 @@ use std::sync::Arc; use std::time::Duration; use thiserror::Error; use tokio::sync::RwLock; - use ya_client_model::payment::allocation::Deposit; use ya_client_model::payment::{ Account, ActivityPayment, AgreementPayment, DriverDetails, Network, Payment, @@ -231,8 +229,18 @@ impl DriverRegistry { } } - pub fn get_drivers(&self) -> HashMap { - self.drivers.clone() + pub fn get_drivers(&self, ignore_legacy_networks: bool) -> HashMap { + let mut drivers = self.drivers.clone(); + + let legacy_networks = ["mumbai", "goerli", "rinkeby"]; + if ignore_legacy_networks { + drivers.values_mut().for_each(|driver| { + driver + .networks + .retain(|name, _| !legacy_networks.contains(&name.as_str())) + }) + } + drivers } pub fn get_network( @@ -382,11 +390,14 @@ impl PaymentProcessor { .map_err(|_| GetAccountsError::InternalTimeout) } - pub async fn get_drivers(&self) -> Result, GetDriversError> { + pub async fn get_drivers( + &self, + ignore_legacy_networks: bool, + ) -> Result, GetDriversError> { self.registry .timeout_read(REGISTRY_LOCK_TIMEOUT) .await - .map(|registry| registry.get_drivers()) + .map(|registry| registry.get_drivers(ignore_legacy_networks)) .map_err(|_| GetDriversError::InternalTimeout) } @@ -428,9 +439,8 @@ impl PaymentProcessor { let payer_id: NodeId; let payee_id: NodeId; let payment_id: String; - let mut payment: Payment; - { + let payment: Payment = { let db_executor = self .db_executor .timeout_lock(DB_LOCK_TIMEOUT, "notify payment 1") @@ -493,13 +503,10 @@ impl PaymentProcessor { .get(payment_id.clone(), payer_id) .await? .unwrap(); - payment = signed_payment.payload; - } - - // Allocation IDs are requestor's private matter and should not be sent to provider - payment = remove_allocation_ids_from_payment(payment); + signed_payment.payload + }; - let signature_canonicalized = driver_endpoint(&driver) + let signature_canonical = driver_endpoint(&driver) .send(driver::SignPaymentCanonicalized(payment.clone())) .await??; let signature = driver_endpoint(&driver) @@ -511,7 +518,7 @@ impl PaymentProcessor { // Whether the provider was correctly notified of this fact is another matter. counter!("payment.invoices.requestor.paid", 1); let msg = SendPayment::new(payment.clone(), signature); - let msg_with_bytes = SendSignedPayment::new(payment, signature_canonicalized); + let msg_with_bytes = SendSignedPayment::new(payment.clone(), signature_canonical); let db_executor = Arc::clone(&self.db_executor); @@ -541,16 +548,17 @@ impl PaymentProcessor { let payment_dao: PaymentDao = db_executor.as_dao(); let sync_dao: SyncNotifsDao = db_executor.as_dao(); + // Always add new type of signature. Compatibility is for older Provider nodes only. + payment_dao + .add_signature( + payment_id.clone(), + msg_with_bytes.signature.clone(), + msg_with_bytes.signed_bytes.clone(), + ) + .await?; + if mark_sent { - payment_dao.mark_sent(payment_id.clone()).await?; - // Always add new type of signature. 
Compatibility is for older Provider nodes only. - payment_dao - .add_signature( - payment_id, - msg_with_bytes.signature.clone(), - msg_with_bytes.signed_bytes.clone(), - ) - .await?; + payment_dao.mark_sent(payment_id).await?; } else { let sync_dao: SyncNotifsDao = db_executor.as_dao(); sync_dao.upsert(payee_id).await?; @@ -577,7 +585,7 @@ impl PaymentProcessor { .call(msg) .map(|res| match res { Ok(Ok(_)) => Ok(()), - Err(ya_service_bus::Error::GsbBadRequest(_)) => { + Err(e) if e.to_string().contains("endpoint address not found") => { Err(PaymentSendToGsbError::NotSupported) } Err(err) => { @@ -648,8 +656,7 @@ impl PaymentProcessor { &self, payment: Payment, signature: Vec, - canonicalized: bool, - signed_bytes: Option>, + canonical: Option>, ) -> Result<(), VerifyPaymentError> { // TODO: Split this into smaller functions let platform = payment.payment_platform.clone(); @@ -667,7 +674,7 @@ impl PaymentProcessor { .send(driver::VerifySignature::new( payment.clone(), signature.clone(), - canonicalized, + canonical.clone(), )) .await?? { @@ -793,13 +800,13 @@ impl PaymentProcessor { } // Insert payment into database (this operation creates and updates all related entities) - if signed_bytes.is_none() { + if canonical.is_none() { payment_dao .insert_received(payment, payee_id, None, None) .await?; } else { payment_dao - .insert_received(payment, payee_id, Some(signature), signed_bytes) + .insert_received(payment, payee_id, Some(signature), canonical) .await?; } } diff --git a/core/payment/src/service.rs b/core/payment/src/service.rs index a4c63c7915..ebf5428b7c 100644 --- a/core/payment/src/service.rs +++ b/core/payment/src/service.rs @@ -13,39 +13,11 @@ use ya_core_model::payment::public::{AcceptDebitNote, AcceptInvoice, PaymentSync use ya_persistence::executor::DbExecutor; use ya_service_bus::typed::{service, ServiceBinder}; -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct BindOptions { - /// Enables background job for synchronizing invoice / debit note document status. - /// - /// This depends on the identity service being enabled to work. If you're working with a limited - /// subsets of services (e.g. in payment_api.rs example) you might wish to disable that. - pub run_sync_job: bool, -} - -impl BindOptions { - /// Configure the `run_async_job` option. 
- pub fn run_sync_job(mut self, value: bool) -> Self { - self.run_sync_job = value; - self - } -} - -impl Default for BindOptions { - fn default() -> Self { - BindOptions { run_sync_job: true } - } -} - -pub fn bind_service( - db: &DbExecutor, - processor: Arc, - opts: BindOptions, - config: Arc, -) { +pub fn bind_service(db: &DbExecutor, processor: Arc, config: Arc) { log::debug!("Binding payment service to service bus"); local::bind_service(db, processor.clone()); - public::bind_service(db, processor, opts, config); + public::bind_service(db, processor, config); log::debug!("Successfully bound payment service to service bus"); } @@ -520,7 +492,7 @@ mod local { _caller: String, msg: GetDrivers, ) -> Result, GetDriversError> { - processor.get_drivers().await + processor.get_drivers(msg.ignore_legacy_networks).await } async fn payment_driver_status( @@ -534,7 +506,12 @@ mod local { None => { #[allow(clippy::iter_kv_map)] // Unwrap is provably safe because NoError can't be instanciated - match service(PAYMENT_BUS_ID).call(GetDrivers {}).await { + match service(PAYMENT_BUS_ID) + .call(GetDrivers { + ignore_legacy_networks: false, + }) + .await + { Ok(drivers) => drivers, Err(e) => return Err(PaymentDriverStatusError::Internal(e.to_string())), } @@ -834,12 +811,7 @@ mod public { use ya_persistence::types::Role; use ya_std_utils::LogErr; - pub fn bind_service( - db: &DbExecutor, - processor: Arc, - opts: BindOptions, - config: Arc, - ) { + pub fn bind_service(db: &DbExecutor, processor: Arc, config: Arc) { log::debug!("Binding payment public service to service bus"); ServiceBinder::new(BUS_ID, db, processor) @@ -857,7 +829,7 @@ mod public { .bind_with_processor(sync_payment) .bind_with_processor(sync_payment_with_bytes); - if opts.run_sync_job { + if config.sync_notif_backoff.run_sync_job { send_sync_notifs_job(db.clone(), config); send_sync_requests(db.clone()); } @@ -1281,16 +1253,7 @@ mod public { sender_id: String, msg: SendPayment, ) -> Result { - send_payment_impl( - db, - processor, - sender_id, - msg.payment, - false, - msg.signature, - None, - ) - .await + send_payment_impl(db, processor, sender_id, msg.payment, msg.signature, None).await } async fn send_payment_with_bytes( @@ -1304,7 +1267,6 @@ mod public { processor, sender_id, msg.payment, - true, msg.signature, Some(msg.signed_bytes), ) @@ -1316,9 +1278,8 @@ mod public { processor: Arc, sender_id: String, payment: Payment, - canonicalized: bool, signature: Vec, - signed_bytes: Option>, + canonical: Option>, ) -> Result { let payment_id = payment.payment_id.clone(); if sender_id != payment.payer_id.to_string() { @@ -1336,7 +1297,7 @@ mod public { "Verify payment processor started." 
); let res = match processor - .verify_payment(payment, signature, canonicalized, signed_bytes) + .verify_payment(payment, signature, canonical) .await { Ok(_) => { diff --git a/core/payment/src/timeout_lock.rs b/core/payment/src/timeout_lock.rs index fd96e5b5f5..b186bea142 100644 --- a/core/payment/src/timeout_lock.rs +++ b/core/payment/src/timeout_lock.rs @@ -134,9 +134,8 @@ impl TimedMutex { ) -> Result, Elapsed> { let result = tokio::time::timeout(duration, self.mutex.lock()) .await - .map_err(|e| { + .inspect_err(|e| { log::warn!("Failed to lock mutex in scenario {0}", name); - e })?; let id = Uuid::new_v4().to_simple().to_string(); diff --git a/core/payment/src/utils.rs b/core/payment/src/utils.rs index 74da6641ff..9f6346c5fb 100644 --- a/core/payment/src/utils.rs +++ b/core/payment/src/utils.rs @@ -4,7 +4,6 @@ use futures::Future; use serde::{Deserialize, Serialize}; use std::time::Duration; use ya_client_model::market::{Agreement, Role}; -use ya_client_model::payment::Payment; use ya_core_model::market; use ya_service_bus::{typed as bus, RpcEndpoint}; @@ -174,21 +173,6 @@ pub async fn listen_for_events( .unwrap_or(Ok(vec![])) } -pub fn remove_allocation_ids_from_payment(mut payment: Payment) -> Payment { - // We remove allocation ID from syncs because allocations are not transferred to peers and - // their IDs would be unknown to the recipient. - // let mut payment = payment.clone(); - for agreement_payment in &mut payment.agreement_payments.iter_mut() { - agreement_payment.allocation_id = None; - } - - for activity_payment in &mut payment.activity_payments.iter_mut() { - activity_payment.allocation_id = None; - } - - payment -} - pub mod response { use actix_web::HttpResponse; use serde::Serialize; diff --git a/core/payment/tests/test_allocation.rs b/core/payment/tests/test_allocation.rs index ac3f59c83f..9585ba22a2 100644 --- a/core/payment/tests/test_allocation.rs +++ b/core/payment/tests/test_allocation.rs @@ -25,7 +25,7 @@ async fn test_release_allocation(ctx: &mut DroppableTestContext) -> anyhow::Resu let node = MockNode::new(net, "node-1", dir.path()) .with_identity() - .with_payment() + .with_payment(None) .with_fake_market(); node.bind_gsb().await?; node.start_server(ctx).await?; @@ -173,7 +173,7 @@ async fn test_validate_allocation(ctx: &mut DroppableTestContext) -> anyhow::Res let node = MockNode::new(net, "node-1", dir.path()) .with_identity() - .with_payment() + .with_payment(None) .with_fake_market(); node.bind_gsb().await?; node.start_server(ctx).await?; diff --git a/core/payment/tests/test_debit_notes.rs b/core/payment/tests/test_debit_notes.rs index 79e22db446..c30f2c477a 100644 --- a/core/payment/tests/test_debit_notes.rs +++ b/core/payment/tests/test_debit_notes.rs @@ -26,7 +26,7 @@ async fn test_debit_note_flow(ctx: &mut DroppableTestContext) -> anyhow::Result< let node = MockNode::new(net, "node-1", dir) .with_identity() - .with_payment() + .with_payment(None) .with_fake_market() .with_fake_activity(); node.bind_gsb().await?; diff --git a/core/payment/tests/test_invoices.rs b/core/payment/tests/test_invoices.rs index b530b3a979..32fec6a6ea 100644 --- a/core/payment/tests/test_invoices.rs +++ b/core/payment/tests/test_invoices.rs @@ -27,7 +27,7 @@ async fn test_invoice_flow(ctx: &mut DroppableTestContext) -> anyhow::Result<()> let node = MockNode::new(net, "node-1", dir) .with_identity() - .with_payment() + .with_payment(None) .with_fake_market(); node.bind_gsb().await?; node.start_server(ctx).await?; diff --git a/core/payment/tests/test_payment_signature.rs 
b/core/payment/tests/test_payment_signature.rs new file mode 100644 index 0000000000..ecda5eb43c --- /dev/null +++ b/core/payment/tests/test_payment_signature.rs @@ -0,0 +1,202 @@ +use bigdecimal::BigDecimal; +use chrono::Utc; +use std::str::FromStr; +use std::time::Duration; +use test_context::test_context; +use ya_client_model::NodeId; + +use ya_client_model::payment::{NewInvoice, Payment}; +use ya_core_model::payment::public::SendSignedPayment; +use ya_core_model::signable::Signable; +use ya_framework_basic::async_drop::DroppableTestContext; +use ya_framework_basic::log::enable_logs; +use ya_framework_basic::{resource, temp_dir}; +use ya_framework_mocks::market::FakeMarket; +use ya_framework_mocks::net::MockNet; +use ya_framework_mocks::node::MockNode; +use ya_framework_mocks::payment::fake_payment::FakePayment; +use ya_framework_mocks::payment::{Driver, PaymentRestExt}; +use ya_service_bus::RpcEndpoint; + +#[cfg_attr(not(feature = "framework-test"), ignore)] +#[test_context(DroppableTestContext)] +#[serial_test::serial] +async fn test_payment_signature(ctx: &mut DroppableTestContext) -> anyhow::Result<()> { + enable_logs(false); + + let dir = temp_dir!("test_payment_signature")?; + let dir = dir.path(); + + let net = MockNet::new().bind(); + let node1 = MockNode::new(net.clone(), "node-1", dir) + .with_identity() + .with_payment(None) + .with_fake_market(); + node1.bind_gsb().await?; + node1.start_server(ctx).await?; + + let identity = node1.get_identity()?; + let appkey_prov = identity.create_identity_key("provider").await?; + let appkey_req = identity + .create_from_private_key(&resource!("ci-requestor-1.key.priv")) + .await?; + + node1 + .get_payment()? + .fund_account(Driver::Erc20, &appkey_req.identity.to_string()) + .await?; + + let requestor = node1.rest_payments(&appkey_req.key)?; + let provider = node1.rest_payments(&appkey_prov.key)?; + + log::info!("Creating mock Agreement..."); + let agreement = + FakeMarket::create_fake_agreement(appkey_req.identity, appkey_prov.identity).unwrap(); + node1.get_market()?.add_agreement(agreement.clone()).await; + + log::info!("Creating allocation..."); + let new_allocation = FakePayment::default_allocation(&agreement, BigDecimal::from(10u64))?; + let allocation = requestor.create_allocation(&new_allocation).await?; + log::info!( + "Allocation created. ({}) Issuing invoice...", + allocation.allocation_id + ); + + let invoice = provider + .issue_invoice(&NewInvoice { + agreement_id: agreement.agreement_id.to_string(), + activity_ids: None, + amount: BigDecimal::from(2u64), + payment_due_date: Utc::now(), + }) + .await?; + + log::info!( + "Invoice issued ({}). Sending invoice...", + invoice.invoice_id + ); + provider.send_invoice(&invoice.invoice_id).await?; + + log::info!( + "Invoice sent. Accepting Invoice ({})...", + invoice.invoice_id + ); + requestor.get_invoice(&invoice.invoice_id).await.unwrap(); + requestor + .simple_accept_invoice(&invoice, &allocation) + .await + .unwrap(); + + // Payments are processed, and we don't want payment confirmation to reach Provider. + // This is hack which will block communication between Requestor and Provider despite them + // using the same node. + // We want to send payment confirmation manually later. This way we will be able to modify + // the message and check more different conditions. 
+ net.break_network_for(appkey_prov.identity); + + let payments = requestor + .wait_for_invoice_payment::(&invoice.invoice_id, Duration::from_secs(5 * 60), None) + .await?; + assert_eq!(payments.len(), 1); + let payment = requestor + .get_signed_payment(&payments[0].payment_id) + .await?; + + log::info!("=== Validate if Payment confirmation has correct signatures."); + let signature = payment.signature.unwrap(); + let payment = payment.payload; + + assert_eq!(payment.amount, invoice.amount); + assert_eq!(signature.signed_bytes, payment.canonicalize().unwrap()); + payment.verify_canonical(&signature.signed_bytes).unwrap(); + + let correct = SendSignedPayment { + payment: payment.clone().remove_private_info(), + signature: signature.signature.clone(), + signed_bytes: signature.signed_bytes.clone(), + }; + + log::info!("=== Check if incorrect signature will be rejected."); + let mut sig_incorrect = signature.signature.clone(); + sig_incorrect[20] ^= sig_incorrect[20]; + let incorrect_signature = SendSignedPayment { + signature: sig_incorrect, + ..correct.clone() + }; + + let payment_gsb = node1.get_payment()?.gsb_public_endpoint(); + let result = payment_gsb + .send_as(invoice.recipient_id, incorrect_signature) + .await?; + assert!(result.is_err_and(|e| e.to_string().contains("Invalid payment signature"))); + + log::info!("=== Check if incorrect signed bytes will be rejected."); + let mut bytes_incorrect = signature.signed_bytes.clone(); + bytes_incorrect[20] ^= bytes_incorrect[20]; + let incorrect_signed_bytes = SendSignedPayment { + signed_bytes: bytes_incorrect.clone(), + ..correct.clone() + }; + + let result = payment_gsb + .send_as(invoice.recipient_id, incorrect_signed_bytes) + .await?; + assert!(result.is_err_and(|e| e.to_string().contains("Invalid payment signature"))); + + log::info!( + "=== Requestor shouldn't be able to report bigger amount than he paid on blockchain." + ); + let mut malicious_payment = payment.clone().remove_private_info(); + malicious_payment.amount += BigDecimal::from(1u64); + let mismatch_with_transaction_amount = SendSignedPayment { + payment: malicious_payment, + ..correct.clone() + }; + + let result = payment_gsb + .send_as(invoice.recipient_id, mismatch_with_transaction_amount) + .await?; + assert!(result.is_err()); + + log::info!("=== Requestor shouldn't be able to change payer info."); + let incorrect_payer = SendSignedPayment { + payment: Payment { + payer_id: NodeId::from_str("0x19659f72c4ad88f7d9934c8809deb9535ce0e4b8").unwrap(), + ..payment.clone().remove_private_info() + }, + ..correct.clone() + }; + + let result = payment_gsb + .send_as(invoice.recipient_id, incorrect_payer) + .await?; + assert!(result.is_err()); + + log::info!("=== Requestor shouldn't be able to change payer address info."); + let incorrect_payer = SendSignedPayment { + payment: Payment { + payer_addr: "0x19659f72c4ad88f7d9934c8809deb9535ce0e4b8".to_string(), + ..payment.clone().remove_private_info() + }, + ..correct.clone() + }; + + let result = payment_gsb + .send_as(invoice.recipient_id, incorrect_payer) + .await?; + assert!(result.is_err()); + + log::info!("=== Correct signature should be accepted."); + payment_gsb + .send_as(invoice.recipient_id, correct.clone()) + .await? 
+ .unwrap(); + + log::info!("=== Payment confirmation sent for the second time should be rejected (could result in multi-spend)."); + let result = payment_gsb + .send_as(invoice.recipient_id, correct.clone()) + .await?; + assert!(result.is_err()); + + Ok(()) +} diff --git a/core/payment/tests/test_payment_sync.rs b/core/payment/tests/test_payment_sync.rs new file mode 100644 index 0000000000..d836576d0b --- /dev/null +++ b/core/payment/tests/test_payment_sync.rs @@ -0,0 +1,460 @@ +use bigdecimal::BigDecimal; +use chrono::Utc; +use std::str::FromStr; +use std::time::Duration; +use test_context::test_context; +use tokio::sync::mpsc::error::TryRecvError; + +use ya_core_model::payment::public::{ + AcceptDebitNote, AcceptInvoice, Ack, PaymentSync, PaymentSyncWithBytes, SendDebitNote, + SendInvoice, SendPayment, +}; +use ya_framework_basic::async_drop::DroppableTestContext; +use ya_framework_basic::log::enable_logs; +use ya_framework_basic::{resource, temp_dir}; +use ya_framework_mocks::market::FakeMarket; +use ya_framework_mocks::net::MockNet; +use ya_framework_mocks::node::MockNode; +use ya_framework_mocks::payment::fake_payment::FakePayment; +use ya_framework_mocks::payment::{Driver, PaymentRestExt}; +use ya_service_bus::timeout::IntoTimeoutFuture; +use ya_service_bus::RpcEndpoint; + +#[cfg_attr(not(feature = "framework-test"), ignore)] +#[test_context(DroppableTestContext)] +#[serial_test::serial] +async fn test_payment_sync(ctx: &mut DroppableTestContext) -> anyhow::Result<()> { + enable_logs(false); + + let dir = temp_dir!("test_payment_sync")?; + let dir = dir.path(); + + let net = MockNet::new().bind(); + + // Notifies will be sent in regular intervals, with values more appropriate for tests + // taking less time to trigger. + let mut config = ya_payment::config::Config::from_env()?; + config.sync_notif_backoff.run_sync_job = true; + config.sync_notif_backoff.initial_delay = Duration::from_secs(2); + config.sync_notif_backoff.cap = Some(Duration::from_secs(2)); + + let node1 = MockNode::new(net.clone(), "node-1", dir) + .with_identity() + .with_payment(Some(config)) + .with_fake_market() + .with_fake_activity(); + node1.bind_gsb().await?; + node1.start_server(ctx).await?; + + let appkey_req = node1 + .get_identity()? + .create_from_private_key(&resource!("ci-requestor-1.key.priv")) + .await?; + + node1 + .get_payment()? + .fund_account(Driver::Erc20, &appkey_req.identity.to_string()) + .await?; + + let node2 = MockNode::new(net.clone(), "node-2", dir) + .with_prefixed_gsb() + .with_identity() + .with_fake_payment(); + node2.bind_gsb().await?; + + let appkey_prov = node2 + .get_identity()? + .create_identity_key("provider") + .await?; + + let agreement = + FakeMarket::create_fake_agreement(appkey_req.identity, appkey_prov.identity).unwrap(); + node1.get_market()?.add_agreement(agreement.clone()).await; + + let payment = node1.get_payment()?; + let requestor = node1.rest_payments(&appkey_req.key)?; + + log::info!("Creating allocation..."); + let new_allocation = FakePayment::default_allocation(&agreement, BigDecimal::from(10u64))?; + let allocation = requestor.create_allocation(&new_allocation).await?; + log::info!("Allocation created. ({})", allocation.allocation_id); + + log::info!("================== Scenario 1 =================="); + log::info!("Scenario: Invoice immediate acceptance"); + // Send Invoice to Requestor node. + // Requestor is able to immediately accept. 
+ log::info!("Issuing invoice..."); + let invoice = FakePayment::fake_invoice(&agreement, BigDecimal::from_str("0.2")?)?; + payment + .gsb_public_endpoint() + .send_as(invoice.issuer_id, SendInvoice(invoice.clone())) + .await??; + + let fake_payment = node2.get_fake_payment()?; + let mut channel = fake_payment.message_channel::(Ok(Ack {})); + let mut debit_channel = fake_payment.message_channel::(Ok(Ack {})); + + log::info!("Accepting Invoice ({})...", invoice.invoice_id); + requestor.get_invoice(&invoice.invoice_id).await.unwrap(); + requestor + .simple_accept_invoice(&invoice, &allocation) + .await + .unwrap(); + + let (_from, accept) = channel.recv().await.unwrap(); + assert_eq!(accept.invoice_id, invoice.invoice_id); + assert_eq!(accept.acceptance.total_amount_accepted, invoice.amount); + + log::info!("================== Scenario 2 =================="); + log::info!("Scenario: DebitNote delayed acceptance"); + log::info!("Scenario: Invoice delayed acceptance"); + // Send Invoice to Requestor node. + // Requestor attempt to accept Invoice but network is broken. + // Acceptance will be sent later as payment sync. + let agreement = + FakeMarket::create_fake_agreement(appkey_req.identity, appkey_prov.identity).unwrap(); + node1.get_market()?.add_agreement(agreement.clone()).await; + + let activity_id = node1 + .get_activity()? + .create_activity(&agreement.agreement_id) + .await; + + log::info!("Sending DebitNote..."); + let debit_note = + FakePayment::fake_debit_note(&agreement, &activity_id, BigDecimal::from_str("0.1")?, None)?; + payment + .gsb_public_endpoint() + .send_as(debit_note.issuer_id, SendDebitNote(debit_note.clone())) + .await??; + + log::info!("Issuing invoice..."); + let invoice = FakePayment::fake_invoice(&agreement, BigDecimal::from_str("0.2")?)?; + payment + .gsb_public_endpoint() + .send_as(invoice.issuer_id, SendInvoice(invoice.clone())) + .await??; + + net.break_network_for(appkey_prov.identity); + + log::info!("Accepting DebitNote ({})", debit_note.debit_note_id); + requestor + .get_debit_note(&debit_note.debit_note_id) + .await + .unwrap(); + requestor + .simple_accept_debit_note(&debit_note, &allocation) + .await + .unwrap(); + + log::info!("Accepting Invoice ({})", invoice.invoice_id); + requestor.get_invoice(&invoice.invoice_id).await.unwrap(); + requestor + .simple_accept_invoice(&invoice, &allocation) + .await + .unwrap(); + + // We expect that AcceptInvoice wasn't delivered. + matches!(channel.try_recv().unwrap_err(), TryRecvError::Empty); + matches!(debit_channel.try_recv().unwrap_err(), TryRecvError::Empty); + + let mut sync_channel = fake_payment.message_channel::(Ok(Ack {})); + net.enable_network_for(appkey_prov.identity); + + // We expect that PaymentSync will be delivered within 4s. + // Looping because sync is sent from multiple identities on Requestor Node. 
+ loop { + let (from, sync) = sync_channel + .recv() + .timeout(Some(Duration::from_secs(4))) + .await + .unwrap() + .unwrap(); + + if from != appkey_req.identity { + continue; + } + + assert_eq!(sync.invoice_accepts.len(), 1); + assert_eq!(sync.invoice_accepts[0].invoice_id, invoice.invoice_id); + assert_eq!( + sync.invoice_accepts[0].acceptance.total_amount_accepted, + invoice.amount + ); + + assert_eq!(sync.debit_note_accepts.len(), 1); + assert_eq!( + sync.debit_note_accepts[0].debit_note_id, + debit_note.debit_note_id + ); + assert_eq!( + sync.debit_note_accepts[0].acceptance.total_amount_accepted, + debit_note.total_amount_due + ); + break; + } + + log::info!("================== Scenario 3 =================="); + log::info!("Scenario: Payment delayed delivery"); + // Send Invoice to Requestor node. + // Requestor accepts Invoice, but network is broken before he sends payment confirmation. + // Payment confirmation will be sent later as payment sync. + let agreement = + FakeMarket::create_fake_agreement(appkey_req.identity, appkey_prov.identity).unwrap(); + node1.get_market()?.add_agreement(agreement.clone()).await; + + log::info!("Issuing invoice..."); + let invoice = FakePayment::fake_invoice(&agreement, BigDecimal::from_str("0.2")?)?; + payment + .gsb_public_endpoint() + .send_as(invoice.issuer_id, SendInvoice(invoice.clone())) + .await??; + + log::info!("Accepting Invoice ({})", invoice.invoice_id); + + requestor.get_invoice(&invoice.invoice_id).await.unwrap(); + requestor + .simple_accept_invoice(&invoice, &allocation) + .await + .unwrap(); + net.break_network_for(appkey_prov.identity); + + // Wait until Invoice will be paid on Requestor side. + let payments = requestor + .wait_for_invoice_payment::(&invoice.invoice_id, Duration::from_secs(5 * 60), None) + .await?; + assert_eq!(payments.len(), 1); + + net.enable_network_for(appkey_prov.identity); + // We expect that PaymentSync will be delivered within 4s. + // Looping because sync is sent from multiple identities on Requestor Node. + loop { + let (from, sync) = sync_channel + .recv() + .timeout(Some(Duration::from_secs(4))) + .await + .unwrap() + .unwrap(); + + if from != appkey_req.identity { + continue; + } + + assert!(!sync.payments.is_empty()); + let payment = sync + .payments + .iter() + .find(|p| { + p.payment + .agreement_payments + .iter() + .any(|a| a.agreement_id == agreement.agreement_id) + }) + .unwrap(); + + assert_eq!(payment.payment.amount, invoice.amount); + break; + } + + Ok(()) +} + +#[cfg_attr(not(feature = "framework-test"), ignore)] +#[test_context(DroppableTestContext)] +#[serial_test::serial] +async fn test_payment_sync_fallback(ctx: &mut DroppableTestContext) -> anyhow::Result<()> { + enable_logs(false); + + let dir = temp_dir!("test_payment_sync_fallback")?; + let dir = dir.path(); + + let net = MockNet::new().bind(); + + // Notifies will be sent in regular intervals, with values more appropriate for tests + // taking less time to trigger. + let mut config = ya_payment::config::Config::from_env()?; + config.sync_notif_backoff.run_sync_job = true; + config.sync_notif_backoff.initial_delay = Duration::from_secs(2); + config.sync_notif_backoff.cap = Some(Duration::from_secs(2)); + + let node1 = MockNode::new(net.clone(), "node-1", dir) + .with_identity() + .with_payment(Some(config)) + .with_fake_market(); + node1.bind_gsb().await?; + node1.start_server(ctx).await?; + + let appkey_req = node1 + .get_identity()? 
+ .create_from_private_key(&resource!("ci-requestor-1.key.priv")) + .await?; + + node1 + .get_payment()? + .fund_account(Driver::Erc20, &appkey_req.identity.to_string()) + .await?; + + let node2 = MockNode::new(net.clone(), "node-2", dir) + .with_prefixed_gsb() + .with_identity() + .with_fake_payment(); + node2.bind_gsb().await?; + + let appkey_prov = node2 + .get_identity()? + .create_identity_key("provider") + .await?; + + let agreement = + FakeMarket::create_fake_agreement(appkey_req.identity, appkey_prov.identity).unwrap(); + node1.get_market()?.add_agreement(agreement.clone()).await; + + let payment = node1.get_payment()?; + let requestor = node1.rest_payments(&appkey_req.key)?; + + log::info!("Creating allocation..."); + let new_allocation = FakePayment::default_allocation(&agreement, BigDecimal::from(10u64))?; + let allocation = requestor.create_allocation(&new_allocation).await?; + log::info!("Allocation created. ({})", allocation.allocation_id); + + let fake_payment = node2.get_fake_payment()?; + fake_payment.unbind_public().await; + let mut channel = fake_payment.message_channel::(Ok(Ack {})); + let mut sync_channel = fake_payment.message_channel::(Ok(Ack {})); + let mut payment_channel = fake_payment.message_channel::(Ok(Ack {})); + + log::info!("================== Scenario 1 =================="); + log::info!("Scenario: Payment immediate delivery with fallback"); + // Send Invoice to Requestor node. + // Requestor accepts Invoice and waits for payment confirmation. + // Payment will be sent immediately, but PaymentSyncWithBytes has no handler on Provider, so fallback + // mechanism should be used. + let agreement = + FakeMarket::create_fake_agreement(appkey_req.identity, appkey_prov.identity).unwrap(); + node1.get_market()?.add_agreement(agreement.clone()).await; + + log::info!("Issuing invoice..."); + let invoice = FakePayment::fake_invoice(&agreement, BigDecimal::from_str("0.2")?)?; + payment + .gsb_public_endpoint() + .send_as(invoice.issuer_id, SendInvoice(invoice.clone())) + .await??; + + log::info!("Accepting Invoice ({})", invoice.invoice_id); + requestor.get_invoice(&invoice.invoice_id).await.unwrap(); + requestor + .simple_accept_invoice(&invoice, &allocation) + .await + .unwrap(); + + // Check if AcceptInvoice was delivered. + let (_from, accept) = channel.recv().await.unwrap(); + assert_eq!(accept.invoice_id, invoice.invoice_id); + assert_eq!(accept.acceptance.total_amount_accepted, invoice.amount); + + // Wait until Invoice will be paid on Requestor side. + let payments = requestor + .wait_for_invoice_payment::(&invoice.invoice_id, Duration::from_secs(5 * 60), None) + .await?; + assert_eq!(payments.len(), 1); + + loop { + let (from, confirmation) = payment_channel + .recv() + .timeout(Some(Duration::from_secs(4))) + .await + .unwrap() + .unwrap(); + + if from != appkey_req.identity { + continue; + } + + assert!(confirmation + .payment + .agreement_payments + .iter() + .any(|a| a.agreement_id == agreement.agreement_id)); + + assert_eq!(confirmation.payment.amount, invoice.amount); + break; + } + + log::info!("================== Scenario 2 =================="); + log::info!("Scenario: Payment delayed delivery with fallback"); + log::info!("Scenario: Invoice acceptance delayed delivery"); + // Send Invoice to Requestor node. + // Requestor accepts Invoice and waits for payment confirmation. + // In the meantime network is broken, so both confirmations can't be immediately delivered. 
+ // Payment will be sent later, but PaymentSyncWithBytes has no handler on Provider, so fallback + // mechanism should be used. + let agreement = + FakeMarket::create_fake_agreement(appkey_req.identity, appkey_prov.identity).unwrap(); + node1.get_market()?.add_agreement(agreement.clone()).await; + + log::info!("Issuing invoice..."); + let invoice = FakePayment::fake_invoice(&agreement, BigDecimal::from_str("0.2")?)?; + payment + .gsb_public_endpoint() + .send_as(invoice.issuer_id, SendInvoice(invoice.clone())) + .await??; + + // Break network to enforce Invoice acceptance delayed delivery. + net.break_network_for(appkey_prov.identity); + + log::info!( + "Accepting Invoice ({})... with broken network", + invoice.invoice_id + ); + requestor.get_invoice(&invoice.invoice_id).await.unwrap(); + requestor + .simple_accept_invoice(&invoice, &allocation) + .await + .unwrap(); + + // Wait until Invoice will be paid on Requestor side. + let payments = requestor + .wait_for_invoice_payment::(&invoice.invoice_id, Duration::from_secs(5 * 60), None) + .await?; + assert_eq!(payments.len(), 1); + + net.enable_network_for(appkey_prov.identity); + // We expect that PaymentSync will be delivered within 4s. + // Looping because sync is sent from multiple identities on Requestor Node. + loop { + let (from, sync) = sync_channel + .recv() + .timeout(Some(Duration::from_secs(4))) + .await + .unwrap() + .unwrap(); + + if from != appkey_req.identity { + continue; + } + + assert!(!sync.payments.is_empty()); + let payment = sync + .payments + .iter() + .find(|p| { + p.payment + .agreement_payments + .iter() + .any(|a| a.agreement_id == agreement.agreement_id) + }) + .unwrap(); + assert_eq!(payment.payment.amount, invoice.amount); + + // Check if AcceptInvoice was delivered. + assert!(!sync.invoice_accepts.is_empty()); + assert_eq!(sync.invoice_accepts[0].invoice_id, invoice.invoice_id); + assert_eq!( + sync.invoice_accepts[0].acceptance.total_amount_accepted, + invoice.amount + ); + + break; + } + Ok(()) +} diff --git a/core/payment/tests/tutorial_how_to_use_module_tests.rs b/core/payment/tests/tutorial_how_to_use_module_tests.rs index b3b1ee279e..66ead162a1 100644 --- a/core/payment/tests/tutorial_how_to_use_module_tests.rs +++ b/core/payment/tests/tutorial_how_to_use_module_tests.rs @@ -1,9 +1,12 @@ use bigdecimal::BigDecimal; +use std::str::FromStr; use test_context::test_context; use ya_client_model::payment::allocation::{PaymentPlatform, PaymentPlatformEnum}; -use ya_client_model::payment::NewAllocation; +use ya_client_model::payment::{Acceptance, NewAllocation}; use ya_core_model::payment::local::GetStatus; +use ya_core_model::payment::public::{AcceptInvoice, Ack, SendInvoice}; +use ya_service_bus::RpcEndpoint; use ya_framework_basic::async_drop::DroppableTestContext; use ya_framework_basic::log::enable_logs; @@ -12,6 +15,7 @@ use ya_framework_basic::{resource, temp_dir}; use ya_framework_mocks::market::FakeMarket; use ya_framework_mocks::net::MockNet; use ya_framework_mocks::node::MockNode; +use ya_framework_mocks::payment::fake_payment::FakePayment; use ya_framework_mocks::payment::Driver; // Tests should be always wrapped in these macros. @@ -41,10 +45,10 @@ async fn tutorial_how_to_use_module_tests(ctx: &mut DroppableTestContext) -> any // Create MockNode which is container for all Golem modules and represents // single node in tests. - let node = MockNode::new(net, "node-1", dir) + let node = MockNode::new(net.clone(), "node-1", dir) // Request instantiating wrappers around real Golem modules. 
.with_identity() - .with_payment() + .with_payment(None) // Mock market module with very basic implementation, which will allow to manually // create fake Agreements without need for Offers broadcasting and negotiation process. .with_fake_market(); node.bind_gsb().await?; node.start_server(ctx).await?; @@ -123,5 +127,61 @@ async fn tutorial_how_to_use_module_tests(ctx: &mut DroppableTestContext) -> any status.amount ); + // Instead of using a real yagna node, we can use FakePayment. + // It allows us to capture GSB traffic to test whether the real payment module sends messages correctly. + let node3 = MockNode::new(net, "node-3", dir) + // All GSB handlers will be bound on addresses prefixed by the node name. + // After creating an identity, it will be registered in MockNet and all GSB messages + // will be routed to those prefixes. + .with_prefixed_gsb() + .with_identity() + .with_fake_payment(); + node3.bind_gsb().await?; + + let identity = node3.get_identity()?.create_identity_key("node-3").await?; + + let agreement = + FakeMarket::create_fake_agreement(appkey_req.identity, identity.identity).unwrap(); + node.get_market()?.add_agreement(agreement.clone()).await; + + // Sending an Invoice from the FakePayment node to the real payment module. + // This can be done by sending a GSB message directly, simulating what would happen + // after calling the REST API. + // `gsb_public_endpoint` returns the GSB address on which the `node` payment module was bound. + // Note that you should never build GSB addresses yourself, because the `MockNode` implementation + // decides about them, and they can differ from real yagna. + let invoice = FakePayment::fake_invoice(&agreement, BigDecimal::from_str("0.2")?)?; + node.get_payment()? + .gsb_public_endpoint() + .send_as(invoice.issuer_id, SendInvoice(invoice.clone())) + .await??; + + // FakePayment always responds with a correct answer to any GSB message. + // This behavior can be overridden by querying `FakePayment::message_channel` and setting the + // expected response. + // The function overrides the default message handler and returns a channel which yields all messages + // received. + let mut channel = node3 + .get_fake_payment()? + .message_channel::<AcceptInvoice>(Ok(Ack {})); + + // Accepting the Invoice in the real Payment module. + // Internally, a GSB message will be sent to node3 and captured by the channel. + let new_allocation = FakePayment::default_allocation(&agreement, BigDecimal::from(10u64))?; + let allocation = api.create_allocation(&new_allocation).await?; + api.accept_invoice( + &invoice.invoice_id, + &Acceptance { + total_amount_accepted: invoice.amount.clone(), + allocation_id: allocation.allocation_id.to_string(), + }, + ) + .await + .unwrap(); + + // Waiting for any message sent to `node3`. + // We expect that an `AcceptInvoice` message will be received after the previous REST API call.
+ let (_from, accept) = channel.recv().await.unwrap(); + assert_eq!(accept.invoice_id, invoice.invoice_id); Ok(()) } diff --git a/core/persistence/Cargo.toml b/core/persistence/Cargo.toml index e05d6a3931..b2b649272d 100644 --- a/core/persistence/Cargo.toml +++ b/core/persistence/Cargo.toml @@ -15,10 +15,10 @@ service = [ [dependencies] ya-client-model = { workspace = true, features = ["with-diesel"] } -ya-core-model = { workspace = true } -ya-service-api = { version = "0.1", optional = true } -ya-service-api-interfaces = { version = "0.2", optional = true } -ya-utils-process = { version = "0.3", features = ["lock"], optional = true } +ya-core-model.workspace = true +ya-service-api = { workspace = true, optional = true } +ya-service-api-interfaces = { workspace = true, optional = true } +ya-utils-process = { workspace = true, features = ["lock"], optional = true } anyhow = "1.0.26" bigdecimal = "0.2" diff --git a/core/persistence/src/types.rs b/core/persistence/src/types.rs index d96a6ef0de..1dd1b88e8c 100644 --- a/core/persistence/src/types.rs +++ b/core/persistence/src/types.rs @@ -132,11 +132,11 @@ pub enum Role { #[error("Invalid role string: {0}")] pub struct RoleParseError(pub String); -impl ToString for Role { - fn to_string(&self) -> String { +impl Display for Role { + fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { match self { - Role::Provider => "P".to_string(), - Role::Requestor => "R".to_string(), + Role::Provider => write!(f, "P"), + Role::Requestor => write!(f, "R"), } } } diff --git a/core/serv-api/Cargo.toml b/core/serv-api/Cargo.toml index 3f6d97e568..190f329a46 100644 --- a/core/serv-api/Cargo.toml +++ b/core/serv-api/Cargo.toml @@ -11,5 +11,5 @@ lazy_static = "1.4" serde = { version = "1.0", features = ["derive"] } url = "2.1.1" -ya-utils-cli = "0.1" -ya-core-model = { workspace = true } +ya-utils-cli.workspace = true +ya-core-model.workspace = true diff --git a/core/serv-api/derive/Cargo.toml b/core/serv-api/derive/Cargo.toml index 1f399fa945..911ee34a4c 100644 --- a/core/serv-api/derive/Cargo.toml +++ b/core/serv-api/derive/Cargo.toml @@ -17,11 +17,11 @@ strum_macros = "0.24" syn = { version = "1.0", features = ["full"] } [dev-dependencies] -ya-service-api-interfaces = "0.2" +ya-service-api-interfaces.workspace = true -actix-rt = "2.7" -actix-service = "2" -actix-web = "4" +actix-rt.workspace = true +actix-service.workspace = true +actix-web.workspace = true anyhow = "1.0" log = "0.4" structopt = "0.3" diff --git a/core/serv-api/web/Cargo.toml b/core/serv-api/web/Cargo.toml index 7683913e3a..54d3e350cd 100644 --- a/core/serv-api/web/Cargo.toml +++ b/core/serv-api/web/Cargo.toml @@ -8,8 +8,8 @@ edition = "2018" [dependencies] ya-client.workspace = true ya-core-model = { workspace = true, features = ["appkey"] } -ya-service-api = "0.1" -ya-service-bus = { workspace = true } +ya-service-api.workspace = true +ya-service-bus = { workspace = true } actix-cors = "0.6" actix-service = "2" @@ -23,11 +23,11 @@ structopt = "0.3" url = "2.1.1" [dev-dependencies] -ya-identity = "0.3" -ya-persistence = "0.3" +ya-identity.workspace = true +ya-persistence.workspace = true ya-sb-router = { workspace = true } -ya-service-api-derive = "0.2" -ya-service-api-interfaces = "0.2" +ya-service-api-derive.workspace = true +ya-service-api-interfaces.workspace = true actix-rt = "2.7" anyhow = "1.0" diff --git a/core/serv-api/web/src/middleware/auth/mod.rs b/core/serv-api/web/src/middleware/auth/mod.rs index 56533ae887..39a069cffa 100644 --- a/core/serv-api/web/src/middleware/auth/mod.rs +++ 
b/core/serv-api/web/src/middleware/auth/mod.rs @@ -84,12 +84,13 @@ where let cache = self.cache.clone(); let service = self.service.clone(); - // TODO: remove this hack; possibly by enabling creation of arbitrary appkey from CLI - if req.uri().to_string().starts_with("/metrics-api") - || req.uri().to_string().starts_with("/version") - { - log::debug!("skipping authorization for uri={}", req.uri()); - return Box::pin(service.borrow_mut().call(req)); + let allowed_uris = vec!["/metrics-api", "/version/get", "/dashboard"]; + + for uri in allowed_uris { + if req.uri().to_string().starts_with(uri) { + log::debug!("skipping authorization for uri={}", req.uri()); + return Box::pin(service.borrow_mut().call(req)); + } } Box::pin(async move { diff --git a/core/serv/src/main.rs b/core/serv/src/main.rs index 9960bed5c5..d1e0e87a19 100644 --- a/core/serv/src/main.rs +++ b/core/serv/src/main.rs @@ -1,9 +1,10 @@ #![allow(clippy::obfuscated_if_else)] -use actix_web::{middleware, web, App, HttpServer, Responder}; +use actix_web::{middleware, web, App, HttpResponse, HttpServer, Responder}; use anyhow::{Context, Result}; use futures::prelude::*; use metrics::{counter, gauge}; +use ya_healthcheck::HealthcheckService; #[cfg(feature = "static-openssl")] extern crate openssl_probe; @@ -53,6 +54,9 @@ use autocomplete::CompleteCommand; use ya_activity::TrackerRef; use ya_service_api_web::middleware::cors::AppKeyCors; +use ya_utils_consent::{ + consent_check_before_startup, set_consent_path_in_yagna_dir, ConsentService, +}; lazy_static::lazy_static! { static ref DEFAULT_DATA_DIR: String = DataDir::new(clap::crate_name!()).to_string(); @@ -241,6 +245,8 @@ impl TryFrom for ServiceContext { enum Services { #[enable(gsb, cli)] Db(PersistenceService), + #[enable(rest)] + Healthcheck(HealthcheckService), // Metrics service must be activated before all other services // to that will use it. Identity service is used by the Metrics, // so must be initialized before. 
@@ -261,6 +267,8 @@ enum Services { Activity(ActivityService), #[enable(gsb, rest, cli)] Payment(PaymentService), + #[enable(cli)] + Consent(ConsentService), #[enable(gsb)] SgxDriver(SgxService), #[enable(gsb, rest)] @@ -475,6 +483,7 @@ impl ServiceCommand { if !ctx.accept_terms { prompt_terms()?; } + match self { Self::Run(ServiceCommandOpts { api_url, @@ -541,6 +550,9 @@ impl ServiceCommand { let _lock = ProcLock::new(app_name, &ctx.data_dir)?.lock(std::process::id())?; + //before running yagna check consents + consent_check_before_startup(false)?; + ya_sb_router::bind_gsb_router(ctx.gsb_url.clone()) .await .context("binding service bus router")?; @@ -568,6 +580,8 @@ impl ServiceCommand { .wrap(middleware::Logger::default()) .wrap(auth::Auth::new(cors.cache())) .wrap(cors.cors()) + .route("/dashboard", web::get().to(redirect_to_dashboard)) + .route("/dashboard/{_:.*}", web::get().to(dashboard_serve)) .route("/me", web::get().to(me)) .service(forward_gsb); let rest = Services::rest(app, &context); @@ -710,6 +724,46 @@ async fn forward_gsb( Ok::<_, actix_web::Error>(web::Json(json_resp)) } +#[cfg(feature = "dashboard")] +#[derive(rust_embed::RustEmbed)] +#[folder = "dashboard"] +struct Asset; + +pub async fn redirect_to_dashboard() -> impl Responder { + #[cfg(feature = "dashboard")] + { + let target = "/dashboard/"; + log::debug!("Redirecting to endpoint: {target}"); + HttpResponse::Ok() + .status(actix_web::http::StatusCode::PERMANENT_REDIRECT) + .append_header((actix_web::http::header::LOCATION, target)) + .finish() + } + #[cfg(not(feature = "dashboard"))] + HttpResponse::NotFound().body("404 Not Found") +} + +pub async fn dashboard_serve(path: web::Path) -> impl Responder { + #[cfg(feature = "dashboard")] + { + let mut path = path.as_str(); + let mut content = Asset::get(path); + if content.is_none() && !path.contains('.') { + path = "index.html"; + content = Asset::get(path); + } + log::debug!("Serving frontend file: {path}"); + match content { + Some(content) => HttpResponse::Ok() + .content_type(mime_guess::from_path(path).first_or_octet_stream().as_ref()) + .body(content.data.into_owned()), + None => HttpResponse::NotFound().body("404 Not Found"), + } + } + #[cfg(not(feature = "dashboard"))] + HttpResponse::NotFound().body(format!("404 Not Found: {}", path)) +} + #[actix_rt::main] async fn main() -> Result<()> { dotenv::dotenv().ok(); @@ -719,6 +773,7 @@ async fn main() -> Result<()> { std::env::set_var(GSB_URL_ENV_VAR, args.gsb_url.as_str()); // FIXME + set_consent_path_in_yagna_dir()?; match args.run_command().await { Ok(()) => Ok(()), Err(err) => { diff --git a/core/version/Cargo.toml b/core/version/Cargo.toml index 68ca971a8d..40c9c35f1e 100644 --- a/core/version/Cargo.toml +++ b/core/version/Cargo.toml @@ -6,12 +6,14 @@ authors = ["Golem Factory "] edition = "2018" [dependencies] +ya-service-api-web.workspace = true ya-client.workspace = true -ya-compile-time-utils = "0.2" +ya-compile-time-utils.workspace = true ya-core-model = { workspace = true, features = ["version"] } -ya-persistence = "0.3" -ya-service-api = "0.1" -ya-service-api-interfaces = "0.2" +ya-net = { workspace = true, features = ["service"] } +ya-persistence.workspace = true +ya-service-api.workspace = true +ya-service-api-interfaces.workspace = true ya-service-bus = { workspace = true } actix-web = "4" @@ -27,3 +29,5 @@ serde_json = "1.0" structopt = "0.3.21" thiserror = "^1.0" tokio = { version = "1", features = ["time", "sync"] } +problem_details = "0.6.0" +http = "1.1.0" diff --git 
a/core/version/src/service/gsb.rs b/core/version/src/service/gsb.rs index 04ffb493b9..432251e544 100644 --- a/core/version/src/service/gsb.rs +++ b/core/version/src/service/gsb.rs @@ -26,10 +26,9 @@ async fn skip_version_gsb( _msg: version::Skip, ) -> RpcMessageResult { match db.as_dao::().skip_pending_release().await { - Ok(r) => Ok(r.map(|r| { - log::info!("{}", ReleaseMessage::Skipped(&r)); + Ok(r) => Ok(r.inspect(|r| { + log::info!("{}", ReleaseMessage::Skipped(r)); counter!("version.skip", 1); - r })), Err(e) => Err(e.to_string().into()), } diff --git a/core/version/src/service/rest.rs b/core/version/src/service/rest.rs index aca69be54f..30475eaf6f 100644 --- a/core/version/src/service/rest.rs +++ b/core/version/src/service/rest.rs @@ -1,11 +1,12 @@ -use crate::db::dao::ReleaseDAO; - -use ya_client::model::ErrorMessage; use ya_persistence::executor::DbExecutor; +use crate::db::dao::ReleaseDAO; + use actix_web::web::Data; use actix_web::{web, HttpResponse, Responder}; +use ya_client::model::ErrorMessage; + pub const VERSION_API_PATH: &str = "/version"; pub fn web_scope(db: DbExecutor) -> actix_web::Scope { diff --git a/core/vpn/Cargo.toml b/core/vpn/Cargo.toml index f4f217ad66..9b0066926c 100644 --- a/core/vpn/Cargo.toml +++ b/core/vpn/Cargo.toml @@ -7,15 +7,13 @@ edition = "2018" [dependencies] ya-core-model = { workspace = true, features = ["activity", "market"] } ya-client-model = { workspace = true, features = ["sgx"] } -ya-net = "0.3" -ya-persistence = "0.3" -ya-service-api = "0.1" -ya-service-api-interfaces = "0.2" -ya-service-api-web = "0.2" +ya-net.workspace = true +ya-persistence.workspace = true +ya-service-api.workspace = true +ya-service-api-interfaces.workspace = true +ya-service-api-web.workspace = true ya-service-bus = { workspace = true } -ya-utils-networking = { version = "0.2", default-features = false, features = [ - "vpn", -] } +ya-utils-networking = { workspace = true, default-features = false, features = [ "vpn", ] } ya-packet-trace = { git = "https://github.com/golemfactory/ya-packet-trace" } actix = "0.13" diff --git a/core/vpn/src/message.rs b/core/vpn/src/message.rs index bf6b83a999..8ced1e66c0 100644 --- a/core/vpn/src/message.rs +++ b/core/vpn/src/message.rs @@ -37,10 +37,12 @@ pub struct RemoveNode { pub id: String, } +#[allow(dead_code)] #[derive(Debug, Message)] #[rtype(result = "Result>")] pub struct GetConnections; +#[allow(dead_code)] #[derive(Message)] #[rtype(result = "Result")] pub struct Connect { @@ -73,6 +75,7 @@ pub struct Packet { #[rtype(result = "Result<()>")] pub struct Shutdown; +#[allow(dead_code)] #[derive(Debug, Message)] #[rtype(result = "Result<()>")] pub struct DataSent; diff --git a/dashboard/index.html b/dashboard/index.html new file mode 100644 index 0000000000..5bdc8c3c01 --- /dev/null +++ b/dashboard/index.html @@ -0,0 +1,11 @@ + + + + + Yagna Dashboard + + +

Yagna Dashboard

+

This is a placeholder. Place your files into the ./dashboard directory to serve them from yagna

+ + diff --git a/docs/legacy/development-plan.md b/docs/legacy/development-plan.md index efb28432fa..f7c5ed2576 100644 --- a/docs/legacy/development-plan.md +++ b/docs/legacy/development-plan.md @@ -85,7 +85,7 @@ Bugs should be reported in GitHub Issues. ### Technology Stack The programming language used in this project will be Rust (https://www.rust-lang.org/). -The 1.77.0 stable version of Rust compiler (`rustc`) should compile all source code without errors. +The 1.81.0 stable version of Rust compiler (`rustc`) should compile all source code without errors. For HTTP client/server code, Actix Web 1.0 (https://actix.rs) will be used. diff --git a/docs/provider/capabilities.md b/docs/provider/capabilities.md index e5ddaf928e..e60d8f00d1 100644 --- a/docs/provider/capabilities.md +++ b/docs/provider/capabilities.md @@ -2,10 +2,19 @@ ## Protocol -| Capability | Yagna package version | Backwards-compatible? | Description | -|-------------------------------|-----------------------|-----------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Multi-Activity Agreement | 0.5.0 | Yes | Negotiate ability to create multiple activities under single Agreement. Use (which?) property in Demand/Offer to indicate node's support for Multi-Activity. If counterparty does not support Multi-Activity, the node falls back to single Activity per Agreement behaviour. | -| Restart Proposal Negotiations | 0.7.0 | Yes | Agent is allowed to restart negotiations, by sending `Counter Proposal`, after he rejected Proposal at some point. Counter-party will receive regular `ProposalEvent` in this case. Only Agent rejecting Proposal has initiative in restarting negotiations, rejected Agent can only wait for this to happen. To indicate, that Proposal rejection isn't final and negotiations can be restarted later, Agent can set `golem.proposal.rejection.is-final` (bool) field in `Reason` structure. If this value is set to false, Agent can free any state related to this negotiation. The same field can be set in `Reason` sent in `Reject Agreement` operation. Requestor can send new counter Proposal after some period of time or propose the same Agreement for the second time. (No change to specification) | -| manifest-support | | | TODO | -| inet | | | TODO | -| start-entrypoint | | | TODO | + +| Capability | Yagna package version | Backwards-compatible? 
| Description | Property | +|-------------------------------|-----------------------|-----------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------| +| Multi-Activity Agreement | 0.5.0 | Yes | Negotiate ability to create multiple activities under single Agreement. Use `golem.srv.caps.multi-activity` property in Demand/Offer to indicate node's support for Multi-Activity. If counterparty does not support Multi-Activity, the node falls back to single Activity per Agreement behaviour. | golem.srv.caps.multi-activity | +| Restart Proposal Negotiations | 0.7.0 | Yes | Agent is allowed to restart negotiations, by sending `Counter Proposal`, after he rejected Proposal at some point. Counter-party will receive regular `ProposalEvent` in this case. Only Agent rejecting Proposal has initiative in restarting negotiations, rejected Agent can only wait for this to happen. To indicate, that Proposal rejection isn't final and negotiations can be restarted later, Agent can set `golem.proposal.rejection.is-final` (bool) field in `Reason` structure. If this value is set to false, Agent can free any state related to this negotiation. The same field can be set in `Reason` sent in `Reject Agreement` operation. Requestor can send new counter Proposal after some period of time or propose the same Agreement for the second time. (No change to specification) | | + + +## ExeUnit + + +| Capability | Yagna package version | Backwards-compatible? | Description | Property | +|----------------------------|-----------------------|-----------------------|---------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------| +| manifest-support | | | TODO | | +| inet | | | TODO | | +| start-entrypoint | | | TODO | | +| Command progress reporting | (not released) | Yes | ExeUnit can report progress of executed commands. [Specification](./exe-unit/command-progress.md) | `golem.activity.caps.transfer.report-progress` `golem.activity.caps.deploy.report-progress` | diff --git a/docs/provider/exe-unit/command-progress.md b/docs/provider/exe-unit/command-progress.md new file mode 100644 index 0000000000..6fe412c985 --- /dev/null +++ b/docs/provider/exe-unit/command-progress.md @@ -0,0 +1,49 @@ +# Command progress reporting + +ExeUnit behaves according to specification defined [here](https://golemfactory.github.io/golem-architecture/specs/command-progress.html) +and support progress reporting for commands: `deploy` and `transfer`. + +This document aims to describe implementation details not covered by specification. 
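+
+The sketch below is illustrative only and not part of the specification: it shows one way progress events can be observed on the provider side through the `DeployImage::forward_progress` helper from `ya-transfer`. The function name `deploy_with_progress` and the variable names are placeholders; it assumes an already-started `TransferService` actor, a valid `hash://...` package URL, and a local task context (e.g. the actix runtime).
+
+```rust
+use actix::Addr;
+use futures::channel::mpsc;
+use futures::SinkExt;
+use tokio_stream::StreamExt;
+
+use ya_client_model::activity::exe_script_command::ProgressArgs;
+use ya_client_model::activity::CommandProgress;
+use ya_transfer::transfer::{DeployImage, TransferService};
+
+/// Minimal sketch (assumptions above): deploy an image and log progress events.
+async fn deploy_with_progress(
+    addr: Addr<TransferService>,
+    task_package: &str,
+) -> anyhow::Result<()> {
+    let (tx, mut rx) = mpsc::channel::<CommandProgress>(15);
+
+    // Ask the transfer service to forward progress events into our channel.
+    let mut msg = DeployImage::with_package(task_package);
+    msg.forward_progress(
+        &ProgressArgs::default(),
+        tx.sink_map_err(|e| ya_transfer::error::Error::Other(e.to_string())),
+    );
+
+    // Run the deploy in the background; progress events arrive while it runs.
+    tokio::task::spawn_local(async move {
+        let _ = addr.send(msg).await;
+    });
+
+    while let Some(event) = rx.next().await {
+        // `progress` is a `(current, total)` pair expressed in bytes.
+        log::info!("deploy progress: {} / {:?}", event.progress.0, event.progress.1);
+    }
+    Ok(())
+}
+```
+
+Internally the events travel through a `tokio` broadcast channel carried in `ProgressConfig` and are throttled by `update-interval` (default `1s`), so a consumer may observe fewer events than there were progress updates.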
+ +## Specification + + +| Name | Description | +|-----------------------------|----------------------| +| Minimum ExeUnit version | {TODO} | +| Minimum Runtime API version | Always compatible | +| Minimum yagna version | {TODO} | +| Minimum provider version | Always compatible | +| Supported commands | `deploy`, `transfer` | + + +## [ProgressArgs](https://golemfactory.github.io/ya-client/index.html?urls.primaryName=Activity%20API#/model-ProgressArgs) + +ExeUnit supports only `update-interval`. If value is not set, `1s` default will be used. + +`update-step` is not implemented. + +## [Runtime event](https://golemfactory.github.io/ya-client/index.html?urls.primaryName=Activity%20API#model-RuntimeEventKindProgress) + +### Steps + +`Deploy` and `transfer` command consist of only single step. + +### Progress + +- Progress is reported as `Bytes`. Fields is never a `None`. +- Size of file is always checked and put as second element of tuple. +- Initially `Size` element of tuple is set to `None` and if progress with `message` field is sent + than it can be received by Requestor agent + +### Message + +Two messages are currently possible: +- `Deployed image from cache` +- `Retry in {}s because of error: {err}` - indicates error during transfer, which will result in retry. + +When sending message, the rest of `CommandProgress` structure fields will be set to latest values. + +## Requestor Example + +PoC implementation using yapapi: https://github.com/golemfactory/yapapi/pull/1153 diff --git a/docs/readme.md b/docs/readme.md index 3de0b8ae24..7407a538c0 100644 --- a/docs/readme.md +++ b/docs/readme.md @@ -1,4 +1,23 @@ # Developer documentation -[Provider](./provider/architecture.md) -[Payment Driver](./../core/payment-driver/erc20/Readme.md) \ No newline at end of file +- [Architecture](https://golemfactory.github.io/golem-architecture/) +- [REST API specification](https://golemfactory.github.io/ya-client/) +- Developer guides + - [Installation](./provider/overview.md#installation) + - [Logging guidelines](./logging-guidelines.md) +- Implementation documentation + - Overview + - [Provider](./provider/architecture.md) + - [ExeUnit](./provider/exe-unit/exe-units.md) + - Yagna + - Identity + - Market + - Activity + - Payments + - [Payment Driver](./../core/payment-driver/erc20/Readme.md) + - Net + - GSB + - VPN + - Runtimes + - ya-runtime-vm + - ya-runtime-wasm \ No newline at end of file diff --git a/docs/yagna/capabilities.md b/docs/yagna/capabilities.md index 2844de7103..fd867bb1ec 100644 --- a/docs/yagna/capabilities.md +++ b/docs/yagna/capabilities.md @@ -6,6 +6,7 @@ Capabilities requiring Provider agent support are listed [here](../provider/capa ## Yagna API -| Capability | Yagna package version | Backwards-compatible? | Description | -|-------------|-----------------------|-----------------------|----------------------------------------------------------------------------| -| Cors Policy | 0.12.0 | Yes | Yagna is able to respond with Cors headers. [Spec](./capabilities/cors.md) | +| Capability | Yagna package version | Backwards-compatible? | Description | +|----------------------------|-----------------------|-----------------------|---------------------------------------------------------------------------------------------------| +| Cors Policy | 0.12.0 | Yes | Yagna is able to respond with Cors headers. [Spec](./capabilities/cors.md) | +| Command progress reporting | (not released) | Yes | ExeUnit can report progress of executed commands. 
[Specification](./exe-unit/command-progress.md) | \ No newline at end of file diff --git a/exe-unit/Cargo.toml b/exe-unit/Cargo.toml index eb89c0edfe..9d7bfa0b45 100644 --- a/exe-unit/Cargo.toml +++ b/exe-unit/Cargo.toml @@ -1,8 +1,8 @@ [package] -name = "ya-exe-unit" -version = "0.4.0" authors = ["Golem Factory "] edition = "2018" +name = "ya-exe-unit" +version = "0.4.0" [lib] name = "ya_exe_unit" @@ -13,64 +13,66 @@ name = "exe-unit" path = "src/bin.rs" [features] -default = ['compat-deployment'] compat-deployment = [] +default = ['compat-deployment'] +packet-trace-enable = ["ya-packet-trace/enable"] sgx = [ - 'graphene-sgx', - 'openssl/vendored', - 'secp256k1/rand', - 'ya-client-model/sgx', - 'ya-core-model/sgx', - 'ya-transfer/sgx', + 'graphene-sgx', + 'openssl/vendored', + 'secp256k1/rand', + 'ya-client-model/sgx', + 'ya-core-model/sgx', + 'ya-transfer/sgx', ] -packet-trace-enable = ["ya-packet-trace/enable"] -framework-test = ["ya-transfer/framework-test"] +framework-test = [] [dependencies] -ya-agreement-utils = { workspace = true } -ya-manifest-utils = { version = "0.2" } +ya-agreement-utils = {workspace = true} ya-client-model.workspace = true -ya-compile-time-utils = "0.2" -ya-core-model = { workspace = true, features = ["activity", "appkey"] } -ya-runtime-api = { version = "0.7", path = "runtime-api", features = [ - "server", -] } -ya-service-bus = { workspace = true } -ya-transfer = "0.3" -ya-utils-path = "0.1" +ya-compile-time-utils.workspace = true +ya-core-model = {workspace = true, features = ["activity", "appkey"]} +ya-counters = {path = "./components/counters", features = ["os"]} +ya-gsb-http-proxy = {path = "../exe-unit/components/gsb-http-proxy"} +ya-manifest-utils.workspace = true +ya-packet-trace = {git = "https://github.com/golemfactory/ya-packet-trace"} +ya-runtime-api = {version = "0.7", path = "runtime-api", features = [ + "server", +]} +ya-service-bus = {workspace = true} +ya-std-utils.workspace = true +ya-transfer.path = "components/transfer" ya-utils-futures.workspace = true -ya-std-utils = "0.1" -ya-utils-networking = { version = "0.2", default-features = false, features = [ +ya-utils-networking = { workspace = true, default-features = false, features = [ "dns", "vpn", ] } -ya-utils-process = "0.3" -ya-packet-trace = { git = "https://github.com/golemfactory/ya-packet-trace" } -ya-gsb-http-proxy = { path = "../exe-unit/components/gsb-http-proxy" } -ya-counters = { path = "../exe-unit/components/counters", features = ["os"] } +ya-utils-process.workspace = true +ya-utils-path.workspace = true + -actix = { version = "0.13", default-features = false } + +actix = {version = "0.13", default-features = false} actix-rt = "2.7" anyhow = "1.0" async-trait = "0.1.24" bytes = "1" chrono = "0.4" derivative = "2.1" -derive_more = { workspace = true } +derive_more = {workspace = true} dotenv = "0.15.0" -flexi_logger = { version = "0.22", features = ["colors"] } +flexi_logger = {version = "0.22", features = ["colors"]} futures = "0.3" -graphene-sgx = { version = "0.3.3", optional = true } +graphene-sgx = {version = "0.3.3", optional = true} hex = "0.4.2" ipnet = "2.3" lazy_static = "1.4.0" log = "0.4" -openssl = { workspace = true, optional = true } +openssl = {workspace = true, optional = true} rand = "0.8.5" regex = "1.5" -reqwest = { version = "0.11", optional = false, features = ["stream"] } -secp256k1 = { version = "0.27.0", optional = true } -serde = { version = "^1.0", features = ["derive"] } +reqwest = {version = "0.11", optional = false, features = ["stream"]} 
+secp256k1 = {version = "0.27.0", optional = true} +serde = {version = "^1.0", features = ["derive"]} serde_json = "1.0" serde_yaml = "0.8" sha3 = "0.10" @@ -79,34 +81,39 @@ socket2 = "0.4" structopt = "0.3" thiserror = "1.0" # keep the "rt-multi-thread" feature -tokio = { version = "1", features = [ - "process", - "signal", - "time", - "net", - "rt-multi-thread", -] } -tokio-util = { version = "0.7.2", features = ["codec", "net"] } -tokio-stream = { version = "0.1.8", features = ["io-util", "sync"] } +async-stream = "0.3.5" +tokio = {version = "1", features = [ + "process", + "signal", + "time", + "net", + "rt-multi-thread", +]} +tokio-stream = {version = "0.1.8", features = ["io-util", "sync"]} +tokio-util = {version = "0.7.2", features = ["codec", "net"]} +trust-dns-resolver = {workspace = true} url = "2.1" yansi = "0.5.0" -trust-dns-resolver = { workspace = true } -async-stream = "0.3.5" [dev-dependencies] -ya-runtime-api = { version = "0.7", path = "runtime-api", features = [ - "codec", - "server", -] } -ya-sb-router = { workspace = true } +ya-runtime-api = {version = "0.7", path = "runtime-api", features = [ + "codec", + "server", +]} +ya-sb-router = {workspace = true} actix-files = "0.6" actix-web = "4" env_logger = "0.7" rustyline = "7.0.0" +serial_test = {git = "https://github.com/tworec/serial_test.git", branch = "actix_rt_test", features = ["actix-rt2"]} sha3 = "0.10" shell-words = "1.0.0" tempdir = "0.3.7" +test-context = "0.1.4" + +ya-framework-basic.workspace = true +ya-mock-runtime = {path = "components/mock-runtime"} [lints] workspace = true diff --git a/exe-unit/components/counters/Cargo.toml b/exe-unit/components/counters/Cargo.toml index 585fcb827a..f6cff54e44 100644 --- a/exe-unit/components/counters/Cargo.toml +++ b/exe-unit/components/counters/Cargo.toml @@ -6,7 +6,7 @@ description = "Runtime counters generating data used to collect activity metrics [dependencies] ya-agreement-utils = { workspace = true } -ya-utils-process = { version = "0.3", optional = true } +ya-utils-process = { workspace = true, optional = true } anyhow = "1.0" actix = "0.13" diff --git a/exe-unit/components/gsb-http-proxy/Cargo.toml b/exe-unit/components/gsb-http-proxy/Cargo.toml index 812d35ead0..4966d846e8 100644 --- a/exe-unit/components/gsb-http-proxy/Cargo.toml +++ b/exe-unit/components/gsb-http-proxy/Cargo.toml @@ -43,5 +43,5 @@ mockito = "1.2" serial_test = { git = "https://github.com/tworec/serial_test.git", branch = "actix_rt_test", features = ["actix-rt2"] } test-context = "0.1.4" -ya-framework-basic = { version = "0.1" } +ya-framework-basic.workspace = true ya-sb-router = "0.6.2" diff --git a/exe-unit/components/gsb-http-proxy/tests/test_proxy.rs b/exe-unit/components/gsb-http-proxy/tests/test_proxy.rs index 084485bd5e..c0a13e7d01 100644 --- a/exe-unit/components/gsb-http-proxy/tests/test_proxy.rs +++ b/exe-unit/components/gsb-http-proxy/tests/test_proxy.rs @@ -8,7 +8,6 @@ use ya_framework_basic::async_drop::DroppableTestContext; use ya_gsb_http_proxy::gsb_to_http::GsbToHttpProxy; use ya_gsb_http_proxy::http_to_gsb::{BindingMode, HttpToGsbProxy}; -#[cfg_attr(not(feature = "framework-test"), ignore)] #[test_context(DroppableTestContext)] #[serial_test::serial] pub async fn test_gsb_http_proxy(ctx: &mut DroppableTestContext) { @@ -29,7 +28,6 @@ pub async fn test_gsb_http_proxy(ctx: &mut DroppableTestContext) { assert_eq!(r, "correct"); } -#[cfg_attr(not(feature = "framework-test"), ignore)] #[test_context(DroppableTestContext)] #[serial_test::serial] pub async fn 
test_gsb_http_streaming_proxy(ctx: &mut DroppableTestContext) { diff --git a/exe-unit/components/mock-runtime/Cargo.toml b/exe-unit/components/mock-runtime/Cargo.toml new file mode 100644 index 0000000000..e35a2ab04a --- /dev/null +++ b/exe-unit/components/mock-runtime/Cargo.toml @@ -0,0 +1,43 @@ +[package] +authors = ["Golem Factory "] +description = "Mock runtime for testing purposes and set of libraries for testing ExeUnits in tests." +edition = "2021" +name = "ya-mock-runtime" +version = "0.1.0" + +[lib] +path = "src/lib.rs" + +[[bin]] +name = "ya-mock-runtime" +path = "src/main.rs" + +[dependencies] +anyhow = "1.0.31" +bytes = "1.0" +env_logger = "0.10" +futures = {version = "0.3"} +log = "0.4" +serde = {version = "1.0", features = ["derive"]} +serde_json = "1.0" +thiserror = "1.0" +tokio = {version = "1", features = ["io-std", "rt", "process", "sync", "macros", "time"]} +tokio-util = {version = "0.7", features = ["codec"]} +url = "2.3" + +ya-runtime-api = "0.7" + +# Dependancies for ExeUnit testing utils +ya-client-model.workspace = true +ya-core-model = {workspace = true, features = ["activity", "appkey"]} +ya-exe-unit.workspace = true +ya-framework-basic.workspace = true +ya-sb-router.workspace = true +ya-service-bus = {workspace = true} + +actix = {version = "0.13", default-features = false} +async-trait = "0.1.77" +hex = "0.4.3" +portpicker = "0.1.1" +rand = "0.8.5" +uuid = {version = "0.8.2", features = ["v4"]} diff --git a/exe-unit/components/mock-runtime/resources/mock-runtime-descriptor.json b/exe-unit/components/mock-runtime/resources/mock-runtime-descriptor.json new file mode 100644 index 0000000000..93f43f2d54 --- /dev/null +++ b/exe-unit/components/mock-runtime/resources/mock-runtime-descriptor.json @@ -0,0 +1,9 @@ +[ + { + "name": "ya-mock-runtime", + "version": "0.1.0", + "supervisor-path": "exe-unit", + "runtime-path": "ya-mock-runtime", + "description": "Mock runtime for testing purposes" + } +] \ No newline at end of file diff --git a/exe-unit/runtime-api/examples/runtime-server-mock.rs b/exe-unit/components/mock-runtime/src/lib.rs similarity index 56% rename from exe-unit/runtime-api/examples/runtime-server-mock.rs rename to exe-unit/components/mock-runtime/src/lib.rs index 496b741a17..ee9ed1bab4 100644 --- a/exe-unit/runtime-api/examples/runtime-server-mock.rs +++ b/exe-unit/components/mock-runtime/src/lib.rs @@ -1,20 +1,19 @@ +pub mod testing; + use futures::future::BoxFuture; use futures::prelude::*; use futures::FutureExt; use std::clone::Clone; -use std::env; use std::sync::{Arc, Mutex}; use std::time::Duration; use ya_runtime_api::server::*; -// server - -struct RuntimeMock +pub struct RuntimeMock where H: RuntimeHandler, { - handler: H, + pub handler: H, } impl RuntimeService for RuntimeMock { @@ -61,14 +60,15 @@ impl RuntimeService for RuntimeMock { // client // holds last received status -struct EventMock(Arc>); +#[derive(Clone, Default)] +pub struct EventMock(Arc>); impl EventMock { - fn new() -> Self { - Self(Arc::new(Mutex::new(Default::default()))) + pub fn new() -> Self { + Self::default() } - fn get_last_status(&self) -> ProcessStatus { + pub fn get_last_status(&self) -> ProcessStatus { self.0.lock().unwrap().clone() } } @@ -84,46 +84,3 @@ impl RuntimeHandler for EventMock { future::ready(()).boxed() } } - -impl Clone for EventMock { - fn clone(&self) -> Self { - Self(self.0.clone()) - } -} - -#[tokio::main] -async fn main() -> anyhow::Result<()> { - if env::var("RUST_LOG").is_err() { - env::set_var("RUST_LOG", "debug") - } - env_logger::init(); - if 
env::var("X_SERVER").is_ok() { - run(|event_emitter| RuntimeMock { - handler: event_emitter, - }) - .await - } else { - use tokio::process::Command; - let exe = env::current_exe().unwrap(); - - let mut cmd = Command::new(exe); - cmd.env("X_SERVER", "1"); - let events = EventMock::new(); - let c = spawn(cmd, events.clone()).await?; - log::debug!("hello_result={:?}", c.hello("0.0.0x").await); - let run = RunProcess { - bin: "sleep".to_owned(), - args: vec!["10".to_owned()], - ..Default::default() - }; - let sleep_1 = c.run_process(run.clone()); - let sleep_2 = c.run_process(run.clone()); - let sleep_3 = c.run_process(run); - log::info!("start sleep1"); - log::info!("sleep1={:?}", sleep_1.await); - log::info!("start sleep2 sleep3"); - log::info!("sleep23={:?}", future::join(sleep_2, sleep_3).await); - log::info!("last status: {:?}", events.get_last_status()); - } - Ok(()) -} diff --git a/exe-unit/components/mock-runtime/src/main.rs b/exe-unit/components/mock-runtime/src/main.rs new file mode 100644 index 0000000000..509263c203 --- /dev/null +++ b/exe-unit/components/mock-runtime/src/main.rs @@ -0,0 +1,42 @@ +use futures::future; +use std::env; + +use ya_mock_runtime::{EventMock, RuntimeMock}; +use ya_runtime_api::server::{run, spawn, RunProcess, RuntimeService}; + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + if env::var("RUST_LOG").is_err() { + env::set_var("RUST_LOG", "debug") + } + env_logger::init(); + if env::var("X_SERVER").is_ok() { + run(|event_emitter| RuntimeMock { + handler: event_emitter, + }) + .await + } else { + use tokio::process::Command; + let exe = env::current_exe().unwrap(); + + let mut cmd = Command::new(exe); + cmd.env("X_SERVER", "1"); + let events = EventMock::new(); + let c = spawn(cmd, events.clone()).await?; + log::debug!("hello_result={:?}", c.hello("0.0.0x").await); + let run = RunProcess { + bin: "sleep".to_owned(), + args: vec!["10".to_owned()], + ..Default::default() + }; + let sleep_1 = c.run_process(run.clone()); + let sleep_2 = c.run_process(run.clone()); + let sleep_3 = c.run_process(run); + log::info!("start sleep1"); + log::info!("sleep1={:?}", sleep_1.await); + log::info!("start sleep2 sleep3"); + log::info!("sleep23={:?}", future::join(sleep_2, sleep_3).await); + log::info!("last status: {:?}", events.get_last_status()); + } + Ok(()) +} diff --git a/exe-unit/components/mock-runtime/src/testing.rs b/exe-unit/components/mock-runtime/src/testing.rs new file mode 100644 index 0000000000..7b2c833428 --- /dev/null +++ b/exe-unit/components/mock-runtime/src/testing.rs @@ -0,0 +1,3 @@ +mod exe_unit_ext; + +pub use exe_unit_ext::{create_exe_unit, exe_unit_config, ExeUnitExt, ExeUnitHandle}; diff --git a/exe-unit/components/mock-runtime/src/testing/exe_unit_ext.rs b/exe-unit/components/mock-runtime/src/testing/exe_unit_ext.rs new file mode 100644 index 0000000000..cebfc03cda --- /dev/null +++ b/exe-unit/components/mock-runtime/src/testing/exe_unit_ext.rs @@ -0,0 +1,238 @@ +use actix::Addr; +use anyhow::{anyhow, bail}; +use std::fs; +use std::path::{Path, PathBuf}; +use std::str::FromStr; +use std::sync::Arc; +use std::time::Duration; +use tokio::sync::broadcast; +use url::Url; +use uuid::Uuid; + +use ya_client_model::activity::exe_script_command::ProgressArgs; +use ya_client_model::activity::{ExeScriptCommand, State, StatePair}; +use ya_core_model::activity; +use ya_exe_unit::message::{GetBatchResults, GetState, GetStateResponse, Shutdown, ShutdownReason}; +use ya_exe_unit::runtime::process::RuntimeProcess; +use ya_exe_unit::{exe_unit, ExeUnit, 
ExeUnitConfig, FinishNotifier, RunArgs, SuperviseCli}; +use ya_framework_basic::async_drop::{AsyncDroppable, DroppableTestContext}; +use ya_service_bus::RpcEnvelope; + +#[async_trait::async_trait] +pub trait ExeUnitExt { + async fn exec( + &self, + batch_id: Option, + exe_script: Vec, + ) -> anyhow::Result; + + async fn deploy(&self, progress: Option) -> anyhow::Result; + async fn start(&self, args: Vec) -> anyhow::Result; + + async fn wait_for_batch(&self, batch_id: &str) -> anyhow::Result<()>; + + /// Waits until ExeUnit will be ready to receive commands. + async fn await_init(&self) -> anyhow::Result<()>; +} + +#[derive(Debug, Clone)] +pub struct ExeUnitHandle { + pub addr: Addr>, + pub config: Arc, +} + +impl ExeUnitHandle { + pub fn new( + addr: Addr>, + config: ExeUnitConfig, + ) -> anyhow::Result { + Ok(ExeUnitHandle { + addr, + config: Arc::new(config), + }) + } + + pub async fn finish_notifier(&self) -> anyhow::Result> { + Ok(self.addr.send(FinishNotifier {}).await??) + } + + pub async fn shutdown(&self) -> anyhow::Result<()> { + let finish = self.finish_notifier().await; + + log::info!("Waiting for shutdown.."); + + self.addr + .send(Shutdown(ShutdownReason::Finished)) + .await + .ok(); + if let Ok(mut finish) = finish { + finish.recv().await?; + } + Ok(()) + } +} + +#[async_trait::async_trait] +impl AsyncDroppable for ExeUnitHandle { + async fn async_drop(&self) { + self.shutdown().await.ok(); + } +} + +pub fn exe_unit_config( + temp_dir: &Path, + agreement_path: &Path, + binary: impl AsRef, +) -> ExeUnitConfig { + ExeUnitConfig { + args: RunArgs { + agreement: agreement_path.to_path_buf(), + cache_dir: temp_dir.join("cache"), + work_dir: temp_dir.join("work"), + }, + binary: binary.as_ref().to_path_buf(), + runtime_args: vec![], + supervise: SuperviseCli { + hardware: false, + image: false, + }, + sec_key: None, + requestor_pub_key: None, + service_id: Some(Uuid::new_v4().to_simple().to_string()), + report_url: None, + } +} + +pub async fn create_exe_unit( + config: ExeUnitConfig, + ctx: &mut DroppableTestContext, +) -> anyhow::Result { + // If activity id was provided, ExeUnit will bind endpoints on remote GSB. + // For this to work we need to setup gsb router. + if config.service_id.is_some() { + let gsb_url = match std::env::consts::FAMILY { + "unix" => Url::from_str(&format!( + "unix://{}/gsb.sock", + config.args.work_dir.display() + ))?, + _ => Url::from_str(&format!( + "tcp://127.0.0.1:{}", + portpicker::pick_unused_port().ok_or(anyhow!("No ports free"))? + ))?, + }; + + if gsb_url.scheme() == "unix" { + let dir = PathBuf::from_str(gsb_url.path())? + .parent() + .map(|path| path.to_path_buf()) + .ok_or(anyhow!("`gsb_url` unix socket has no parent directory."))?; + fs::create_dir_all(dir)?; + } + + // GSB takes url from this variable and we can't set it directly. 
+ std::env::set_var("GSB_URL", gsb_url.to_string()); + ya_sb_router::bind_gsb_router(Some(gsb_url.clone())) + .await + .map_err(|e| anyhow!("Error binding service bus router to '{}': {e}", &gsb_url))?; + } + + let exe = exe_unit(config.clone()).await.unwrap(); + let handle = ExeUnitHandle::new(exe, config)?; + ctx.register(handle.clone()); + Ok(handle) +} + +#[async_trait::async_trait] +impl ExeUnitExt for ExeUnitHandle { + async fn exec( + &self, + batch_id: Option, + exe_script: Vec, + ) -> anyhow::Result { + log::debug!("Executing commands: {:?}", exe_script); + + let batch_id = if let Some(batch_id) = batch_id { + batch_id + } else { + hex::encode(rand::random::<[u8; 16]>()) + }; + + let msg = activity::Exec { + activity_id: self.config.service_id.clone().unwrap_or_default(), + batch_id: batch_id.clone(), + exe_script, + timeout: None, + }; + self.addr + .send(RpcEnvelope::with_caller(String::new(), msg)) + .await + .map_err(|e| anyhow!("Unable to execute exe script: {e:?}"))? + .map_err(|e| anyhow!("Unable to execute exe script: {e:?}"))?; + Ok(batch_id) + } + + async fn deploy(&self, progress: Option) -> anyhow::Result { + Ok(self + .exec( + None, + vec![ExeScriptCommand::Deploy { + net: vec![], + progress, + env: Default::default(), + hosts: Default::default(), + hostname: None, + volumes: None, + }], + ) + .await + .unwrap()) + } + + async fn start(&self, args: Vec) -> anyhow::Result { + Ok(self + .exec(None, vec![ExeScriptCommand::Start { args }]) + .await + .unwrap()) + } + + async fn wait_for_batch(&self, batch_id: &str) -> anyhow::Result<()> { + let delay = Duration::from_secs_f32(0.5); + loop { + match self + .addr + .send(GetBatchResults { + batch_id: batch_id.to_string(), + idx: None, + }) + .await + { + Ok(results) => { + if let Some(last) = results.0.last() { + if last.is_batch_finished { + return Ok(()); + } + } + } + Err(e) => bail!("Waiting for batch: {batch_id}. 
Error: {e}"), + } + tokio::time::sleep(delay).await; + } + } + + async fn await_init(&self) -> anyhow::Result<()> { + let delay = Duration::from_secs_f32(0.3); + loop { + match self.addr.send(GetState).await { + Ok(GetStateResponse(StatePair(State::Initialized, None))) => break, + Ok(GetStateResponse(StatePair(State::Terminated, _))) + | Ok(GetStateResponse(StatePair(_, Some(State::Terminated)))) + | Err(_) => { + log::error!("ExeUnit has terminated"); + bail!("ExeUnit has terminated"); + } + _ => tokio::time::sleep(delay).await, + } + } + Ok(()) + } +} diff --git a/exe-unit/components/transfer/Cargo.toml b/exe-unit/components/transfer/Cargo.toml index f33a27f60b..239f6bf265 100644 --- a/exe-unit/components/transfer/Cargo.toml +++ b/exe-unit/components/transfer/Cargo.toml @@ -6,9 +6,8 @@ authors = ["Golem Factory "] edition = "2018" [dependencies] -ya-client = { workspace = true } -ya-client-model = { workspace = true } -ya-core-model = { workspace = true, features = ["gftp"] } +ya-client-model.workspace = true +ya-core-model.workspace = true ya-service-bus = { workspace = true } ya-utils-path = { version = "0.1", path = "../../../utils/path" } ya-utils-futures.workspace = true @@ -46,6 +45,7 @@ tokio-util = { version = "0.7", features = ["io"] } url = "2.1.1" walkdir = "2.3.1" async-trait = "0.1.74" +tokio-stream = { version = "0.1.14", features = ["sync"] } [target.'cfg(target_family = "unix")'.dependencies] awc = { version = "3", features = ["openssl"] } @@ -62,7 +62,7 @@ sgx = [ 'ya-core-model/sgx', 'reqwest/trust-dns', ] -framework-test = [] +system-test = [] [dependencies.zip] version = "0.5.6" @@ -84,5 +84,5 @@ structopt = "0.3.15" test-context = "0.1.4" test-case = "3" -ya-framework-basic = { version = "0.1" } +ya-framework-basic.workspace = true ya-exe-unit = { version = "0.4", path = "../../../exe-unit" } diff --git a/exe-unit/components/transfer/src/http.rs b/exe-unit/components/transfer/src/http.rs index 97bb1b2026..03e1acfd7e 100644 --- a/exe-unit/components/transfer/src/http.rs +++ b/exe-unit/components/transfer/src/http.rs @@ -119,10 +119,6 @@ impl TransferProvider for HttpTransferProvider { url: &Url, ctx: &TransferContext, ) -> LocalBoxFuture<'a, Result<(), Error>> { - if ctx.state.offset() == 0 { - return futures::future::ok(()).boxed_local(); - } - let url = url.clone(); let state = ctx.state.clone(); @@ -137,8 +133,13 @@ impl TransferProvider for HttpTransferProvider { .get(header::CONTENT_LENGTH) .and_then(|v| v.to_str().ok().and_then(|s| u64::from_str(s).ok())); + match &size { + None => log::info!("File size unknown. 
Http source server didn't respond with CONTENT_LENGTH header."), + Some(size) => log::info!("Http source size reported by server: {size} B"), + }; + state.set_size(size); - if !ranges { + if state.offset() != 0 && !ranges { log::warn!("Transfer resuming is not supported by the server"); state.set_offset(0); } diff --git a/exe-unit/components/transfer/src/lib.rs b/exe-unit/components/transfer/src/lib.rs index 34e055680a..4cca1ff186 100644 --- a/exe-unit/components/transfer/src/lib.rs +++ b/exe-unit/components/transfer/src/lib.rs @@ -25,19 +25,19 @@ use futures::prelude::*; use futures::task::{Context, Poll}; use url::Url; -use crate::error::Error; - pub use crate::archive::{archive, extract, ArchiveFormat}; pub use crate::container::ContainerTransferProvider; +use crate::error::Error; pub use crate::file::{DirTransferProvider, FileTransferProvider}; pub use crate::gftp::GftpTransferProvider; +use crate::hash::with_hash_stream; pub use crate::http::HttpTransferProvider; pub use crate::location::{TransferUrl, UrlExt}; +use crate::progress::{progress_report_channel, ProgressReporter}; pub use crate::progress::{wrap_sink_with_progress_reporting, wrap_stream_with_progress_reporting}; pub use crate::retry::Retry; pub use crate::traverse::PathTraverse; -use crate::hash::with_hash_stream; use ya_client_model::activity::TransferArgs; /// Transfers data from `stream` to a `TransferSink` @@ -73,17 +73,22 @@ where log::debug!("Transferring from offset: {}", ctx.state.offset()); let stream = with_hash_stream(src.source(&src_url.url, ctx), src_url, dst_url, ctx)?; - let sink = dst.destination(&dst_url.url, ctx); + let sink = progress_report_channel(dst.destination(&dst_url.url, ctx), ctx); transfer(stream, sink).await?; Ok::<_, Error>(()) }; match fut.await { - Ok(val) => return Ok(val), + Ok(val) => { + return Ok(val); + } Err(err) => match ctx.state.delay(&err) { Some(delay) => { - log::warn!("Retrying in {}s because: {}", delay.as_secs_f32(), err); + let msg = format!("Retry in {}s because of error: {err}", delay.as_secs_f32()); + log::warn!("{}", msg); + + ctx.progress.report_message(msg); tokio::time::sleep(delay).await; } None => return Err(err), @@ -296,6 +301,7 @@ impl From> for TransferData { pub struct TransferContext { pub state: TransferState, pub args: TransferArgs, + pub progress: ProgressReporter, } impl TransferContext { @@ -304,7 +310,15 @@ impl TransferContext { let state = TransferState::default(); state.set_offset(offset); - Self { args, state } + Self { + args, + state, + progress: ProgressReporter::default(), + } + } + + pub fn reporter(&self) -> ProgressReporter { + self.progress.clone() } } diff --git a/exe-unit/components/transfer/src/progress.rs b/exe-unit/components/transfer/src/progress.rs index 71edaa7419..03f1d6fd04 100644 --- a/exe-unit/components/transfer/src/progress.rs +++ b/exe-unit/components/transfer/src/progress.rs @@ -1,11 +1,112 @@ use crate::error::Error; use crate::{abortable_sink, abortable_stream, TransferSink, TransferStream}; use crate::{TransferContext, TransferData}; + use futures::{SinkExt, StreamExt, TryFutureExt}; +use std::sync::Arc; +use std::time::Duration; use tokio::task::spawn_local; +use tokio::time::Instant; + +use ya_client_model::activity::exe_script_command::ProgressArgs; +use ya_client_model::activity::CommandProgress; type Stream = TransferStream; +#[derive(Debug, Clone)] +pub struct ProgressConfig { + /// Channel for watching for transfer progress. 
+ pub progress: tokio::sync::broadcast::Sender, + pub progress_args: ProgressArgs, +} + +#[derive(Default, Clone)] +pub struct ProgressReporter { + config: ProgressArgs, + inner: Arc>>, +} + +struct ProgressImpl { + pub report: tokio::sync::broadcast::Sender, + pub last: CommandProgress, + pub last_send: Instant, +} + +impl ProgressReporter { + pub fn next_step(&self) { + self.inner.lock().unwrap().as_mut().map(|inner| { + inner.last.step.0 += 1; + inner.last.progress = (0, None); + inner.last_send = Instant::now(); + inner + .report + .send(CommandProgress { + message: None, + ..inner.last.clone() + }) + .ok() + }); + } + + /// TODO: implement `update_step` + pub fn report_progress(&self, progress: u64, size: Option) { + let update_interval: Duration = self + .config + .update_interval + .map(Into::into) + .unwrap_or(Duration::from_secs(1)); + let _update_step = self.config.update_step; + + if let Some(inner) = self.inner.lock().unwrap().as_mut() { + inner.last.progress = (progress, size); + if inner.last_send + update_interval <= Instant::now() { + inner.last_send = Instant::now(); + inner + .report + .send(CommandProgress { + message: None, + ..inner.last.clone() + }) + .ok(); + } + } + } + + pub fn report_message(&self, message: String) { + self.inner.lock().unwrap().as_mut().map(|inner| { + inner.last_send = Instant::now(); + inner + .report + .send(CommandProgress { + message: Some(message), + ..inner.last.clone() + }) + .ok() + }); + } + + pub fn register_reporter( + &mut self, + args: Option, + steps: usize, + unit: Option, + ) { + if let Some(args) = args { + self.config = args.progress_args; + *(self.inner.lock().unwrap()) = Some(ProgressImpl { + report: args.progress, + last: CommandProgress { + step: (0, steps), + message: None, + progress: (0, None), + unit, + }, + last_send: Instant::now(), + }); + } + } +} + /// Wraps a stream to report progress. /// The `report` function is called with the current offset and the total size. /// The total size is 0 if the size is unknown. (For example, when the source is a directory.) @@ -48,6 +149,13 @@ where type Sink = TransferSink; +pub fn progress_report_channel(dest: Sink, ctx: &TransferContext) -> Sink { + let report = ctx.reporter(); + wrap_sink_with_progress_reporting(dest, ctx, move |progress, size| { + report.report_progress(progress, size) + }) +} + /// Wraps a sink to report progress. /// The `report` function is called with the current offset and the total size. /// The total size is 0 if the size is unknown. (For example, when the source is a directory.) @@ -91,3 +199,98 @@ where sink } + +#[cfg(test)] +mod tests { + use super::*; + use std::time::Instant; + + use tokio::time::Duration; + + #[actix_rt::test] + async fn test_progress_reporter_interval() { + // Note: This test is time dependent and you can expect it to fail on very slow machines. + // If this happens, you could scale intervals by increasing `loop_interval`. Rather you + // shouldn't touch relations between these variables, if you don't know what are you doing. 
+ let loop_interval = 200u64; + let update_interval = 10 * loop_interval; + let offset = loop_interval / 2; + let margin = loop_interval * 8 / 10; + + let mut report = ProgressReporter::default(); + let (tx, mut rx) = tokio::sync::broadcast::channel(10); + report.register_reporter( + Some(ProgressConfig { + progress: tx, + progress_args: ProgressArgs { + update_interval: Some(Duration::from_millis(update_interval)), + update_step: None, + }, + }), + 2, + Some("Bytes".to_string()), + ); + + let size = 200; + let mut before = Instant::now(); + tokio::task::spawn_local(async move { + for step in 0..2 { + tokio::time::sleep(Duration::from_millis(offset)).await; + + for i in 0..=size { + report.report_progress(i, Some(size)); + tokio::time::sleep(Duration::from_millis(loop_interval)).await; + } + if step == 0 { + report.next_step(); + } + } + report.report_message("Finished".to_string()); + }); + + let mut counter = 0; + let mut step = 0; + while let Ok(event) = rx.recv().await { + //println!("{event:?}"); + + counter += 1; + let update = Instant::now().duration_since(before); + before = Instant::now(); + let diff = if update > Duration::from_millis(update_interval + offset) { + update - Duration::from_millis(update_interval + offset) + } else { + Duration::from_millis(update_interval + offset) - update + }; + + assert!(diff <= Duration::from_millis(margin)); + + // `ProgressReporter` should ignore 10 messages in each loop. + assert_eq!(event.progress.0, counter * 10); + assert_eq!(event.progress.1, Some(size)); + assert_eq!(event.step, (step, 2)); + assert_eq!(event.unit, Some("Bytes".to_string())); + assert_eq!(event.message, None); + + if counter == 20 { + if step == 1 { + break; + } + + counter = 0; + step += 1; + + // Skip step change event + rx.recv().await.unwrap(); + before = Instant::now(); + } + } + + // Reporting message will result in event containing progress adn step from previous event. + let last = rx.recv().await.unwrap(); + //println!("{last:?}"); + assert_eq!(last.message, Some("Finished".to_string())); + assert_eq!(last.progress.0, size); + assert_eq!(last.progress.1, Some(size)); + assert_eq!(last.step, (1, 2)); + } +} diff --git a/exe-unit/components/transfer/src/transfer.rs b/exe-unit/components/transfer/src/transfer.rs index d3eb77e7f4..8d4bd9503c 100644 --- a/exe-unit/components/transfer/src/transfer.rs +++ b/exe-unit/components/transfer/src/transfer.rs @@ -5,16 +5,20 @@ use std::rc::Rc; use actix::prelude::*; use futures::future::Abortable; +use futures::{Sink, StreamExt, TryStreamExt}; use url::Url; use crate::cache::{Cache, CachePath}; use crate::error::Error; use crate::error::Error as TransferError; +pub use crate::progress::ProgressConfig; use crate::{ transfer_with, ContainerTransferProvider, FileTransferProvider, GftpTransferProvider, HttpTransferProvider, Retry, TransferContext, TransferData, TransferProvider, TransferUrl, }; +use ya_client_model::activity::exe_script_command::ProgressArgs; +pub use ya_client_model::activity::CommandProgress; use ya_client_model::activity::TransferArgs; use ya_runtime_api::deploy::ContainerVolume; use ya_utils_futures::abort::Abort; @@ -35,12 +39,14 @@ macro_rules! actor_try { }; } -#[derive(Clone, Debug, Message)] +#[derive(Debug, Message, Default)] #[rtype(result = "Result<()>")] pub struct TransferResource { pub from: String, pub to: String, pub args: TransferArgs, + /// Progress reporting configuration. `None` means that there will be no progress updates. 
+ pub progress_config: Option, } #[derive(Message)] @@ -53,10 +59,92 @@ impl AddVolumes { } } -#[derive(Clone, Debug, Message)] +#[derive(Debug, Message, Default)] #[rtype(result = "Result>")] pub struct DeployImage { pub task_package: Option, + /// Progress reporting configuration. `None` means that there will be no progress updates. + pub progress_config: Option, +} + +impl DeployImage { + pub fn with_package(task_package: &str) -> DeployImage { + DeployImage { + task_package: Some(task_package.to_string()), + progress_config: None, + } + } +} + +pub trait ForwardProgressToSink { + fn progress_config_mut(&mut self) -> &mut Option; + + fn forward_progress( + &mut self, + args: &ProgressArgs, + sender: impl Sink + 'static, + ) { + let progress_args = self.progress_config_mut(); + let rx = match progress_args { + None => { + let (tx, rx) = tokio::sync::broadcast::channel(50); + *progress_args = Some(ProgressConfig { + progress: tx, + progress_args: args.clone(), + }); + rx + } + Some(args) => args.progress.subscribe(), + }; + + tokio::task::spawn_local(async move { + tokio_stream::wrappers::BroadcastStream::new(rx) + .map_err(|e| Error::Other(e.to_string())) + .forward(sender) + .await + .ok() + }); + } +} + +impl ForwardProgressToSink for DeployImage { + fn progress_config_mut(&mut self) -> &mut Option { + &mut self.progress_config + } +} + +impl ForwardProgressToSink for TransferResource { + fn progress_config_mut(&mut self) -> &mut Option { + &mut self.progress_config + } +} + +impl DeployImage { + pub fn forward_progress( + &mut self, + args: &ProgressArgs, + sender: impl Sink + 'static, + ) { + let rx = match &self.progress_config { + None => { + let (tx, rx) = tokio::sync::broadcast::channel(50); + self.progress_config = Some(ProgressConfig { + progress: tx, + progress_args: args.clone(), + }); + rx + } + Some(args) => args.progress.subscribe(), + }; + + tokio::task::spawn_local(async move { + tokio_stream::wrappers::BroadcastStream::new(rx) + .map_err(|e| Error::Other(e.to_string())) + .forward(sender) + .await + .ok() + }); + } } #[derive(Clone, Debug, Message)] @@ -160,6 +248,7 @@ impl TransferService { src_url: TransferUrl, _src_name: CachePath, path: PathBuf, + _ctx: TransferContext, ) -> ActorResponse>> { let fut = async move { let resp = reqwest::get(src_url.url) @@ -181,6 +270,7 @@ impl TransferService { src_url: TransferUrl, src_name: CachePath, path: PathBuf, + ctx: TransferContext, ) -> ActorResponse>> { let path_tmp = self.cache.to_temp_path(&src_name).to_path_buf(); @@ -191,9 +281,6 @@ impl TransferService { hash: None, }; - let ctx = TransferContext::default(); - ctx.state.retry_with(self.deploy_retry.clone()); - // Using partially downloaded image from previous executions could speed up deploy // process, but it comes with the cost: If image under URL changed, Requestor will get // error on the end. This can result with Provider being perceived as unreliable. 
@@ -211,6 +298,8 @@ impl TransferService { let fut = async move { if path.exists() { log::info!("Deploying cached image: {:?}", path); + ctx.reporter() + .report_message("Deployed image from cache".to_string()); return Ok(Some(path)); } @@ -269,11 +358,16 @@ impl Handler for TransferService { log::info!("Deploying from {:?} to {:?}", src_url.url, path); + let mut ctx = TransferContext::default(); + ctx.state.retry_with(self.deploy_retry.clone()); + ctx.progress + .register_reporter(deploy.progress_config, 1, Some("Bytes".to_string())); + #[cfg(not(feature = "sgx"))] - return self.deploy_no_sgx(src_url, src_name, path); + return self.deploy_no_sgx(src_url, src_name, path, ctx); #[cfg(feature = "sgx")] - return self.deploy_sgx(src_url, src_name, path); + return self.deploy_sgx(src_url, src_name, path, ctx); } } @@ -286,8 +380,10 @@ impl Handler for TransferService { let src = actor_try!(self.provider(&src_url)); let dst = actor_try!(self.provider(&dst_url)); - let ctx = TransferContext::default(); + let mut ctx = TransferContext::default(); ctx.state.retry_with(self.transfer_retry.clone()); + ctx.progress + .register_reporter(msg.progress_config, 1, Some("Bytes".to_string())); let (abort, reg) = Abort::new_pair(); diff --git a/exe-unit/components/transfer/tests/test_deploy.rs b/exe-unit/components/transfer/tests/test_deploy.rs index 824dd2c12a..74d81d35f2 100644 --- a/exe-unit/components/transfer/tests/test_deploy.rs +++ b/exe-unit/components/transfer/tests/test_deploy.rs @@ -1,12 +1,17 @@ use actix::Actor; use digest::Digest; +use futures::channel::mpsc; +use futures::SinkExt; use std::env; use std::time::Duration; use test_context::test_context; use tokio::time::sleep; +use tokio_stream::StreamExt; +use ya_client_model::activity::exe_script_command::ProgressArgs; +use ya_client_model::activity::CommandProgress; use ya_framework_basic::async_drop::DroppableTestContext; -use ya_framework_basic::file::{generate_file_with_hash, generate_file_with_hasher}; +use ya_framework_basic::file::generate_random_file_with_hash; use ya_framework_basic::log::enable_logs; use ya_framework_basic::server_external::start_http; use ya_framework_basic::temp_dir; @@ -14,7 +19,7 @@ use ya_transfer::transfer::{AbortTransfers, DeployImage, TransferService, Transf /// When re-deploying image, `TransferService` should use partially downloaded image. /// Hash computations should be correct in both cases. 
-#[cfg_attr(not(feature = "framework-test"), ignore)] +#[cfg_attr(not(feature = "system-test"), ignore)] #[test_context(DroppableTestContext)] #[serial_test::serial] async fn test_deploy_image_restart(ctx: &mut DroppableTestContext) -> anyhow::Result<()> { @@ -32,10 +37,8 @@ async fn test_deploy_image_restart(ctx: &mut DroppableTestContext) -> anyhow::Re std::fs::create_dir_all(dir)?; } - let chunk_size = 4096_usize; - let chunk_count = 1024 * 10; - - let hash = generate_file_with_hash(temp_dir, "rnd", chunk_size, chunk_count); + let hash = + generate_random_file_with_hash::(temp_dir, "rnd", 4096_usize, 1024 * 10); log::debug!("Starting HTTP servers"); let path = temp_dir.to_path_buf(); @@ -43,10 +46,10 @@ async fn test_deploy_image_restart(ctx: &mut DroppableTestContext) -> anyhow::Re .await .expect("unable to start http servers"); - let task_package = Some(format!( + let task_package = format!( "hash://sha3:{}:http://127.0.0.1:8001/rnd", hex::encode(hash) - )); + ); log::debug!("Starting TransferService"); let exe_ctx = TransferServiceContext { @@ -65,29 +68,97 @@ async fn test_deploy_image_restart(ctx: &mut DroppableTestContext) -> anyhow::Re }); log::info!("[>>] Deployment with hash verification"); - let result = addr - .send(DeployImage { - task_package: task_package.clone(), - }) - .await?; + let result = addr.send(DeployImage::with_package(&task_package)).await?; log::info!("Deployment stopped"); assert!(result.is_err()); log::info!("Re-deploying the same image"); - addr.send(DeployImage { - task_package: task_package.clone(), - }) - .await??; + addr.send(DeployImage::with_package(&task_package)) + .await??; Ok(()) } -#[cfg_attr(not(feature = "framework-test"), ignore)] +#[cfg_attr(not(feature = "system-test"), ignore)] +#[test_context(DroppableTestContext)] +#[serial_test::serial] +async fn test_deploy_progress(ctx: &mut DroppableTestContext) -> anyhow::Result<()> { + enable_logs(false); + + let dir = temp_dir!("deploy-progress")?; + let temp_dir = dir.path(); + + log::debug!("Creating directories in: {}", temp_dir.display()); + let work_dir = temp_dir.join("work_dir"); + let cache_dir = temp_dir.join("cache_dir"); + let sub_dir = temp_dir.join("sub_dir"); + + for dir in [work_dir.clone(), cache_dir.clone(), sub_dir.clone()] { + std::fs::create_dir_all(dir)?; + } + + let chunk_size = 4096_usize; + let chunk_count = 1024; + let file_size = (chunk_size * chunk_count) as u64; + let hash = + generate_random_file_with_hash::(temp_dir, "rnd", chunk_size, chunk_count); + + log::debug!("Starting HTTP servers"); + let path = temp_dir.to_path_buf(); + start_http(ctx, path) + .await + .expect("unable to start http servers"); + + let task_package = format!( + "hash://sha3:{}:http://127.0.0.1:8001/rnd", + hex::encode(hash) + ); + + log::debug!("Starting TransferService"); + let exe_ctx = TransferServiceContext { + work_dir: work_dir.clone(), + cache_dir, + ..TransferServiceContext::default() + }; + let addr = TransferService::new(exe_ctx).start(); + + log::info!("[>>] Deployment with hash verification"); + let (tx, mut rx) = mpsc::channel::(15); + let mut msg = DeployImage::with_package(&task_package); + msg.forward_progress( + &ProgressArgs::default(), + tx.sink_map_err(|e| ya_transfer::error::Error::Other(e.to_string())), + ); + + tokio::task::spawn_local(async move { + let _result = addr.send(msg).await??; + log::info!("Deployment stopped"); + anyhow::Ok(()) + }); + + let mut last_progress = 0u64; + while let Some(progress) = rx.next().await { + assert_eq!(progress.progress.1.unwrap(), 
file_size); + assert!(progress.progress.0 >= last_progress); + + last_progress = progress.progress.0; + + log::info!( + "Progress: {}/{}", + progress.progress.0, + progress.progress.1.unwrap_or(0) + ); + } + + Ok(()) +} + +#[cfg_attr(not(feature = "system-test"), ignore)] #[test_context(DroppableTestContext)] #[serial_test::serial] async fn test_deploy_checksum(ctx: &mut DroppableTestContext) -> anyhow::Result<()> { - enable_logs(true); + enable_logs(false); let dir = temp_dir!("deploy-checksum")?; let temp_dir = dir.path(); @@ -107,7 +178,7 @@ async fn test_deploy_checksum(ctx: &mut DroppableTestContext) -> anyhow::Result< let chunk_size = 4096_usize; let chunk_count = 10; - hex::encode(generate_file_with_hasher::( + hex::encode(generate_random_file_with_hash::( temp_dir, name, chunk_size, @@ -154,11 +225,9 @@ async fn test_deploy_checksum(ctx: &mut DroppableTestContext) -> anyhow::Result< log::info!("[>>] Verifying deploy with {name}"); let hash_function = name.split('-').next().unwrap(); - let deploy = DeployImage { - task_package: Some(format!( - "hash://{hash_function}:{hash}:http://127.0.0.1:8001/{name}" - )), - }; + let deploy = DeployImage::with_package(&format!( + "hash://{hash_function}:{hash}:http://127.0.0.1:8001/{name}" + )); let result = addr.send(deploy).await; let path = result.unwrap().unwrap().unwrap(); diff --git a/exe-unit/components/transfer/tests/test_transfer_abort.rs b/exe-unit/components/transfer/tests/test_transfer_abort.rs index 69e864f2d7..12d2dc8f92 100644 --- a/exe-unit/components/transfer/tests/test_transfer_abort.rs +++ b/exe-unit/components/transfer/tests/test_transfer_abort.rs @@ -38,6 +38,7 @@ async fn interrupted_transfer( from: src.to_owned(), to: dest.to_owned(), args: TransferArgs::default(), + progress_config: None, }) .await?; @@ -49,7 +50,7 @@ async fn interrupted_transfer( Ok(()) } -#[cfg_attr(not(feature = "framework-test"), ignore)] +#[cfg_attr(not(feature = "system-test"), ignore)] #[test_context(DroppableTestContext)] #[serial_test::serial] async fn test_transfer_abort(ctx: &mut DroppableTestContext) -> anyhow::Result<()> { diff --git a/exe-unit/components/transfer/tests/test_transfer_resume.rs b/exe-unit/components/transfer/tests/test_transfer_resume.rs index b112d93858..2dbb24f20c 100644 --- a/exe-unit/components/transfer/tests/test_transfer_resume.rs +++ b/exe-unit/components/transfer/tests/test_transfer_resume.rs @@ -10,7 +10,7 @@ use url::Url; use ya_client_model::activity::TransferArgs; use ya_framework_basic::async_drop::DroppableTestContext; -use ya_framework_basic::file::generate_file_with_hash; +use ya_framework_basic::file::generate_random_file_with_hash; use ya_framework_basic::hash::verify_hash; use ya_framework_basic::log::enable_logs; use ya_framework_basic::server_external::start_http; @@ -113,6 +113,7 @@ async fn transfer_with_args( from: from.to_owned(), to: to.to_owned(), args, + progress_config: None, }) .await??; @@ -127,7 +128,7 @@ async fn transfer( transfer_with_args(addr, from, to, TransferArgs::default()).await } -#[cfg_attr(not(feature = "framework-test"), ignore)] +#[cfg_attr(not(feature = "system-test"), ignore)] #[test_context(DroppableTestContext)] #[serial_test::serial] async fn test_transfer_resume(ctx: &mut DroppableTestContext) -> anyhow::Result<()> { @@ -161,7 +162,8 @@ async fn test_transfer_resume(ctx: &mut DroppableTestContext) -> anyhow::Result< }]; addr.send(AddVolumes::new(volumes)).await??; - let hash = generate_file_with_hash(temp_dir, "rnd", 4096_usize, 3 * 1024); + let hash = + 
generate_random_file_with_hash::(temp_dir, "rnd", 4096_usize, 3 * 1024); log::debug!("Starting HTTP servers"); start_http(ctx, temp_dir.to_path_buf()) diff --git a/exe-unit/components/transfer/tests/test_transfer_service.rs b/exe-unit/components/transfer/tests/test_transfer_service.rs index 894a0eeba5..40fc88708c 100644 --- a/exe-unit/components/transfer/tests/test_transfer_service.rs +++ b/exe-unit/components/transfer/tests/test_transfer_service.rs @@ -6,7 +6,7 @@ use test_context::test_context; use ya_client_model::activity::TransferArgs; use ya_exe_unit::error::Error; use ya_framework_basic::async_drop::DroppableTestContext; -use ya_framework_basic::file::generate_file_with_hash; +use ya_framework_basic::file::generate_random_file_with_hash; use ya_framework_basic::hash::verify_hash; use ya_framework_basic::log::enable_logs; use ya_framework_basic::server_external::start_http; @@ -32,13 +32,14 @@ async fn transfer_with_args( from: from.to_owned(), to: to.to_owned(), args, + progress_config: None, }) .await??; Ok(()) } -#[cfg_attr(not(feature = "framework-test"), ignore)] +#[cfg_attr(not(feature = "system-test"), ignore)] #[test_context(DroppableTestContext)] #[serial_test::serial] async fn test_transfer_scenarios(ctx: &mut DroppableTestContext) -> anyhow::Result<()> { @@ -70,7 +71,8 @@ async fn test_transfer_scenarios(ctx: &mut DroppableTestContext) -> anyhow::Resu }, // Uncomment to enable logs ]; - let hash = generate_file_with_hash(temp_dir, "rnd", 4096_usize, 256_usize); + let hash = + generate_random_file_with_hash::(temp_dir, "rnd", 4096_usize, 256_usize); log::debug!("Starting HTTP servers"); @@ -79,10 +81,10 @@ async fn test_transfer_scenarios(ctx: &mut DroppableTestContext) -> anyhow::Resu .await .expect("unable to start http servers"); - let task_package = Some(format!( + let task_package = format!( "hash://sha3:{}:http://127.0.0.1:8001/rnd", hex::encode(hash) - )); + ); log::debug!("Starting TransferService"); let exe_ctx = TransferServiceContext { @@ -97,18 +99,14 @@ async fn test_transfer_scenarios(ctx: &mut DroppableTestContext) -> anyhow::Resu println!(); log::warn!("[>>] Deployment with hash verification"); - addr.send(DeployImage { - task_package: task_package.clone(), - }) - .await??; + addr.send(DeployImage::with_package(&task_package)) + .await??; log::warn!("Deployment complete"); println!(); log::warn!("[>>] Deployment from cache"); - addr.send(DeployImage { - task_package: task_package.clone(), - }) - .await??; + addr.send(DeployImage::with_package(&task_package)) + .await??; log::warn!("Deployment from cache complete"); println!(); @@ -155,7 +153,7 @@ async fn test_transfer_scenarios(ctx: &mut DroppableTestContext) -> anyhow::Resu } #[ignore] -#[cfg_attr(not(feature = "framework-test"), ignore)] +//#[cfg_attr(not(feature = "system-test"), ignore)] #[test_context(DroppableTestContext)] #[serial_test::serial] async fn test_transfer_archived(ctx: &mut DroppableTestContext) -> anyhow::Result<()> { @@ -186,7 +184,8 @@ async fn test_transfer_archived(ctx: &mut DroppableTestContext) -> anyhow::Resul path: "/extract".into(), }, ]; - let hash = generate_file_with_hash(temp_dir, "rnd", 4096_usize, 256_usize); + let hash = + generate_random_file_with_hash::(temp_dir, "rnd", 4096_usize, 256_usize); log::debug!("Starting HTTP servers"); diff --git a/exe-unit/runtime-api/Cargo.toml b/exe-unit/runtime-api/Cargo.toml index 9de7a43bfc..316a74f2fb 100644 --- a/exe-unit/runtime-api/Cargo.toml +++ b/exe-unit/runtime-api/Cargo.toml @@ -8,12 +8,7 @@ license = "GPL-3.0" homepage = 
"https://github.com/golemfactory/yagna/tree/master/exe-unit/runtime-api" repository = "https://github.com/golemfactory/yagna" -[[example]] -name = "runtime-server-mock" -required-features = ["server"] - [features] -default = ['server'] codec = [] server = ['prost', 'futures', 'tokio', 'tokio-util'] diff --git a/exe-unit/runtime-api/src/deploy.rs b/exe-unit/runtime-api/src/deploy.rs index 072dffb8a0..80c31078c9 100644 --- a/exe-unit/runtime-api/src/deploy.rs +++ b/exe-unit/runtime-api/src/deploy.rs @@ -119,14 +119,7 @@ impl DeployResult { let b: &[u8] = bytes.as_ref(); if b.is_empty() { log::warn!("empty descriptor"); - let vols = if cfg!(feature = "compat-deployment") { - vec![ContainerVolume { - name: ".".to_string(), - path: "".to_string(), - }] - } else { - Default::default() - }; + let vols = Default::default(); return Ok(DeployResult { valid: Ok(Default::default()), diff --git a/exe-unit/src/bin.rs b/exe-unit/src/bin.rs index 15c6ede35b..95fb3e5822 100644 --- a/exe-unit/src/bin.rs +++ b/exe-unit/src/bin.rs @@ -1,191 +1,7 @@ -use actix::{Actor, Addr}; -use anyhow::{bail, Context}; -use futures::channel::oneshot; -use std::convert::TryFrom; -use std::path::PathBuf; -use structopt::{clap, StructOpt}; +use structopt::StructOpt; -use ya_client_model::activity::ExeScriptCommand; -use ya_service_bus::RpcEnvelope; - -use ya_core_model::activity; -use ya_exe_unit::agreement::Agreement; use ya_exe_unit::logger::*; -use ya_exe_unit::manifest::ManifestContext; -use ya_exe_unit::message::{GetState, GetStateResponse, Register}; -use ya_exe_unit::runtime::process::RuntimeProcess; -use ya_exe_unit::service::counters; -use ya_exe_unit::service::signal::SignalMonitor; -use ya_exe_unit::state::Supervision; -use ya_exe_unit::{ExeUnit, ExeUnitContext}; -use ya_transfer::transfer::TransferService; -use ya_utils_path::normalize_path; - -#[derive(structopt::StructOpt, Debug)] -#[structopt(global_setting = clap::AppSettings::ColoredHelp)] -#[structopt(version = ya_compile_time_utils::version_describe!())] -struct Cli { - /// Runtime binary path - #[structopt(long, short)] - binary: PathBuf, - #[structopt(flatten)] - supervise: SuperviseCli, - /// Additional runtime arguments - #[structopt( - long, - short, - set = clap::ArgSettings::Global, - number_of_values = 1, - )] - runtime_arg: Vec, - /// Enclave secret key used in secure communication - #[structopt( - long, - env = "EXE_UNIT_SEC_KEY", - hide_env_values = true, - set = clap::ArgSettings::Global, - )] - #[allow(dead_code)] - sec_key: Option, - /// Requestor public key used in secure communication - #[structopt( - long, - env = "EXE_UNIT_REQUESTOR_PUB_KEY", - hide_env_values = true, - set = clap::ArgSettings::Global, - )] - #[allow(dead_code)] - requestor_pub_key: Option, - #[structopt(subcommand)] - command: Command, -} - -#[derive(structopt::StructOpt, Debug)] -struct SuperviseCli { - /// Hardware resources are handled by the runtime - #[structopt( - long = "runtime-managed-hardware", - alias = "cap-handoff", - parse(from_flag = std::ops::Not::not), - set = clap::ArgSettings::Global, - )] - hardware: bool, - /// Images are handled by the runtime - #[structopt( - long = "runtime-managed-image", - parse(from_flag = std::ops::Not::not), - set = clap::ArgSettings::Global, - )] - image: bool, -} - -#[derive(structopt::StructOpt, Debug)] -#[structopt(global_setting = clap::AppSettings::DeriveDisplayOrder)] -enum Command { - /// Execute commands from file - FromFile { - /// ExeUnit daemon GSB URL - #[structopt(long)] - report_url: Option, - /// ExeUnit service 
ID - #[structopt(long)] - service_id: Option, - /// Command file path - input: PathBuf, - #[structopt(flatten)] - args: RunArgs, - }, - /// Bind to Service Bus - ServiceBus { - /// ExeUnit service ID - service_id: String, - /// ExeUnit daemon GSB URL - report_url: String, - #[structopt(flatten)] - args: RunArgs, - }, - /// Print an offer template in JSON format - OfferTemplate, - /// Run runtime's test command - Test, -} - -#[derive(structopt::StructOpt, Debug)] -struct RunArgs { - /// Agreement file path - #[structopt(long, short)] - agreement: PathBuf, - /// Working directory - #[structopt(long, short)] - work_dir: PathBuf, - /// Common cache directory - #[structopt(long, short)] - cache_dir: PathBuf, -} - -fn create_path(path: &PathBuf) -> anyhow::Result { - if let Err(error) = std::fs::create_dir_all(path) { - match &error.kind() { - std::io::ErrorKind::AlreadyExists => (), - _ => bail!("Can't create directory: {}, {}", path.display(), error), - } - } - Ok(normalize_path(path)?) -} - -#[cfg(feature = "sgx")] -fn init_crypto( - sec_key: Option, - req_key: Option, -) -> anyhow::Result { - use ya_exe_unit::crypto::Crypto; - - let req_key = req_key.ok_or_else(|| anyhow::anyhow!("Missing requestor public key"))?; - match sec_key { - Some(key) => Ok(Crypto::try_with_keys(key, req_key)?), - None => { - log::info!("Generating a new key pair..."); - Ok(Crypto::try_new(req_key)?) - } - } -} - -async fn send_script( - exe_unit: Addr>, - activity_id: Option, - exe_script: Vec, -) { - use std::time::Duration; - use ya_exe_unit::state::{State, StatePair}; - - let delay = Duration::from_secs_f32(0.5); - loop { - match exe_unit.send(GetState).await { - Ok(GetStateResponse(StatePair(State::Initialized, None))) => break, - Ok(GetStateResponse(StatePair(State::Terminated, _))) - | Ok(GetStateResponse(StatePair(_, Some(State::Terminated)))) - | Err(_) => { - return log::error!("ExeUnit has terminated"); - } - _ => tokio::time::sleep(delay).await, - } - } - - log::debug!("Executing commands: {:?}", exe_script); - - let msg = activity::Exec { - activity_id: activity_id.unwrap_or_default(), - batch_id: hex::encode(rand::random::<[u8; 16]>()), - exe_script, - timeout: None, - }; - if let Err(e) = exe_unit - .send(RpcEnvelope::with_caller(String::new(), msg)) - .await - { - log::error!("Unable to execute exe script: {:?}", e); - } -} +use ya_exe_unit::{run, Cli}; #[cfg(feature = "packet-trace-enable")] fn init_packet_trace() -> anyhow::Result<()> { @@ -197,147 +13,6 @@ fn init_packet_trace() -> anyhow::Result<()> { Ok(()) } -async fn run() -> anyhow::Result<()> { - dotenv::dotenv().ok(); - - #[cfg(feature = "packet-trace-enable")] - init_packet_trace()?; - - #[allow(unused_mut)] - let mut cli: Cli = Cli::from_args(); - if !cli.binary.exists() { - bail!("Runtime binary does not exist: {}", cli.binary.display()); - } - - let mut commands = None; - let ctx_activity_id; - let ctx_report_url; - - let args = match &cli.command { - Command::FromFile { - args, - service_id, - report_url, - input, - } => { - let contents = std::fs::read_to_string(input).map_err(|e| { - anyhow::anyhow!("Cannot read commands from file {}: {e}", input.display()) - })?; - let contents = serde_json::from_str(&contents).map_err(|e| { - anyhow::anyhow!( - "Cannot deserialize commands from file {}: {e}", - input.display(), - ) - })?; - ctx_activity_id = service_id.clone(); - ctx_report_url = report_url.clone(); - commands = Some(contents); - args - } - Command::ServiceBus { - args, - service_id, - report_url, - } => { - ctx_activity_id = 
Some(service_id.clone()); - ctx_report_url = Some(report_url.clone()); - args - } - Command::OfferTemplate => { - let args = cli.runtime_arg.clone(); - let offer_template = ExeUnit::::offer_template(cli.binary, args)?; - println!("{}", serde_json::to_string(&offer_template)?); - return Ok(()); - } - Command::Test => { - let args = cli.runtime_arg.clone(); - let output = ExeUnit::::test(cli.binary, args)?; - println!("{}", String::from_utf8_lossy(&output.stdout)); - eprintln!("{}", String::from_utf8_lossy(&output.stderr)); - if !output.status.success() { - bail!("Test failed"); - } - return Ok(()); - } - }; - - if !args.agreement.exists() { - bail!( - "Agreement file does not exist: {}", - args.agreement.display() - ); - } - let work_dir = create_path(&args.work_dir).map_err(|e| { - anyhow::anyhow!( - "Cannot create the working directory {}: {e}", - args.work_dir.display(), - ) - })?; - let cache_dir = create_path(&args.cache_dir).map_err(|e| { - anyhow::anyhow!( - "Cannot create the cache directory {}: {e}", - args.work_dir.display(), - ) - })?; - let mut agreement = Agreement::try_from(&args.agreement).map_err(|e| { - anyhow::anyhow!( - "Error parsing the agreement from {}: {e}", - args.agreement.display(), - ) - })?; - - log::info!("Attempting to read app manifest .."); - - let manifest_ctx = - ManifestContext::try_new(&agreement.inner).context("Invalid app manifest")?; - agreement.task_package = manifest_ctx - .payload() - .or_else(|| agreement.task_package.take()); - - log::info!("Manifest-enabled features: {:?}", manifest_ctx.features()); - log::info!("User-provided payload: {:?}", agreement.task_package); - - let ctx = ExeUnitContext { - supervise: Supervision { - hardware: cli.supervise.hardware, - image: cli.supervise.image, - manifest: manifest_ctx, - }, - activity_id: ctx_activity_id.clone(), - report_url: ctx_report_url, - agreement, - work_dir, - cache_dir, - runtime_args: cli.runtime_arg.clone(), - acl: Default::default(), - credentials: None, - #[cfg(feature = "sgx")] - crypto: init_crypto( - cli.sec_key.replace("".into()), - cli.requestor_pub_key.clone(), - )?, - }; - - log::debug!("CLI args: {:?}", cli); - log::debug!("ExeUnitContext args: {:?}", ctx); - - let (tx, rx) = oneshot::channel(); - - let counters = counters::build(&ctx, Some(10000), ctx.supervise.hardware).start(); - let transfers = TransferService::new((&ctx).into()).start(); - let runtime = RuntimeProcess::new(&ctx, cli.binary).start(); - let exe_unit = ExeUnit::new(tx, ctx, counters, transfers, runtime).start(); - let signals = SignalMonitor::new(exe_unit.clone()).start(); - exe_unit.send(Register(signals)).await?; - - if let Some(exe_script) = commands { - tokio::task::spawn(send_script(exe_unit, ctx_activity_id, exe_script)); - } - - rx.await??; - Ok(()) -} - #[actix_rt::main] async fn main() { let panic_hook = std::panic::take_hook(); @@ -351,7 +26,16 @@ async fn main() { log::warn!("Using fallback logging due to an error: {:?}", error); }; - std::process::exit(match run().await { + dotenv::dotenv().ok(); + + #[cfg(feature = "packet-trace-enable")] + if let Err(error) = init_packet_trace() { + log::warn!("Initializing packet tracing failed: {error:?}"); + } + + let cli: Cli = Cli::from_args(); + + std::process::exit(match run(cli).await { Ok(_) => 0, Err(error) => { log::error!("{}", error); diff --git a/exe-unit/src/exe_unit.rs b/exe-unit/src/exe_unit.rs new file mode 100644 index 0000000000..ed44fc16b8 --- /dev/null +++ b/exe-unit/src/exe_unit.rs @@ -0,0 +1,628 @@ +use actix::dev::IntervalFunc; +use 
actix::{ + Actor, ActorFutureExt, ActorStreamExt, Addr, AsyncContext, Context, ContextFutureSpawner, + Handler, Message, ResponseFuture, Running, StreamHandler, WrapFuture, +}; +use chrono::Utc; +use futures::channel::{mpsc, oneshot}; +use futures::{FutureExt, SinkExt}; +use std::path::PathBuf; +use std::time::Duration; +use tokio::sync::broadcast; +use ya_counters::error::CounterError; +use ya_counters::message::GetCounters; +use ya_counters::service::CountersService; + +use ya_agreement_utils::OfferTemplate; +use ya_client_model::activity::exe_script_command::VolumeMount; +use ya_client_model::activity::{ActivityUsage, CommandOutput, ExeScriptCommand, State, StatePair}; +use ya_core_model::activity; +use ya_core_model::activity::local::Credentials; +use ya_runtime_api::deploy; +use ya_runtime_api::deploy::ContainerVolume; +use ya_service_bus::{actix_rpc, RpcEndpoint, RpcMessage}; +use ya_transfer::transfer::{ + AddVolumes, DeployImage, ForwardProgressToSink, TransferResource, TransferService, + TransferServiceContext, +}; + +use crate::acl::Acl; +use crate::agreement::Agreement; +use crate::error::Error; +use crate::message::{ + ExecuteCommand, GetStdOut, Initialize, RuntimeEvent, SetState, Shutdown, ShutdownReason, + SignExeScript, Stop, UpdateDeployment, +}; +use crate::runtime::{Runtime, RuntimeMode}; +use crate::service::{self, ServiceAddr, ServiceControl}; +use crate::state::{ExeUnitState, StateError, Supervision}; +use crate::Result; + +lazy_static::lazy_static! { + static ref DEFAULT_REPORT_INTERVAL: Duration = Duration::from_secs(1u64); +} + +#[derive(Clone, Debug, Default, Message)] +#[rtype(result = "Result>")] +pub struct FinishNotifier {} + +pub struct ExeUnit { + pub(crate) ctx: ExeUnitContext, + pub(crate) state: ExeUnitState, + pub(crate) events: Channel, + pub(crate) runtime: Addr, + pub(crate) counters: Addr, + pub(crate) transfers: Addr, + pub(crate) services: Vec>, + pub(crate) shutdown_tx: broadcast::Sender<()>, +} + +impl ExeUnit { + pub fn new( + ctx: ExeUnitContext, + counters: Addr, + transfers: Addr, + runtime: Addr, + ) -> Self { + let (shutdown_tx, _) = broadcast::channel(1); + ExeUnit { + ctx, + state: ExeUnitState::default(), + events: Channel::default(), + runtime: runtime.clone(), + counters: counters.clone(), + transfers: transfers.clone(), + services: vec![ + Box::new(ServiceAddr::new(counters)), + Box::new(ServiceAddr::new(transfers)), + Box::new(ServiceAddr::new(runtime)), + ], + shutdown_tx, + } + } + + pub fn offer_template(binary: PathBuf, args: Vec) -> crate::Result { + use crate::runtime::process::RuntimeProcess; + + let runtime_template = RuntimeProcess::offer_template(binary, args)?; + let supervisor_template = OfferTemplate::new(serde_json::json!({ + "golem.com.usage.vector": service::counters::usage_vector(), + "golem.activity.caps.transfer.protocol": TransferService::schemes(), + "golem.activity.caps.transfer.report-progress": true, + "golem.activity.caps.deploy.report-progress": true, + })); + + Ok(supervisor_template.patch(runtime_template)) + } + + pub fn test(binary: PathBuf, args: Vec) -> crate::Result { + use crate::runtime::process::RuntimeProcess; + RuntimeProcess::test(binary, args) + } + + fn report_usage(&mut self, context: &mut Context) { + if self.ctx.activity_id.is_none() || self.ctx.report_url.is_none() { + return; + } + let fut = report_usage( + self.ctx.report_url.clone().unwrap(), + self.ctx.activity_id.clone().unwrap(), + context.address(), + self.counters.clone(), + ); + context.spawn(fut.into_actor(self)); + } + + 
pub(crate) async fn stop_runtime(runtime: Addr, reason: ShutdownReason) { + if let Err(e) = runtime + .send(Shutdown(reason)) + .timeout(Duration::from_secs(5u64)) + .await + { + log::warn!("Unable to stop the runtime: {:?}", e); + } + } +} + +#[derive(Clone)] +pub struct RuntimeRef(Addr>); + +impl RuntimeRef { + pub fn from_ctx(ctx: &Context>) -> Self { + RuntimeRef(ctx.address()) + } +} + +impl std::ops::Deref for RuntimeRef { + type Target = Addr>; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl RuntimeRef { + pub async fn exec( + self, + exec: activity::Exec, + runtime: Addr, + transfers: Addr, + mut events: mpsc::Sender, + mut control: oneshot::Receiver<()>, + ) { + let batch_id = exec.batch_id.clone(); + for (idx, command) in exec.exe_script.into_iter().enumerate() { + if let Ok(Some(_)) = control.try_recv() { + log::warn!("Batch {} execution aborted", batch_id); + break; + } + + let runtime_cmd = ExecuteCommand { + batch_id: batch_id.clone(), + command: command.clone(), + tx: events.clone(), + idx, + }; + + let evt = RuntimeEvent::started(batch_id.clone(), idx, command.clone()); + if let Err(e) = events.send(evt).await { + log::error!("Unable to report event: {:?}", e); + } + + let (return_code, message) = match { + if runtime_cmd.stateless() { + self.exec_stateless(&runtime_cmd).await + } else { + self.exec_stateful(runtime_cmd, &runtime, &transfers).await + } + } { + Ok(_) => (0, None), + Err(ref err) => match err { + Error::CommandExitCodeError(c) => (*c, Some(err.to_string())), + _ => (-1, Some(err.to_string())), + }, + }; + + let evt = RuntimeEvent::finished(batch_id.clone(), idx, return_code, message.clone()); + if let Err(e) = events.send(evt).await { + log::error!("Unable to report event: {:?}", e); + } + + if return_code != 0 { + let message = message.unwrap_or_else(|| "reason unspecified".into()); + log::warn!("Batch {} execution interrupted: {}", batch_id, message); + break; + } + } + } + + async fn exec_stateless(&self, runtime_cmd: &ExecuteCommand) -> crate::Result<()> { + match runtime_cmd.command { + ExeScriptCommand::Sign {} => { + let batch_id = runtime_cmd.batch_id.clone(); + let signature = self.send(SignExeScript { batch_id }).await??; + let stdout = serde_json::to_string(&signature)?; + + runtime_cmd + .tx + .clone() + .send(RuntimeEvent::stdout( + runtime_cmd.batch_id.clone(), + runtime_cmd.idx, + CommandOutput::Bin(stdout.into_bytes()), + )) + .await + .map_err(|e| Error::runtime(format!("Unable to send stdout event: {:?}", e)))?; + } + ExeScriptCommand::Terminate {} => { + log::debug!("Terminating running ExeScripts"); + let exclude_batches = vec![runtime_cmd.batch_id.clone()]; + self.send(Stop { exclude_batches }).await??; + self.send(SetState::from(State::Initialized)).await?; + } + _ => (), + } + Ok(()) + } + + async fn exec_stateful( + &self, + runtime_cmd: ExecuteCommand, + runtime: &Addr, + transfer_service: &Addr, + ) -> crate::Result<()> { + let state = self.send(crate::message::GetState {}).await?.0; + let state_pre = match (&state.0, &state.1) { + (_, Some(_)) => { + return Err(StateError::Busy(state).into()); + } + (State::New, _) | (State::Terminated, _) => { + return Err(StateError::InvalidState(state).into()); + } + (State::Initialized, _) => match &runtime_cmd.command { + ExeScriptCommand::Deploy { .. } => { + StatePair(State::Initialized, Some(State::Deployed)) + } + _ => return Err(StateError::InvalidState(state).into()), + }, + (State::Deployed, _) => match &runtime_cmd.command { + ExeScriptCommand::Start { .. 
} => StatePair(State::Deployed, Some(State::Ready)), + _ => return Err(StateError::InvalidState(state).into()), + }, + (s, _) => match &runtime_cmd.command { + ExeScriptCommand::Deploy { .. } | ExeScriptCommand::Start { .. } => { + return Err(StateError::InvalidState(state).into()); + } + _ => StatePair(*s, Some(*s)), + }, + }; + self.send(SetState::from(state_pre)).await?; + + log::info!("Executing command: {:?}", runtime_cmd.command); + + let result = async { + self.pre_runtime(&runtime_cmd, runtime, transfer_service) + .await?; + + let exit_code = runtime.send(runtime_cmd.clone()).await??; + if exit_code != 0 { + return Err(Error::CommandExitCodeError(exit_code)); + } + + self.post_runtime(&runtime_cmd, runtime, transfer_service) + .await?; + + Ok(()) + } + .await; + + let state_cur = self.send(crate::message::GetState {}).await?.0; + if state_cur != state_pre { + return Err(StateError::UnexpectedState { + current: state_cur, + expected: state_pre, + } + .into()); + } + + self.send(SetState::from(state_pre.1.unwrap())).await?; + result + } + + async fn pre_runtime( + &self, + runtime_cmd: &ExecuteCommand, + runtime: &Addr, + transfer_service: &Addr, + ) -> crate::Result<()> { + match &runtime_cmd.command { + ExeScriptCommand::Transfer { + from, + to, + args, + progress, + } => { + let mut msg = TransferResource { + from: from.clone(), + to: to.clone(), + args: args.clone(), + progress_config: None, + }; + + if let Some(args) = progress { + msg.forward_progress(args, runtime_cmd.progress_sink()) + } + transfer_service.send(msg).await??; + } + ExeScriptCommand::Deploy { + net, + hosts, + progress, + volumes, + .. + } => { + let volumes = if let Some(v) = &volumes { + v.clone() + .as_volumes() + .iter() + .enumerate() + .filter_map(|(idx, (path, info))| { + if matches!(info, VolumeMount::Host { .. }) { + Some(ContainerVolume { + name: format!("vol-custom-{idx}"), + path: path.clone(), + }) + } else { + None + } + }) + .collect::>() + } else { + Vec::new() + }; + + transfer_service.send(AddVolumes::new(volumes)).await??; + + // TODO: We should pass `task_package` here not in `TransferService` initialization. + let mut msg = DeployImage::default(); + if let Some(args) = progress { + msg.forward_progress(args, runtime_cmd.progress_sink()) + } + + let task_package = transfer_service.send(msg).await??; + runtime + .send(UpdateDeployment { + task_package, + networks: Some(net.clone()), + hosts: Some(hosts.clone()), + ..Default::default() + }) + .await??; + } + _ => (), + } + Ok(()) + } + + async fn post_runtime( + &self, + runtime_cmd: &ExecuteCommand, + runtime: &Addr, + transfer_service: &Addr, + ) -> crate::Result<()> { + if let ExeScriptCommand::Deploy { .. 
} = &runtime_cmd.command { + let mut runtime_mode = RuntimeMode::ProcessPerCommand; + let stdout = self + .send(GetStdOut { + batch_id: runtime_cmd.batch_id.clone(), + idx: runtime_cmd.idx, + }) + .await?; + + if let Some(output) = stdout { + let deployment = deploy::DeployResult::from_bytes(output).map_err(|e| { + log::error!("Deployment failed: {}", e); + Error::CommandError(e.to_string()) + })?; + transfer_service + .send(AddVolumes::new(deployment.vols)) + .await??; + runtime_mode = deployment.start_mode.into(); + } + runtime + .send(UpdateDeployment { + runtime_mode: Some(runtime_mode), + ..Default::default() + }) + .await??; + } + Ok(()) + } +} + +impl Actor for ExeUnit { + type Context = Context; + + fn started(&mut self, ctx: &mut Self::Context) { + let rx = self.events.rx.take().unwrap(); + Self::add_stream(rx, ctx); + + let addr = ctx.address(); + if let Some(activity_id) = &self.ctx.activity_id { + let srv_id = activity::exeunit::bus_id(activity_id); + actix_rpc::bind::(&srv_id, addr.clone().recipient()); + actix_rpc::bind::(&srv_id, addr.clone().recipient()); + + #[cfg(feature = "sgx")] + { + actix_rpc::bind::( + &srv_id, + addr.clone().recipient(), + ); + } + #[cfg(not(feature = "sgx"))] + { + actix_rpc::bind::(&srv_id, addr.clone().recipient()); + actix_rpc::bind::(&srv_id, addr.clone().recipient()); + actix_rpc::bind::(&srv_id, addr.clone().recipient()); + actix_rpc::binds::( + &srv_id, + addr.clone().recipient(), + ); + } + } + + IntervalFunc::new(*DEFAULT_REPORT_INTERVAL, Self::report_usage) + .finish() + .spawn(ctx); + + log::info!("Initializing manifests"); + self.ctx + .supervise + .manifest + .build_validators() + .into_actor(self) + .map(|result, this, ctx| match result { + Ok(validators) => { + this.ctx.supervise.manifest.add_validators(validators); + log::info!("Manifest initialization complete"); + } + Err(e) => { + let err = Error::Other(format!("manifest initialization error: {}", e)); + log::error!("Supervisor is shutting down due to {}", err); + ctx.address().do_send(Shutdown(ShutdownReason::Error(err))); + } + }) + .wait(ctx); + + let addr_ = addr.clone(); + async move { + addr.send(Initialize).await?.map_err(Error::from)?; + addr.send(SetState::from(State::Initialized)).await?; + Ok::<_, Error>(()) + } + .then(|result| async move { + match result { + Ok(_) => log::info!("Supervisor initialized"), + Err(e) => { + let err = Error::Other(format!("initialization error: {}", e)); + log::error!("Supervisor is shutting down due to {}", err); + let _ = addr_.send(Shutdown(ShutdownReason::Error(err))).await; + } + } + }) + .into_actor(self) + .spawn(ctx); + } + + fn stopping(&mut self, _: &mut Self::Context) -> Running { + if self.state.inner.0 == State::Terminated { + return Running::Stop; + } + Running::Continue + } + + fn stopped(&mut self, _: &mut Self::Context) { + self.shutdown_tx.send(()).ok(); + } +} + +#[derive(derivative::Derivative)] +#[derivative(Debug)] +pub struct ExeUnitContext { + pub supervise: Supervision, + pub activity_id: Option, + pub report_url: Option, + pub agreement: Agreement, + pub work_dir: PathBuf, + pub cache_dir: PathBuf, + pub runtime_args: Vec, + pub acl: Acl, + pub credentials: Option, + #[cfg(feature = "sgx")] + #[derivative(Debug = "ignore")] + pub crypto: crate::crypto::Crypto, +} + +impl ExeUnitContext { + pub fn verify_activity_id(&self, activity_id: &str) -> crate::Result<()> { + match &self.activity_id { + Some(act_id) => match act_id == activity_id { + true => Ok(()), + false => Err(Error::RemoteServiceError(format!( + 
"Forbidden! Invalid activity id: {}", + activity_id + ))), + }, + None => Ok(()), + } + } +} + +impl From<&ExeUnitContext> for TransferServiceContext { + fn from(val: &ExeUnitContext) -> Self { + TransferServiceContext { + task_package: val.agreement.task_package.clone(), + deploy_retry: None, + cache_dir: val.cache_dir.clone(), + work_dir: val.work_dir.clone(), + transfer_retry: None, + } + } +} + +pub struct Channel { + pub(crate) tx: mpsc::Sender, + rx: Option>, +} + +impl Default for Channel { + fn default() -> Self { + let (tx, rx) = mpsc::channel(8); + Channel { tx, rx: Some(rx) } + } +} + +pub async fn report(url: S, msg: M) -> bool +where + M: RpcMessage + Unpin + 'static, + S: AsRef, +{ + let url = url.as_ref(); + match ya_service_bus::typed::service(url).send(msg).await { + Err(ya_service_bus::Error::Timeout(msg)) => { + log::warn!("Timed out reporting to {}: {}", url, msg); + true + } + Err(e) => { + log::error!("Error reporting to {}: {:?}", url, e); + false + } + Ok(Err(e)) => { + log::error!("Error response while reporting to {}: {:?}", url, e); + false + } + Ok(Ok(_)) => true, + } +} + +async fn report_usage( + report_url: String, + activity_id: String, + exe_unit: Addr>, + metrics: Addr, +) { + match metrics.send(GetCounters).await { + Ok(resp) => match resp { + Ok(data) => { + let msg = activity::local::SetUsage { + activity_id, + usage: ActivityUsage { + current_usage: Some(data), + timestamp: Utc::now().timestamp(), + }, + timeout: None, + }; + if !report(&report_url, msg).await { + exe_unit.do_send(Shutdown(ShutdownReason::Error(Error::RuntimeError( + format!("Reporting endpoint '{}' is not available", report_url), + )))); + } + } + Err(err) => match err { + CounterError::UsageLimitExceeded(info) => { + log::warn!("Usage limit exceeded: {}", info); + exe_unit.do_send(Shutdown(ShutdownReason::UsageLimitExceeded(info))); + } + error => log::warn!("Unable to retrieve metrics: {:?}", error), + }, + }, + Err(e) => log::warn!("Unable to report activity usage: {:?}", e), + } +} + +impl Handler for ExeUnit { + type Result = Result>; + + fn handle(&mut self, _msg: FinishNotifier, _: &mut Self::Context) -> Self::Result { + Ok(self.shutdown_tx.subscribe()) + } +} + +impl Handler for TransferService { + type Result = ResponseFuture>; + + fn handle(&mut self, _msg: Shutdown, ctx: &mut Self::Context) -> Self::Result { + let addr = ctx.address(); + async move { Ok(addr.send(ya_transfer::transfer::Shutdown {}).await??) }.boxed_local() + } +} + +impl Handler for CountersService { + type Result = ResponseFuture>; + + fn handle(&mut self, _msg: Shutdown, ctx: &mut Self::Context) -> Self::Result { + let addr = ctx.address(); + async move { Ok(addr.send(ya_counters::message::Shutdown {}).await??) 
}.boxed_local() + } +} diff --git a/exe-unit/src/handlers/local.rs b/exe-unit/src/handlers/local.rs index 2437a0851f..7386a37f61 100644 --- a/exe-unit/src/handlers/local.rs +++ b/exe-unit/src/handlers/local.rs @@ -1,3 +1,6 @@ +use actix::prelude::*; +use futures::FutureExt; + use crate::error::Error; use crate::message::*; use crate::runtime::Runtime; @@ -5,9 +8,6 @@ use crate::service::ServiceAddr; use crate::state::State; use crate::{report, ExeUnit}; -use actix::prelude::*; -use futures::FutureExt; - use ya_client_model::activity; use ya_core_model::activity::local::SetState as SetActivityState; use ya_counters::message::SetCounter; diff --git a/exe-unit/src/handlers/rpc.rs b/exe-unit/src/handlers/rpc.rs index db06fd93f0..a343b9573c 100644 --- a/exe-unit/src/handlers/rpc.rs +++ b/exe-unit/src/handlers/rpc.rs @@ -23,6 +23,7 @@ impl Handler> for ExeUnit { type Result = as Message>::Result; fn handle(&mut self, msg: RpcEnvelope, ctx: &mut Self::Context) -> Self::Result { + log::debug!("Received Exec message: {:?}", msg.as_ref()); self.ctx.verify_activity_id(&msg.activity_id)?; let batch_id = msg.batch_id.clone(); diff --git a/exe-unit/src/lib.rs b/exe-unit/src/lib.rs index 90c63adff1..cc4ee1520f 100644 --- a/exe-unit/src/lib.rs +++ b/exe-unit/src/lib.rs @@ -1,36 +1,25 @@ #[macro_use] extern crate derive_more; +use actix::prelude::*; +use anyhow::{bail, Context}; +use std::convert::TryFrom; use std::path::PathBuf; -use std::time::Duration; +use structopt::clap; -use actix::prelude::*; -use chrono::Utc; -use futures::channel::{mpsc, oneshot}; -use futures::{FutureExt, SinkExt}; - -use ya_agreement_utils::agreement::OfferTemplate; -use ya_client_model::activity::{ - activity_state::StatePair, ActivityUsage, CommandOutput, ExeScriptCommand, State, -}; +use ya_client_model::activity::ExeScriptCommand; use ya_core_model::activity; -use ya_core_model::activity::local::Credentials; -use ya_counters::error::CounterError; -use ya_counters::message::GetCounters; -use ya_counters::service::CountersService; -use ya_runtime_api::deploy; -use ya_service_bus::{actix_rpc, RpcEndpoint, RpcMessage}; -use ya_transfer::transfer::{ - AddVolumes, DeployImage, TransferResource, TransferService, TransferServiceContext, -}; - -use crate::acl::Acl; +use ya_service_bus::RpcEnvelope; +use ya_transfer::transfer::TransferService; +use ya_utils_path::normalize_path; + use crate::agreement::Agreement; use crate::error::Error; -use crate::message::*; -use crate::runtime::*; -use crate::service::{ServiceAddr, ServiceControl}; -use crate::state::{ExeUnitState, StateError, Supervision}; +use crate::manifest::ManifestContext; +use crate::message::{GetState, GetStateResponse, Register}; +use crate::runtime::process::RuntimeProcess; +use crate::service::signal::SignalMonitor; +use crate::state::Supervision; mod acl; pub mod agreement; @@ -49,540 +38,342 @@ pub mod service; pub mod state; mod dns; +mod exe_unit; + +pub use exe_unit::{report, ExeUnit, ExeUnitContext, FinishNotifier, RuntimeRef}; + pub type Result = std::result::Result; -lazy_static::lazy_static! 
{ - static ref DEFAULT_REPORT_INTERVAL: Duration = Duration::from_secs(1u64); +#[derive(structopt::StructOpt, Debug)] +#[structopt(global_setting = clap::AppSettings::ColoredHelp)] +#[structopt(version = ya_compile_time_utils::version_describe!())] +pub struct Cli { + /// Runtime binary path + #[structopt(long, short)] + pub binary: PathBuf, + #[structopt(flatten)] + pub supervise: SuperviseCli, + /// Additional runtime arguments + #[structopt( + long, + short, + set = clap::ArgSettings::Global, + number_of_values = 1, + )] + pub runtime_arg: Vec, + /// Enclave secret key used in secure communication + #[structopt( + long, + env = "EXE_UNIT_SEC_KEY", + hide_env_values = true, + set = clap::ArgSettings::Global, + )] + #[allow(dead_code)] + pub sec_key: Option, + /// Requestor public key used in secure communication + #[structopt( + long, + env = "EXE_UNIT_REQUESTOR_PUB_KEY", + hide_env_values = true, + set = clap::ArgSettings::Global, + )] + #[allow(dead_code)] + pub requestor_pub_key: Option, + #[structopt(subcommand)] + pub command: Command, } -pub struct ExeUnit { - ctx: ExeUnitContext, - state: ExeUnitState, - events: Channel, - runtime: Addr, - counters: Addr, - transfers: Addr, - services: Vec>, - shutdown_tx: Option>>, +#[derive(structopt::StructOpt, Debug, Clone)] +pub struct SuperviseCli { + /// Hardware resources are handled by the runtime + #[structopt( + long = "runtime-managed-hardware", + alias = "cap-handoff", + parse(from_flag = std::ops::Not::not), + set = clap::ArgSettings::Global, + )] + pub hardware: bool, + /// Images are handled by the runtime + #[structopt( + long = "runtime-managed-image", + parse(from_flag = std::ops::Not::not), + set = clap::ArgSettings::Global, + )] + pub image: bool, } -impl ExeUnit { - pub fn new( - shutdown_tx: oneshot::Sender>, - ctx: ExeUnitContext, - counters: Addr, - transfers: Addr, - runtime: Addr, - ) -> Self { - ExeUnit { - ctx, - state: ExeUnitState::default(), - events: Channel::default(), - runtime: runtime.clone(), - counters: counters.clone(), - transfers: transfers.clone(), - services: vec![ - Box::new(ServiceAddr::new(counters)), - Box::new(ServiceAddr::new(transfers)), - Box::new(ServiceAddr::new(runtime)), - ], - shutdown_tx: Some(shutdown_tx), - } - } - - pub fn offer_template(binary: PathBuf, args: Vec) -> Result { - use crate::runtime::process::RuntimeProcess; - - let runtime_template = RuntimeProcess::offer_template(binary, args)?; - let supervisor_template = OfferTemplate::new(serde_json::json!({ - "golem.com.usage.vector": service::counters::usage_vector(), - "golem.activity.caps.transfer.protocol": TransferService::schemes(), - })); - - Ok(supervisor_template.patch(runtime_template)) - } - - pub fn test(binary: PathBuf, args: Vec) -> Result { - use crate::runtime::process::RuntimeProcess; - RuntimeProcess::test(binary, args) - } - - fn report_usage(&mut self, context: &mut Context) { - if self.ctx.activity_id.is_none() || self.ctx.report_url.is_none() { - return; - } - let fut = report_usage( - self.ctx.report_url.clone().unwrap(), - self.ctx.activity_id.clone().unwrap(), - context.address(), - self.counters.clone(), - ); - context.spawn(fut.into_actor(self)); - } - - async fn stop_runtime(runtime: Addr, reason: ShutdownReason) { - if let Err(e) = runtime - .send(Shutdown(reason)) - .timeout(Duration::from_secs(5u64)) - .await - { - log::warn!("Unable to stop the runtime: {:?}", e); - } - } +#[derive(structopt::StructOpt, Debug)] +#[structopt(global_setting = clap::AppSettings::DeriveDisplayOrder)] +pub enum Command { + 
/// Execute commands from file + FromFile { + /// ExeUnit daemon GSB URL + #[structopt(long)] + report_url: Option, + /// ExeUnit service ID + #[structopt(long)] + service_id: Option, + /// Command file path + input: PathBuf, + #[structopt(flatten)] + args: RunArgs, + }, + /// Bind to Service Bus + ServiceBus { + /// ExeUnit service ID + service_id: String, + /// ExeUnit daemon GSB URL + report_url: String, + #[structopt(flatten)] + args: RunArgs, + }, + /// Print an offer template in JSON format + OfferTemplate, + /// Run runtime's test command + Test, } -#[derive(Clone)] -struct RuntimeRef(Addr>); - -impl RuntimeRef { - fn from_ctx(ctx: &Context>) -> Self { - RuntimeRef(ctx.address()) - } +#[derive(structopt::StructOpt, Debug, Clone)] +pub struct RunArgs { + /// Agreement file path + #[structopt(long, short)] + pub agreement: PathBuf, + /// Working directory + #[structopt(long, short)] + pub work_dir: PathBuf, + /// Common cache directory + #[structopt(long, short)] + pub cache_dir: PathBuf, } -impl std::ops::Deref for RuntimeRef { - type Target = Addr>; - - fn deref(&self) -> &Self::Target { - &self.0 +fn create_path(path: &PathBuf) -> anyhow::Result { + if let Err(error) = std::fs::create_dir_all(path) { + match &error.kind() { + std::io::ErrorKind::AlreadyExists => (), + _ => bail!("Can't create directory: {}, {}", path.display(), error), + } } + Ok(normalize_path(path)?) } -impl RuntimeRef { - async fn exec( - self, - exec: activity::Exec, - runtime: Addr, - transfers: Addr, - mut events: mpsc::Sender, - mut control: oneshot::Receiver<()>, - ) { - let batch_id = exec.batch_id.clone(); - for (idx, command) in exec.exe_script.into_iter().enumerate() { - if let Ok(Some(_)) = control.try_recv() { - log::warn!("Batch {} execution aborted", batch_id); - break; - } - - let runtime_cmd = ExecuteCommand { - batch_id: batch_id.clone(), - command: command.clone(), - tx: events.clone(), - idx, - }; - - let evt = RuntimeEvent::started(batch_id.clone(), idx, command.clone()); - if let Err(e) = events.send(evt).await { - log::error!("Unable to report event: {:?}", e); - } - - let (return_code, message) = match { - if runtime_cmd.stateless() { - self.exec_stateless(&runtime_cmd).await - } else { - self.exec_stateful(runtime_cmd, &runtime, &transfers).await - } - } { - Ok(_) => (0, None), - Err(ref err) => match err { - Error::CommandExitCodeError(c) => (*c, Some(err.to_string())), - _ => (-1, Some(err.to_string())), - }, - }; - - let evt = RuntimeEvent::finished(batch_id.clone(), idx, return_code, message.clone()); - if let Err(e) = events.send(evt).await { - log::error!("Unable to report event: {:?}", e); - } - - if return_code != 0 { - let message = message.unwrap_or_else(|| "reason unspecified".into()); - log::warn!("Batch {} execution interrupted: {}", batch_id, message); - break; - } +#[cfg(feature = "sgx")] +fn init_crypto( + sec_key: Option, + req_key: Option, +) -> anyhow::Result { + let req_key = req_key.ok_or_else(|| anyhow::anyhow!("Missing requestor public key"))?; + match sec_key { + Some(key) => Ok(crate::crypto::Crypto::try_with_keys(key, req_key)?), + None => { + log::info!("Generating a new key pair..."); + Ok(crate::crypto::Crypto::try_new(req_key)?) 
} } +} - async fn exec_stateless(&self, runtime_cmd: &ExecuteCommand) -> Result<()> { - match runtime_cmd.command { - ExeScriptCommand::Sign {} => { - let batch_id = runtime_cmd.batch_id.clone(); - let signature = self.send(SignExeScript { batch_id }).await??; - let stdout = serde_json::to_string(&signature)?; - - runtime_cmd - .tx - .clone() - .send(RuntimeEvent::stdout( - runtime_cmd.batch_id.clone(), - runtime_cmd.idx, - CommandOutput::Bin(stdout.into_bytes()), - )) - .await - .map_err(|e| Error::runtime(format!("Unable to send stdout event: {:?}", e)))?; - } - ExeScriptCommand::Terminate {} => { - log::debug!("Terminating running ExeScripts"); - let exclude_batches = vec![runtime_cmd.batch_id.clone()]; - self.send(Stop { exclude_batches }).await??; - self.send(SetState::from(State::Initialized)).await?; +pub async fn send_script( + exe_unit: Addr>, + activity_id: Option, + exe_script: Vec, +) -> anyhow::Result { + use crate::state::{State, StatePair}; + use std::time::Duration; + + let delay = Duration::from_secs_f32(0.5); + loop { + match exe_unit.send(GetState).await { + Ok(GetStateResponse(StatePair(State::Initialized, None))) => break, + Ok(GetStateResponse(StatePair(State::Terminated, _))) + | Ok(GetStateResponse(StatePair(_, Some(State::Terminated)))) + | Err(_) => { + log::error!("ExeUnit has terminated"); + bail!("ExeUnit has terminated"); } - _ => (), + _ => tokio::time::sleep(delay).await, } - Ok(()) } - async fn exec_stateful( - &self, - runtime_cmd: ExecuteCommand, - runtime: &Addr, - transfer_service: &Addr, - ) -> Result<()> { - let state = self.send(GetState {}).await?.0; - let state_pre = match (&state.0, &state.1) { - (_, Some(_)) => { - return Err(StateError::Busy(state).into()); - } - (State::New, _) | (State::Terminated, _) => { - return Err(StateError::InvalidState(state).into()); - } - (State::Initialized, _) => match &runtime_cmd.command { - ExeScriptCommand::Deploy { .. } => { - StatePair(State::Initialized, Some(State::Deployed)) - } - _ => return Err(StateError::InvalidState(state).into()), - }, - (State::Deployed, _) => match &runtime_cmd.command { - ExeScriptCommand::Start { .. } => StatePair(State::Deployed, Some(State::Ready)), - _ => return Err(StateError::InvalidState(state).into()), - }, - (s, _) => match &runtime_cmd.command { - ExeScriptCommand::Deploy { .. } | ExeScriptCommand::Start { .. 
} => { - return Err(StateError::InvalidState(state).into()); - } - _ => StatePair(*s, Some(*s)), - }, - }; - self.send(SetState::from(state_pre)).await?; - - log::info!("Executing command: {:?}", runtime_cmd.command); - - let result = async { - self.pre_runtime(&runtime_cmd, runtime, transfer_service) - .await?; - - let exit_code = runtime.send(runtime_cmd.clone()).await??; - if exit_code != 0 { - return Err(Error::CommandExitCodeError(exit_code)); - } + log::debug!("Executing commands: {:?}", exe_script); - self.post_runtime(&runtime_cmd, runtime, transfer_service) - .await?; + let batch_id = hex::encode(rand::random::<[u8; 16]>()); + let msg = activity::Exec { + activity_id: activity_id.unwrap_or_default(), + batch_id: batch_id.clone(), + exe_script, + timeout: None, + }; - Ok(()) - } - .await; - - let state_cur = self.send(GetState {}).await?.0; - if state_cur != state_pre { - return Err(StateError::UnexpectedState { - current: state_cur, - expected: state_pre, - } - .into()); - } + exe_unit + .send(RpcEnvelope::with_caller(String::new(), msg)) + .await??; + Ok(batch_id) +} - self.send(SetState::from(state_pre.1.unwrap())).await?; - result - } +// We need this mut for conditional compilation for sgx +#[allow(unused_mut)] +pub async fn run(mut cli: Cli) -> anyhow::Result<()> { + log::debug!("CLI args: {:?}", cli); - async fn pre_runtime( - &self, - runtime_cmd: &ExecuteCommand, - runtime: &Addr, - transfer_service: &Addr, - ) -> Result<()> { - match &runtime_cmd.command { - ExeScriptCommand::Transfer { from, to, args } => { - let msg = TransferResource { - from: from.clone(), - to: to.clone(), - args: args.clone(), - }; - transfer_service.send(msg).await??; - } - ExeScriptCommand::Deploy { net, hosts } => { - // TODO: We should pass `task_package` here not in `TransferService` initialization. - let task_package = transfer_service - .send(DeployImage { task_package: None }) - .await??; - runtime - .send(UpdateDeployment { - task_package, - networks: Some(net.clone()), - hosts: Some(hosts.clone()), - ..Default::default() - }) - .await??; - } - _ => (), - } - Ok(()) + if !cli.binary.exists() { + bail!("Runtime binary does not exist: {}", cli.binary.display()); } - async fn post_runtime( - &self, - runtime_cmd: &ExecuteCommand, - runtime: &Addr, - transfer_service: &Addr, - ) -> Result<()> { - if let ExeScriptCommand::Deploy { .. 
} = &runtime_cmd.command { - let mut runtime_mode = RuntimeMode::ProcessPerCommand; - let stdout = self - .send(GetStdOut { - batch_id: runtime_cmd.batch_id.clone(), - idx: runtime_cmd.idx, - }) - .await?; - - if let Some(output) = stdout { - let deployment = deploy::DeployResult::from_bytes(output).map_err(|e| { - log::error!("Deployment failed: {}", e); - Error::CommandError(e.to_string()) - })?; - transfer_service - .send(AddVolumes::new(deployment.vols)) - .await??; - runtime_mode = deployment.start_mode.into(); - } - runtime - .send(UpdateDeployment { - runtime_mode: Some(runtime_mode), - ..Default::default() - }) - .await??; + let mut commands = None; + let ctx_activity_id; + let ctx_report_url; + + let args = match cli.command { + Command::FromFile { + args, + service_id, + report_url, + input, + } => { + let contents = std::fs::read_to_string(&input).map_err(|e| { + anyhow::anyhow!("Cannot read commands from file {}: {e}", input.display()) + })?; + let contents = serde_json::from_str(&contents).map_err(|e| { + anyhow::anyhow!( + "Cannot deserialize commands from file {}: {e}", + input.display(), + ) + })?; + ctx_activity_id = service_id.clone(); + ctx_report_url = report_url.clone(); + commands = Some(contents); + args } - Ok(()) - } -} - -impl Actor for ExeUnit { - type Context = Context; - - fn started(&mut self, ctx: &mut Self::Context) { - let rx = self.events.rx.take().unwrap(); - Self::add_stream(rx, ctx); - - let addr = ctx.address(); - if let Some(activity_id) = &self.ctx.activity_id { - let srv_id = activity::exeunit::bus_id(activity_id); - actix_rpc::bind::(&srv_id, addr.clone().recipient()); - actix_rpc::bind::(&srv_id, addr.clone().recipient()); - - #[cfg(feature = "sgx")] - { - actix_rpc::bind::( - &srv_id, - addr.clone().recipient(), - ); - } - #[cfg(not(feature = "sgx"))] - { - actix_rpc::bind::(&srv_id, addr.clone().recipient()); - actix_rpc::bind::(&srv_id, addr.clone().recipient()); - actix_rpc::bind::(&srv_id, addr.clone().recipient()); - actix_rpc::binds::( - &srv_id, - addr.clone().recipient(), - ); - } + Command::ServiceBus { + args, + service_id, + report_url, + } => { + ctx_activity_id = Some(service_id.clone()); + ctx_report_url = Some(report_url.clone()); + args } - - IntervalFunc::new(*DEFAULT_REPORT_INTERVAL, Self::report_usage) - .finish() - .spawn(ctx); - - log::info!("Initializing manifests"); - self.ctx - .supervise - .manifest - .build_validators() - .into_actor(self) - .map(|result, this, ctx| match result { - Ok(validators) => { - this.ctx.supervise.manifest.add_validators(validators); - log::info!("Manifest initialization complete"); - } - Err(e) => { - let err = Error::Other(format!("manifest initialization error: {}", e)); - log::error!("Supervisor is shutting down due to {}", err); - ctx.address().do_send(Shutdown(ShutdownReason::Error(err))); - } - }) - .wait(ctx); - - let addr_ = addr.clone(); - async move { - addr.send(Initialize).await?.map_err(Error::from)?; - addr.send(SetState::from(State::Initialized)).await?; - Ok::<_, Error>(()) + Command::OfferTemplate => { + let args = cli.runtime_arg.clone(); + let offer_template = ExeUnit::::offer_template(cli.binary, args)?; + println!("{}", serde_json::to_string(&offer_template)?); + return Ok(()); } - .then(|result| async move { - match result { - Ok(_) => log::info!("Supervisor initialized"), - Err(e) => { - let err = Error::Other(format!("initialization error: {}", e)); - log::error!("Supervisor is shutting down due to {}", err); - let _ = 
addr_.send(Shutdown(ShutdownReason::Error(err))).await; - } + Command::Test => { + let args = cli.runtime_arg.clone(); + let output = ExeUnit::::test(cli.binary, args)?; + println!("{}", String::from_utf8_lossy(&output.stdout)); + eprintln!("{}", String::from_utf8_lossy(&output.stderr)); + if !output.status.success() { + bail!("Test failed"); } - }) - .into_actor(self) - .spawn(ctx); - } - - fn stopping(&mut self, _: &mut Self::Context) -> Running { - if self.state.inner.0 == State::Terminated { - return Running::Stop; + return Ok(()); } - Running::Continue + }; + + let exe_unit = exe_unit(ExeUnitConfig { + report_url: ctx_report_url, + service_id: ctx_activity_id.clone(), + runtime_args: cli.runtime_arg, + binary: cli.binary, + supervise: cli.supervise, + sec_key: cli.sec_key, + args, + requestor_pub_key: cli.requestor_pub_key, + }) + .await?; + + if let Some(exe_script) = commands { + tokio::task::spawn(send_script(exe_unit.clone(), ctx_activity_id, exe_script)); } - fn stopped(&mut self, _: &mut Self::Context) { - if let Some(tx) = self.shutdown_tx.take() { - let _ = tx.send(Ok(())); - } - } + exe_unit.send(FinishNotifier {}).await??.recv().await?; + Ok(()) } -#[derive(derivative::Derivative)] -#[derivative(Debug)] -pub struct ExeUnitContext { - pub supervise: Supervision, - pub activity_id: Option, - pub report_url: Option, - pub agreement: Agreement, - pub work_dir: PathBuf, - pub cache_dir: PathBuf, +#[derive(Debug, Clone)] +pub struct ExeUnitConfig { + pub args: RunArgs, + pub binary: PathBuf, pub runtime_args: Vec, - pub acl: Acl, - pub credentials: Option, - #[cfg(feature = "sgx")] - #[derivative(Debug = "ignore")] - pub crypto: crypto::Crypto, -} - -impl ExeUnitContext { - pub fn verify_activity_id(&self, activity_id: &str) -> Result<()> { - match &self.activity_id { - Some(act_id) => match act_id == activity_id { - true => Ok(()), - false => Err(Error::RemoteServiceError(format!( - "Forbidden! 
Invalid activity id: {}", - activity_id - ))), - }, - None => Ok(()), - } - } -} - -impl From<&ExeUnitContext> for TransferServiceContext { - fn from(val: &ExeUnitContext) -> Self { - TransferServiceContext { - task_package: val.agreement.task_package.clone(), - deploy_retry: None, - cache_dir: val.cache_dir.clone(), - work_dir: val.work_dir.clone(), - transfer_retry: None, - } - } -} - -struct Channel { - tx: mpsc::Sender, - rx: Option>, -} + pub service_id: Option, + pub report_url: Option, + pub supervise: SuperviseCli, -impl Default for Channel { - fn default() -> Self { - let (tx, rx) = mpsc::channel(8); - Channel { tx, rx: Some(rx) } - } + #[allow(dead_code)] + pub sec_key: Option, + #[allow(dead_code)] + pub requestor_pub_key: Option, } -pub(crate) async fn report(url: S, msg: M) -> bool -where - M: RpcMessage + Unpin + 'static, - S: AsRef, -{ - let url = url.as_ref(); - match ya_service_bus::typed::service(url).send(msg).await { - Err(ya_service_bus::Error::Timeout(msg)) => { - log::warn!("Timed out reporting to {}: {}", url, msg); - true - } - Err(e) => { - log::error!("Error reporting to {}: {:?}", url, e); - false - } - Ok(Err(e)) => { - log::error!("Error response while reporting to {}: {:?}", url, e); - false - } - Ok(Ok(_)) => true, +// Mut is necessary in case of sgx compilation :((((( +#[allow(unused_mut)] +pub async fn exe_unit(mut config: ExeUnitConfig) -> anyhow::Result>> { + let args = config.args; + if !args.agreement.exists() { + bail!( + "Agreement file does not exist: {}", + args.agreement.display() + ); } -} - -async fn report_usage( - report_url: String, - activity_id: String, - exe_unit: Addr>, - counters: Addr, -) { - match counters.send(GetCounters).await { - Ok(resp) => match resp { - Ok(data) => { - let msg = activity::local::SetUsage { - activity_id, - usage: ActivityUsage { - current_usage: Some(data), - timestamp: Utc::now().timestamp(), - }, - timeout: None, - }; - if !report(&report_url, msg).await { - exe_unit.do_send(Shutdown(ShutdownReason::Error(Error::RuntimeError( - format!("Reporting endpoint '{}' is not available", report_url), - )))); - } - } - Err(err) => match err { - CounterError::UsageLimitExceeded(info) => { - log::warn!("Usage limit exceeded: {}", info); - exe_unit.do_send(Shutdown(ShutdownReason::UsageLimitExceeded(info))); - } - error => log::warn!("Unable to retrieve counters: {:?}", error), - }, + let work_dir = create_path(&args.work_dir).map_err(|e| { + anyhow::anyhow!( + "Cannot create the working directory {}: {e}", + args.work_dir.display(), + ) + })?; + let cache_dir = create_path(&args.cache_dir).map_err(|e| { + anyhow::anyhow!( + "Cannot create the cache directory {}: {e}", + args.work_dir.display(), + ) + })?; + let mut agreement = Agreement::try_from(&args.agreement).map_err(|e| { + anyhow::anyhow!( + "Error parsing the agreement from {}: {e}", + args.agreement.display(), + ) + })?; + + log::info!("Attempting to read app manifest .."); + + let manifest_ctx = + ManifestContext::try_new(&agreement.inner).context("Invalid app manifest")?; + agreement.task_package = manifest_ctx + .payload() + .or_else(|| agreement.task_package.take()); + + log::info!("Manifest-enabled features: {:?}", manifest_ctx.features()); + log::info!("User-provided payload: {:?}", agreement.task_package); + + let ctx = ExeUnitContext { + supervise: Supervision { + hardware: config.supervise.hardware, + image: config.supervise.image, + manifest: manifest_ctx, }, - Err(e) => log::warn!("Unable to report activity usage: {:?}", e), - } -} - -impl Handler for 
TransferService { - type Result = ResponseFuture>; - - fn handle(&mut self, _msg: Shutdown, ctx: &mut Self::Context) -> Self::Result { - let addr = ctx.address(); - async move { Ok(addr.send(ya_transfer::transfer::Shutdown {}).await??) }.boxed_local() - } -} - -impl Handler for CountersService { - type Result = ResponseFuture>; - - fn handle(&mut self, _msg: Shutdown, ctx: &mut Self::Context) -> Self::Result { - let addr = ctx.address(); - async move { Ok(addr.send(ya_counters::message::Shutdown {}).await??) }.boxed_local() - } + activity_id: config.service_id.clone(), + report_url: config.report_url, + agreement, + work_dir, + cache_dir, + runtime_args: config.runtime_args, + acl: Default::default(), + credentials: None, + #[cfg(feature = "sgx")] + crypto: init_crypto( + config.sec_key.replace("".into()), + config.requestor_pub_key.clone(), + )?, + }; + + log::debug!("ExeUnitContext args: {:?}", ctx); + + let counters = service::counters::build(&ctx, Some(1000), ctx.supervise.hardware).start(); + let transfers = TransferService::new((&ctx).into()).start(); + let runtime = RuntimeProcess::new(&ctx, config.binary).start(); + let exe_unit = ExeUnit::new(ctx, counters, transfers, runtime).start(); + let signals = SignalMonitor::new(exe_unit.clone()).start(); + exe_unit.send(Register(signals)).await?; + + Ok(exe_unit) } diff --git a/exe-unit/src/logger.rs b/exe-unit/src/logger.rs index d9cd42565d..9d36cd3473 100644 --- a/exe-unit/src/logger.rs +++ b/exe-unit/src/logger.rs @@ -6,7 +6,7 @@ const ENV_VAR_LOG_DIR: &str = "EXE_UNIT_LOG_DIR"; const ENV_VAR_FILE_LOG_LEVEL: &str = "EXE_UNIT_FILE_LOG_LEVEL"; const DEFAULT_LOG_LEVEL: &str = "info"; -const DEFAULT_FILE_LOG_LEVEL: &str = "debug"; +const DEFAULT_FILE_LOG_LEVEL: &str = "debug,tokio_util=off,h2=info"; const DEFAULT_LOG_DIR: &str = "logs"; const DEFAULT_LOG_FORMAT: &str = "%Y-%m-%dT%H:%M:%S%.3f%z"; diff --git a/exe-unit/src/manifest.rs b/exe-unit/src/manifest.rs index 300bcd4e56..8c1df47382 100644 --- a/exe-unit/src/manifest.rs +++ b/exe-unit/src/manifest.rs @@ -99,7 +99,7 @@ impl ManifestContext { } pub fn add_validators(&mut self, iter: impl IntoIterator)>) { - self.validators.write().unwrap().extend(iter.into_iter()); + self.validators.write().unwrap().extend(iter); } pub fn validator(&self) -> Option { @@ -503,6 +503,10 @@ mod tests { ExeScriptCommand::Deploy { net: Default::default(), hosts: Default::default(), + hostname: None, + volumes: Default::default(), + env: Default::default(), + progress: None, }, ExeScriptCommand::Start { args: Default::default(), @@ -657,6 +661,7 @@ mod tests { from: "/src/0x0add".to_string(), to: "/dst/0x0add".to_string(), args: Default::default(), + progress: None, }]; let validator: ScriptValidator = r#"{ diff --git a/exe-unit/src/message.rs b/exe-unit/src/message.rs index bf2bd473e2..d6125b29b3 100644 --- a/exe-unit/src/message.rs +++ b/exe-unit/src/message.rs @@ -5,6 +5,7 @@ use crate::Result; use actix::prelude::*; use futures::channel::mpsc; +use futures::{Sink, SinkExt}; use serde::{Deserialize, Serialize}; use std::collections::HashMap; use std::path::PathBuf; @@ -12,7 +13,9 @@ use std::path::PathBuf; use ya_client_model::activity; use ya_client_model::activity::activity_state::{State, StatePair}; use ya_client_model::activity::exe_script_command::Network; -use ya_client_model::activity::{CommandOutput, ExeScriptCommand, ExeScriptCommandResult}; +use ya_client_model::activity::{ + CommandOutput, CommandProgress, ExeScriptCommand, ExeScriptCommandResult, +}; #[derive(Clone, Debug, PartialEq, Eq, 
Serialize, Deserialize, Message)] #[rtype(result = "GetStateResponse")] @@ -99,8 +102,19 @@ impl ExecuteCommand { }, ) } + + pub fn progress_sink(&self) -> impl Sink { + let CommandContext { batch_id, idx, .. } = self.clone().split().1; + self.tx.clone().with(move |item| { + let batch_id = batch_id.clone(); + futures::future::ok(RuntimeEvent::Process(activity::RuntimeEvent::progress( + batch_id, idx, item, + ))) + }) + } } +#[allow(clippy::large_enum_variant)] #[derive(Clone, Debug)] pub enum RuntimeEvent { Process(activity::RuntimeEvent), diff --git a/exe-unit/src/network/inet.rs b/exe-unit/src/network/inet.rs index 3a429c77d4..243a8478d7 100644 --- a/exe-unit/src/network/inet.rs +++ b/exe-unit/src/network/inet.rs @@ -1047,6 +1047,7 @@ impl Unpin for TransportReceiver {} trait TransportKeyExt { fn proxy_key(self) -> Result; + #[allow(dead_code)] fn proxy_key_mirror(self) -> Result where Self: Sized, diff --git a/exe-unit/src/notify.rs b/exe-unit/src/notify.rs index 5868f03129..720aaadb2c 100644 --- a/exe-unit/src/notify.rs +++ b/exe-unit/src/notify.rs @@ -33,7 +33,7 @@ impl Notify { impl Default for Notify { fn default() -> Self { Notify { - uid: usize::max_value(), + uid: usize::MAX, when: None, last: None, state: Arc::new(Mutex::new(NotifyState::default())), diff --git a/exe-unit/src/runtime/process.rs b/exe-unit/src/runtime/process.rs index 7c0fd1c9f7..139a5f396f 100644 --- a/exe-unit/src/runtime/process.rs +++ b/exe-unit/src/runtime/process.rs @@ -205,7 +205,23 @@ impl RuntimeProcess { let (cmd, ctx) = cmd.split(); match cmd { - ExeScriptCommand::Deploy { .. } => rt_args.args(["deploy", "--"]), + ExeScriptCommand::Deploy { + volumes, hostname, .. + } => { + if let Some(volumes) = volumes { + let vol_override_json = serde_json::to_string(&volumes.as_volumes()) + .expect("failed to serialize volume info"); + + rt_args.arg("--volume-override"); + rt_args.arg(vol_override_json); + } + + if let Some(hostname) = hostname { + rt_args.args(["--hostname", hostname.as_str()]); + } + + rt_args.args(["deploy", "--"]) + } ExeScriptCommand::Start { args } => rt_args.args(["start", "--"]).args(args), ExeScriptCommand::Run { entry_point, args, .. 
diff --git a/exe-unit/src/state.rs b/exe-unit/src/state.rs index 5729a1416f..e0e59c80ce 100644 --- a/exe-unit/src/state.rs +++ b/exe-unit/src/state.rs @@ -164,6 +164,7 @@ impl Batch { ..event }) } + RuntimeEventKind::Progress(_) => Some(event), }; if let Some(evt) = stream_event { diff --git a/exe-unit/tests/resources/agreement.json b/exe-unit/tests/resources/agreement.json new file mode 100644 index 0000000000..427dbee17f --- /dev/null +++ b/exe-unit/tests/resources/agreement.json @@ -0,0 +1,80 @@ +{ + "agreementId": "0ec929f5acc8f98a47ab72d61a2c2f343d45d8438d3aa4ccdc84e717c219e185", + "proposedSignature": "NoSignature", + "state": "Pending", + "timestamp": "2022-05-22T10:41:42.564784259Z", + "validTo": "2022-05-22T11:41:42.562457Z", + + "offer": { + "properties": { + "golem.activity.caps.transfer.protocol": [ + "gftp", + "https", + "http" + ], + "golem.activity.caps.transfer.report-progress": true, + "golem.com.payment.debit-notes.accept-timeout?": 240, + "golem.com.payment.platform.erc20-goerli-tglm.address": "0x95369fc6fd02afeca110b9c32a21fb8ad899ee0a", + "golem.com.pricing.model": "linear", + "golem.com.pricing.model.linear.coeffs": [ + 0.001388888888888889, + 0.0002777777777777778, + 0.0 + ], + "golem.com.scheme": "payu", + "golem.com.scheme.payu.debit-note.interval-sec?": 120, + "golem.com.scheme.payu.payment-timeout-sec?": 120, + "golem.com.usage.vector": [ + "golem.usage.cpu_sec", + "golem.usage.duration_sec" + ], + "golem.inf.cpu.architecture": "x86_64", + "golem.inf.cpu.brand": "Intel(R) Core(TM) i7-9750H CPU @ 2.60GHz", + "golem.inf.cpu.capabilities": [], + "golem.inf.cpu.cores": 6, + "golem.inf.cpu.model": "Stepping 10 Family 6 Model 302", + "golem.inf.cpu.threads": 11, + "golem.inf.cpu.vendor": "GenuineIntel", + "golem.inf.mem.gib": 28.0, + "golem.inf.storage.gib": 10.188121795654297, + "golem.node.id.name": "mock-provider", + "golem.node.net.is-public": true, + "golem.runtime.capabilities": [], + "golem.runtime.name": "ya-mock-runtime", + "golem.runtime.version": "0.1.0", + "golem.srv.caps.multi-activity": true, + "golem.srv.caps.payload-manifest": true + }, + "constraints": "(&\n (golem.srv.comp.expiration>1705586871777)\n)", + "offerId": "afce49b1ea5b45db91bdd6e5481479f9-9095fca9dea0a91ce95cf994125b33cdd838fcc963a1106f2be9e4b5b65a52f0", + "providerId": "0x86a269498fb5270f20bdc6fdcf6039122b0d3b23", + "timestamp": "2022-05-22T10:41:42.564784259Z" + }, + + "demand": { + "constraints": "(&(golem.com.payment.platform.erc20-goerli-tglm.address=*)\n\t(golem.com.pricing.model=linear)\n\t(&(golem.inf.mem.gib>=0.5)\n\t(golem.inf.storage.gib>=2.0)\n\t(golem.inf.cpu.threads>=1)\n\t(golem.runtime.name=ya-mock-runtime)))", + "demandId": "773035fc685c46da8e61473ac2a2568e-3f3eb86d6ef9a01708d0f57d0b19cc69fd74422150c120e33cc1b5f4a1a12b96", + "properties": { + "golem": { + "com.payment": { + "chosen-platform": "erc20-goerli-tglm", + "debit-notes.accept-timeout?": 240, + "platform.erc20-goerli-tglm.address": "0xa5ad3f81e283983b8e9705b2e31d0c138bb2b1b7" + }, + "node": {}, + "srv": { + "caps.multi-activity": true, + "comp": { + "expiration": 1653216996555, + "task_package": "hash://sha3:22e08b990b6c6685a7e80ecd9a1adb52561a7d9fe9e69b915269da229be6c1ad69dea4ff8a77dc2c4973558da9150909a2be4121b1cbe1ddb04630c1f75aad4f:http://127.0.0.1:8001/image-1", + "vm": { + "package_format": "gvmkit-squash" + } + } + } + } + }, + "requestorId": "0xa5ad3f81e283983b8e9705b2e31d0c138bb2b1b7", + "timestamp": "2022-05-22T10:41:42.564784259Z" + } +} diff --git a/exe-unit/tests/resources/agreement.template.json 
b/exe-unit/tests/resources/agreement.template.json new file mode 100644 index 0000000000..a3488f8354 --- /dev/null +++ b/exe-unit/tests/resources/agreement.template.json @@ -0,0 +1,80 @@ +{ + "agreementId": "0ec929f5acc8f98a47ab72d61a2c2f343d45d8438d3aa4ccdc84e717c219e185", + "proposedSignature": "NoSignature", + "state": "Pending", + "timestamp": "2022-05-22T10:41:42.564784259Z", + "validTo": "2022-05-22T11:41:42.562457Z", + + "offer": { + "properties": { + "golem.activity.caps.transfer.protocol": [ + "gftp", + "https", + "http" + ], + "golem.activity.caps.transfer.report-progress": true, + "golem.com.payment.debit-notes.accept-timeout?": 240, + "golem.com.payment.platform.erc20-goerli-tglm.address": "0x95369fc6fd02afeca110b9c32a21fb8ad899ee0a", + "golem.com.pricing.model": "linear", + "golem.com.pricing.model.linear.coeffs": [ + 0.001388888888888889, + 0.0002777777777777778, + 0.0 + ], + "golem.com.scheme": "payu", + "golem.com.scheme.payu.debit-note.interval-sec?": 120, + "golem.com.scheme.payu.payment-timeout-sec?": 120, + "golem.com.usage.vector": [ + "golem.usage.cpu_sec", + "golem.usage.duration_sec" + ], + "golem.inf.cpu.architecture": "x86_64", + "golem.inf.cpu.brand": "Intel(R) Core(TM) i7-9750H CPU @ 2.60GHz", + "golem.inf.cpu.capabilities": [], + "golem.inf.cpu.cores": 6, + "golem.inf.cpu.model": "Stepping 10 Family 6 Model 302", + "golem.inf.cpu.threads": 11, + "golem.inf.cpu.vendor": "GenuineIntel", + "golem.inf.mem.gib": 28.0, + "golem.inf.storage.gib": 10.188121795654297, + "golem.node.id.name": "mock-provider", + "golem.node.net.is-public": true, + "golem.runtime.capabilities": [], + "golem.runtime.name": "ya-mock-runtime", + "golem.runtime.version": "0.1.0", + "golem.srv.caps.multi-activity": true, + "golem.srv.caps.payload-manifest": true + }, + "constraints": "(&\n (golem.srv.comp.expiration>1705586871777)\n)", + "offerId": "afce49b1ea5b45db91bdd6e5481479f9-9095fca9dea0a91ce95cf994125b33cdd838fcc963a1106f2be9e4b5b65a52f0", + "providerId": "0x86a269498fb5270f20bdc6fdcf6039122b0d3b23", + "timestamp": "2022-05-22T10:41:42.564784259Z" + }, + + "demand": { + "constraints": "(&(golem.com.payment.platform.erc20-goerli-tglm.address=*)\n\t(golem.com.pricing.model=linear)\n\t(&(golem.inf.mem.gib>=0.5)\n\t(golem.inf.storage.gib>=2.0)\n\t(golem.inf.cpu.threads>=1)\n\t(golem.runtime.name=ya-mock-runtime)))", + "demandId": "773035fc685c46da8e61473ac2a2568e-3f3eb86d6ef9a01708d0f57d0b19cc69fd74422150c120e33cc1b5f4a1a12b96", + "properties": { + "golem": { + "com.payment": { + "chosen-platform": "erc20-goerli-tglm", + "debit-notes.accept-timeout?": 240, + "platform.erc20-goerli-tglm.address": "0xa5ad3f81e283983b8e9705b2e31d0c138bb2b1b7" + }, + "node": {}, + "srv": { + "caps.multi-activity": true, + "comp": { + "expiration": 1653216996555, + "task_package": "${task-package}", + "vm": { + "package_format": "gvmkit-squash" + } + } + } + } + }, + "requestorId": "0xa5ad3f81e283983b8e9705b2e31d0c138bb2b1b7", + "timestamp": "2022-05-22T10:41:42.564784259Z" + } +} diff --git a/exe-unit/tests/test_exe_unit_basic.rs b/exe-unit/tests/test_exe_unit_basic.rs new file mode 100644 index 0000000000..be747b8bf4 --- /dev/null +++ b/exe-unit/tests/test_exe_unit_basic.rs @@ -0,0 +1,53 @@ +use test_context::test_context; + +use ya_client_model::activity::ExeScriptCommand; +use ya_framework_basic::async_drop::DroppableTestContext; +use ya_framework_basic::file::generate_image; +use ya_framework_basic::log::enable_logs; +use ya_framework_basic::server_external::start_http; +use 
ya_framework_basic::test_dirs::cargo_binary; +use ya_framework_basic::{resource, temp_dir}; +use ya_mock_runtime::testing::{create_exe_unit, exe_unit_config, ExeUnitExt}; + +#[cfg_attr(not(feature = "framework-test"), ignore)] +#[test_context(DroppableTestContext)] +#[serial_test::serial] +async fn test_exe_unit_start_terminate(ctx: &mut DroppableTestContext) -> anyhow::Result<()> { + enable_logs(false); + + let dir = temp_dir!("exe-unit-start-terminate")?; + let temp_dir = dir.path(); + let image_repo = temp_dir.join("images"); + + generate_image(&image_repo, "image-1", 4096_usize, 10); + start_http(ctx, image_repo) + .await + .expect("unable to start http servers"); + + let config = exe_unit_config( + temp_dir, + &resource!("agreement.json"), + cargo_binary("ya-mock-runtime")?, + ); + + let exe = create_exe_unit(config.clone(), ctx).await.unwrap(); + exe.await_init().await.unwrap(); + + log::info!("Sending [deploy, start] batch for execution."); + + exe.wait_for_batch(&exe.deploy(None).await.unwrap()) + .await + .unwrap(); + exe.wait_for_batch(&exe.start(vec![]).await.unwrap()) + .await + .unwrap(); + + log::info!("Sending shutdown request."); + + exe.exec(None, vec![ExeScriptCommand::Terminate {}]) + .await + .unwrap(); + + exe.shutdown().await.unwrap(); + Ok(()) +} diff --git a/exe-unit/tests/test_progress.rs b/exe-unit/tests/test_progress.rs new file mode 100644 index 0000000000..3f43e0c624 --- /dev/null +++ b/exe-unit/tests/test_progress.rs @@ -0,0 +1,151 @@ +use futures::StreamExt; +use std::time::Duration; +use test_context::test_context; + +use ya_client_model::activity::exe_script_command::{ProgressArgs, Volumes}; +use ya_client_model::activity::{ExeScriptCommand, RuntimeEventKind, TransferArgs}; +use ya_core_model::activity; +use ya_framework_basic::async_drop::DroppableTestContext; +use ya_framework_basic::file::generate_image; +use ya_framework_basic::log::enable_logs; +use ya_framework_basic::server_external::start_http; +use ya_framework_basic::test_dirs::{cargo_binary, template}; +use ya_framework_basic::{resource, temp_dir}; +use ya_mock_runtime::testing::{create_exe_unit, exe_unit_config, ExeUnitExt}; + +use ya_service_bus::typed as bus; + +/// Test if progress reporting mechanisms work on gsb level +/// with full ExeUnit setup. 
+#[cfg_attr(not(feature = "framework-test"), ignore)] +#[test_context(DroppableTestContext)] +#[serial_test::serial] +async fn test_progress_reporting(ctx: &mut DroppableTestContext) -> anyhow::Result<()> { + enable_logs(false); + + let dir = temp_dir!("progress-reporting")?; + let temp_dir = dir.path(); + let image_repo = temp_dir.join("images"); + + let chunk_size = 4096_usize; + let chunk_count = 1024 * 20; + let file_size = (chunk_size * chunk_count) as u64; + + let hash = generate_image(&image_repo, "image-big", chunk_size, chunk_count); + let package = format!( + "hash://sha3:{}:http://127.0.0.1:8001/image-big", + hex::encode(hash) + ); + start_http(ctx, image_repo.clone()) + .await + .expect("unable to start http servers"); + + let config = exe_unit_config( + temp_dir, + &template( + &resource!("agreement.template.json"), + temp_dir.join("agreement.json"), + &[("task-package", package)], + )?, + cargo_binary("ya-mock-runtime")?, + ); + + let exe = create_exe_unit(config.clone(), ctx).await.unwrap(); + exe.await_init().await.unwrap(); + + log::info!("Sending [deploy] batch for execution."); + + let batch_id = exe + .exec( + None, + vec![ExeScriptCommand::Deploy { + net: vec![], + progress: Some(ProgressArgs { + update_interval: Some(Duration::from_millis(300)), + update_step: None, + }), + env: Default::default(), + hosts: Default::default(), + hostname: None, + volumes: Some(Volumes::Simple(vec!["/input".to_owned()])), + }], + ) + .await + .unwrap(); + + validate_progress( + config.service_id.clone().unwrap(), + batch_id.clone(), + file_size, + ) + .await; + + exe.wait_for_batch(&batch_id).await.unwrap(); + exe.wait_for_batch(&exe.start(vec![]).await.unwrap()) + .await + .unwrap(); + + let batch_id = exe + .exec( + None, + vec![ExeScriptCommand::Transfer { + args: TransferArgs::default(), + progress: Some(ProgressArgs { + update_interval: Some(Duration::from_millis(100)), + update_step: None, + }), + // Important: Use hashed transfer, because it is significantly slower in debug mode. + // Otherwise we won't get any progress message, because it is too fast. + from: format!( + "hash://sha3:{}:http://127.0.0.1:8001/image-big", + hex::encode(hash) + ), + to: "container:/input/image-copy".to_string(), + }], + ) + .await + .unwrap(); + + validate_progress(config.service_id.unwrap(), batch_id.clone(), file_size).await; + exe.wait_for_batch(&batch_id).await.unwrap(); + Ok(()) +} + +async fn validate_progress(activity_id: String, batch_id: String, file_size: u64) { + let msg = activity::StreamExecBatchResults { + activity_id: activity_id.clone(), + batch_id: batch_id.clone(), + }; + + // Note: Since we have already sent commands, we may loose a few events on the beginning. + // Our API has a problem here. We can't call `StreamExecBatchResults` before Exeunit knows + // `batch_id`. Even if we would generate id ourselves (possible in test, but not possible for Requestor), + // we still can't call this function too early. + let mut stream = bus::service(activity::exeunit::bus_id(&msg.activity_id)).call_streaming(msg); + + let mut last_progress = 0u64; + let mut num_progresses = 0u64; + while let Some(Ok(Ok(item))) = stream.next().await { + if item.index == 0 { + match item.kind { + RuntimeEventKind::Finished { return_code, .. 
} => { + assert_eq!(return_code, 0); + break; + } + RuntimeEventKind::Progress(progress) => { + log::info!("Progress report: {:?}", progress); + + assert_eq!(progress.step, (0, 1)); + assert_eq!(progress.unit, Some("Bytes".to_string())); + assert_eq!(progress.progress.1.unwrap(), file_size); + assert!(progress.progress.0 >= last_progress); + + last_progress = progress.progress.0; + num_progresses += 1; + } + _ => (), + } + } + } + assert!(num_progresses > 1); +} diff --git a/extra/payments/multi_test/config-payments.toml b/extra/payments/multi_test/config-payments.toml index 6982e940de..f1eb90591c 100644 --- a/extra/payments/multi_test/config-payments.toml +++ b/extra/payments/multi_test/config-payments.toml @@ -135,7 +135,7 @@ dns-source = "holesky.rpc-node.dev.golem.network." [chain.mumbai] chain-name = "Mumbai testnet" chain-id = 80001 -currency-symbol = "tMATIC" +currency-symbol = "POL" priority-fee = 1.0 max-fee-per-gas = 14.0 transaction-timeout = 60 @@ -167,7 +167,7 @@ dns-source = "mumbai.rpc-node.dev.golem.network." [chain.polygon] chain-name = "Polygon mainnet" chain-id = 137 -currency-symbol = "MATIC" +currency-symbol = "POL" priority-fee = 30.111 max-fee-per-gas = 500.0 transaction-timeout = 100 diff --git a/extra/payments/multi_test/payment_test.py b/extra/payments/multi_test/payment_test.py index c171f9c7be..ddae075735 100644 --- a/extra/payments/multi_test/payment_test.py +++ b/extra/payments/multi_test/payment_test.py @@ -167,6 +167,10 @@ def process_erc20(): balance = get_balance() if balance[public_addrs[0]]["tokenDecimal"] != "0": raise Exception("Test failed early because of wrong initial balance") + + # give consent before running yagna service + run_command(f"{yagna} consent allow-all") + pr = subprocess.Popen([yagna, "service", "run"]) time.sleep(10) diff --git a/golem_cli/Cargo.toml b/golem_cli/Cargo.toml index 2a5da6bb02..3d25dd739f 100644 --- a/golem_cli/Cargo.toml +++ b/golem_cli/Cargo.toml @@ -5,13 +5,17 @@ version = "0.3.0" authors = ["Golem Factory "] edition = "2018" +[features] +require-consent = ['ya-utils-consent/require-consent'] + [dependencies] ya-client = { workspace = true, features = ['cli'] } -ya-compile-time-utils = "0.2" +ya-compile-time-utils.workspace = true ya-core-model = { workspace = true, features = ["payment", "version"] } -ya-provider = "0.3" -ya-utils-path = "0.1.0" -ya-utils-process = { version = "0.3", features = ["lock"] } +ya-provider.path = "../agent/provider" +ya-utils-consent.workspace = true +ya-utils-path.workspace = true +ya-utils-process = { workspace = true, features = ["lock"] } actix-rt = "2.7" ansi_term = "0.12.1" @@ -29,7 +33,7 @@ log = "0.4" names = "0.10.0" openssl.workspace = true prettytable-rs = "0.10.0" -promptly = "0.3.0" +promptly.workspace = true rustyline = "6.3.0" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/golem_cli/src/command/provider.rs b/golem_cli/src/command/provider.rs index 719fbdbfd6..711e3e68a2 100644 --- a/golem_cli/src/command/provider.rs +++ b/golem_cli/src/command/provider.rs @@ -111,8 +111,14 @@ impl YaProviderCommand { .await .context("failed to get ya-provider exe-unit")?; - serde_json::from_slice(output.stdout.as_slice()) - .context("parsing ya-provider exe-unit list") + match serde_json::from_slice(output.stdout.as_slice()) { + Ok(runtimes) => Ok(runtimes), + Err(e) => { + let output = String::from_utf8_lossy(&output.stderr); + Err(anyhow::anyhow!("{}", output)) + .with_context(|| format!("parsing ya-provider exe-unit list: {}", e)) + } + } } pub async 
fn create_preset( diff --git a/golem_cli/src/command/yagna.rs b/golem_cli/src/command/yagna.rs index 9e41fa1f04..9c415bc757 100644 --- a/golem_cli/src/command/yagna.rs +++ b/golem_cli/src/command/yagna.rs @@ -16,6 +16,7 @@ use ya_core_model::payment::local::{ }; use ya_core_model::version::VersionInfo; +#[allow(dead_code)] pub struct PaymentPlatform { pub platform: &'static str, pub driver: &'static str, @@ -173,6 +174,7 @@ pub trait PaymentSummary { fn unconfirmed(&self) -> (BigDecimal, u64); } +#[allow(dead_code)] #[derive(Deserialize, Default)] #[serde(rename_all = "camelCase")] pub struct ActivityStatus { diff --git a/golem_cli/src/main.rs b/golem_cli/src/main.rs index 45387774d4..8f6698a37a 100644 --- a/golem_cli/src/main.rs +++ b/golem_cli/src/main.rs @@ -5,6 +5,8 @@ use anyhow::Result; use std::env; use std::io::Write; use structopt::{clap, StructOpt}; +use ya_utils_consent::ConsentCommand; +use ya_utils_consent::{run_consent_command, set_consent_path_in_yagna_dir}; mod appkey; mod command; @@ -47,6 +49,9 @@ enum Commands { /// Show provider status Status, + /// Manage consent (privacy) settings + Consent(ConsentCommand), + #[structopt(setting = structopt::clap::AppSettings::Hidden)] Complete(CompleteCommand), @@ -109,6 +114,11 @@ async fn my_main() -> Result { ); Ok(0) } + Commands::Consent(command) => { + set_consent_path_in_yagna_dir()?; + run_consent_command(command); + Ok(0) + } Commands::ManifestBundle(command) => manifest::manifest_bundle(command).await, Commands::Other(args) => { let cmd = command::YaCommand::new()?; diff --git a/golem_cli/src/service.rs b/golem_cli/src/service.rs index e20b892797..8ac7ce5e8b 100644 --- a/golem_cli/src/service.rs +++ b/golem_cli/src/service.rs @@ -115,6 +115,7 @@ pub async fn run(config: RunConfig) -> Result { crate::setup::setup(&config, false).await?; let cmd = YaCommand::new()?; + let service = cmd.yagna()?.service_run(&config).await?; let app_key = appkey::get_app_key().await?; diff --git a/golem_cli/src/settings_show.rs b/golem_cli/src/settings_show.rs index 2a821eeb9b..4369195804 100644 --- a/golem_cli/src/settings_show.rs +++ b/golem_cli/src/settings_show.rs @@ -8,6 +8,7 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use crate::command::YaCommand; +#[allow(dead_code)] #[derive(Deserialize)] pub struct ProviderConfig { pub node_name: String, diff --git a/golem_cli/src/setup.rs b/golem_cli/src/setup.rs index ab927d36f9..a3c2740a5b 100644 --- a/golem_cli/src/setup.rs +++ b/golem_cli/src/setup.rs @@ -5,6 +5,7 @@ use std::path::PathBuf; use structopt::clap; use structopt::StructOpt; use strum::VariantNames; +use ya_utils_consent::{consent_check_before_startup, set_consent_path_in_yagna_dir}; use ya_core_model::NodeId; @@ -60,6 +61,10 @@ pub async fn setup(run_config: &RunConfig, force: bool) -> Result { eprintln!("Initial node setup"); let _ = clear_stdin().await; } + //before running yagna check consents + set_consent_path_in_yagna_dir()?; + consent_check_before_startup(interactive)?; + let cmd = crate::command::YaCommand::new()?; let mut config = cmd.ya_provider()?.get_config().await?; diff --git a/rust-toolchain.toml b/rust-toolchain.toml index b5075cb364..e1b23c246d 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,4 +1,4 @@ [toolchain] -channel = "1.77.0" +channel = "1.81.0" components = ["rustfmt", "clippy"] targets = ["x86_64-unknown-linux-musl"] diff --git a/test-utils/test-framework/Cargo.toml b/test-utils/test-framework/Cargo.toml index 74f4b2a24f..c743a8bef4 100644 --- 
a/test-utils/test-framework/Cargo.toml +++ b/test-utils/test-framework/Cargo.toml @@ -17,8 +17,8 @@ serial_test = { git = "https://github.com/golemfactory/serial_test.git", branch tokio = { version = "1", features = ["rt"] } url = "2.3" -ya-utils-process = "0.3" -ya-framework-macro = "0.1" +ya-utils-process.workspace = true +ya-framework-macro = { path = "./framework-macro" } [target.'cfg(target_family = "unix")'.dependencies] libc = "0.2.73" diff --git a/test-utils/test-framework/framework-basic/Cargo.toml b/test-utils/test-framework/framework-basic/Cargo.toml index 9c9f370bd8..ea7dc90c46 100644 --- a/test-utils/test-framework/framework-basic/Cargo.toml +++ b/test-utils/test-framework/framework-basic/Cargo.toml @@ -11,13 +11,13 @@ bcast-singleton = [] [dependencies] ya-client-model.workspace = true ya-client.workspace = true -ya-core-model = { workspace = true, features = ["net"] } +ya-core-model.workspace = true ya-service-bus = { workspace = true } ya-utils-path = { version = "0.1", path = "../../../utils/path" } ya-utils-futures.workspace = true gftp = { workspace = true } -ya-net = "0.3" +ya-net.workspace = true actix = "0.13" actix-files = "0.6" @@ -35,6 +35,7 @@ async-compression = { version = "=0.3.7", features = [ "xz", ] } bytes = "1.0" +cargo_metadata = "0.18" crossterm = "0.26.1" digest = "0.10" env_logger = "0.7" @@ -52,6 +53,7 @@ serde = "1.0.104" sha2 = "0.10" sha3 = "0.10" tempdir = "0.3.7" +test-binary = { version = "3.0", git = "https://github.com/golemfactory/test-binary.git" } test-context = "0.1.4" thiserror = "1.0.11" tokio = { version = "1", features = ["fs", "io-util"] } diff --git a/test-utils/test-framework/framework-basic/src/file.rs b/test-utils/test-framework/framework-basic/src/file.rs index 2e46def341..22b663594f 100644 --- a/test-utils/test-framework/framework-basic/src/file.rs +++ b/test-utils/test-framework/framework-basic/src/file.rs @@ -1,15 +1,79 @@ use digest::{Digest, Output}; +use rand::rngs::ThreadRng; use rand::Rng; +use std::fs; use std::fs::OpenOptions; use std::io::Write; use std::path::{Path, PathBuf}; -pub fn generate_file_with_hasher( +trait ContentGenerator { + fn generate(&mut self, chunk_size: usize) -> Vec; +} + +struct RandomGenerator(pub ThreadRng); +struct ZeroGenerator {} + +impl ContentGenerator for RandomGenerator { + fn generate(&mut self, chunk_size: usize) -> Vec { + (0..chunk_size) + .map(|_| self.0.gen_range(0..256) as u8) + .collect() + } +} + +impl ContentGenerator for ZeroGenerator { + fn generate(&mut self, chunk_size: usize) -> Vec { + vec![0; chunk_size] + } +} + +pub fn generate_random_file_with_hash( path: &Path, name: &str, chunk_size: usize, chunk_count: usize, ) -> Output { + generate_file_with_hasher::( + path, + name, + chunk_size, + chunk_count, + RandomGenerator(rand::thread_rng()), + ) +} + +pub fn generate_file_with_hash( + path: &Path, + name: &str, + chunk_size: usize, + chunk_count: usize, +) -> Output { + generate_random_file_with_hash::(path, name, chunk_size, chunk_count) +} + +pub fn generate_image( + path: &Path, + name: &str, + chunk_size: usize, + chunk_count: usize, +) -> Output { + generate_file_with_hasher::( + path, + name, + chunk_size, + chunk_count, + ZeroGenerator {}, + ) +} + +fn generate_file_with_hasher( + path: &Path, + name: &str, + chunk_size: usize, + chunk_count: usize, + mut gen: impl ContentGenerator, +) -> Output { + fs::create_dir_all(path).ok(); let path = path.join(name); log::debug!( @@ -24,8 +88,6 @@ pub fn generate_file_with_hasher( .open(path) .expect("rnd file"); - let mut 
rng = rand::thread_rng(); - for i in 0..chunk_count { log::trace!( "Generating chunk {i}/{chunk_count}. File size: {}/{}", @@ -33,9 +95,7 @@ pub fn generate_file_with_hasher( chunk_count * chunk_size ); - let input: Vec = (0..chunk_size) - .map(|_| rng.gen_range(0..256) as u8) - .collect(); + let input: Vec = gen.generate(chunk_size); hasher.update(&input); let _ = file_src.write(&input).unwrap(); @@ -44,15 +104,6 @@ pub fn generate_file_with_hasher( hasher.finalize() } -pub fn generate_file_with_hash( - path: &Path, - name: &str, - chunk_size: usize, - chunk_count: usize, -) -> Output { - generate_file_with_hasher::(path, name, chunk_size, chunk_count) -} - pub fn generate_file(path: &PathBuf, chunk_size: usize, chunk_count: usize) { let mut file = OpenOptions::new() .write(true) diff --git a/test-utils/test-framework/framework-basic/src/test_dirs.rs b/test-utils/test-framework/framework-basic/src/test_dirs.rs index bbe35b4643..374c697b2a 100644 --- a/test-utils/test-framework/framework-basic/src/test_dirs.rs +++ b/test-utils/test-framework/framework-basic/src/test_dirs.rs @@ -1,7 +1,9 @@ -use anyhow::anyhow; +use anyhow::{anyhow, bail}; use std::fs; use std::path::{Path, PathBuf}; +use std::str::FromStr; use tempdir::TempDir; +use test_binary::TestBinary; pub mod macros { /// Creates temporary directory in cargo target directory. @@ -33,6 +35,65 @@ pub fn temp_dir_(base_dir: &str, prefix: &str) -> anyhow::Result { Ok(dir) } +#[cfg(debug_assertions)] +pub fn is_debug() -> bool { + true +} + +#[cfg(not(debug_assertions))] +pub fn is_debug() -> bool { + false +} + +#[cfg(target_family = "windows")] +pub fn extension() -> String { + ".exe".to_string() +} + +#[cfg(not(target_family = "windows"))] +pub fn extension() -> String { + "".to_string() +} + +fn find_binary(bin_name: &str) -> anyhow::Result { + let current = std::env::current_exe() + .map_err(|e| anyhow!("Failed to get path to current binary. {e}"))? + .parent() + .and_then(|path| path.parent()) + .ok_or(anyhow!("No parent dir for current binary."))? + .to_path_buf(); + let bin_name = format!("{bin_name}{}", extension()); + let bin_path = current.join(&bin_name); + if !bin_path.exists() { + bail!( + "Path doesn't exist: {}, when looking for binary: {}", + bin_path.display(), + bin_name + ); + } + + if !bin_path.is_file() { + bail!("Expected {} to be binary file.", bin_path.display()); + } + + Ok(bin_path) +} + +/// Returns path to test binary from workspace. +pub fn cargo_binary(bin_name: &str) -> anyhow::Result { + // Check if binary is already compiled. + if find_binary(bin_name).is_err() { + TestBinary::from_workspace(bin_name)? + .build() + .map_err(|e| anyhow!("Failed to compile binary: {e}"))? + .to_str() + .map(PathBuf::from_str) + .ok_or(anyhow!("Failed to convert path from OsString"))??; + }; + + find_binary(bin_name) +} + /// Returns resource from `resources` directory in tests. 
pub fn resource_(base_dir: &str, name: &str) -> PathBuf { PathBuf::from(base_dir) diff --git a/test-utils/test-framework/framework-mocks/Cargo.toml b/test-utils/test-framework/framework-mocks/Cargo.toml index 22dabf909f..963783a18e 100644 --- a/test-utils/test-framework/framework-mocks/Cargo.toml +++ b/test-utils/test-framework/framework-mocks/Cargo.toml @@ -6,32 +6,33 @@ edition = "2021" [dependencies] ya-agreement-utils = { workspace = true } -ya-client-model = { workspace = true } -ya-client = { workspace = true } -ya-core-model = { workspace = true } -ya-dummy-driver = "0.3" -ya-erc20-driver = "0.4" -ya-identity = "0.3" -ya-market = "0.4" -ya-net = { version = "0.3", features = ["service"] } -ya-payment = "0.3" -ya-persistence = "0.3" -ya-service-api = "0.1" -ya-service-api-web = "0.2" +ya-client-model.workspace = true +ya-client.workspace = true +ya-core-model.workspace = true +ya-dummy-driver.workspace = true +ya-erc20-driver.workspace = true +ya-identity.workspace = true +ya-market.workspace = true +ya-net = { workspace = true, features = ["service"] } +ya-payment.workspace = true +ya-persistence.workspace = true +ya-service-api.workspace = true +ya-service-api-web.workspace = true ya-service-bus = { workspace = true } ya-sb-router = { workspace = true } -ya-utils-path = { version = "0.1", path = "../../../utils/path" } -ya-std-utils = "0.1" +ya-utils-path.workspace = true +ya-std-utils.workspace = true -ya-framework-basic = { version = "0.1" } +ya-framework-basic.workspace = true -actix-rt = "2.7" -actix-web = "4" +actix-rt.workspace = true +actix-web.workspace = true actix-http = "3" actix-service = "2" anyhow = "1.0" async-trait = "0.1.80" chrono = "0.4" +bigdecimal = "0.2" derive_more = { workspace = true } ethsign = "0.8" futures = "0.3" @@ -43,7 +44,7 @@ regex = "1.5" serde = "1.0" serde_json = "1.0" test-context = "0.1.4" -tokio = "1" +tokio = { version = "1", features = ["sync"] } uuid = { version = "0.8", features = ["v4", "serde"] } url = "2.5" diff --git a/test-utils/test-framework/framework-mocks/src/identity.rs b/test-utils/test-framework/framework-mocks/src/identity.rs index e6e7bb12fc..e5cef30c12 100644 --- a/test-utils/test-framework/framework-mocks/src/identity.rs +++ b/test-utils/test-framework/framework-mocks/src/identity.rs @@ -106,16 +106,16 @@ impl RealIdentity { } fn register_identity_in_net(&self, id: NodeId) { - // This line is temporary, until we will be able to rebind all modules to non-fixed prefix. - // Currently, all modules must be bound under `/local/{module}` and `/public/{module}`. - // Not doing so would break most of them. - // For example Payment module uses fixed prefix to call market and identity modules. - // When we will work around this problem, we will be able to instantiate many nodes in tests. - self.net.register_node(&id, "/public"); - - // Should be instead in the future: - // self.net - // .register_node(&id, &format!("/{}/public/{id}", self.name)); + if let Some(gsb) = &self.gsb { + self.net.register_node(&id, gsb.public_addr()); + } else { + // This line is temporary, until we will be able to rebind all modules to non-fixed prefix. + // Currently, all modules must be bound under `/local/{module}` and `/public/{module}`. + // Not doing so would break most of them. + // For example Payment module uses fixed prefix to call market and identity modules. + // When we will work around this problem, we will be able to instantiate many nodes in tests. 
+ self.net.register_node(&id, "/public"); + } } pub async fn create_identity(&self, name: &str) -> anyhow::Result { diff --git a/test-utils/test-framework/framework-mocks/src/net.rs b/test-utils/test-framework/framework-mocks/src/net.rs index 1ed1127f4f..3c99717652 100644 --- a/test-utils/test-framework/framework-mocks/src/net.rs +++ b/test-utils/test-framework/framework-mocks/src/net.rs @@ -1,4 +1,6 @@ -use anyhow::Result; +use anyhow::{bail, Result}; +use futures::TryFutureExt; +use std::collections::hash_map::Entry; use std::collections::HashMap; use std::rc::Rc; use std::sync::{Arc, Mutex}; @@ -21,10 +23,18 @@ pub struct MockNet { broadcast: BCastService, } +#[derive(Clone)] +struct NodeInfo { + pub prefix: String, + // Indicates that connection is broken. Sending message to this node + // will result with following error. + pub break_error: Option, +} + #[derive(Default)] struct MockNetInner { /// Maps NodeIds to gsb prefixes of other nodes. - pub nodes: HashMap, + pub nodes: HashMap, } impl Default for MockNet { @@ -56,7 +66,11 @@ impl IMockNet for MockNet { log::info!("[MockNet] Registering node {node_id} at prefix: {prefix}"); let mut inner = self.inner.lock().unwrap(); - if inner.nodes.insert(*node_id, prefix.to_string()).is_some() { + let info = NodeInfo { + prefix: prefix.to_string(), + break_error: None, + }; + if inner.nodes.insert(*node_id, info).is_some() { panic!("[MockNet] Node [{}] already existed.", node_id); } } @@ -88,6 +102,22 @@ impl MockNet { self } + pub fn break_network_for(&self, node_id: NodeId) { + let mut inner = self.inner.lock().unwrap(); + inner.nodes.entry(node_id).and_modify(|info| { + log::info!("Disabling networking for: {node_id}"); + info.break_error = Some("Unreachable".to_string()); + }); + } + + pub fn enable_network_for(&self, node_id: NodeId) { + let mut inner = self.inner.lock().unwrap(); + inner.nodes.entry(node_id).and_modify(|info| { + log::info!("Enabling networking for: {node_id}"); + info.break_error = None; + }); + } + fn translate_to(&self, id: NodeId, addr: &str) -> Result { let prefix = self.node_prefix(id)?; let net_prefix = format!("/net/{}", id); @@ -97,19 +127,21 @@ impl MockNet { } fn node_prefix(&self, id: NodeId) -> Result { - let inner = self.inner.lock().unwrap(); - inner - .nodes - .get(&id) - .cloned() - .ok_or_else(|| anyhow::anyhow!("Node not registered: {id}")) + let mut inner = self.inner.lock().unwrap(); + match inner.nodes.entry(id) { + Entry::Occupied(info) => match info.get().break_error { + Some(ref err) => bail!("{err}"), + None => Ok(info.get().prefix.clone()), + }, + Entry::Vacant(_) => bail!("Node not registered: {id}"), + } } - pub fn node_by_prefix(&self, address: &str) -> Option { + fn node_by_prefix(&self, address: &str) -> Option { let inner = self.inner.lock().unwrap(); - for (id, prefix) in inner.nodes.iter() { - if address.contains(prefix) { - return Some(*id); + for (_id, info) in inner.nodes.iter() { + if address.contains(&info.prefix) { + return Some(info.clone()); } } None @@ -162,8 +194,8 @@ impl MockNet { let addr_local = addr.replacen("local", "public", 1); let node_id = match mock_net.node_by_prefix(&addr_local) { - Some(node_id) => node_id, - None => { + Some(info) if info.break_error.is_none() => info.prefix, + _ => { log::debug!( "Not broadcasting on topic {topic} to {addr}. Node not found on list. \ Probably networking was disabled for this Node." 
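The `break_network_for` / `enable_network_for` pair above lets tests simulate a network partition between mock nodes: while a node is marked broken, resolving its prefix yields the stored "Unreachable" error instead of a GSB address, and broadcasts skip it. A rough usage sketch (test pseudocode; `net`, `provider_id`, and the surrounding assertion helper are assumptions, not code from this patch):

```rust
// `net` is the MockNet instance shared by the nodes in the test.
net.break_network_for(provider_id);
// Any call routed through MockNet to `provider_id` now fails with "Unreachable";
// exercise the code under test here and assert it tolerates the failure.
net.enable_network_for(provider_id);
// Traffic to `provider_id` is delivered again.
```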
@@ -207,6 +239,7 @@ impl MockNet { let data = Vec::from(msg); let caller = caller.to_string(); let addr = addr.to_string(); + let addr_ = addr.to_string(); let resolver_ = resolver.clone(); async move { @@ -222,7 +255,7 @@ impl MockNet { "[MockNet] Sending message from [{from}], to: [{to}], address [{translated}]." ); local_bus::send(&translated, &from.to_string(), &data).await - } + }.inspect_err(move |e| log::warn!("[MockNet] Sending message [{addr_}]: {e}")) }, // TODO: Implement stream handler (), diff --git a/test-utils/test-framework/framework-mocks/src/node.rs b/test-utils/test-framework/framework-mocks/src/node.rs index 1ca26ad9c0..ec5bc7ac49 100644 --- a/test-utils/test-framework/framework-mocks/src/node.rs +++ b/test-utils/test-framework/framework-mocks/src/node.rs @@ -10,6 +10,7 @@ use ya_client::payment::PaymentApi; use ya_client::web::WebClient; use ya_core_model::bus::GsbBindPoints; use ya_framework_basic::async_drop::DroppableTestContext; +use ya_payment::Config; use ya_service_api_web::middleware::auth; use ya_service_api_web::middleware::cors::{AppKeyCors, CorsConfig}; use ya_service_api_web::rest_api_host_port; @@ -46,8 +47,8 @@ pub struct MockNode { } impl MockNode { - pub fn new(net: MockNet, name: &str, testdir: &Path) -> Self { - let testdir = testdir.join(name); + pub fn new(net: MockNet, name: &str, testdir: impl AsRef) -> Self { + let testdir = testdir.as_ref().join(name); fs::create_dir_all(&testdir).expect("Failed to create test directory"); MockNode { @@ -80,8 +81,8 @@ impl MockNode { } /// Use full wrapped Payment module for this node. - pub fn with_payment(mut self) -> Self { - self.payment = Some(RealPayment::new(&self.name, &self.testdir)); + pub fn with_payment(mut self, config: Option) -> Self { + self.payment = Some(RealPayment::new(&self.name, &self.testdir).with_config(config)); self } @@ -117,6 +118,12 @@ impl MockNode { .ok_or_else(|| anyhow!("Payment ({}) is not initialized", self.name)) } + pub fn get_fake_payment(&self) -> anyhow::Result { + self.fake_payment + .clone() + .ok_or_else(|| anyhow!("Payment ({}) is not initialized", self.name)) + } + pub fn get_market(&self) -> anyhow::Result { self.market .clone() diff --git a/test-utils/test-framework/framework-mocks/src/payment.rs b/test-utils/test-framework/framework-mocks/src/payment.rs index 7c7f75dd7c..9875c0f8c3 100644 --- a/test-utils/test-framework/framework-mocks/src/payment.rs +++ b/test-utils/test-framework/framework-mocks/src/payment.rs @@ -6,7 +6,7 @@ use std::sync::Arc; use std::time::Duration; use ya_client::payment::PaymentApi; -use ya_client_model::payment::Payment; +use ya_client_model::payment::{Acceptance, Allocation, DebitNote, Invoice, Payment}; use ya_core_model::driver::{driver_bus_id, Fund}; use ya_core_model::payment::local::BUS_ID; use ya_core_model::payment::public; @@ -14,7 +14,6 @@ use ya_payment::api::web_scope; use ya_payment::config::Config; use ya_payment::migrations; use ya_payment::processor::PaymentProcessor; -use ya_payment::service::BindOptions; use ya_persistence::executor::DbExecutor; use ya_service_bus::typed as bus; use ya_service_bus::typed::Endpoint; @@ -48,21 +47,30 @@ pub struct RealPayment { db: DbExecutor, processor: Arc, + + config: Arc, } impl RealPayment { pub fn new(name: &str, testdir: &Path) -> Self { let db = Self::create_db(testdir, "payment.db").unwrap(); let processor = Arc::new(PaymentProcessor::new(db.clone())); + let config = Config::from_env().unwrap().run_sync_job(false); RealPayment { name: name.to_string(), testdir: 
testdir.to_path_buf(), db, processor, + config: Arc::new(config), } } + pub fn with_config(mut self, config: Option) -> Self { + self.config = Arc::new(config.unwrap_or(Config::from_env().unwrap())); + self + } + fn create_db(testdir: &Path, name: &str) -> anyhow::Result { let db = DbExecutor::from_data_dir(testdir, name) .map_err(|e| anyhow!("Failed to create db [{name:?}]. Error: {e}"))?; @@ -73,12 +81,7 @@ impl RealPayment { pub async fn bind_gsb(&self) -> anyhow::Result<()> { log::info!("RealPayment ({}) - binding GSB", self.name); - ya_payment::service::bind_service( - &self.db, - self.processor.clone(), - BindOptions::default().run_sync_job(false), - Arc::new(Config::from_env()?), - ); + ya_payment::service::bind_service(&self.db, self.processor.clone(), self.config.clone()); self.start_dummy_driver().await?; self.start_erc20_driver().await?; @@ -133,6 +136,28 @@ pub trait PaymentRestExt { where Tz: TimeZone, Tz::Offset: Display; + + async fn wait_for_invoice_payment( + &self, + invoice_id: &str, + timeout: Duration, + after_timestamp: Option>, + ) -> anyhow::Result> + where + Tz: TimeZone, + Tz::Offset: Display; + + async fn simple_accept_invoice( + &self, + invoice: &Invoice, + allocation: &Allocation, + ) -> anyhow::Result<()>; + + async fn simple_accept_debit_note( + &self, + debit_note: &DebitNote, + allocation: &Allocation, + ) -> anyhow::Result<()>; } #[async_trait::async_trait(?Send)] @@ -162,4 +187,60 @@ impl PaymentRestExt for PaymentApi { } Err(anyhow!("Timeout {timeout:?} waiting for payments.")) } + + async fn wait_for_invoice_payment( + &self, + invoice_id: &str, + timeout: Duration, + after_timestamp: Option>, + ) -> anyhow::Result> + where + Tz: TimeZone, + Tz::Offset: Display, + { + let start = Utc::now(); + while start + timeout > Utc::now() { + let payments = self + .get_payments_for_invoice(invoice_id, after_timestamp.clone(), None) + .await?; + + if !payments.is_empty() { + return Ok(payments); + } + tokio::time::sleep(Duration::from_millis(300)).await; + } + Err(anyhow!("Timeout {timeout:?} waiting for payments.")) + } + + async fn simple_accept_invoice( + &self, + invoice: &Invoice, + allocation: &Allocation, + ) -> anyhow::Result<()> { + Ok(self + .accept_invoice( + &invoice.invoice_id, + &Acceptance { + total_amount_accepted: invoice.amount.clone(), + allocation_id: allocation.allocation_id.to_string(), + }, + ) + .await?) + } + + async fn simple_accept_debit_note( + &self, + debit_note: &DebitNote, + allocation: &Allocation, + ) -> anyhow::Result<()> { + Ok(self + .accept_debit_note( + &debit_note.debit_note_id, + &Acceptance { + total_amount_accepted: debit_note.total_amount_due.clone(), + allocation_id: allocation.allocation_id.to_string(), + }, + ) + .await?) 
+ } } diff --git a/test-utils/test-framework/framework-mocks/src/payment/fake_payment.rs b/test-utils/test-framework/framework-mocks/src/payment/fake_payment.rs index 2f6008ce32..0d11508141 100644 --- a/test-utils/test-framework/framework-mocks/src/payment/fake_payment.rs +++ b/test-utils/test-framework/framework-mocks/src/payment/fake_payment.rs @@ -1,11 +1,23 @@ #![allow(unused)] use anyhow::anyhow; +use bigdecimal::BigDecimal; +use chrono::{DateTime, Duration, Utc}; +use std::fmt::Display; use std::path::{Path, PathBuf}; +use std::str::FromStr; use std::sync::Arc; +use tokio::sync::mpsc; +use uuid::Uuid; +use ya_agreement_utils::AgreementView; +use ya_client_model::market::Agreement; +use ya_client_model::payment::allocation::PaymentPlatformEnum; +use ya_client_model::payment::{DebitNote, DocumentStatus, Invoice, NewAllocation}; +use ya_client_model::NodeId; use ya_core_model as model; use ya_core_model::bus::GsbBindPoints; +use ya_core_model::payment::public; use ya_core_model::payment::public::{ AcceptDebitNote, AcceptInvoice, AcceptRejectError, Ack, CancelDebitNote, CancelError, CancelInvoice, PaymentSync, PaymentSyncError, PaymentSyncRequest, PaymentSyncWithBytes, @@ -15,7 +27,9 @@ use ya_core_model::payment::public::{ use ya_payment::migrations; use ya_payment::processor::PaymentProcessor; use ya_persistence::executor::DbExecutor; -use ya_service_bus::typed::ServiceBinder; +use ya_service_bus::typed as bus; +use ya_service_bus::typed::{Endpoint, ServiceBinder}; +use ya_service_bus::RpcMessage; #[derive(Clone)] pub struct FakePayment { @@ -72,6 +86,131 @@ impl FakePayment { Ok(()) } + + /// Function binds new GSB handler to the given message. + /// It returns Receiver that can be used to inspect the messages and make assertions. + /// GSB will always return `result` passed in parameter back to the caller. + /// Function overrides previous handler, so only one Receiver at the same time can be used. + pub fn message_channel( + &self, + result: Result, + ) -> mpsc::UnboundedReceiver<(NodeId, T)> + where + T: RpcMessage, + T::Item: Clone, + T::Error: Clone + Display, + { + let (sender, receiver) = mpsc::unbounded_channel(); + self.override_gsb_public() + .bind(move |_db: DbExecutor, sender_id: String, msg: T| { + let result = result.clone(); + let sender = sender.clone(); + async move { + let id = NodeId::from_str(&sender_id).unwrap(); + let _ = sender.send((id, msg)).map_err(|_e| { + log::error!( + "[FakePayment] Unable to send message '{}' to channel.", + T::ID + ); + }); + result + } + }); + receiver + } + + pub fn override_gsb_public(&self) -> ServiceBinder { + ServiceBinder::new(self.gsb.public_addr(), &self.db, ()) + } + + pub fn override_gsb_local(&self) -> ServiceBinder { + ServiceBinder::new(self.gsb.local_addr(), &self.db, ()) + } + + /// Unbinds GSB public endpoint. + /// TODO: it would be nice to be able to unbind each message separately, + /// but GSB doesn't allow this; it can only unbind whole GSB prefix. + pub async fn unbind_public(&self) { + bus::unbind(self.gsb.public_addr()).await; + } + + pub async fn unbind_local(&self) { + bus::unbind(self.gsb.local_addr()).await; + } + + pub fn gsb_local_endpoint(&self) -> Endpoint { + self.gsb.local() + } + + pub fn gsb_public_endpoint(&self) -> Endpoint { + self.gsb.public() + } + + fn platform_from(agreement: &Agreement) -> anyhow::Result { + let view = AgreementView::try_from(agreement)?; + Ok(view.pointer_typed("/demand/properties/golem/com/payment/chosen-platform")?) 
+ } + + pub fn fake_invoice(agreement: &Agreement, amount: BigDecimal) -> anyhow::Result { + let platform = Self::platform_from(agreement)?; + Ok(Invoice { + invoice_id: Uuid::new_v4().to_string(), + issuer_id: agreement.offer.provider_id, + recipient_id: agreement.demand.requestor_id, + payee_addr: agreement.offer.provider_id.to_string(), + payer_addr: agreement.demand.requestor_id.to_string(), + payment_platform: platform, + timestamp: Utc::now(), + agreement_id: agreement.agreement_id.to_string(), + activity_ids: vec![], + amount, + payment_due_date: Utc::now() + Duration::seconds(10), + status: DocumentStatus::Issued, + }) + } + + pub fn fake_debit_note( + agreement: &Agreement, + activity_id: &str, + amount: BigDecimal, + due_date: Option>, + ) -> anyhow::Result { + let platform = Self::platform_from(agreement)?; + Ok(DebitNote { + debit_note_id: Uuid::new_v4().to_string(), + issuer_id: agreement.offer.provider_id, + recipient_id: agreement.demand.requestor_id, + payee_addr: agreement.offer.provider_id.to_string(), + payer_addr: agreement.demand.requestor_id.to_string(), + payment_platform: platform, + previous_debit_note_id: None, + timestamp: Utc::now(), + agreement_id: agreement.agreement_id.to_string(), + activity_id: activity_id.to_string(), + total_amount_due: amount, + usage_counter_vector: None, + payment_due_date: due_date, + status: DocumentStatus::Issued, + }) + } + + pub fn default_allocation( + agreement: &Agreement, + amount: BigDecimal, + ) -> anyhow::Result { + let platform = Self::platform_from(agreement)?; + let payment_platform = PaymentPlatformEnum::PaymentPlatformName(platform); + + Ok(NewAllocation { + address: None, // Use default address (i.e. identity) + payment_platform: Some(payment_platform.clone()), + total_amount: amount, + timeout: None, + make_deposit: false, + deposit: None, + extend_timeout: None, + }) + } } async fn send_debit_note( diff --git a/tests/readme.md b/tests/readme.md index 84919fcc7d..c4ccad8599 100644 --- a/tests/readme.md +++ b/tests/readme.md @@ -11,7 +11,7 @@ To run all tests including framework tests and unit tests (but without market te `cargo test --workspace --features framework-test` To run only framework tests use command: -`cargo test --test '*' --features framework-test` +`cargo test --test '*' -p yagna -p ya-exe-unit -p ya-transfer -p ya-payment -p ya-identity --features framework-test` ## Creating tests diff --git a/utils/consent/Cargo.toml b/utils/consent/Cargo.toml new file mode 100644 index 0000000000..9567f0109e --- /dev/null +++ b/utils/consent/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "ya-utils-consent" +version = "0.1.0" +description = "Consent (allow/deny settings) service for Yagna" +authors = ["Golem Factory "] +edition = "2021" + +[dependencies] +anyhow = "1.0" +structopt = "0.3" +log = "0.4" +metrics = "0.12" +serde = "1" +serde_json = "1" +strum.workspace = true +promptly.workspace = true +parking_lot.workspace = true +ya-service-api.workspace = true +ya-service-api-interfaces.workspace = true +ya-utils-path = { path = "../path" } + +[dev-dependencies] +env_logger = "0" +rand.workspace = true + +[features] +require-consent = [] diff --git a/utils/consent/README.md b/utils/consent/README.md new file mode 100644 index 0000000000..315c0e3dbc --- /dev/null +++ b/utils/consent/README.md @@ -0,0 +1,38 @@ +## Feature Documentation + +### Aim: +Add a management feature to allow users to set their consent for data collection and publishing on the stats.golem.network. 
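For reference, the CONSENT file read and written by the new `ya-utils-consent` crate (see `parser.rs` further down in this patch) is a simple line-oriented format: lines starting with `#` are comments, and each entry is a consent scope followed by `allow` or `deny`. The only scope defined so far is `Stats`, so the file contents look roughly like this (a sketch, not verbatim output):

```
# Consent to augment stats.golem.network
# portal with data collected from your node.
Stats allow
```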
+
+### Description:
+The user's consent setting is saved in the CONSENT file in the YAGNA_DATADIR folder.
+Both ```yagna``` and ```golemsp``` use this config (see details below).
+The setting can also be modified with the YA_CONSENT_STATS env variable (which can be read from the .env file).
+
+### Used artefacts:
+- YA_CONSENT_STATS - env variable; its value takes priority and is used to update the setting in the CONSENT file when yagna or golemsp is run
+- CONSENT file in the YAGNA_DATADIR folder
+
+### How to check the settings:
+
+Shows the current setting:
+```
+yagna consent show
+```
+Note that it reads the value from the CONSENT file and from the YA_CONSENT_STATS variable (from the session or the .env file in the current folder), so if the service was launched from another folder, or with a different value of YA_CONSENT_STATS set in its session, the information shown may not be accurate.
+
+### How to change the settings:
+
+Set the new value in the CONSENT file (requires a yagna restart to take effect):
+- yagna consent allow/deny
+- restart yagna/golemsp with YA_CONSENT_STATS set; the setting in the CONSENT file will be updated to the value set by the variable.
+
+### Details:
+
+```golemsp``` will ask about the consent if it cannot be determined from the YA_CONSENT_STATS variable or the CONSENT file.
+If yagna cannot determine the setting from the YA_CONSENT_STATS variable or the CONSENT file, it will assume the consent is not given, but will not record that in the CONSENT file.
+
+### Motivation:
+```golemsp``` is designed to install provider nodes interactively, therefore it expects the question to be answered. The user can still avoid the question by setting the env variable.
+The default answer is "allow", as we do not collect data that is both personal and not already publicly available to other network users. The data is used to augment the information shown on stats.golem.network, and most providers expect this data to be available there.
+Yagna, on the other hand, won't stop on the question if the setting is not defined, to prevent interrupting automatic updates of yagna running as a background service.
+We expect such a scenario mostly for requestors.
\ No newline at end of file
diff --git a/utils/consent/src/api.rs b/utils/consent/src/api.rs
new file mode 100644
index 0000000000..22b6a3499c
--- /dev/null
+++ b/utils/consent/src/api.rs
@@ -0,0 +1,292 @@
+use crate::fs::{load_entries, save_entries};
+use crate::model::display_consent_path;
+use crate::model::{extra_info, full_question};
+use crate::{ConsentCommand, ConsentEntry, ConsentScope};
+use anyhow::anyhow;
+use metrics::gauge;
+use parking_lot::Mutex;
+use serde::{Deserialize, Serialize};
+use serde_json::json;
+use std::collections::BTreeMap;
+use std::path::PathBuf;
+use std::str::FromStr;
+use std::sync::Arc;
+use std::{env, fmt};
+use structopt::lazy_static::lazy_static;
+use ya_utils_path::data_dir::DataDir;
+
+lazy_static!
{ + static ref CONSENT_PATH: Arc>> = Arc::new(Mutex::new(None)); + static ref CONSENT_CACHE: Arc>> = + Arc::new(Mutex::new(BTreeMap::new())); +} + +pub fn set_consent_path(path: PathBuf) { + *CONSENT_PATH.lock() = Some(path); +} + +pub fn set_consent_path_in_yagna_dir() -> anyhow::Result<()> { + let yagna_datadir = match env::var("YAGNA_DATADIR") { + Ok(val) => match DataDir::from_str(&val) { + Ok(val) => val, + Err(e) => { + return Err(anyhow!( + "Problem when creating yagna path from YAGNA_DATADIR: {}", + e + )) + } + }, + Err(_) => DataDir::new("yagna"), + }; + + let val = match yagna_datadir.get_or_create() { + Ok(val) => val, + Err(e) => return Err(anyhow!("Problem when creating yagna path: {}", e)), + }; + + let val = val.join("CONSENT"); + log::info!("Using yagna path: {}", val.as_path().display()); + set_consent_path(val); + Ok(()) +} + +fn get_consent_env_path() -> Option { + env::var("YA_CONSENT_PATH").ok().map(PathBuf::from) +} + +pub fn get_consent_path() -> Option { + let env_path = get_consent_env_path(); + + // Environment path is prioritized + if let Some(env_path) = env_path { + return Some(env_path); + } + + // If no environment path is set, use path setup by set_consent_path + CONSENT_PATH.lock().clone() +} + +struct ConsentEntryCached { + consent: HaveConsentResult, + cached_time: std::time::Instant, +} + +#[derive(Copy, Debug, Clone, Serialize, Deserialize, PartialEq, EnumIter, Eq)] +pub enum ConsentSource { + Default, + Config, + Env, +} +impl fmt::Display for ConsentSource { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +#[derive(Copy, Debug, Clone)] +pub struct HaveConsentResult { + pub consent: Option, + pub source: ConsentSource, +} + +/// Get current status of consent, it is cached for some time, so you can safely call it as much as you want +pub fn have_consent_cached(consent_scope: ConsentScope) -> HaveConsentResult { + if cfg!(feature = "require-consent") { + let mut map = CONSENT_CACHE.lock(); + + if let Some(entry) = map.get(&consent_scope) { + if entry.cached_time.elapsed().as_secs() < 15 { + return entry.consent; + } + } + let consent_res = have_consent(consent_scope, false); + map.insert( + consent_scope, + ConsentEntryCached { + consent: consent_res, + cached_time: std::time::Instant::now(), + }, + ); + gauge!( + format!("consent.{}", consent_scope.to_lowercase_str()), + consent_res + .consent + .map(|v| if v { 1 } else { 0 }) + .unwrap_or(-1) as i64 + ); + consent_res + } else { + // if feature require-consent is disabled, return true without checking + HaveConsentResult { + consent: Some(true), + source: ConsentSource::Default, + } + } +} + +/// Save from env is used to check if consent should be saved to configuration if set in variable +pub(crate) fn have_consent(consent_scope: ConsentScope, save_from_env: bool) -> HaveConsentResult { + // for example: + // YA_CONSENT_STATS=allow + + let env_variable_name = format!("YA_CONSENT_{}", consent_scope.to_string().to_uppercase()); + let result_from_env = if let Ok(env_value) = env::var(&env_variable_name) { + if env_value.trim().to_lowercase() == "allow" { + Some(HaveConsentResult { + consent: Some(true), + source: ConsentSource::Env, + }) + } else if env_value.trim().to_lowercase() == "deny" { + Some(HaveConsentResult { + consent: Some(false), + source: ConsentSource::Env, + }) + } else { + panic!("Invalid value for consent: {env_variable_name}={env_value}, possible values allow/deny"); + } + } else { + None + }; + if let Some(result_from_env) = result_from_env 
{ + if save_from_env { + //save and read again from fail + set_consent(consent_scope, result_from_env.consent); + } else { + //return early with the result + return result_from_env; + } + } + + let path = match get_consent_path() { + Some(path) => path, + None => { + log::warn!("No consent path found"); + return HaveConsentResult { + consent: None, + source: ConsentSource::Default, + }; + } + }; + let entries = load_entries(&path); + let mut allowed = None; + for entry in entries { + if entry.consent_scope == consent_scope { + allowed = Some(entry.allowed); + } + } + HaveConsentResult { + consent: allowed, + source: ConsentSource::Config, + } +} + +pub fn set_consent(consent_scope: ConsentScope, allowed: Option) { + { + CONSENT_CACHE.lock().clear(); + } + let path = match get_consent_path() { + Some(path) => path, + None => { + log::warn!("No consent path found - set consent failed"); + return; + } + }; + for consent_scope in ConsentScope::iter() { + let env_name = format!("YA_CONSENT_{}", consent_scope.to_string().to_uppercase()); + if let Ok(env_val) = env::var(&env_name) { + log::warn!( + "Consent {} is already set by environment variable, changes to configuration may not have effect: {}={}", + consent_scope, + env_name, + env_val) + } + } + let mut entries = load_entries(&path); + entries.retain(|entry| entry.consent_scope != consent_scope); + if let Some(allowed) = allowed { + entries.push(ConsentEntry { + consent_scope, + allowed, + }); + } + entries.sort_by(|a, b| a.consent_scope.cmp(&b.consent_scope)); + match save_entries(&path, entries) { + Ok(_) => log::info!("Consent saved: {} {:?}", consent_scope, allowed), + Err(e) => log::error!("Error when saving consent: {}", e), + } +} + +pub fn to_json() -> serde_json::Value { + json!({ + "consents": ConsentScope::iter() + .map(|consent_scope: ConsentScope| { + let consent_res = have_consent(consent_scope, false); + let consent = match consent_res.consent { + Some(true) => "allow", + Some(false) => "deny", + None => "not set", + }; + let source_location = match consent_res.source { + ConsentSource::Config => display_consent_path(), + ConsentSource::Env => { + let env_var_name = format!("YA_CONSENT_{}", &consent_scope.to_string().to_uppercase()); + format!("({}={})", &env_var_name, env::var(&env_var_name).unwrap_or("".to_string())) + }, + ConsentSource::Default => "N/A".to_string(), + }; + json!({ + "type": consent_scope.to_string(), + "consent": consent, + "source": consent_res.source.to_string(), + "location": source_location, + "info": extra_info(consent_scope), + "question": full_question(consent_scope), + }) + }) + .collect::>() + }) +} + +pub fn run_consent_command(consent_command: ConsentCommand) { + match consent_command { + ConsentCommand::Show => { + println!( + "{}", + serde_json::to_string_pretty(&to_json()).expect("json serialization failed") + ); + } + ConsentCommand::Allow(consent_scope) => { + set_consent(consent_scope, Some(true)); + } + ConsentCommand::Deny(consent_scope) => { + set_consent(consent_scope, Some(false)); + } + ConsentCommand::Unset(consent_scope) => { + set_consent(consent_scope, None); + } + ConsentCommand::AllowAll => { + for consent_scope in ConsentScope::iter() { + set_consent(consent_scope, Some(true)); + } + } + ConsentCommand::DenyAll => { + for consent_scope in ConsentScope::iter() { + set_consent(consent_scope, Some(false)); + } + } + ConsentCommand::UnsetAll => { + for consent_scope in ConsentScope::iter() { + set_consent(consent_scope, None); + } + } + ConsentCommand::Path => { + println!( + 
"{}", + get_consent_path() + .map(|p| p.to_string_lossy().to_string()) + .unwrap_or("not found".to_string()) + ) + } + } +} diff --git a/utils/consent/src/fs.rs b/utils/consent/src/fs.rs new file mode 100644 index 0000000000..4302ad021e --- /dev/null +++ b/utils/consent/src/fs.rs @@ -0,0 +1,157 @@ +use crate::parser::{entries_to_str, str_to_entries}; +use crate::ConsentEntry; +use std::fs::{File, OpenOptions}; +use std::io; +use std::io::{Read, Write}; +use std::path::Path; + +pub fn save_entries(path: &Path, entries: Vec) -> std::io::Result<()> { + let file_exists = path.exists(); + // Open the file in write-only mode + let file = match OpenOptions::new() + .create(true) + .write(true) + .truncate(true) + .open(path) + { + Ok(file) => file, + Err(e) => { + log::error!("Error opening file for write: {}", e); + return Err(e); + } + }; + if file_exists { + log::info!("Overwriting consent file: {}", path.display()); + } else { + log::info!("Created consent file: {}", path.display()); + } + let mut writer = io::BufWriter::new(file); + + writer.write_all(entries_to_str(entries).as_bytes()) +} + +pub fn load_entries(path: &Path) -> Vec { + log::debug!("Loading entries from {:?}", path); + + let str = { + if !path.exists() { + log::info!("Consent file not exist: {}", path.display()); + return vec![]; + } + // Open the file in read-only mode + let file = match File::open(path) { + Ok(file) => file, + Err(e) => { + log::error!("Error opening file: {} {}", path.display(), e); + return vec![]; + } + }; + + let file_len = match file.metadata() { + Ok(metadata) => metadata.len(), + Err(e) => { + log::error!("Error reading file metadata: {} {}", path.display(), e); + return vec![]; + } + }; + + if file_len > 100000 { + log::error!( + "File unreasonably large, skipping parsing: {}", + path.display() + ); + return vec![]; + } + + let mut reader = io::BufReader::new(file); + + let mut buf = vec![0; file_len as usize]; + + match reader.read_exact(&mut buf) { + Ok(_) => (), + Err(e) => { + log::error!("Error reading file: {} {}", path.display(), e); + return vec![]; + } + } + match String::from_utf8(buf) { + Ok(str) => str, + Err(e) => { + log::error!( + "Error when decoding file (wrong binary format): {} {}", + path.display(), + e + ); + return vec![]; + } + } + }; + + let entries = str_to_entries(&str, path.display().to_string()); + + log::debug!("Loaded entries: {:?}", entries); + // normalize entries file + let str_entries = entries_to_str(entries.clone()); + let entries2 = str_to_entries(&str_entries, "internal".to_string()); + + if entries2 != entries { + log::warn!("Internal problem when normalizing entries file"); + return entries; + } + + if str_entries != str { + log::info!("Fixing consent file: {}", path.display()); + match OpenOptions::new() + .create(true) + .write(true) + .truncate(true) + .open(path) + { + Ok(file) => { + let mut writer = io::BufWriter::new(file); + + match writer.write_all(str_entries.as_bytes()) { + Ok(_) => (), + Err(e) => { + log::error!("Error writing to file: {} {}", path.display(), e); + } + } + } + Err(e) => { + log::error!("Error opening file for write: {}", e); + } + }; + } else { + log::debug!("Consent file doesn't need fixing: {}", path.display()); + } + + entries +} + +#[test] +pub fn test_entries_internal() { + use crate::ConsentScope; + use rand::Rng; + use std::path::PathBuf; + if std::env::var("RUST_LOG").is_err() { + std::env::set_var("RUST_LOG", "debug"); + } + let rand_string: String = rand::thread_rng() + .sample_iter(&rand::distributions::Alphanumeric) + 
.take(10) + .map(char::from) + .collect(); + + env_logger::init(); + let path = PathBuf::from(format!("tmp-{}.txt", rand_string)); + let entries = vec![ConsentEntry { + consent_scope: ConsentScope::Stats, + allowed: true, + }]; + + save_entries(&path, entries.clone()).unwrap(); + let loaded_entries = load_entries(&path); + + assert_eq!(entries, loaded_entries); + std::fs::remove_file(&path).unwrap(); +} diff --git a/utils/consent/src/lib.rs b/utils/consent/src/lib.rs new file mode 100644 index 0000000000..2ce0e735c4 --- /dev/null +++ b/utils/consent/src/lib.rs @@ -0,0 +1,19 @@ +mod api; +mod fs; +mod model; +mod parser; +mod startup; + +pub use api::{ + have_consent_cached, run_consent_command, set_consent, set_consent_path_in_yagna_dir, +}; +pub use model::{ConsentCommand, ConsentEntry, ConsentScope}; +pub use startup::consent_check_before_startup; + +use ya_service_api_interfaces::*; + +pub struct ConsentService; + +impl Service for ConsentService { + type Cli = ConsentCommand; +} diff --git a/utils/consent/src/model.rs b/utils/consent/src/model.rs new file mode 100644 index 0000000000..d7474e5974 --- /dev/null +++ b/utils/consent/src/model.rs @@ -0,0 +1,175 @@ +use crate::api::{get_consent_path, have_consent, to_json, ConsentSource}; +use crate::set_consent; +use serde::{Deserialize, Serialize}; +use serde_json::json; +use std::cmp::Ordering; +use std::{env, fmt}; +use structopt::StructOpt; +use strum::{EnumIter, IntoEnumIterator}; +use ya_service_api::{CliCtx, CommandOutput}; + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct ConsentEntry { + pub consent_scope: ConsentScope, + pub allowed: bool, +} + +#[derive(StructOpt, Copy, Debug, Clone, Serialize, Deserialize, PartialEq, EnumIter, Eq)] +pub enum ConsentScope { + /// Consent to augment stats.golem.network portal + /// with data collected from your node. + Stats, +} + +impl PartialOrd for ConsentScope { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} +impl Ord for ConsentScope { + fn cmp(&self, other: &Self) -> Ordering { + self.to_string().cmp(&other.to_string()) + } +} + +pub fn extra_info(consent_scope: ConsentScope) -> String { + match consent_scope { + ConsentScope::Stats => { + "Consent to augment stats.golem.network\nportal with data collected from your node." 
+ .to_string() + } + } +} + +pub fn extra_info_comment(consent_scope: ConsentScope) -> String { + let info = extra_info(consent_scope); + let mut comment_info = String::new(); + for line in info.split('\n') { + comment_info.push_str(&format!("# {}\n", line)); + } + comment_info +} + +pub fn full_question(consent_scope: ConsentScope) -> String { + match consent_scope { + ConsentScope::Stats => { + "Do you agree to augment stats.golem.network with data collected from your node (you can check the full range of information transferred in Terms)[allow/deny]?".to_string() + } + } +} + +impl fmt::Display for ConsentScope { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +impl ConsentScope { + pub fn to_lowercase_str(&self) -> String { + self.to_string().to_lowercase() + } +} + +/// Consent management +#[derive(StructOpt, Debug)] +pub enum ConsentCommand { + /// Show current settings + Show, + /// Allow all types of consent (for now there is only one) + AllowAll, + /// Deny all types of consent (for now there is only one) + DenyAll, + /// Unset all types of consent (for now there is only one) + UnsetAll, + /// Change settings + Allow(ConsentScope), + /// Change settings + Deny(ConsentScope), + /// Unset setting + Unset(ConsentScope), + /// Show path to the consent file + Path, +} + +pub fn display_consent_path() -> String { + get_consent_path() + .map(|p| p.to_string_lossy().to_string()) + .unwrap_or("not found".to_string()) +} + +impl ConsentCommand { + pub async fn run_command(self, ctx: &CliCtx) -> anyhow::Result { + match self { + ConsentCommand::Show => { + if ctx.json_output { + return Ok(CommandOutput::Object(to_json())); + } + let mut values = vec![]; + for consent_scope in ConsentScope::iter() { + let consent_res = have_consent(consent_scope, false); + let info = extra_info(consent_scope); + let is_allowed = match consent_res.consent { + Some(true) => "allow", + Some(false) => "deny", + None => "not set", + }; + let source = match consent_res.source { + ConsentSource::Config => "config file".to_string(), + ConsentSource::Env => { + let env_var_name = + format!("YA_CONSENT_{}", &consent_scope.to_string().to_uppercase()); + format!( + "env variable\n({}={})", + &env_var_name, + env::var(&env_var_name).unwrap_or("".to_string()) + ) + } + ConsentSource::Default => "N/A".to_string(), + }; + values.push(json!([consent_scope.to_string(), is_allowed, source, info])); + } + + return Ok(CommandOutput::Table { + columns: ["Scope", "Status", "Source", "Info"] + .iter() + .map(ToString::to_string) + .collect(), + values, + summary: vec![json!(["", "", "", ""])], + header: Some( + "Consents given to the Golem service, you can change them, run consent --help for more info\nSee Terms https://golem.network/privacy for details of the information collected.".to_string()), + }); + } + ConsentCommand::Allow(consent_scope) => { + set_consent(consent_scope, Some(true)); + } + ConsentCommand::Deny(consent_scope) => { + set_consent(consent_scope, Some(false)); + } + ConsentCommand::Unset(consent_scope) => { + set_consent(consent_scope, None); + } + ConsentCommand::AllowAll => { + for consent_scope in ConsentScope::iter() { + set_consent(consent_scope, Some(true)); + } + } + ConsentCommand::DenyAll => { + for consent_scope in ConsentScope::iter() { + set_consent(consent_scope, Some(false)); + } + } + ConsentCommand::UnsetAll => { + for consent_scope in ConsentScope::iter() { + set_consent(consent_scope, None); + } + } + ConsentCommand::Path => { + return 
Ok(CommandOutput::Object(json!({
+                    "path": crate::api::get_consent_path().map(|p| p.to_string_lossy().to_string()).unwrap_or("not found".to_string()),
+                })));
+            }
+        };
+        Ok(CommandOutput::NoOutput)
+    }
+}
diff --git a/utils/consent/src/parser.rs b/utils/consent/src/parser.rs
new file mode 100644
index 0000000000..b1b00512a3
--- /dev/null
+++ b/utils/consent/src/parser.rs
@@ -0,0 +1,84 @@
+use crate::model::extra_info_comment;
+use crate::{ConsentEntry, ConsentScope};
+use std::collections::BTreeMap;
+use strum::IntoEnumIterator;
+
+pub fn entries_to_str(entries: Vec<ConsentEntry>) -> String {
+    let mut res = String::new();
+    res.push_str("# This file contains consent settings\n");
+    res.push_str("# Format: <consent scope> allow/deny\n");
+    res.push_str("# Restart golem service (golemsp or yagna) to make sure changes are applied\n");
+
+    for entry in entries {
+        let allow_str = if entry.allowed { "allow" } else { "deny" };
+        res.push_str(&format!(
+            "\n\n{}{} {} \n",
+            extra_info_comment(entry.consent_scope),
+            entry.consent_scope,
+            allow_str
+        ));
+    }
+    res.replace("\n\n", "\n")
+}
+
+pub fn str_to_entries(str: &str, err_decorator_path: String) -> Vec<ConsentEntry> {
+    let mut entries_map: BTreeMap<String, ConsentEntry> = BTreeMap::new();
+    // Iterate over the lines in the file
+
+    'outer: for (line_no, line) in str.split('\n').enumerate() {
+        let line = line.split('#').next().unwrap_or(line).trim().to_lowercase();
+        if line.is_empty() {
+            continue;
+        }
+        for consent_scope in ConsentScope::iter() {
+            let consent_scope_str = consent_scope.to_lowercase_str();
+            if line.starts_with(&consent_scope_str) {
+                let Some(split) = line.split_once(' ') else {
+                    log::warn!("Invalid line: {} in file {}", line_no, err_decorator_path);
+                    continue 'outer;
+                };
+                let second_str = split.1.trim();
+
+                let allowed = if second_str == "allow" {
+                    true
+                } else if second_str == "deny" {
+                    false
+                } else {
+                    log::warn!(
+                        "Error when parsing consent: No allow or deny, line: {} in file {}",
+                        line_no,
+                        err_decorator_path
+                    );
+                    continue 'outer;
+                };
+                if let Some(entry) = entries_map.get_mut(&consent_scope_str) {
+                    if entry.allowed != allowed {
+                        log::warn!(
+                            "Error when parsing consent: Duplicate entry with different value, line: {} in file {}",
+                            line_no,
+                            err_decorator_path
+                        );
+                    }
+                } else {
+                    let entry = ConsentEntry {
+                        consent_scope,
+                        allowed,
+                    };
+                    entries_map.insert(consent_scope_str, entry);
+                }
+                continue 'outer;
+            }
+        }
+        log::warn!(
+            "Error when parsing consent: Invalid line: {} in file {}",
+            line_no,
+            err_decorator_path
+        );
+    }
+
+    let mut entries: Vec<ConsentEntry> = Vec::new();
+    for (_, entry) in entries_map {
+        entries.push(entry);
+    }
+    entries
+}
diff --git a/utils/consent/src/startup.rs b/utils/consent/src/startup.rs
new file mode 100644
index 0000000000..131a7dc137
--- /dev/null
+++ b/utils/consent/src/startup.rs
@@ -0,0 +1,62 @@
+use crate::api::{have_consent, set_consent};
+use crate::model::full_question;
+use crate::ConsentScope;
+use anyhow::anyhow;
+use strum::IntoEnumIterator;
+
+pub fn consent_check_before_startup(interactive: bool) -> anyhow::Result<()> {
+    // The consent check runs only when the require-consent feature is enabled
+    if cfg!(feature = "require-consent") {
+        if interactive {
+            log::info!("Checking consents interactive");
+        } else {
+            log::info!("Checking consents before startup non-interactive");
+        }
+        for consent_scope in ConsentScope::iter() {
+            let consent_int = have_consent(consent_scope, true);
+            if consent_int.consent.is_none() {
+                let res = loop {
+                    let prompt_res = if interactive {
+                        match promptly::prompt_default(
+                            format!("{} [allow/deny]",
full_question(consent_scope)), + "allow".to_string(), + ) { + Ok(res) => res, + Err(err) => { + return Err(anyhow!( + "Error when prompting: {}. Run setup again.", + err + )); + } + } + } else { + log::warn!("Consent {} not set. Run installer again or run command yagna consent allow {}", + consent_scope, + consent_scope.to_lowercase_str()); + return Ok(()); + }; + if prompt_res == "allow" { + break true; + } else if prompt_res == "deny" { + break false; + } + std::thread::sleep(std::time::Duration::from_secs(1)); + }; + set_consent(consent_scope, Some(res)); + } + } + + for consent_scope in ConsentScope::iter() { + let consent_res = have_consent(consent_scope, false); + if let Some(consent) = consent_res.consent { + log::info!( + "Consent {} - {} ({})", + consent_scope, + if consent { "allow" } else { "deny" }, + consent_res.source + ); + }; + } + } + Ok(()) +} diff --git a/utils/consent/tests/test-consent.rs b/utils/consent/tests/test-consent.rs new file mode 100644 index 0000000000..55c65109b2 --- /dev/null +++ b/utils/consent/tests/test-consent.rs @@ -0,0 +1,28 @@ +use std::env; +use ya_utils_consent::set_consent; +use ya_utils_consent::ConsentScope; + +#[test] +pub fn test_save_and_load_entries() { + use rand::Rng; + if env::var("RUST_LOG").is_err() { + env::set_var("RUST_LOG", "debug"); + } + let rand_string: String = rand::thread_rng() + .sample_iter(&rand::distributions::Alphanumeric) + .take(10) + .map(char::from) + .collect(); + + let consent_path = format!("tmp-{}.txt", rand_string); + env::set_var("YA_CONSENT_PATH", &consent_path); + env_logger::init(); + + { + set_consent(ConsentScope::Stats, Some(true)); + + let consent = ya_utils_consent::have_consent_cached(ConsentScope::Stats); + assert_eq!(consent.consent, Some(true)); + } + std::fs::remove_file(&consent_path).unwrap(); +} diff --git a/utils/manifest-utils/Cargo.toml b/utils/manifest-utils/Cargo.toml index 4bb6e1707f..86db7ef3c3 100644 --- a/utils/manifest-utils/Cargo.toml +++ b/utils/manifest-utils/Cargo.toml @@ -21,7 +21,7 @@ schema = ["schemars"] [dependencies] ya-agreement-utils = { workspace = true } -ya-utils-path = "0.1" +ya-utils-path.workspace = true ya-client-model.workspace = true golem-certificate = "0.1.1" @@ -57,8 +57,8 @@ serial_test = "2" shlex = "1.1" tar = "0.4" test-case = "3.1" -ya-manifest-test-utils = "0.1" +ya-manifest-test-utils.workspace = true openssl.workspace = true [lints] -workspace = true \ No newline at end of file +workspace = true diff --git a/utils/manifest-utils/src/keystore/golem_keystore.rs b/utils/manifest-utils/src/keystore/golem_keystore.rs index eacb1cb65d..5431a0f49d 100644 --- a/utils/manifest-utils/src/keystore/golem_keystore.rs +++ b/utils/manifest-utils/src/keystore/golem_keystore.rs @@ -23,6 +23,7 @@ pub struct GolemCertificateEntry { cert: ValidatedCertificate, } +#[allow(dead_code)] pub(super) trait GolemCertAddParams {} pub struct GolemKeystoreBuilder { diff --git a/utils/manifest-utils/test-utils/Cargo.toml b/utils/manifest-utils/test-utils/Cargo.toml index 27aac3c1ba..449d74c03c 100644 --- a/utils/manifest-utils/test-utils/Cargo.toml +++ b/utils/manifest-utils/test-utils/Cargo.toml @@ -13,5 +13,5 @@ base64 = "0.13" tar = "0.4" openssl = "0.10" -ya-manifest-utils = "0.2" +ya-manifest-utils.workspace = true ya-agreement-utils = { workspace = true } diff --git a/utils/path/src/data_dir.rs b/utils/path/src/data_dir.rs index 2626071e6d..2f9efa3bd7 100644 --- a/utils/path/src/data_dir.rs +++ b/utils/path/src/data_dir.rs @@ -1,6 +1,7 @@ use crate::normalize_path; use 
anyhow::Context;
-use std::{ops::Not, path::PathBuf, str::FromStr, string::ToString};
+use std::fmt::Display;
+use std::{ops::Not, path::PathBuf, str::FromStr};
 
 const ORGANIZATION: &str = "GolemFactory";
 const QUALIFIER: &str = "";
@@ -20,7 +21,7 @@ impl DataDir {
     pub fn get_or_create(&self) -> anyhow::Result<PathBuf> {
         if self.0.exists().not() {
             // not using logger here bc it might haven't been initialized yet
-            eprintln!("Creating data dir: {}", self.0.display());
+            log::info!("Creating data dir: {}", self.0.display());
             std::fs::create_dir_all(&self.0)
                 .context(format!("data dir {:?} creation error", self))?;
         }
@@ -36,14 +37,8 @@ impl FromStr for DataDir {
     }
 }
 
-impl ToString for DataDir {
-    fn to_string(&self) -> String {
-        /*
-            It's important for output to not include quotes.
-            Otherwise flexi logger tries to create a path like
-            "/home/user/.local/share/yagna"/yagna.log
-            and those extra quotes are causing problems.
-        */
-        self.0.to_string_lossy().to_string()
+impl Display for DataDir {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "{}", self.0.display())
     }
 }
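Taken together, the new crate exposes a small programmatic surface. The sketch below is a minimal, hypothetical usage example based on the exports in lib.rs and the pattern in tests/test-consent.rs; the file name is made up for illustration, and it is not code from this change set:

```rust
use ya_utils_consent::{have_consent_cached, set_consent, ConsentScope};

fn main() {
    // Point the consent store at a throwaway file instead of the yagna data dir
    // (the same YA_CONSENT_PATH override the integration test relies on).
    std::env::set_var("YA_CONSENT_PATH", "consent-example.txt");

    // Record consent for the Stats scope and read it back through the cache.
    set_consent(ConsentScope::Stats, Some(true));
    let consent = have_consent_cached(ConsentScope::Stats);
    assert_eq!(consent.consent, Some(true));

    // Clean up the temporary file; ignore the result if it was never created.
    let _ = std::fs::remove_file("consent-example.txt");
}
```

From the command line the same settings should be reachable through the new subcommand defined by ConsentCommand, e.g. `yagna consent show`, `yagna consent allow stats`, or `yagna consent path` (the `allow stats` form matches the hint printed by consent_check_before_startup).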