diff --git a/.config/nextest.toml b/.config/nextest.toml
index 912bf2514a77..b4bdec4aea92 100644
--- a/.config/nextest.toml
+++ b/.config/nextest.toml
@@ -21,7 +21,6 @@ retries = 5
 # The number of threads to run tests with. Supported values are either an integer or
 # the string "num-cpus". Can be overridden through the `--test-threads` option.
 # test-threads = "num-cpus"
-
 test-threads = 20
 
 # The number of threads required for each test. This is generally used in overrides to
diff --git a/.github/scripts/release/build-linux-release.sh b/.github/scripts/release/build-linux-release.sh
index a6bd658d292a..874c9b44788b 100755
--- a/.github/scripts/release/build-linux-release.sh
+++ b/.github/scripts/release/build-linux-release.sh
@@ -3,6 +3,8 @@
 # This is used to build our binaries:
 # - polkadot
 # - polkadot-parachain
+# - polkadot-omni-node
+#
 #
 set -e
 BIN=$1
@@ -21,7 +23,7 @@ time cargo build --profile $PROFILE --locked --verbose --bin $BIN --package $PAC
 echo "Artifact target: $ARTIFACTS"
 
 cp ./target/$PROFILE/$BIN "$ARTIFACTS"
-pushd "$ARTIFACTS" > /dev/nul
+pushd "$ARTIFACTS" > /dev/null
 sha256sum "$BIN" | tee "$BIN.sha256"
 
 EXTRATAG="$($ARTIFACTS/$BIN --version |
diff --git a/.github/scripts/release/build-macos-release.sh b/.github/scripts/release/build-macos-release.sh
new file mode 100755
index 000000000000..ba6dcc65d650
--- /dev/null
+++ b/.github/scripts/release/build-macos-release.sh
@@ -0,0 +1,37 @@
+#!/usr/bin/env bash
+
+# This is used to build our binaries:
+# - polkadot
+# - polkadot-parachain
+# - polkadot-omni-node
+set -e
+
+BIN=$1
+PACKAGE=${2:-$BIN}
+
+PROFILE=${PROFILE:-production}
+# parity-macos runner needs a path where it can
+# write, so make it relative to github workspace.
+ARTIFACTS=$GITHUB_WORKSPACE/artifacts/$BIN
+VERSION=$(git tag -l --contains HEAD | grep -E "^v.*")
+
+echo "Artifacts will be copied into $ARTIFACTS"
+mkdir -p "$ARTIFACTS"
+
+git log --pretty=oneline -n 1
+time cargo build --profile $PROFILE --locked --verbose --bin $BIN --package $PACKAGE
+
+echo "Artifact target: $ARTIFACTS"
+
+cp ./target/$PROFILE/$BIN "$ARTIFACTS"
+pushd "$ARTIFACTS" > /dev/null
+sha256sum "$BIN" | tee "$BIN.sha256"
+
+EXTRATAG="$($ARTIFACTS/$BIN --version |
+  sed -n -r 's/^'$BIN' ([0-9.]+.*-[0-9a-f]{7,13})-.*$/\1/p')"
+
+EXTRATAG="${VERSION}-${EXTRATAG}-$(cut -c 1-8 $ARTIFACTS/$BIN.sha256)"
+
+echo "$BIN version = ${VERSION} (EXTRATAG = ${EXTRATAG})"
+echo -n ${VERSION} > "$ARTIFACTS/VERSION"
+echo -n ${EXTRATAG} > "$ARTIFACTS/EXTRATAG"
diff --git a/.github/scripts/release/release_lib.sh b/.github/scripts/release/release_lib.sh
index f5032073b617..8b9254ec3f29 100644
--- a/.github/scripts/release/release_lib.sh
+++ b/.github/scripts/release/release_lib.sh
@@ -1,6 +1,6 @@
 #!/usr/bin/env bash
 
-# Set the new version by replacing the value of the constant given as patetrn
+# Set the new version by replacing the value of the constant given as pattern
 # in the file.
# # input: pattern, version, file @@ -119,21 +119,23 @@ set_polkadot_parachain_binary_version() { upload_s3_release() { - alias aws='podman run --rm -it docker.io/paritytech/awscli -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e AWS_BUCKET aws' - - product=$1 - version=$2 - - echo "Working on product: $product " - echo "Working on version: $version " - - echo "Current content, should be empty on new uploads:" - aws s3 ls "s3://releases.parity.io/polkadot/${version}/" --recursive --human-readable --summarize || true - echo "Content to be uploaded:" - artifacts="artifacts/$product/" - ls "$artifacts" - aws s3 sync --acl public-read "$artifacts" "s3://releases.parity.io/polkadot/${version}/" - echo "Uploaded files:" - aws s3 ls "s3://releases.parity.io/polkadot/${version}/" --recursive --human-readable --summarize - echo "✅ The release should be at https://releases.parity.io/polkadot/${version}" + alias aws='podman run --rm -it docker.io/paritytech/awscli -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e AWS_BUCKET aws' + + product=$1 + version=$2 + target=$3 + + echo "Working on product: $product " + echo "Working on version: $version " + echo "Working on platform: $target " + + echo "Current content, should be empty on new uploads:" + aws s3 ls "s3://releases.parity.io/${product}/${version}/${target}" --recursive --human-readable --summarize || true + echo "Content to be uploaded:" + artifacts="artifacts/$product/" + ls "$artifacts" + aws s3 sync --acl public-read "$artifacts" "s3://releases.parity.io/${product}/${version}/${target}" + echo "Uploaded files:" + aws s3 ls "s3://releases.parity.io/${product}/${version}/${target}" --recursive --human-readable --summarize + echo "✅ The release should be at https://releases.parity.io/${product}/${version}/${target}" } diff --git a/.github/workflows/release-30_publish_release_draft.yml b/.github/workflows/release-30_publish_release_draft.yml index 376f5fbce909..4364b4f80457 100644 --- a/.github/workflows/release-30_publish_release_draft.yml +++ b/.github/workflows/release-30_publish_release_draft.yml @@ -34,7 +34,7 @@ jobs: strategy: matrix: # Tuples of [package, binary-name] - binary: [ [frame-omni-bencher, frame-omni-bencher], [staging-chain-spec-builder, chain-spec-builder], [polkadot-omni-node, polkadot-omni-node] ] + binary: [ [frame-omni-bencher, frame-omni-bencher], [staging-chain-spec-builder, chain-spec-builder] ] steps: - name: Checkout sources uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.0.0 @@ -161,7 +161,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - binary: [frame-omni-bencher, chain-spec-builder, polkadot-omni-node] + binary: [frame-omni-bencher, chain-spec-builder] steps: - name: Download artifacts diff --git a/.github/workflows/release-build-rc.yml b/.github/workflows/release-build-rc.yml index 94bacf320898..a43c2b282a8d 100644 --- a/.github/workflows/release-build-rc.yml +++ b/.github/workflows/release-build-rc.yml @@ -10,6 +10,7 @@ on: options: - polkadot - polkadot-parachain + - polkadot-omni-node - all release_tag: @@ -47,6 +48,7 @@ jobs: binary: '["polkadot", "polkadot-prepare-worker", "polkadot-execute-worker"]' package: polkadot release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: x86_64-unknown-linux-gnu secrets: PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} @@ -68,6 +70,95 @@ jobs: binary: '["polkadot-parachain"]' package: "polkadot-parachain-bin" release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: 
x86_64-unknown-linux-gnu + secrets: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + permissions: + id-token: write + attestations: write + contents: read + + build-polkadot-omni-node-binary: + needs: [validate-inputs] + if: ${{ inputs.binary == 'polkadot-omni-node' || inputs.binary == 'all' }} + uses: "./.github/workflows/release-reusable-rc-buid.yml" + with: + binary: '["polkadot-omni-node"]' + package: "polkadot-omni-node" + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: x86_64-unknown-linux-gnu + secrets: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + permissions: + id-token: write + attestations: write + contents: read + + build-polkadot-macos-binary: + needs: [validate-inputs] + if: ${{ inputs.binary == 'polkadot' || inputs.binary == 'all' }} + uses: "./.github/workflows/release-reusable-rc-buid.yml" + with: + binary: '["polkadot", "polkadot-prepare-worker", "polkadot-execute-worker"]' + package: polkadot + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: aarch64-apple-darwin + secrets: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + permissions: + id-token: write + attestations: write + contents: read + + build-polkadot-parachain-macos-binary: + needs: [validate-inputs] + if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'all' }} + uses: "./.github/workflows/release-reusable-rc-buid.yml" + with: + binary: '["polkadot-parachain"]' + package: "polkadot-parachain-bin" + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: aarch64-apple-darwin + secrets: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }} + AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }} + permissions: + id-token: write + attestations: write + contents: read + + build-polkadot-omni-node-macos-binary: + needs: [validate-inputs] + if: ${{ inputs.binary == 'polkadot-omni-node' || inputs.binary == 'all' }} + uses: "./.github/workflows/release-reusable-rc-buid.yml" + with: + binary: '["polkadot-omni-node"]' + package: "polkadot-omni-node" + release_tag: ${{ needs.validate-inputs.outputs.release_tag }} + target: aarch64-apple-darwin secrets: PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }} 
      PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }}
diff --git a/.github/workflows/release-reusable-rc-buid.yml b/.github/workflows/release-reusable-rc-buid.yml
index d925839fb84a..7e31a4744b59 100644
--- a/.github/workflows/release-reusable-rc-buid.yml
+++ b/.github/workflows/release-reusable-rc-buid.yml
@@ -10,7 +10,7 @@ on:
         type: string
 
       package:
-        description: Package to be built, for now is either polkadot or polkadot-parachain-bin
+        description: Package to be built, for now can be polkadot, polkadot-parachain-bin, or polkadot-omni-node
         required: true
         type: string
 
@@ -19,6 +19,11 @@ on:
         required: true
         type: string
 
+      target:
+        description: Target triple for which the artifacts are being built (e.g. x86_64-unknown-linux-gnu)
+        required: true
+        type: string
+
     secrets:
       PGP_KMS_KEY:
         required: true
@@ -57,6 +62,7 @@ jobs:
         run: cat .github/env >> $GITHUB_OUTPUT
 
   build-rc:
+    if: ${{ inputs.target == 'x86_64-unknown-linux-gnu' }}
     needs: [set-image]
    runs-on: ubuntu-latest-m
     environment: release
@@ -130,8 +136,124 @@ jobs:
           name: ${{ matrix.binaries }}
           path: /artifacts/${{ matrix.binaries }}
 
+  build-macos-rc:
+    if: ${{ inputs.target == 'aarch64-apple-darwin' }}
+    runs-on: parity-macos
+    environment: release
+    strategy:
+      matrix:
+        binaries: ${{ fromJSON(inputs.binary) }}
+    env:
+      PGP_KMS_KEY: ${{ secrets.PGP_KMS_KEY }}
+      PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }}
+      AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+      AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+      AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+      SKIP_WASM_BUILD: 1
+    steps:
+      - name: Checkout sources
+        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+        with:
+          ref: ${{ inputs.release_tag }}
+          fetch-depth: 0
+
+      - name: Set rust version from env file
+        run: |
+          RUST_VERSION=$(cat .github/env | sed -E 's/.*ci-unified:([^-]+)-([^-]+).*/\2/')
+          echo $RUST_VERSION
+          echo "RUST_VERSION=${RUST_VERSION}" >> $GITHUB_ENV
+      - name: Set workspace environment variable
+        # relevant for artifacts upload, which cannot interpolate GitHub Actions variable syntax when
+        # used within valid paths. We cannot use root-based paths either, since it is set as read-only
+        # on the `parity-macos` runner.
+        run: echo "ARTIFACTS_PATH=${GITHUB_WORKSPACE}/artifacts/${{ matrix.binaries }}" >> $GITHUB_ENV
+
+      - name: Set up Homebrew
+        uses: Homebrew/actions/setup-homebrew@1ccc07ccd54b6048295516a3eb89b192c35057dc # master from 12.09.2024
+      - name: Set homebrew binaries location on path
+        run: echo "/opt/homebrew/bin" >> $GITHUB_PATH
+
+      - name: Install rust ${{ env.RUST_VERSION }}
+        uses: actions-rust-lang/setup-rust-toolchain@11df97af8e8102fd60b60a77dfbf58d40cd843b8 # v1.10.1
+        with:
+          cache: false
+          toolchain: ${{ env.RUST_VERSION }}
+          target: wasm32-unknown-unknown
+          components: cargo, clippy, rust-docs, rust-src, rustfmt, rustc, rust-std
+
+      - name: cargo info
+        run: |
+          echo "######## rustup show ########"
+          rustup show
+          echo "######## cargo --version ########"
+          cargo --version
+
+      - name: Install protobuf
+        run: brew install protobuf
+      - name: Install gpg
+        run: |
+          brew install gnupg
+          # Setup for being able to resolve: keyserver.ubuntu.com.
+          # See: https://github.com/actions/runner-images/issues/9777
+          mkdir -p ~/.gnupg/
+          touch ~/.gnupg/dirmngr.conf
+          echo "standard-resolver" > ~/.gnupg/dirmngr.conf
+      - name: Install sha256sum
+        run: |
+          brew install coreutils
+
+      - name: Install pgpkms
+        run: |
+          # Install pgpkms that is used to sign built artifacts
+          python3 -m pip install "pgpkms @ git+https://github.com/paritytech-release/pgpkms.git@5a8f82fbb607ea102d8c178e761659de54c7af69" --break-system-packages
+
+      - name: Import gpg keys
+        shell: bash
+        run: |
+          . ./.github/scripts/common/lib.sh
+
+          import_gpg_keys
+
+      - name: Build binary
+        run: |
+          git config --global --add safe.directory "${GITHUB_WORKSPACE}" # avoid "detected dubious ownership" error
+          ./.github/scripts/release/build-macos-release.sh ${{ matrix.binaries }} ${{ inputs.package }}
+
+      - name: Generate artifact attestation
+        uses: actions/attest-build-provenance@1c608d11d69870c2092266b3f9a6f3abbf17002c # v1.4.3
+        with:
+          subject-path: ${{ env.ARTIFACTS_PATH }}/${{ matrix.binaries }}
+
+      - name: Sign artifacts
+        working-directory: ${{ env.ARTIFACTS_PATH }}
+        run: |
+          python3 -m pgpkms sign --input ${{ matrix.binaries }} -o ${{ matrix.binaries }}.asc
+
+      - name: Check sha256 ${{ matrix.binaries }}
+        working-directory: ${{ env.ARTIFACTS_PATH }}
+        shell: bash
+        run: |
+          . "${GITHUB_WORKSPACE}"/.github/scripts/common/lib.sh
+
+          echo "Checking binary ${{ matrix.binaries }}"
+          check_sha256 ${{ matrix.binaries }}
+
+      - name: Check GPG ${{ matrix.binaries }}
+        working-directory: ${{ env.ARTIFACTS_PATH }}
+        shell: bash
+        run: |
+          . "${GITHUB_WORKSPACE}"/.github/scripts/common/lib.sh
+
+          check_gpg ${{ matrix.binaries }}
+
+      - name: Upload ${{ matrix.binaries }} artifacts
+        uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
+        with:
+          name: ${{ matrix.binaries }}_${{ inputs.target }}
+          path: ${{ env.ARTIFACTS_PATH }}
+
   build-polkadot-deb-package:
-    if: ${{ inputs.package == 'polkadot' }}
+    if: ${{ inputs.package == 'polkadot' && inputs.target == 'x86_64-unknown-linux-gnu' }}
     needs: [build-rc]
     runs-on: ubuntu-latest
@@ -168,12 +290,13 @@ jobs:
           overwrite: true
 
   upload-polkadot-artifacts-to-s3:
-    if: ${{ inputs.package == 'polkadot' }}
+    if: ${{ inputs.package == 'polkadot' && inputs.target == 'x86_64-unknown-linux-gnu' }}
     needs: [build-polkadot-deb-package]
     uses: ./.github/workflows/release-reusable-s3-upload.yml
     with:
       package: ${{ inputs.package }}
       release_tag: ${{ inputs.release_tag }}
+      target: ${{ inputs.target }}
     secrets:
       AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
       AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }}
@@ -181,12 +304,93 @@
 
   upload-polkadot-parachain-artifacts-to-s3:
-    if: ${{ inputs.package == 'polkadot-parachain-bin' }}
+    if: ${{ inputs.package == 'polkadot-parachain-bin' && inputs.target == 'x86_64-unknown-linux-gnu' }}
     needs: [build-rc]
     uses: ./.github/workflows/release-reusable-s3-upload.yml
     with:
       package: polkadot-parachain
       release_tag: ${{ inputs.release_tag }}
+      target: ${{ inputs.target }}
+    secrets:
+      AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+      AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }}
+      AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }}
+
+  upload-polkadot-omni-node-artifacts-to-s3:
+    if: ${{ inputs.package == 'polkadot-omni-node' && inputs.target == 'x86_64-unknown-linux-gnu' }}
+    needs: [build-rc]
+    uses: ./.github/workflows/release-reusable-s3-upload.yml
+    with:
+      package: ${{ inputs.package }}
+      release_tag: ${{ inputs.release_tag }}
+      target: ${{ inputs.target }}
+    secrets:
+      AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+      AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }}
+      AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }}
+
+  upload-polkadot-macos-artifacts-to-s3:
+    if: ${{ inputs.package == 'polkadot' && inputs.target == 'aarch64-apple-darwin' }}
+    # TODO: add and use a `build-polkadot-homebrew-package` which packs all `polkadot` binaries:
+    # `polkadot`, `polkadot-prepare-worker` and `polkadot-execute-worker`.
+    needs: [build-macos-rc]
+    uses: ./.github/workflows/release-reusable-s3-upload.yml
+    with:
+      package: ${{ inputs.package }}
+      release_tag: ${{ inputs.release_tag }}
+      target: ${{ inputs.target }}
+    secrets:
+      AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+      AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }}
+      AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }}
+
+  upload-polkadot-prepare-worker-macos-artifacts-to-s3:
+    if: ${{ inputs.package == 'polkadot' && inputs.target == 'aarch64-apple-darwin' }}
+    needs: [build-macos-rc]
+    uses: ./.github/workflows/release-reusable-s3-upload.yml
+    with:
+      package: polkadot-prepare-worker
+      release_tag: ${{ inputs.release_tag }}
+      target: ${{ inputs.target }}
+    secrets:
+      AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+      AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }}
+      AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }}
+
+  upload-polkadot-execute-worker-macos-artifacts-to-s3:
+    if: ${{ inputs.package == 'polkadot' && inputs.target == 'aarch64-apple-darwin' }}
+    needs: [build-macos-rc]
+    uses: ./.github/workflows/release-reusable-s3-upload.yml
+    with:
+      package: polkadot-execute-worker
+      release_tag: ${{ inputs.release_tag }}
+      target: ${{ inputs.target }}
+    secrets:
+      AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+      AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }}
+      AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }}
+
+  upload-polkadot-omni-node-macos-artifacts-to-s3:
+    if: ${{ inputs.package == 'polkadot-omni-node' && inputs.target == 'aarch64-apple-darwin' }}
+    needs: [build-macos-rc]
+    uses: ./.github/workflows/release-reusable-s3-upload.yml
+    with:
+      package: ${{ inputs.package }}
+      release_tag: ${{ inputs.release_tag }}
+      target: ${{ inputs.target }}
+    secrets:
+      AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+      AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }}
+      AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }}
+
+  upload-polkadot-parachain-macos-artifacts-to-s3:
+    if: ${{ inputs.package == 'polkadot-parachain-bin' && inputs.target == 'aarch64-apple-darwin' }}
+    needs: [build-macos-rc]
+    uses: ./.github/workflows/release-reusable-s3-upload.yml
+    with:
+      package: polkadot-parachain
+      release_tag: ${{ inputs.release_tag }}
+      target: ${{ inputs.target }}
     secrets:
       AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
       AWS_RELEASE_ACCESS_KEY_ID: ${{ secrets.AWS_RELEASE_ACCESS_KEY_ID }}
       AWS_RELEASE_SECRET_ACCESS_KEY: ${{ secrets.AWS_RELEASE_SECRET_ACCESS_KEY }}
diff --git a/.github/workflows/release-reusable-s3-upload.yml b/.github/workflows/release-reusable-s3-upload.yml
index 6776b78da8e6..f85466bc8c07 100644
--- a/.github/workflows/release-reusable-s3-upload.yml
+++ b/.github/workflows/release-reusable-s3-upload.yml
@@ -13,6 +13,11 @@ on:
         required: true
         type: string
 
+      target:
+        description: Target triple for which the artifacts are being uploaded (e.g. aarch64-apple-darwin)
+ required: true + type: string + secrets: AWS_DEFAULT_REGION: required: true @@ -34,12 +39,20 @@ jobs: - name: Checkout uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - - name: Download artifacts + - name: Download amd64 artifacts + if: ${{ inputs.target == 'x86_64-unknown-linux-gnu' }} uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 with: name: ${{ inputs.package }} path: artifacts/${{ inputs.package }} + - name: Download arm artifacts + if: ${{ inputs.target == 'aarch64-apple-darwin' }} + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + with: + name: ${{ inputs.package }}_aarch64-apple-darwin + path: artifacts/${{ inputs.package }} + - name: Configure AWS Credentials uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 with: @@ -50,4 +63,4 @@ jobs: - name: Upload ${{ inputs.package }} artifacts to s3 run: | . ./.github/scripts/release/release_lib.sh - upload_s3_release ${{ inputs.package }} ${{ inputs.release_tag }} + upload_s3_release ${{ inputs.package }} ${{ inputs.release_tag }} ${{ inputs.target }} diff --git a/Cargo.lock b/Cargo.lock index 85388fbed437..3a2657b318f2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17548,6 +17548,7 @@ dependencies = [ "rococo-runtime", "rusty-fork", "sc-sysinfo", + "sc-tracing", "slotmap", "sp-core 28.0.0", "sp-maybe-compressed-blob 11.0.0", diff --git a/bridges/bin/runtime-common/src/integrity.rs b/bridges/bin/runtime-common/src/integrity.rs index 2ff6c4c9165a..535f1a26e5e8 100644 --- a/bridges/bin/runtime-common/src/integrity.rs +++ b/bridges/bin/runtime-common/src/integrity.rs @@ -89,13 +89,11 @@ macro_rules! assert_bridge_messages_pallet_types( /// Macro that combines four other macro calls - `assert_chain_types`, `assert_bridge_types`, /// and `assert_bridge_messages_pallet_types`. It may be used -/// at the chain that is implementing complete standard messages bridge (i.e. with bridge GRANDPA -/// and messages pallets deployed). +/// at the chain that is implementing standard messages bridge with messages pallets deployed. #[macro_export] macro_rules! assert_complete_bridge_types( ( runtime: $r:path, - with_bridged_chain_grandpa_instance: $gi:path, with_bridged_chain_messages_instance: $mi:path, this_chain: $this:path, bridged_chain: $bridged:path, @@ -186,34 +184,55 @@ where ); } -/// Parameters for asserting bridge pallet names. +/// Parameters for asserting bridge GRANDPA pallet names. #[derive(Debug)] -pub struct AssertBridgePalletNames<'a> { +struct AssertBridgeGrandpaPalletNames<'a> { /// Name of the GRANDPA pallet, deployed at this chain and used to bridge with the bridged /// chain. pub with_bridged_chain_grandpa_pallet_name: &'a str, - /// Name of the messages pallet, deployed at this chain and used to bridge with the bridged - /// chain. - pub with_bridged_chain_messages_pallet_name: &'a str, } /// Tests that bridge pallet names used in `construct_runtime!()` macro call are matching constants /// from chain primitives crates. 
-fn assert_bridge_pallet_names<R, GI, MI>(params: AssertBridgePalletNames)
+fn assert_bridge_grandpa_pallet_names<R, GI>(params: AssertBridgeGrandpaPalletNames)
 where
-	R: pallet_bridge_grandpa::Config<GI> + pallet_bridge_messages::Config<MI>,
+	R: pallet_bridge_grandpa::Config<GI>,
 	GI: 'static,
-	MI: 'static,
 {
 	// check that the bridge GRANDPA pallet has required name
 	assert_eq!(
-		pallet_bridge_grandpa::PalletOwner::<R, GI>::storage_value_final_key().to_vec(),
+		pallet_bridge_grandpa::PalletOwner::<R, GI>::storage_value_final_key().to_vec(),
+		bp_runtime::storage_value_key(
+			params.with_bridged_chain_grandpa_pallet_name,
+			"PalletOwner",
+		)
+		.0,
+	);
+	assert_eq!(
+		pallet_bridge_grandpa::PalletOperatingMode::<R, GI>::storage_value_final_key().to_vec(),
 		bp_runtime::storage_value_key(
 			params.with_bridged_chain_grandpa_pallet_name,
-			"PalletOwner",
-		).0,
+			"PalletOperatingMode",
+		)
+		.0,
 	);
+}
 
+/// Parameters for asserting bridge messages pallet names.
+#[derive(Debug)]
+struct AssertBridgeMessagesPalletNames<'a> {
+	/// Name of the messages pallet, deployed at this chain and used to bridge with the bridged
+	/// chain.
+	pub with_bridged_chain_messages_pallet_name: &'a str,
+}
+
+/// Tests that bridge pallet names used in `construct_runtime!()` macro call are matching constants
+/// from chain primitives crates.
+fn assert_bridge_messages_pallet_names<R, MI>(params: AssertBridgeMessagesPalletNames)
+where
+	R: pallet_bridge_messages::Config<MI>,
+	MI: 'static,
+{
 	// check that the bridge messages pallet has required name
 	assert_eq!(
 		pallet_bridge_messages::PalletOwner::<R, MI>::storage_value_final_key().to_vec(),
@@ -223,6 +242,14 @@ where
 		)
 		.0,
 	);
+	assert_eq!(
+		pallet_bridge_messages::PalletOperatingMode::<R, MI>::storage_value_final_key().to_vec(),
+		bp_runtime::storage_value_key(
+			params.with_bridged_chain_messages_pallet_name,
+			"PalletOperatingMode",
+		)
+		.0,
+	);
 }
 
 /// Parameters for asserting complete standard messages bridge.
@@ -246,9 +273,11 @@ pub fn assert_complete_with_relay_chain_bridge_constants<R, GI, MI>(
 	assert_chain_constants::<R>(params.this_chain_constants);
 	assert_bridge_grandpa_pallet_constants::<R, GI>();
 	assert_bridge_messages_pallet_constants::<R, MI>();
-	assert_bridge_pallet_names::<R, GI, MI>(AssertBridgePalletNames {
+	assert_bridge_grandpa_pallet_names::<R, GI>(AssertBridgeGrandpaPalletNames {
 		with_bridged_chain_grandpa_pallet_name:
 			<R as pallet_bridge_grandpa::Config<GI>>::BridgedChain::WITH_CHAIN_GRANDPA_PALLET_NAME,
+	});
+	assert_bridge_messages_pallet_names::<R, MI>(AssertBridgeMessagesPalletNames {
 		with_bridged_chain_messages_pallet_name:
 			<R as pallet_bridge_messages::Config<MI>>::BridgedChain::WITH_CHAIN_MESSAGES_PALLET_NAME,
 	});
@@ -256,21 +285,43 @@
 
 /// All bridge-related constants tests for the complete standard parachain messages bridge
 /// (i.e. with bridge GRANDPA, parachains and messages pallets deployed).
-pub fn assert_complete_with_parachain_bridge_constants<R, GI, MI, RelayChain>(
+pub fn assert_complete_with_parachain_bridge_constants<R, PI, MI>(
 	params: AssertCompleteBridgeConstants,
 ) where
 	R: frame_system::Config
-		+ pallet_bridge_grandpa::Config<GI>
+		+ pallet_bridge_parachains::Config<PI>
 		+ pallet_bridge_messages::Config<MI>,
-	GI: 'static,
+	<R as pallet_bridge_parachains::Config<PI>>::BridgedRelayChain: ChainWithGrandpa,
+	PI: 'static,
+	MI: 'static,
+{
+	assert_chain_constants::<R>(params.this_chain_constants);
+	assert_bridge_grandpa_pallet_constants::<R, R::BridgesGrandpaPalletInstance>();
+	assert_bridge_messages_pallet_constants::<R, MI>();
+	assert_bridge_grandpa_pallet_names::<R, R::BridgesGrandpaPalletInstance>(
+		AssertBridgeGrandpaPalletNames {
+			with_bridged_chain_grandpa_pallet_name:
+				<<R as pallet_bridge_parachains::Config<PI>>::BridgedRelayChain>::WITH_CHAIN_GRANDPA_PALLET_NAME,
+		},
+	);
+	assert_bridge_messages_pallet_names::<R, MI>(AssertBridgeMessagesPalletNames {
+		with_bridged_chain_messages_pallet_name:
+			<R as pallet_bridge_messages::Config<MI>>::BridgedChain::WITH_CHAIN_MESSAGES_PALLET_NAME,
+	});
+}
+
+/// All bridge-related constants tests for the standalone messages bridge deployment (only with
+/// messages pallets deployed).
+pub fn assert_standalone_messages_bridge_constants<R, MI>(params: AssertCompleteBridgeConstants)
+where
+	R: frame_system::Config + pallet_bridge_messages::Config<MI>,
 	MI: 'static,
-	RelayChain: ChainWithGrandpa,
 {
 	assert_chain_constants::<R>(params.this_chain_constants);
-	assert_bridge_grandpa_pallet_constants::<R, GI>();
 	assert_bridge_messages_pallet_constants::<R, MI>();
-	assert_bridge_pallet_names::<R, GI, MI>(AssertBridgePalletNames {
-		with_bridged_chain_grandpa_pallet_name: RelayChain::WITH_CHAIN_GRANDPA_PALLET_NAME,
+	assert_bridge_messages_pallet_names::<R, MI>(AssertBridgeMessagesPalletNames {
 		with_bridged_chain_messages_pallet_name:
 			<R as pallet_bridge_messages::Config<MI>>::BridgedChain::WITH_CHAIN_MESSAGES_PALLET_NAME,
 	});
diff --git a/bridges/bin/runtime-common/src/mock.rs b/bridges/bin/runtime-common/src/mock.rs
index 6cf04b452da7..88037d9deff5 100644
--- a/bridges/bin/runtime-common/src/mock.rs
+++ b/bridges/bin/runtime-common/src/mock.rs
@@ -196,6 +196,7 @@ impl pallet_bridge_messages::Config for TestRuntime {
 	type DeliveryConfirmationPayments = pallet_bridge_relayers::DeliveryConfirmationPaymentsAdapter<
 		TestRuntime,
 		(),
+		(),
 		ConstU64<100_000>,
 	>;
 	type OnMessagesDelivered = ();
diff --git a/bridges/modules/relayers/src/lib.rs b/bridges/modules/relayers/src/lib.rs
index f06c2e16ac24..d1c71b6d3051 100644
--- a/bridges/modules/relayers/src/lib.rs
+++ b/bridges/modules/relayers/src/lib.rs
@@ -22,8 +22,9 @@ use bp_relayers::{
 	ExplicitOrAccountParams, PaymentProcedure, Registration, RelayerRewardsKeyProvider,
-	RewardsAccountParams, StakeAndSlash,
+	StakeAndSlash,
 };
+pub use bp_relayers::{RewardsAccountOwner, RewardsAccountParams};
 use bp_runtime::StorageDoubleMapKeyProvider;
 use frame_support::fail;
 use sp_arithmetic::traits::{AtLeast32BitUnsigned, Zero};
@@ -31,7 +32,7 @@ use sp_runtime::{traits::CheckedSub, Saturating};
 use sp_std::marker::PhantomData;
 
 pub use pallet::*;
-pub use payment_adapter::DeliveryConfirmationPaymentsAdapter;
+pub use payment_adapter::{DeliveryConfirmationPaymentsAdapter, PayRewardFromAccount};
 pub use stake_adapter::StakeAndSlashNamed;
 pub use weights::WeightInfo;
 pub use weights_ext::WeightInfoExt;
diff --git a/bridges/modules/relayers/src/mock.rs b/bridges/modules/relayers/src/mock.rs
index d186e968e648..7dc213249379 100644
--- a/bridges/modules/relayers/src/mock.rs
+++ b/bridges/modules/relayers/src/mock.rs
@@ -171,14 +171,14 @@ pub type TestStakeAndSlash = pallet_bridge_relayers::StakeAndSlashNamed<
 frame_support::construct_runtime! {
 	pub enum TestRuntime {
-		System: frame_system::{Pallet, Call, Config<T>, Storage, Event<T>},
+		System: frame_system,
 		Utility: pallet_utility,
-		Balances: pallet_balances::{Pallet, Call, Storage, Config<T>, Event<T>},
-		TransactionPayment: pallet_transaction_payment::{Pallet, Storage, Event<T>},
-		BridgeRelayers: pallet_bridge_relayers::{Pallet, Call, Storage, Event<T>},
-		BridgeGrandpa: pallet_bridge_grandpa::{Pallet, Call, Storage, Event<T>},
-		BridgeParachains: pallet_bridge_parachains::{Pallet, Call, Storage, Event<T>},
-		BridgeMessages: pallet_bridge_messages::{Pallet, Call, Storage, Event<T>, Config<T>},
+		Balances: pallet_balances,
+		TransactionPayment: pallet_transaction_payment,
+		BridgeRelayers: pallet_bridge_relayers,
+		BridgeGrandpa: pallet_bridge_grandpa,
+		BridgeParachains: pallet_bridge_parachains,
+		BridgeMessages: pallet_bridge_messages,
 	}
 }
@@ -267,6 +267,7 @@ impl pallet_bridge_messages::Config for TestRuntime {
 	type DeliveryConfirmationPayments = pallet_bridge_relayers::DeliveryConfirmationPaymentsAdapter<
 		TestRuntime,
 		(),
+		(),
 		ConstU64<100_000>,
 	>;
 	type OnMessagesDelivered = ();
diff --git a/bridges/modules/relayers/src/payment_adapter.rs b/bridges/modules/relayers/src/payment_adapter.rs
index 5383cba5ecbd..5af0d8f9dfbf 100644
--- a/bridges/modules/relayers/src/payment_adapter.rs
+++ b/bridges/modules/relayers/src/payment_adapter.rs
@@ -22,6 +22,7 @@ use bp_messages::{
 	source_chain::{DeliveryConfirmationPayments, RelayersRewards},
 	MessageNonce,
 };
+pub use bp_relayers::PayRewardFromAccount;
 use bp_relayers::{RewardsAccountOwner, RewardsAccountParams};
 use bp_runtime::Chain;
 use frame_support::{sp_runtime::SaturatedConversion, traits::Get};
@@ -31,15 +32,16 @@ use sp_std::{collections::vec_deque::VecDeque, marker::PhantomData, ops::RangeIn
 
 /// Adapter that allows relayers pallet to be used as a delivery+dispatch payment mechanism
 /// for the messages pallet.
-pub struct DeliveryConfirmationPaymentsAdapter<T, MI, DeliveryReward>(
-	PhantomData<(T, MI, DeliveryReward)>,
+pub struct DeliveryConfirmationPaymentsAdapter<T, MI, RI, DeliveryReward>(
+	PhantomData<(T, MI, RI, DeliveryReward)>,
 );
 
-impl<T, MI, DeliveryReward> DeliveryConfirmationPayments<AccountIdOf<ThisChainOf<T, MI>>>
-	for DeliveryConfirmationPaymentsAdapter<T, MI, DeliveryReward>
+impl<T, MI, RI, DeliveryReward> DeliveryConfirmationPayments<AccountIdOf<ThisChainOf<T, MI>>>
+	for DeliveryConfirmationPaymentsAdapter<T, MI, RI, DeliveryReward>
 where
-	T: Config + pallet_bridge_messages::Config<MI, LaneId = <T as Config>::LaneId>,
+	T: Config<RI> + pallet_bridge_messages::Config<MI, LaneId = <T as Config<RI>>::LaneId>,
 	MI: 'static,
+	RI: 'static,
 	DeliveryReward: Get<T::Reward>,
 {
 	type Error = &'static str;
@@ -54,7 +56,7 @@ where
 			bp_messages::calc_relayers_rewards::<T::AccountId>(messages_relayers, received_range);
 		let rewarded_relayers = relayers_rewards.len();
 
-		register_relayers_rewards::<T>(
+		register_relayers_rewards::<T, RI>(
 			confirmation_relayer,
 			relayers_rewards,
 			RewardsAccountParams::new(
@@ -70,7 +72,7 @@ where
 }
 
 // Update rewards to given relayers, optionally rewarding confirmation relayer.
-fn register_relayers_rewards<T: Config>(
+fn register_relayers_rewards<T: Config<I>, I: 'static>(
 	confirmation_relayer: &T::AccountId,
 	relayers_rewards: RelayersRewards<T::AccountId>,
 	lane_id: RewardsAccountParams<T::LaneId>,
@@ -84,7 +86,7 @@ fn register_relayers_rewards<T: Config>(
 		let relayer_reward = T::Reward::saturated_from(messages).saturating_mul(delivery_fee);
 
 		if relayer != *confirmation_relayer {
-			Pallet::<T>::register_relayer_reward(lane_id, &relayer, relayer_reward);
+			Pallet::<T, I>::register_relayer_reward(lane_id, &relayer, relayer_reward);
 		} else {
 			confirmation_relayer_reward =
 				confirmation_relayer_reward.saturating_add(relayer_reward);
@@ -92,7 +94,7 @@ fn register_relayers_rewards<T: Config>(
 	}
 
 	// finally - pay reward to confirmation relayer
-	Pallet::<T>::register_relayer_reward(
+	Pallet::<T, I>::register_relayer_reward(
 		lane_id,
 		confirmation_relayer,
 		confirmation_relayer_reward,
@@ -115,7 +117,7 @@ mod tests {
 	#[test]
 	fn confirmation_relayer_is_rewarded_if_it_has_also_delivered_messages() {
 		run_test(|| {
-			register_relayers_rewards::<TestRuntime>(
+			register_relayers_rewards::<TestRuntime, ()>(
 				&RELAYER_2,
 				relayers_rewards(),
 				test_reward_account_param(),
@@ -136,7 +138,7 @@ mod tests {
 	#[test]
 	fn confirmation_relayer_is_not_rewarded_if_it_has_not_delivered_any_messages() {
 		run_test(|| {
-			register_relayers_rewards::<TestRuntime>(
+			register_relayers_rewards::<TestRuntime, ()>(
 				&RELAYER_3,
 				relayers_rewards(),
 				test_reward_account_param(),
diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs
index 7453d3c89d08..18e63681d578 100644
--- a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs
+++ b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs
@@ -28,6 +28,7 @@
 //! during the relay chain block. After the block is built, the block builder task sends it to
 //! the collation task which compresses it and submits it to the collation-generation subsystem.
 
+use self::{block_builder_task::run_block_builder, collation_task::run_collation_task};
 use codec::Codec;
 use consensus_common::ParachainCandidate;
 use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface;
@@ -36,32 +37,28 @@ use cumulus_client_consensus_proposer::ProposerInterface;
 use cumulus_primitives_aura::AuraUnincludedSegmentApi;
 use cumulus_primitives_core::GetCoreSelectorApi;
 use cumulus_relay_chain_interface::RelayChainInterface;
+use futures::FutureExt;
 use polkadot_primitives::{
 	CollatorPair, CoreIndex, Hash as RelayHash, Id as ParaId, ValidationCodeHash,
 };
-
 use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf, UsageProvider};
 use sc_consensus::BlockImport;
 use sc_utils::mpsc::tracing_unbounded;
-
 use sp_api::ProvideRuntimeApi;
 use sp_application_crypto::AppPublic;
 use sp_blockchain::HeaderBackend;
 use sp_consensus_aura::AuraApi;
-use sp_core::crypto::Pair;
+use sp_core::{crypto::Pair, traits::SpawnNamed};
 use sp_inherents::CreateInherentDataProviders;
 use sp_keystore::KeystorePtr;
 use sp_runtime::traits::{Block as BlockT, Member};
-
 use std::{sync::Arc, time::Duration};
 
-use self::{block_builder_task::run_block_builder, collation_task::run_collation_task};
-
 mod block_builder_task;
 mod collation_task;
 
 /// Parameters for [`run`].
-pub struct Params<BI, CIDP, Client, Backend, RClient, CHP, Proposer, CS> {
+pub struct Params<BI, CIDP, Client, Backend, RClient, CHP, Proposer, CS, Spawner> {
 	/// Inherent data providers. Only non-consensus inherent data should be provided, i.e.
 	/// the timestamp, slot, and paras inherents should be omitted, as they are set by this
 	/// collator.
@@ -93,13 +90,30 @@ pub struct Params<BI, CIDP, Client, Backend, RClient, CHP, Proposer, CS> {
 	/// Drift slots by a fixed duration. This can be used to create more preferrable authoring
 	/// timings.
 	pub slot_drift: Duration,
+	/// Spawner for spawning futures.
+	pub spawner: Spawner,
 }
 
 /// Run aura-based block building and collation task.
-pub fn run<Block, P, BI, CIDP, Client, Backend, RClient, CHP, Proposer, CS>(
-	params: Params<BI, CIDP, Client, Backend, RClient, CHP, Proposer, CS>,
-) -> (impl futures::Future<Output = ()>, impl futures::Future<Output = ()>)
-where
+pub fn run<Block, P, BI, CIDP, Client, Backend, RClient, CHP, Proposer, CS, Spawner>(
+	Params {
+		create_inherent_data_providers,
+		block_import,
+		para_client,
+		para_backend,
+		relay_client,
+		code_hash_provider,
+		keystore,
+		collator_key,
+		para_id,
+		proposer,
+		collator_service,
+		authoring_duration,
+		reinitialize,
+		slot_drift,
+		spawner,
+	}: Params<BI, CIDP, Client, Backend, RClient, CHP, Proposer, CS, Spawner>,
+) where
 	Block: BlockT,
 	Client: ProvideRuntimeApi<Block>
 		+ BlockOf
@@ -123,39 +137,49 @@ where
 	P: Pair + 'static,
 	P::Public: AppPublic + Member + Codec,
 	P::Signature: TryFrom<Vec<u8>> + Member + Codec,
+	Spawner: SpawnNamed,
 {
 	let (tx, rx) = tracing_unbounded("mpsc_builder_to_collator", 100);
 
 	let collator_task_params = collation_task::Params {
-		relay_client: params.relay_client.clone(),
-		collator_key: params.collator_key,
-		para_id: params.para_id,
-		reinitialize: params.reinitialize,
-		collator_service: params.collator_service.clone(),
+		relay_client: relay_client.clone(),
+		collator_key,
+		para_id,
+		reinitialize,
+		collator_service: collator_service.clone(),
 		collator_receiver: rx,
 	};
 
 	let collation_task_fut = run_collation_task::<Block, _, _>(collator_task_params);
 
 	let block_builder_params = block_builder_task::BuilderTaskParams {
-		create_inherent_data_providers: params.create_inherent_data_providers,
-		block_import: params.block_import,
-		para_client: params.para_client,
-		para_backend: params.para_backend,
-		relay_client: params.relay_client,
-		code_hash_provider: params.code_hash_provider,
-		keystore: params.keystore,
-		para_id: params.para_id,
-		proposer: params.proposer,
-		collator_service: params.collator_service,
-		authoring_duration: params.authoring_duration,
+		create_inherent_data_providers,
+		block_import,
+		para_client,
+		para_backend,
+		relay_client,
+		code_hash_provider,
+		keystore,
+		para_id,
+		proposer,
+		collator_service,
+		authoring_duration,
 		collator_sender: tx,
-		slot_drift: params.slot_drift,
+		slot_drift,
 	};
 
 	let block_builder_fut =
 		run_block_builder::<Block, P, _, _, _, _, _, _, _, _>(block_builder_params);
 
-	(collation_task_fut, block_builder_fut)
+	spawner.spawn_blocking(
+		"slot-based-block-builder",
+		Some("slot-based-collator"),
+		block_builder_fut.boxed(),
+	);
+	spawner.spawn_blocking(
+		"slot-based-collation",
+		Some("slot-based-collator"),
+		collation_task_fut.boxed(),
+	);
 }
 
 /// Message to be sent from the block builder to the collation task.
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs index 7e0385692375..b284fa9e7af7 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs @@ -201,7 +201,6 @@ mod tests { fn ensure_bridge_integrity() { assert_complete_bridge_types!( runtime: Runtime, - with_bridged_chain_grandpa_instance: BridgeGrandpaRococoBulletinInstance, with_bridged_chain_messages_instance: WithRococoBulletinMessagesInstance, this_chain: bp_bridge_hub_rococo::BridgeHubRococo, bridged_chain: bp_polkadot_bulletin::PolkadotBulletin, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs index 0eab3c74a7e2..2710d033d64b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs @@ -121,6 +121,7 @@ impl pallet_bridge_messages::Config for Ru type DeliveryConfirmationPayments = pallet_bridge_relayers::DeliveryConfirmationPaymentsAdapter< Runtime, WithBridgeHubWestendMessagesInstance, + RelayersForLegacyLaneIdsMessagesInstance, DeliveryRewardInBalance, >; @@ -256,7 +257,6 @@ mod tests { fn ensure_bridge_integrity() { assert_complete_bridge_types!( runtime: Runtime, - with_bridged_chain_grandpa_instance: BridgeGrandpaWestendInstance, with_bridged_chain_messages_instance: WithBridgeHubWestendMessagesInstance, this_chain: bp_bridge_hub_rococo::BridgeHubRococo, bridged_chain: bp_bridge_hub_westend::BridgeHubWestend, @@ -266,7 +266,6 @@ mod tests { Runtime, BridgeGrandpaWestendInstance, WithBridgeHubWestendMessagesInstance, - bp_westend::Westend, >(AssertCompleteBridgeConstants { this_chain_constants: AssertChainConstants { block_length: bp_bridge_hub_rococo::BlockLength::get(), diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs index 2e7dd98e9dce..6ca858e961d3 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs @@ -324,11 +324,12 @@ mod bridge_hub_westend_tests { >( SiblingParachainLocation::get(), BridgedUniversalLocation::get(), - |locations, fee| { + false, + |locations, _fee| { bridge_hub_test_utils::open_bridge_with_storage::< Runtime, XcmOverBridgeHubWestendInstance - >(locations, fee, LegacyLaneId([0, 0, 0, 1])) + >(locations, LegacyLaneId([0, 0, 0, 1])) } ).1 }, @@ -388,11 +389,12 @@ mod bridge_hub_westend_tests { >( SiblingParachainLocation::get(), BridgedUniversalLocation::get(), - |locations, fee| { + false, + |locations, _fee| { bridge_hub_test_utils::open_bridge_with_storage::< Runtime, XcmOverBridgeHubWestendInstance, - >(locations, fee, LegacyLaneId([0, 0, 0, 1])) + >(locations, LegacyLaneId([0, 0, 0, 1])) }, ) .1 @@ -422,11 +424,12 @@ mod bridge_hub_westend_tests { >( SiblingParachainLocation::get(), BridgedUniversalLocation::get(), - |locations, fee| { + false, + |locations, _fee| { bridge_hub_test_utils::open_bridge_with_storage::< Runtime, XcmOverBridgeHubWestendInstance, - >(locations, fee, LegacyLaneId([0, 0, 0, 1])) + 
>(locations, LegacyLaneId([0, 0, 0, 1])) }, ) .1 @@ -591,11 +594,12 @@ mod bridge_hub_bulletin_tests { >( SiblingPeopleParachainLocation::get(), BridgedBulletinLocation::get(), - |locations, fee| { + false, + |locations, _fee| { bridge_hub_test_utils::open_bridge_with_storage::< Runtime, XcmOverPolkadotBulletinInstance - >(locations, fee, HashedLaneId::try_new(1, 2).unwrap()) + >(locations, HashedLaneId::try_new(1, 2).unwrap()) } ).1 }, @@ -654,11 +658,12 @@ mod bridge_hub_bulletin_tests { >( SiblingPeopleParachainLocation::get(), BridgedBulletinLocation::get(), - |locations, fee| { + false, + |locations, _fee| { bridge_hub_test_utils::open_bridge_with_storage::< Runtime, XcmOverPolkadotBulletinInstance, - >(locations, fee, HashedLaneId::try_new(1, 2).unwrap()) + >(locations, HashedLaneId::try_new(1, 2).unwrap()) }, ) .1 @@ -687,11 +692,12 @@ mod bridge_hub_bulletin_tests { >( SiblingPeopleParachainLocation::get(), BridgedBulletinLocation::get(), - |locations, fee| { + false, + |locations, _fee| { bridge_hub_test_utils::open_bridge_with_storage::< Runtime, XcmOverPolkadotBulletinInstance, - >(locations, fee, HashedLaneId::try_new(1, 2).unwrap()) + >(locations, HashedLaneId::try_new(1, 2).unwrap()) }, ) .1 diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs index 62c93da7c831..cd3465513144 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs @@ -152,6 +152,7 @@ impl pallet_bridge_messages::Config for Run type DeliveryConfirmationPayments = pallet_bridge_relayers::DeliveryConfirmationPaymentsAdapter< Runtime, WithBridgeHubRococoMessagesInstance, + RelayersForLegacyLaneIdsMessagesInstance, DeliveryRewardInBalance, >; @@ -284,7 +285,6 @@ mod tests { fn ensure_bridge_integrity() { assert_complete_bridge_types!( runtime: Runtime, - with_bridged_chain_grandpa_instance: BridgeGrandpaRococoInstance, with_bridged_chain_messages_instance: WithBridgeHubRococoMessagesInstance, this_chain: bp_bridge_hub_westend::BridgeHubWestend, bridged_chain: bp_bridge_hub_rococo::BridgeHubRococo, @@ -294,7 +294,6 @@ mod tests { Runtime, BridgeGrandpaRococoInstance, WithBridgeHubRococoMessagesInstance, - bp_rococo::Rococo, >(AssertCompleteBridgeConstants { this_chain_constants: AssertChainConstants { block_length: bp_bridge_hub_westend::BlockLength::get(), diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs index 69301b34fe6b..84025c4cefeb 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs @@ -246,10 +246,11 @@ fn handle_export_message_from_system_parachain_add_to_outbound_queue_works() { >( SiblingParachainLocation::get(), BridgedUniversalLocation::get(), - |locations, fee| { + false, + |locations, _fee| { bridge_hub_test_utils::open_bridge_with_storage::< Runtime, XcmOverBridgeHubRococoInstance - >(locations, fee, LegacyLaneId([0, 0, 0, 1])) + >(locations, LegacyLaneId([0, 0, 0, 1])) } ).1 }, @@ -307,11 +308,12 @@ fn relayed_incoming_message_works() { >( SiblingParachainLocation::get(), BridgedUniversalLocation::get(), - |locations, fee| { + false, + |locations, _fee| { 
bridge_hub_test_utils::open_bridge_with_storage::< Runtime, XcmOverBridgeHubRococoInstance, - >(locations, fee, LegacyLaneId([0, 0, 0, 1])) + >(locations, LegacyLaneId([0, 0, 0, 1])) }, ) .1 @@ -341,11 +343,12 @@ fn free_relay_extrinsic_works() { >( SiblingParachainLocation::get(), BridgedUniversalLocation::get(), - |locations, fee| { + false, + |locations, _fee| { bridge_hub_test_utils::open_bridge_with_storage::< Runtime, XcmOverBridgeHubRococoInstance, - >(locations, fee, LegacyLaneId([0, 0, 0, 1])) + >(locations, LegacyLaneId([0, 0, 0, 1])) }, ) .1 diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs index aac60bba0b53..03ddc4313b45 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs @@ -29,7 +29,7 @@ use core::marker::PhantomData; use frame_support::{ assert_ok, dispatch::GetDispatchInfo, - traits::{fungible::Mutate, OnFinalize, OnInitialize, PalletInfoAccess}, + traits::{fungible::Mutate, Contains, OnFinalize, OnInitialize, PalletInfoAccess}, }; use frame_system::pallet_prelude::BlockNumberFor; use pallet_bridge_grandpa::{BridgedBlockHash, BridgedHeader}; @@ -395,7 +395,7 @@ pub fn ensure_opened_bridge< XcmOverBridgePalletInstance, LocationToAccountId, TokenLocation> -(source: Location, destination: InteriorLocation, bridge_opener: impl Fn(BridgeLocations, Asset)) -> (BridgeLocations, pallet_xcm_bridge_hub::LaneIdOf) +(source: Location, destination: InteriorLocation, is_paid_xcm_execution: bool, bridge_opener: impl Fn(BridgeLocations, Option)) -> (BridgeLocations, pallet_xcm_bridge_hub::LaneIdOf) where Runtime: BasicParachainRuntime + BridgeXcmOverBridgeConfig, XcmOverBridgePalletInstance: 'static, @@ -416,24 +416,37 @@ TokenLocation: Get{ ) .is_none()); - // required balance: ED + fee + BridgeDeposit - let bridge_deposit = - >::BridgeDeposit::get( - ); - // random high enough value for `BuyExecution` fees - let buy_execution_fee_amount = 5_000_000_000_000_u128; - let buy_execution_fee = (TokenLocation::get(), buy_execution_fee_amount).into(); - let balance_needed = ::ExistentialDeposit::get() + - buy_execution_fee_amount.into() + - bridge_deposit.into(); - // SA of source location needs to have some required balance - let source_account_id = LocationToAccountId::convert_location(&source).expect("valid location"); - let _ = >::mint_into(&source_account_id, balance_needed) - .expect("mint_into passes"); + if !>::AllowWithoutBridgeDeposit::contains(&source) { + // required balance: ED + fee + BridgeDeposit + let bridge_deposit = + >::BridgeDeposit::get( + ); + let balance_needed = ::ExistentialDeposit::get() + bridge_deposit.into(); + + let source_account_id = LocationToAccountId::convert_location(&source).expect("valid location"); + let _ = >::mint_into(&source_account_id, balance_needed) + .expect("mint_into passes"); + }; + + let maybe_paid_execution = if is_paid_xcm_execution { + // random high enough value for `BuyExecution` fees + let buy_execution_fee_amount = 5_000_000_000_000_u128; + let buy_execution_fee = (TokenLocation::get(), buy_execution_fee_amount).into(); + + let balance_needed = ::ExistentialDeposit::get() + + buy_execution_fee_amount.into(); + let source_account_id = + LocationToAccountId::convert_location(&source).expect("valid location"); + let _ = >::mint_into(&source_account_id, balance_needed) + .expect("mint_into passes"); + 
Some(buy_execution_fee) + } else { + None + }; // call the bridge opener - bridge_opener(*locations.clone(), buy_execution_fee); + bridge_opener(*locations.clone(), maybe_paid_execution); // check opened bridge let bridge = pallet_xcm_bridge_hub::Bridges::::get( @@ -452,8 +465,9 @@ TokenLocation: Get{ /// Utility for opening bridge with dedicated `pallet_xcm_bridge_hub`'s extrinsic. pub fn open_bridge_with_extrinsic( - locations: BridgeLocations, - buy_execution_fee: Asset, + (origin, origin_kind): (Location, OriginKind), + bridge_destination_universal_location: InteriorLocation, + maybe_paid_execution: Option, ) where Runtime: frame_system::Config + pallet_xcm_bridge_hub::Config @@ -469,15 +483,15 @@ pub fn open_bridge_with_extrinsic( XcmOverBridgePalletInstance, >::open_bridge { bridge_destination_universal_location: Box::new( - locations.bridge_destination_universal_location().clone().into(), + bridge_destination_universal_location.clone().into(), ), }); // execute XCM as source origin would do with `Transact -> Origin::Xcm` - assert_ok!(RuntimeHelper::::execute_as_origin_xcm( - locations.bridge_origin_relative_location().clone(), + assert_ok!(RuntimeHelper::::execute_as_origin( + (origin, origin_kind), open_bridge_call, - buy_execution_fee + maybe_paid_execution ) .ensure_complete()); } @@ -486,7 +500,6 @@ pub fn open_bridge_with_extrinsic( /// purposes). pub fn open_bridge_with_storage( locations: BridgeLocations, - _buy_execution_fee: Asset, lane_id: pallet_xcm_bridge_hub::LaneIdOf, ) where Runtime: pallet_xcm_bridge_hub::Config, @@ -503,8 +516,12 @@ pub fn open_bridge_with_storage( } /// Helper function to close the bridge/lane for `source` and `destination`. -pub fn close_bridge(source: Location, destination: InteriorLocation) -where +pub fn close_bridge( + expected_source: Location, + bridge_destination_universal_location: InteriorLocation, + (origin, origin_kind): (Location, OriginKind), + is_paid_xcm_execution: bool +) where Runtime: BasicParachainRuntime + BridgeXcmOverBridgeConfig, XcmOverBridgePalletInstance: 'static, ::RuntimeCall: GetDispatchInfo + From>, @@ -515,8 +532,8 @@ TokenLocation: Get{ // construct expected bridge configuration let locations = pallet_xcm_bridge_hub::Pallet::::bridge_locations( - source.clone().into(), - destination.clone().into(), + expected_source.clone().into(), + bridge_destination_universal_location.clone().into(), ) .expect("valid bridge locations"); assert!(pallet_xcm_bridge_hub::Bridges::::get( @@ -525,35 +542,38 @@ TokenLocation: Get{ .is_some()); // required balance: ED + fee + BridgeDeposit - let bridge_deposit = - >::BridgeDeposit::get( - ); - // random high enough value for `BuyExecution` fees - let buy_execution_fee_amount = 2_500_000_000_000_u128; - let buy_execution_fee = (TokenLocation::get(), buy_execution_fee_amount).into(); - let balance_needed = ::ExistentialDeposit::get() + - buy_execution_fee_amount.into() + - bridge_deposit.into(); - - // SA of source location needs to have some required balance - let source_account_id = LocationToAccountId::convert_location(&source).expect("valid location"); - let _ = >::mint_into(&source_account_id, balance_needed) - .expect("mint_into passes"); + let maybe_paid_execution = if is_paid_xcm_execution { + // random high enough value for `BuyExecution` fees + let buy_execution_fee_amount = 2_500_000_000_000_u128; + let buy_execution_fee = (TokenLocation::get(), buy_execution_fee_amount).into(); + + let balance_needed = ::ExistentialDeposit::get() + + buy_execution_fee_amount.into(); + let 
source_account_id = + LocationToAccountId::convert_location(&expected_source).expect("valid location"); + let _ = >::mint_into(&source_account_id, balance_needed) + .expect("mint_into passes"); + Some(buy_execution_fee) + } else { + None + }; // close bridge with `Transact` call let close_bridge_call = RuntimeCallOf::::from(BridgeXcmOverBridgeCall::< Runtime, XcmOverBridgePalletInstance, >::close_bridge { - bridge_destination_universal_location: Box::new(destination.into()), + bridge_destination_universal_location: Box::new( + bridge_destination_universal_location.into(), + ), may_prune_messages: 16, }); // execute XCM as source origin would do with `Transact -> Origin::Xcm` - assert_ok!(RuntimeHelper::::execute_as_origin_xcm( - source.clone(), + assert_ok!(RuntimeHelper::::execute_as_origin( + (origin, origin_kind), close_bridge_call, - buy_execution_fee + maybe_paid_execution ) .ensure_complete()); diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/mod.rs index ad6db0b83e80..f96d0bf405b9 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/mod.rs @@ -654,8 +654,10 @@ where pub fn open_and_close_bridge_works( collator_session_key: CollatorSessionKeys, runtime_para_id: u32, - source: Location, + expected_source: Location, destination: InteriorLocation, + origin_with_origin_kind: (Location, OriginKind), + is_paid_xcm_execution: bool, ) where Runtime: BasicParachainRuntime + BridgeXcmOverBridgeConfig, XcmOverBridgePalletInstance: 'static, @@ -669,7 +671,7 @@ pub fn open_and_close_bridge_works(collator_session_key, runtime_para_id, vec![], || { // construct expected bridge configuration let locations = pallet_xcm_bridge_hub::Pallet::::bridge_locations( - source.clone().into(), + expected_source.clone().into(), destination.clone().into(), ).expect("valid bridge locations"); let expected_lane_id = @@ -704,7 +706,7 @@ pub fn open_and_close_bridge_works( - source.clone(), + expected_source.clone(), destination.clone(), - open_bridge_with_extrinsic:: + is_paid_xcm_execution, + |locations, maybe_paid_execution| open_bridge_with_extrinsic::< + Runtime, + XcmOverBridgePalletInstance, + >( + origin_with_origin_kind.clone(), + locations.bridge_destination_universal_location().clone(), + maybe_paid_execution + ) ) .0 .bridge_id(), @@ -727,7 +737,7 @@ pub fn open_and_close_bridge_works(source.clone(), destination); + >(expected_source, destination, origin_with_origin_kind, is_paid_xcm_execution); // check bridge/lane DOES not exist assert_eq!( diff --git a/cumulus/parachains/runtimes/test-utils/src/lib.rs b/cumulus/parachains/runtimes/test-utils/src/lib.rs index 05ecf6ca8e81..3f2e721d13f6 100644 --- a/cumulus/parachains/runtimes/test-utils/src/lib.rs +++ b/cumulus/parachains/runtimes/test-utils/src/lib.rs @@ -460,18 +460,26 @@ impl< ) } - pub fn execute_as_origin_xcm( - origin: Location, + pub fn execute_as_origin( + (origin, origin_kind): (Location, OriginKind), call: Call, - buy_execution_fee: Asset, + maybe_buy_execution_fee: Option, ) -> Outcome { + let mut instructions = if let Some(buy_execution_fee) = maybe_buy_execution_fee { + vec![ + WithdrawAsset(buy_execution_fee.clone().into()), + BuyExecution { fees: buy_execution_fee.clone(), weight_limit: Unlimited }, + ] + } else { + vec![UnpaidExecution { check_origin: None, weight_limit: Unlimited }] + }; + // prepare `Transact` xcm - let xcm = 
Xcm(vec![ - WithdrawAsset(buy_execution_fee.clone().into()), - BuyExecution { fees: buy_execution_fee.clone(), weight_limit: Unlimited }, - Transact { origin_kind: OriginKind::Xcm, call: call.encode().into() }, + instructions.extend(vec![ + Transact { origin_kind, call: call.encode().into() }, ExpectTransactStatus(MaybeErrorCode::Success), ]); + let xcm = Xcm(instructions); // execute xcm as parent origin let mut hash = xcm.using_encoded(sp_io::hashing::blake2_256); diff --git a/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs b/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs index ec5d0a439ec4..0b2c230f695d 100644 --- a/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs +++ b/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs @@ -54,6 +54,7 @@ use sc_service::{Configuration, Error, TaskManager}; use sc_telemetry::TelemetryHandle; use sc_transaction_pool::TransactionPoolHandle; use sp_api::ProvideRuntimeApi; +use sp_core::traits::SpawnNamed; use sp_inherents::CreateInherentDataProviders; use sp_keystore::KeystorePtr; use sp_runtime::{ @@ -242,7 +243,7 @@ where AuraId: AuraIdT + Sync, { #[docify::export_content] - fn launch_slot_based_collator( + fn launch_slot_based_collator( params: SlotBasedParams< ParachainBlockImport, CIDP, @@ -252,28 +253,17 @@ where CHP, Proposer, CS, + Spawner, >, - task_manager: &TaskManager, ) where CIDP: CreateInherentDataProviders + 'static, CIDP::InherentDataProviders: Send, CHP: cumulus_client_consensus_common::ValidationCodeHashProvider + Send + 'static, Proposer: ProposerInterface + Send + Sync + 'static, CS: CollatorServiceInterface + Send + Sync + Clone + 'static, + Spawner: SpawnNamed, { - let (collation_future, block_builder_future) = - slot_based::run::::Pair, _, _, _, _, _, _, _, _>(params); - - task_manager.spawn_essential_handle().spawn( - "collation-task", - Some("parachain-block-authoring"), - collation_future, - ); - task_manager.spawn_essential_handle().spawn( - "block-builder-task", - Some("parachain-block-authoring"), - block_builder_future, - ); + slot_based::run::::Pair, _, _, _, _, _, _, _, _, _>(params); } } @@ -335,11 +325,12 @@ where authoring_duration: Duration::from_millis(2000), reinitialize: false, slot_drift: Duration::from_secs(1), + spawner: task_manager.spawn_handle(), }; // We have a separate function only to be able to use `docify::export` on this piece of // code. 
- Self::launch_slot_based_collator(params, task_manager); + Self::launch_slot_based_collator(params); Ok(()) } diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index 9234442d399c..f01da9becef1 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -497,20 +497,10 @@ where authoring_duration: Duration::from_millis(2000), reinitialize: false, slot_drift: Duration::from_secs(1), + spawner: task_manager.spawn_handle(), }; - let (collation_future, block_builder_future) = - slot_based::run::(params); - task_manager.spawn_essential_handle().spawn( - "collation-task", - None, - collation_future, - ); - task_manager.spawn_essential_handle().spawn( - "block-builder-task", - None, - block_builder_future, - ); + slot_based::run::(params); } else { tracing::info!(target: LOG_TARGET, "Starting block authoring with lookahead collator."); let params = AuraParams { diff --git a/polkadot/node/core/pvf/Cargo.toml b/polkadot/node/core/pvf/Cargo.toml index a9f97c308f26..37d5878ea597 100644 --- a/polkadot/node/core/pvf/Cargo.toml +++ b/polkadot/node/core/pvf/Cargo.toml @@ -38,6 +38,7 @@ polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } +sc-tracing = { workspace = true } sp-core = { workspace = true, default-features = true } sp-maybe-compressed-blob = { optional = true, workspace = true, default-features = true } polkadot-node-core-pvf-prepare-worker = { optional = true, workspace = true, default-features = true } diff --git a/polkadot/node/core/pvf/src/worker_interface.rs b/polkadot/node/core/pvf/src/worker_interface.rs index e63778d4692f..f279fbb53544 100644 --- a/polkadot/node/core/pvf/src/worker_interface.rs +++ b/polkadot/node/core/pvf/src/worker_interface.rs @@ -237,10 +237,8 @@ impl WorkerHandle { // Clear all env vars from the spawned process. let mut command = process::Command::new(program.as_ref()); command.env_clear(); - // Add back any env vars we want to keep. - if let Ok(value) = std::env::var("RUST_LOG") { - command.env("RUST_LOG", value); - } + + command.env("RUST_LOG", sc_tracing::logging::get_directives().join(",")); let mut child = command .args(extra_args) diff --git a/prdoc/pr_5703.prdoc b/prdoc/pr_5703.prdoc new file mode 100644 index 000000000000..3cef4468a87d --- /dev/null +++ b/prdoc/pr_5703.prdoc @@ -0,0 +1,13 @@ +title: Properly handle block gap created by fast sync + +doc: + - audience: Node Dev + description: | + Implements support for handling block gaps generated during fast sync. This includes managing the creation, + updating, and removal of block gaps. + Note that this feature is not fully activated until the `body` attribute is removed from the `LightState` + block request in chain sync, which will occur after the issue #5406 is resolved. + +crates: + - name: sc-client-db + bump: patch diff --git a/prdoc/pr_6521.prdoc b/prdoc/pr_6521.prdoc new file mode 100644 index 000000000000..6f4acf8d028b --- /dev/null +++ b/prdoc/pr_6521.prdoc @@ -0,0 +1,10 @@ +title: Pure state sync refactoring (part-2) + +doc: +- audience: Node Dev + description: | + This is the last part of the pure refactoring of state sync, focusing on encapsulating `StateSyncMetadata` as a separate entity. 
+ +crates: +- name: sc-network-sync + bump: none diff --git a/prdoc/pr_6522.prdoc b/prdoc/pr_6522.prdoc new file mode 100644 index 000000000000..bd59e9cb08dc --- /dev/null +++ b/prdoc/pr_6522.prdoc @@ -0,0 +1,18 @@ +title: Removes constraint in BlockNumberProvider from treasury + +doc: +- audience: Runtime Dev + description: |- + https://github.com/paritytech/polkadot-sdk/pull/3970 updated the treasury pallet to support + relay chain block number provider. However, it added a constraint to the `BlockNumberProvider` + associated type to have the same block number type as `frame_system`: + + ```rust + type BlockNumberProvider: BlockNumberProvider<BlockNumber = BlockNumberFor<Self>>; + ``` + + This PR removes that constraint and allows the treasury pallet to use any block number type. + +crates: +- name: pallet-treasury + bump: major \ No newline at end of file diff --git a/prdoc/pr_6534.prdoc b/prdoc/pr_6534.prdoc new file mode 100644 index 000000000000..7a92fe3c857b --- /dev/null +++ b/prdoc/pr_6534.prdoc @@ -0,0 +1,10 @@ +title: Forward logging directives to Polkadot workers +doc: +- audience: Node Dev + description: |- + This pull request forwards all the logging directives given to the node via `RUST_LOG` or `-l` to the workers, instead of only forwarding `RUST_LOG`. +crates: +- name: polkadot-node-core-pvf + bump: patch +- name: sc-tracing + bump: patch diff --git a/prdoc/pr_6536.prdoc b/prdoc/pr_6536.prdoc new file mode 100644 index 000000000000..676b5c131f17 --- /dev/null +++ b/prdoc/pr_6536.prdoc @@ -0,0 +1,24 @@ +title: Bridges testing improvements +doc: +- audience: Runtime Dev + description: |- + This PR includes: + - Refactored integrity tests to support standalone deployment of `pallet-bridge-messages`. + - Refactored the `open_and_close_bridge_works` test case to support multiple scenarios, such as: + 1. A local chain opening a bridge. + 2. Sibling parachains opening a bridge. + 3. The relay chain opening a bridge. + - Previously, we added instance support for `pallet-bridge-relayers` but overlooked updating the `DeliveryConfirmationPaymentsAdapter`. +crates: +- name: bridge-runtime-common + bump: patch +- name: pallet-bridge-relayers + bump: patch +- name: bridge-hub-rococo-runtime + bump: patch +- name: bridge-hub-westend-runtime + bump: patch +- name: bridge-hub-test-utils + bump: major +- name: parachains-runtimes-test-utils + bump: major diff --git a/prdoc/pr_6540.prdoc b/prdoc/pr_6540.prdoc new file mode 100644 index 000000000000..5e0305205521 --- /dev/null +++ b/prdoc/pr_6540.prdoc @@ -0,0 +1,16 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Only allow apply slash to be executed if the slash amount is at least the ED + +doc: + - audience: Runtime User + description: | + This change prevents `pools::apply_slash` from being executed when the pending slash amount of the member is lower + than the ED. With this change, such small slashes will still be applied, but only when member funds are withdrawn.
+ +crates: +- name: pallet-nomination-pools-runtime-api + bump: patch +- name: pallet-nomination-pools + bump: major diff --git a/prdoc/pr_6544.prdoc b/prdoc/pr_6544.prdoc new file mode 100644 index 000000000000..f2bc9627697d --- /dev/null +++ b/prdoc/pr_6544.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Add and test events to conviction voting pallet + +doc: + - audience: Runtime Dev + description: | + Add an event for the unlocking of an expired conviction vote's funds, and test recently added + voting events. + +crates: + - name: pallet-conviction-voting + bump: major diff --git a/prdoc/pr_6546.prdoc b/prdoc/pr_6546.prdoc new file mode 100644 index 000000000000..353578a7f58f --- /dev/null +++ b/prdoc/pr_6546.prdoc @@ -0,0 +1,13 @@ +title: Increase default trie cache size to 1GiB +doc: +- audience: Node Operator + description: "The default trie cache size before was set to `64MiB`, which is quite\ \ low to achieve real speed ups. `1GiB` should be a reasonable number as the requirements\ \ for validators/collators/full nodes are much higher when it comes to minimum\ \ memory requirements. Also the cache does not use `1GiB` from the start and fills\ \ up over time. The setting can be changed by setting `--trie-cache-size BYTE_SIZE`.\ The CLI option `--state-cache-size` is also removed, as it no longer had any effect.\r\ \n" +crates: +- name: sc-cli + bump: patch diff --git a/prdoc/pr_6553.prdoc b/prdoc/pr_6553.prdoc new file mode 100644 index 000000000000..8692eba3a9f5 --- /dev/null +++ b/prdoc/pr_6553.prdoc @@ -0,0 +1,13 @@ +title: Ensure sync event is processed on unknown peer roles + +doc: + - audience: Node Dev + description: | + The GossipEngine::poll_next implementation polls both the notification_service and the sync_event_stream. + This PR ensures events from both are processed gracefully. + +crates: + - name: sc-network-gossip + bump: patch + - name: sc-network-sync + bump: patch diff --git a/prdoc/pr_6561.prdoc b/prdoc/pr_6561.prdoc new file mode 100644 index 000000000000..714521925a6b --- /dev/null +++ b/prdoc/pr_6561.prdoc @@ -0,0 +1,11 @@ +title: 'slot-based-collator: Move spawning of the futures' +doc: +- audience: Node Dev + description: "Move spawning of the slot-based collator into the `run` function.\ \ The tasks are also spawned as blocking tasks and not just as normal tasks.\r\ \n" +crates: +- name: cumulus-client-consensus-aura + bump: major +- name: polkadot-omni-node-lib + bump: major diff --git a/substrate/.config/nextest.toml b/substrate/.config/nextest.toml deleted file mode 100644 index eb0ed09cad92..000000000000 --- a/substrate/.config/nextest.toml +++ /dev/null @@ -1,124 +0,0 @@ -# This is the default config used by nextest. It is embedded in the binary at -# build time. It may be used as a template for .config/nextest.toml. - -[store] -# The directory under the workspace root at which nextest-related files are -# written. Profile-specific storage is currently written to dir/<profile-name>. - -# This section defines the default nextest profile. Custom profiles are layered -# on top of the default profile. -[profile.default] -# "retries" defines the number of times a test should be retried. If set to a -# non-zero value, tests that succeed on a subsequent attempt will be marked as -# non-flaky. Can be overridden through the `--retries` option.
-# Examples -# * retries = 3 -# * retries = { backoff = "fixed", count = 2, delay = "1s" } -# * retries = { backoff = "exponential", count = 10, delay = "1s", jitter = true, max-delay = "10s" } -retries = 5 - -# The number of threads to run tests with. Supported values are either an integer or -# the string "num-cpus". Can be overridden through the `--test-threads` option. -test-threads = "num-cpus" - -# The number of threads required for each test. This is generally used in overrides to -# mark certain tests as heavier than others. However, it can also be set as a global parameter. -threads-required = 1 - -# Show these test statuses in the output. -# -# The possible values this can take are: -# * none: no output -# * fail: show failed (including exec-failed) tests -# * retry: show flaky and retried tests -# * slow: show slow tests -# * pass: show passed tests -# * skip: show skipped tests (most useful for CI) -# * all: all of the above -# -# Each value includes all the values above it; for example, "slow" includes -# failed and retried tests. -# -# Can be overridden through the `--status-level` flag. -status-level = "pass" - -# Similar to status-level, show these test statuses at the end of the run. -final-status-level = "flaky" - -# "failure-output" defines when standard output and standard error for failing tests are produced. -# Accepted values are -# * "immediate": output failures as soon as they happen -# * "final": output failures at the end of the test run -# * "immediate-final": output failures as soon as they happen and at the end of -# the test run; combination of "immediate" and "final" -# * "never": don't output failures at all -# -# For large test suites and CI it is generally useful to use "immediate-final". -# -# Can be overridden through the `--failure-output` option. -failure-output = "immediate" - -# "success-output" controls production of standard output and standard error on success. This should -# generally be set to "never". -success-output = "never" - -# Cancel the test run on the first failure. For CI runs, consider setting this -# to false. -fail-fast = true - -# Treat a test that takes longer than the configured 'period' as slow, and print a message. -# See <https://nexte.st/book/slow-tests> for more information. -# -# Optional: specify the parameter 'terminate-after' with a non-zero integer, -# which will cause slow tests to be terminated after the specified number of -# periods have passed. -# Example: slow-timeout = { period = "60s", terminate-after = 2 } -slow-timeout = { period = "60s" } - -# Treat a test as leaky if after the process is shut down, standard output and standard error -# aren't closed within this duration. -# -# This usually happens in case of a test that creates a child process and lets it inherit those -# handles, but doesn't clean the child process up (especially when it fails). -# -# See <https://nexte.st/book/leaky-tests> for more information. -leak-timeout = "100ms" - [profile.default.junit] -# Output a JUnit report into the given file inside 'store.dir/<profile-name>'. -# If unspecified, JUnit is not written out. - path = "junit.xml" - -# The name of the top-level "report" element in JUnit report. If aggregating -# reports across different test runs, it may be useful to provide separate names -# for each report. -report-name = "substrate" - -# Whether standard output and standard error for passing tests should be stored in the JUnit report. -# Output is stored in the <system-out> and <system-err> elements of the <testcase> element.
-store-success-output = false - -# Whether standard output and standard error for failing tests should be stored in the JUnit report. -# Output is stored in the <system-out> and <system-err> elements of the <testcase> element. -# -# Note that if a description can be extracted from the output, it is always stored in the -# <description> element. -store-failure-output = true - -# This profile is activated if MIRI_SYSROOT is set. -[profile.default-miri] -# Miri tests take up a lot of memory, so only run 1 test at a time by default. -test-threads = 1 - -# Mutual exclusion of tests with `cargo build` invocation as a lock to avoid multiple -# simultaneous invocations clobbering each other. -[test-groups] -serial-integration = { max-threads = 1 } - -# Running UI tests sequentially -# More info can be found here: https://github.com/paritytech/ci_cd/issues/754 -[[profile.default.overrides]] -filter = 'test(/(^ui$|_ui|ui_)/)' -test-group = 'serial-integration' diff --git a/substrate/client/cli/src/params/import_params.rs b/substrate/client/cli/src/params/import_params.rs index add7cb4f8505..e4b8b9644feb 100644 --- a/substrate/client/cli/src/params/import_params.rs +++ b/substrate/client/cli/src/params/import_params.rs @@ -78,21 +78,13 @@ pub struct ImportParams { /// Specify the state cache size. /// /// Providing `0` will disable the cache. - #[arg(long, value_name = "Bytes", default_value_t = 67108864)] + #[arg(long, value_name = "Bytes", default_value_t = 1024 * 1024 * 1024)] pub trie_cache_size: usize, - - /// DEPRECATED: switch to `--trie-cache-size`. - #[arg(long)] - state_cache_size: Option<usize>, } impl ImportParams { /// Specify the trie cache maximum size. pub fn trie_cache_maximum_size(&self) -> Option<usize> { - if self.state_cache_size.is_some() { - eprintln!("`--state-cache-size` was deprecated. Please switch to `--trie-cache-size`."); - } - if self.trie_cache_size == 0 { None } else { diff --git a/substrate/client/db/src/lib.rs b/substrate/client/db/src/lib.rs index cec981c05602..092101945107 100644 --- a/substrate/client/db/src/lib.rs +++ b/substrate/client/db/src/lib.rs @@ -1486,6 +1486,7 @@ impl<Block: BlockT> Backend<Block> { .map(|(n, _)| n) .unwrap_or(Zero::zero()); let existing_header = number <= highest_leaf && self.blockchain.header(hash)?.is_some(); + let existing_body = pending_block.body.is_some(); // blocks are keyed by number + hash. let lookup_key = utils::number_and_hash_to_lookup_key(number, hash)?; @@ -1677,6 +1678,23 @@ impl<Block: BlockT> Backend<Block> { children, ); } + } + + let should_check_block_gap = !existing_header || !existing_body; + + if should_check_block_gap { + let insert_new_gap = + |transaction: &mut Transaction<DbHash>, + new_gap: BlockGap<NumberFor<Block>>, + block_gap: &mut Option<BlockGap<NumberFor<Block>>>| { + transaction.set(columns::META, meta_keys::BLOCK_GAP, &new_gap.encode()); + transaction.set( + columns::META, + meta_keys::BLOCK_GAP_VERSION, + &BLOCK_GAP_CURRENT_VERSION.encode(), + ); + block_gap.replace(new_gap); + }; if let Some(mut gap) = block_gap { match gap.gap_type { @@ -1695,43 +1713,65 @@ impl<Block: BlockT> Backend<Block> { block_gap = None; debug!(target: "db", "Removed block gap."); } else { - block_gap = Some(gap); + insert_new_gap(&mut transaction, gap, &mut block_gap); debug!(target: "db", "Update block gap. {block_gap:?}"); - transaction.set( - columns::META, - meta_keys::BLOCK_GAP, - &gap.encode(), - ); - transaction.set( - columns::META, - meta_keys::BLOCK_GAP_VERSION, - &BLOCK_GAP_CURRENT_VERSION.encode(), - ); } block_gap_updated = true; }, BlockGapType::MissingBody => { - unreachable!("Unsupported block gap.
TODO: https://github.com/paritytech/polkadot-sdk/issues/5406") + // Gap increased when syncing the header chain during fast sync. + if number == gap.end + One::one() && !existing_body { + gap.end += One::one(); + utils::insert_number_to_key_mapping( + &mut transaction, + columns::KEY_LOOKUP, + number, + hash, + )?; + insert_new_gap(&mut transaction, gap, &mut block_gap); + debug!(target: "db", "Update block gap. {block_gap:?}"); + block_gap_updated = true; + // Gap decreased when downloading the full blocks. + } else if number == gap.start && existing_body { + gap.start += One::one(); + if gap.start > gap.end { + transaction.remove(columns::META, meta_keys::BLOCK_GAP); + transaction.remove(columns::META, meta_keys::BLOCK_GAP_VERSION); + block_gap = None; + debug!(target: "db", "Removed block gap."); + } else { + insert_new_gap(&mut transaction, gap, &mut block_gap); + debug!(target: "db", "Update block gap. {block_gap:?}"); + } + block_gap_updated = true; + } }, } - } else if operation.create_gap && - number > best_num + One::one() && - self.blockchain.header(parent_hash)?.is_none() - { - let gap = BlockGap { - start: best_num + One::one(), - end: number - One::one(), - gap_type: BlockGapType::MissingHeaderAndBody, - }; - transaction.set(columns::META, meta_keys::BLOCK_GAP, &gap.encode()); - transaction.set( - columns::META, - meta_keys::BLOCK_GAP_VERSION, - &BLOCK_GAP_CURRENT_VERSION.encode(), - ); - block_gap = Some(gap); - block_gap_updated = true; - debug!(target: "db", "Detected block gap {block_gap:?}"); + } else if operation.create_gap { + if number > best_num + One::one() && + self.blockchain.header(parent_hash)?.is_none() + { + let gap = BlockGap { + start: best_num + One::one(), + end: number - One::one(), + gap_type: BlockGapType::MissingHeaderAndBody, + }; + insert_new_gap(&mut transaction, gap, &mut block_gap); + block_gap_updated = true; + debug!(target: "db", "Detected block gap (warp sync) {block_gap:?}"); + } else if number == best_num + One::one() && + self.blockchain.header(parent_hash)?.is_some() && + !existing_body + { + let gap = BlockGap { + start: number, + end: number, + gap_type: BlockGapType::MissingBody, + }; + insert_new_gap(&mut transaction, gap, &mut block_gap); + block_gap_updated = true; + debug!(target: "db", "Detected block gap (fast sync) {block_gap:?}"); + } } } diff --git a/substrate/client/network-gossip/src/bridge.rs b/substrate/client/network-gossip/src/bridge.rs index a4bd922a76d5..2daf1e49ee4b 100644 --- a/substrate/client/network-gossip/src/bridge.rs +++ b/substrate/client/network-gossip/src/bridge.rs @@ -220,18 +220,16 @@ impl Future for GossipEngine { }, NotificationEvent::NotificationStreamOpened { peer, handshake, .. 
- } => { - let Some(role) = this.network.peer_role(peer, handshake) else { + } => + if let Some(role) = this.network.peer_role(peer, handshake) { + this.state_machine.new_peer( + &mut this.notification_service, + peer, + role, + ); + } else { log::debug!(target: "gossip", "role for {peer} couldn't be determined"); - continue - }; - - this.state_machine.new_peer( - &mut this.notification_service, - peer, - role, - ); - }, + }, NotificationEvent::NotificationStreamClosed { peer } => { this.state_machine .peer_disconnected(&mut this.notification_service, peer); diff --git a/substrate/client/network/sync/src/engine.rs b/substrate/client/network/sync/src/engine.rs index cc2089d1974c..349c41ee1f4a 100644 --- a/substrate/client/network/sync/src/engine.rs +++ b/substrate/client/network/sync/src/engine.rs @@ -545,7 +545,14 @@ where self.process_service_command(command), notification_event = self.notification_service.next_event() => match notification_event { Some(event) => self.process_notification_event(event), - None => return, + None => { + error!( + target: LOG_TARGET, + "Terminating `SyncingEngine` because `NotificationService` has terminated.", + ); + + return; + } }, response_event = self.pending_responses.select_next_some() => self.process_response_event(response_event), diff --git a/substrate/client/network/sync/src/strategy/state_sync.rs b/substrate/client/network/sync/src/strategy/state_sync.rs index 7a0cc1191609..47d859a1b7c6 100644 --- a/substrate/client/network/sync/src/strategy/state_sync.rs +++ b/substrate/client/network/sync/src/strategy/state_sync.rs @@ -89,22 +89,62 @@ pub enum ImportResult { BadResponse, } -/// State sync state machine. Accumulates partial state data until it -/// is ready to be imported. -pub struct StateSync { - target_block: B::Hash, +struct StateSyncMetadata { + last_key: SmallVec<[Vec; 2]>, target_header: B::Header, - target_root: B::Hash, target_body: Option>, target_justifications: Option, - last_key: SmallVec<[Vec; 2]>, - state: HashMap, (Vec<(Vec, Vec)>, Vec>)>, complete: bool, - client: Arc, imported_bytes: u64, skip_proof: bool, } +impl StateSyncMetadata { + fn target_hash(&self) -> B::Hash { + self.target_header.hash() + } + + /// Returns target block number. + fn target_number(&self) -> NumberFor { + *self.target_header.number() + } + + fn target_root(&self) -> B::Hash { + *self.target_header.state_root() + } + + fn next_request(&self) -> StateRequest { + StateRequest { + block: self.target_hash().encode(), + start: self.last_key.clone().into_vec(), + no_proof: self.skip_proof, + } + } + + fn progress(&self) -> StateSyncProgress { + let cursor = *self.last_key.get(0).and_then(|last| last.get(0)).unwrap_or(&0u8); + let percent_done = cursor as u32 * 100 / 256; + StateSyncProgress { + percentage: percent_done, + size: self.imported_bytes, + phase: if self.complete { + StateSyncPhase::ImportingState + } else { + StateSyncPhase::DownloadingState + }, + } + } +} + +/// State sync state machine. +/// +/// Accumulates partial state data until it is ready to be imported. 
+pub struct StateSync { + metadata: StateSyncMetadata, + state: HashMap, (Vec<(Vec, Vec)>, Vec>)>, + client: Arc, +} + impl StateSync where B: BlockT, @@ -120,16 +160,16 @@ where ) -> Self { Self { client, - target_block: target_header.hash(), - target_root: *target_header.state_root(), - target_header, - target_body, - target_justifications, - last_key: SmallVec::default(), + metadata: StateSyncMetadata { + last_key: SmallVec::default(), + target_header, + target_body, + target_justifications, + complete: false, + imported_bytes: 0, + skip_proof, + }, state: HashMap::default(), - complete: false, - imported_bytes: 0, - skip_proof, } } @@ -155,7 +195,7 @@ where if is_top && well_known_keys::is_child_storage_key(key.as_slice()) { child_storage_roots.push((value, key)); } else { - self.imported_bytes += key.len() as u64; + self.metadata.imported_bytes += key.len() as u64; entry.0.push((key, value)); } } @@ -177,11 +217,11 @@ where // the parent cursor stays valid. // Empty parent trie content only happens when all the response content // is part of a single child trie. - if self.last_key.len() == 2 && response.entries[0].entries.is_empty() { + if self.metadata.last_key.len() == 2 && response.entries[0].entries.is_empty() { // Do not remove the parent trie position. - self.last_key.pop(); + self.metadata.last_key.pop(); } else { - self.last_key.clear(); + self.metadata.last_key.clear(); } for state in response.entries { debug!( @@ -193,7 +233,7 @@ where if !state.complete { if let Some(e) = state.entries.last() { - self.last_key.push(e.key.clone()); + self.metadata.last_key.push(e.key.clone()); } complete = false; } @@ -219,11 +259,11 @@ where debug!(target: LOG_TARGET, "Bad state response"); return ImportResult::BadResponse } - if !self.skip_proof && response.proof.is_empty() { + if !self.metadata.skip_proof && response.proof.is_empty() { debug!(target: LOG_TARGET, "Missing proof"); return ImportResult::BadResponse } - let complete = if !self.skip_proof { + let complete = if !self.metadata.skip_proof { debug!(target: LOG_TARGET, "Importing state from {} trie nodes", response.proof.len()); let proof_size = response.proof.len() as u64; let proof = match CompactProof::decode(&mut response.proof.as_ref()) { @@ -234,9 +274,9 @@ where }, }; let (values, completed) = match self.client.verify_range_proof( - self.target_root, + self.metadata.target_root(), proof, - self.last_key.as_slice(), + self.metadata.last_key.as_slice(), ) { Err(e) => { debug!( @@ -251,27 +291,25 @@ where debug!(target: LOG_TARGET, "Imported with {} keys", values.len()); let complete = completed == 0; - if !complete && !values.update_last_key(completed, &mut self.last_key) { + if !complete && !values.update_last_key(completed, &mut self.metadata.last_key) { debug!(target: LOG_TARGET, "Error updating key cursor, depth: {}", completed); }; self.process_state_verified(values); - self.imported_bytes += proof_size; + self.metadata.imported_bytes += proof_size; complete } else { self.process_state_unverified(response) }; if complete { - self.complete = true; + self.metadata.complete = true; + let target_hash = self.metadata.target_hash(); ImportResult::Import( - self.target_block, - self.target_header.clone(), - ImportedState { - block: self.target_block, - state: std::mem::take(&mut self.state).into(), - }, - self.target_body.clone(), - self.target_justifications.clone(), + target_hash, + self.metadata.target_header.clone(), + ImportedState { block: target_hash, state: std::mem::take(&mut self.state).into() }, + 
self.metadata.target_body.clone(), + self.metadata.target_justifications.clone(), ) } else { ImportResult::Continue @@ -280,40 +318,26 @@ where /// Produce next state request. fn next_request(&self) -> StateRequest { - StateRequest { - block: self.target_block.encode(), - start: self.last_key.clone().into_vec(), - no_proof: self.skip_proof, - } + self.metadata.next_request() } /// Check if the state is complete. fn is_complete(&self) -> bool { - self.complete + self.metadata.complete } /// Returns target block number. fn target_number(&self) -> NumberFor { - *self.target_header.number() + self.metadata.target_number() } /// Returns target block hash. fn target_hash(&self) -> B::Hash { - self.target_block + self.metadata.target_hash() } /// Returns state sync estimated progress. fn progress(&self) -> StateSyncProgress { - let cursor = *self.last_key.get(0).and_then(|last| last.get(0)).unwrap_or(&0u8); - let percent_done = cursor as u32 * 100 / 256; - StateSyncProgress { - percentage: percent_done, - size: self.imported_bytes, - phase: if self.complete { - StateSyncPhase::ImportingState - } else { - StateSyncPhase::DownloadingState - }, - } + self.metadata.progress() } } diff --git a/substrate/client/tracing/src/logging/directives.rs b/substrate/client/tracing/src/logging/directives.rs index a99e9c4c8909..811511bb20f5 100644 --- a/substrate/client/tracing/src/logging/directives.rs +++ b/substrate/client/tracing/src/logging/directives.rs @@ -40,7 +40,7 @@ pub(crate) fn add_default_directives(directives: &str) { add_directives(directives); } -/// Add directives to current directives +/// Add directives to current directives. pub fn add_directives(directives: &str) { CURRENT_DIRECTIVES .get_or_init(|| Mutex::new(Vec::new())) @@ -48,6 +48,11 @@ pub fn add_directives(directives: &str) { .push(directives.to_owned()); } +/// Returns the current directives. +pub fn get_directives() -> Vec { + CURRENT_DIRECTIVES.get_or_init(|| Mutex::new(Vec::new())).lock().clone() +} + /// Parse `Directive` and add to default directives if successful. /// /// Ensures the supplied directive will be restored when resetting the log filter. diff --git a/substrate/frame/bounties/src/benchmarking.rs b/substrate/frame/bounties/src/benchmarking.rs index 8ad85d5420ed..1e931958898d 100644 --- a/substrate/frame/bounties/src/benchmarking.rs +++ b/substrate/frame/bounties/src/benchmarking.rs @@ -25,7 +25,7 @@ use alloc::{vec, vec::Vec}; use frame_benchmarking::v1::{ account, benchmarks_instance_pallet, whitelisted_caller, BenchmarkError, }; -use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; +use frame_system::{pallet_prelude::BlockNumberFor as SystemBlockNumberFor, RawOrigin}; use sp_runtime::traits::{BlockNumberProvider, Bounded}; use crate::Pallet as Bounties; @@ -33,7 +33,7 @@ use pallet_treasury::Pallet as Treasury; const SEED: u32 = 0; -fn set_block_number, I: 'static>(n: BlockNumberFor) { +fn set_block_number, I: 'static>(n: BlockNumberFor) { >::BlockNumberProvider::set_block_number(n); } @@ -132,7 +132,7 @@ benchmarks_instance_pallet! 
{ Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; let bounty_id = BountyCount::::get() - 1; let approve_origin = T::SpendOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - Treasury::::on_initialize(BlockNumberFor::::zero()); + Treasury::::on_initialize(SystemBlockNumberFor::::zero()); }: _(approve_origin, bounty_id, curator_lookup, fee) verify { assert_last_event::( diff --git a/substrate/frame/bounties/src/lib.rs b/substrate/frame/bounties/src/lib.rs index 3ed408a19120..729c76b5cc75 100644 --- a/substrate/frame/bounties/src/lib.rs +++ b/substrate/frame/bounties/src/lib.rs @@ -105,7 +105,9 @@ use sp_runtime::{ use frame_support::{dispatch::DispatchResultWithPostInfo, traits::EnsureOrigin}; use frame_support::pallet_prelude::*; -use frame_system::pallet_prelude::*; +use frame_system::pallet_prelude::{ + ensure_signed, BlockNumberFor as SystemBlockNumberFor, OriginFor, +}; use scale_info::TypeInfo; pub use weights::WeightInfo; @@ -120,6 +122,9 @@ pub type BountyIndex = u32; type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; +type BlockNumberFor = + <>::BlockNumberProvider as BlockNumberProvider>::BlockNumber; + /// A bounty proposal. #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct Bounty { @@ -213,11 +218,11 @@ pub mod pallet { /// The delay period for which a bounty beneficiary need to wait before claim the payout. #[pallet::constant] - type BountyDepositPayoutDelay: Get>; + type BountyDepositPayoutDelay: Get>; /// Bounty duration in blocks. #[pallet::constant] - type BountyUpdatePeriod: Get>; + type BountyUpdatePeriod: Get>; /// The curator deposit is calculated as a percentage of the curator fee. /// @@ -326,7 +331,7 @@ pub mod pallet { _, Twox64Concat, BountyIndex, - Bounty, BlockNumberFor>, + Bounty, BlockNumberFor>, >; /// The description of each bounty. @@ -876,9 +881,9 @@ pub mod pallet { } #[pallet::hooks] - impl, I: 'static> Hooks> for Pallet { + impl, I: 'static> Hooks> for Pallet { #[cfg(feature = "try-runtime")] - fn try_state(_n: BlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { + fn try_state(_n: SystemBlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { Self::do_try_state() } } @@ -928,7 +933,7 @@ impl, I: 'static> Pallet { /// Get the block number used in the treasury pallet. /// /// It may be configured to use the relay chain block number on a parachain. 
- pub fn treasury_block_number() -> BlockNumberFor { + pub fn treasury_block_number() -> BlockNumberFor { >::BlockNumberProvider::current_block_number() } diff --git a/substrate/frame/child-bounties/src/benchmarking.rs b/substrate/frame/child-bounties/src/benchmarking.rs index 4b2d62cd920e..2864f3ab5048 100644 --- a/substrate/frame/child-bounties/src/benchmarking.rs +++ b/substrate/frame/child-bounties/src/benchmarking.rs @@ -22,7 +22,7 @@ use alloc::vec; use frame_benchmarking::{v2::*, BenchmarkError}; use frame_support::ensure; -use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; +use frame_system::RawOrigin; use pallet_bounties::Pallet as Bounties; use pallet_treasury::Pallet as Treasury; use sp_runtime::traits::BlockNumberProvider; diff --git a/substrate/frame/child-bounties/src/lib.rs b/substrate/frame/child-bounties/src/lib.rs index ea1d9547d465..9fca26510989 100644 --- a/substrate/frame/child-bounties/src/lib.rs +++ b/substrate/frame/child-bounties/src/lib.rs @@ -79,7 +79,9 @@ use sp_runtime::{ }; use frame_support::pallet_prelude::*; -use frame_system::pallet_prelude::*; +use frame_system::pallet_prelude::{ + ensure_signed, BlockNumberFor as SystemBlockNumberFor, OriginFor, +}; use pallet_bounties::BountyStatus; use scale_info::TypeInfo; pub use weights::WeightInfo; @@ -90,6 +92,8 @@ type BalanceOf = pallet_treasury::BalanceOf; type BountiesError = pallet_bounties::Error; type BountyIndex = pallet_bounties::BountyIndex; type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; +type BlockNumberFor = + <::BlockNumberProvider as BlockNumberProvider>::BlockNumber; /// A child bounty proposal. #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] @@ -810,7 +814,7 @@ pub mod pallet { } #[pallet::hooks] - impl Hooks> for Pallet { + impl Hooks> for Pallet { fn integrity_test() { let parent_bounty_id: BountyIndex = 1; let child_bounty_id: BountyIndex = 2; diff --git a/substrate/frame/conviction-voting/src/lib.rs b/substrate/frame/conviction-voting/src/lib.rs index 85da1aed3c27..31bd6b85ec86 100644 --- a/substrate/frame/conviction-voting/src/lib.rs +++ b/substrate/frame/conviction-voting/src/lib.rs @@ -171,10 +171,12 @@ pub mod pallet { Delegated(T::AccountId, T::AccountId), /// An \[account\] has cancelled a previous delegation operation. Undelegated(T::AccountId), - /// An account that has voted + /// An account has voted Voted { who: T::AccountId, vote: AccountVote> }, - /// A vote that been removed + /// A vote has been removed VoteRemoved { who: T::AccountId, vote: AccountVote> }, + /// The lockup period of a conviction vote expired, and the funds have been unlocked. 
+ VoteUnlocked { who: T::AccountId, class: ClassOf }, } #[pallet::error] @@ -315,6 +317,7 @@ pub mod pallet { ensure_signed(origin)?; let target = T::Lookup::lookup(target)?; Self::update_lock(&class, &target); + Self::deposit_event(Event::VoteUnlocked { who: target, class }); Ok(()) } diff --git a/substrate/frame/conviction-voting/src/tests.rs b/substrate/frame/conviction-voting/src/tests.rs index 37cdd7a5b338..dd9ee33ee183 100644 --- a/substrate/frame/conviction-voting/src/tests.rs +++ b/substrate/frame/conviction-voting/src/tests.rs @@ -238,27 +238,52 @@ fn basic_stuff() { fn basic_voting_works() { new_test_ext().execute_with(|| { assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, aye(2, 5))); + System::assert_last_event(tests::RuntimeEvent::Voting(Event::Voted { + who: 1, + vote: aye(2, 5), + })); assert_eq!(tally(3), Tally::from_parts(10, 0, 2)); assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, nay(2, 5))); + System::assert_last_event(tests::RuntimeEvent::Voting(Event::Voted { + who: 1, + vote: nay(2, 5), + })); assert_eq!(tally(3), Tally::from_parts(0, 10, 0)); assert_eq!(Balances::usable_balance(1), 8); assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, aye(5, 1))); + System::assert_last_event(tests::RuntimeEvent::Voting(Event::Voted { + who: 1, + vote: aye(5, 1), + })); assert_eq!(tally(3), Tally::from_parts(5, 0, 5)); assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, nay(5, 1))); assert_eq!(tally(3), Tally::from_parts(0, 5, 0)); assert_eq!(Balances::usable_balance(1), 5); assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, aye(10, 0))); + System::assert_last_event(tests::RuntimeEvent::Voting(Event::Voted { + who: 1, + vote: aye(10, 0), + })); assert_eq!(tally(3), Tally::from_parts(1, 0, 10)); + assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, nay(10, 0))); assert_eq!(tally(3), Tally::from_parts(0, 1, 0)); assert_eq!(Balances::usable_balance(1), 0); assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(1), None, 3)); + System::assert_last_event(tests::RuntimeEvent::Voting(Event::VoteRemoved { + who: 1, + vote: nay(10, 0), + })); assert_eq!(tally(3), Tally::from_parts(0, 0, 0)); assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), class(3), 1)); + System::assert_last_event(tests::RuntimeEvent::Voting(Event::VoteUnlocked { + who: 1, + class: class(3), + })); assert_eq!(Balances::usable_balance(1), 10); }); } @@ -267,15 +292,32 @@ fn basic_voting_works() { fn split_voting_works() { new_test_ext().execute_with(|| { assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, split(10, 0))); + System::assert_last_event(tests::RuntimeEvent::Voting(Event::Voted { + who: 1, + vote: split(10, 0), + })); assert_eq!(tally(3), Tally::from_parts(1, 0, 10)); + assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, split(5, 5))); + System::assert_last_event(tests::RuntimeEvent::Voting(Event::Voted { + who: 1, + vote: split(5, 5), + })); assert_eq!(tally(3), Tally::from_parts(0, 0, 5)); assert_eq!(Balances::usable_balance(1), 0); assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(1), None, 3)); + System::assert_last_event(tests::RuntimeEvent::Voting(Event::VoteRemoved { + who: 1, + vote: split(5, 5), + })); assert_eq!(tally(3), Tally::from_parts(0, 0, 0)); assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), class(3), 1)); + System::assert_last_event(tests::RuntimeEvent::Voting(Event::VoteUnlocked { + who: 1, + class: class(3), + })); assert_eq!(Balances::usable_balance(1), 10); }); } @@ -284,25 +326,48 @@ fn split_voting_works() { fn abstain_voting_works() { new_test_ext().execute_with(|| { 
assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, split_abstain(0, 0, 10))); + System::assert_last_event(tests::RuntimeEvent::Voting(Event::Voted { + who: 1, + vote: split_abstain(0, 0, 10), + })); assert_eq!(tally(3), Tally::from_parts(0, 0, 10)); - assert_ok!(Voting::vote(RuntimeOrigin::signed(2), 3, split_abstain(0, 0, 20))); - assert_eq!(tally(3), Tally::from_parts(0, 0, 30)); - assert_ok!(Voting::vote(RuntimeOrigin::signed(2), 3, split_abstain(10, 0, 10))); - assert_eq!(tally(3), Tally::from_parts(1, 0, 30)); + + assert_ok!(Voting::vote(RuntimeOrigin::signed(6), 3, split_abstain(10, 0, 20))); + System::assert_last_event(tests::RuntimeEvent::Voting(Event::Voted { + who: 6, + vote: split_abstain(10, 0, 20), + })); + assert_eq!(tally(3), Tally::from_parts(1, 0, 40)); + + assert_ok!(Voting::vote(RuntimeOrigin::signed(6), 3, split_abstain(0, 0, 40))); + System::assert_last_event(tests::RuntimeEvent::Voting(Event::Voted { + who: 6, + vote: split_abstain(0, 0, 40), + })); + + assert_eq!(tally(3), Tally::from_parts(0, 0, 50)); assert_eq!(Balances::usable_balance(1), 0); - assert_eq!(Balances::usable_balance(2), 0); + assert_eq!(Balances::usable_balance(6), 20); assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(1), None, 3)); - assert_eq!(tally(3), Tally::from_parts(1, 0, 20)); - - assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(2), None, 3)); + System::assert_last_event(tests::RuntimeEvent::Voting(Event::VoteRemoved { + who: 1, + vote: split_abstain(0, 0, 10), + })); + assert_eq!(tally(3), Tally::from_parts(0, 0, 40)); + + assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(6), Some(class(3)), 3)); + System::assert_last_event(tests::RuntimeEvent::Voting(Event::VoteRemoved { + who: 6, + vote: split_abstain(0, 0, 40), + })); assert_eq!(tally(3), Tally::from_parts(0, 0, 0)); assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), class(3), 1)); assert_eq!(Balances::usable_balance(1), 10); - assert_ok!(Voting::unlock(RuntimeOrigin::signed(2), class(3), 2)); - assert_eq!(Balances::usable_balance(2), 20); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(6), class(3), 6)); + assert_eq!(Balances::usable_balance(6), 60); }); } diff --git a/substrate/frame/conviction-voting/src/types.rs b/substrate/frame/conviction-voting/src/types.rs index d6bbb678a14b..aa7dd578fbad 100644 --- a/substrate/frame/conviction-voting/src/types.rs +++ b/substrate/frame/conviction-voting/src/types.rs @@ -117,14 +117,9 @@ impl< pub fn from_parts( ayes_with_conviction: Votes, nays_with_conviction: Votes, - ayes: Votes, + support: Votes, ) -> Self { - Self { - ayes: ayes_with_conviction, - nays: nays_with_conviction, - support: ayes, - dummy: PhantomData, - } + Self { ayes: ayes_with_conviction, nays: nays_with_conviction, support, dummy: PhantomData } } /// Add an account's vote into the tally. diff --git a/substrate/frame/nomination-pools/runtime-api/src/lib.rs b/substrate/frame/nomination-pools/runtime-api/src/lib.rs index 4138dd22d898..644ee07fd634 100644 --- a/substrate/frame/nomination-pools/runtime-api/src/lib.rs +++ b/substrate/frame/nomination-pools/runtime-api/src/lib.rs @@ -43,6 +43,9 @@ sp_api::decl_runtime_apis! { fn pool_pending_slash(pool_id: PoolId) -> Balance; /// Returns the pending slash for a given pool member. + /// + /// If pending slash of the member exceeds `ExistentialDeposit`, it can be reported on + /// chain. fn member_pending_slash(member: AccountId) -> Balance; /// Returns true if the pool with `pool_id` needs migration. 
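Before the nomination-pools lib.rs changes below, a sketch of how a permissionless reporter might use this runtime API surface: check the member's pending slash against the ED before submitting `apply_slash`, mirroring the on-chain `SlashTooLow` guard. `should_report_slash` is a hypothetical helper, not part of this PR:

```rust
/// Hypothetical off-chain helper: only worth submitting `apply_slash` when
/// the member's pending slash (as returned by `member_pending_slash`) meets
/// the existential deposit.
fn should_report_slash(pending_slash: u128, existential_deposit: u128) -> bool {
    pending_slash > 0 && pending_slash >= existential_deposit
}

fn main() {
    // With ED = 2, as in the updated `pool_slash_proportional` test below,
    // a pending slash of 1 is skipped while 3 is reportable.
    assert!(!should_report_slash(1, 2));
    assert!(should_report_slash(3, 2));
}
```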
diff --git a/substrate/frame/nomination-pools/src/lib.rs b/substrate/frame/nomination-pools/src/lib.rs index 201b0af1d608..dc82bf3a37c6 100644 --- a/substrate/frame/nomination-pools/src/lib.rs +++ b/substrate/frame/nomination-pools/src/lib.rs @@ -1944,6 +1944,8 @@ pub mod pallet { NothingToAdjust, /// No slash pending that can be applied to the member. NothingToSlash, + /// The slash amount is too low to be applied. + SlashTooLow, /// The pool or member delegation has already migrated to delegate stake. AlreadyMigrated, /// The pool or member delegation has not migrated yet to delegate stake. @@ -2300,7 +2302,7 @@ pub mod pallet { let slash_weight = // apply slash if any before withdraw. - match Self::do_apply_slash(&member_account, None) { + match Self::do_apply_slash(&member_account, None, false) { Ok(_) => T::WeightInfo::apply_slash(), Err(e) => { let no_pending_slash: DispatchResult = Err(Error::::NothingToSlash.into()); @@ -2974,8 +2976,10 @@ pub mod pallet { /// Fails unless [`crate::pallet::Config::StakeAdapter`] is of strategy type: /// [`adapter::StakeStrategyType::Delegate`]. /// - /// This call can be dispatched permissionlessly (i.e. by any account). If the member has - /// slash to be applied, caller may be rewarded with the part of the slash. + /// The pending slash amount of the member must be equal or more than `ExistentialDeposit`. + /// This call can be dispatched permissionlessly (i.e. by any account). If the execution + /// is successful, fee is refunded and caller may be rewarded with a part of the slash + /// based on the [`crate::pallet::Config::StakeAdapter`] configuration. #[pallet::call_index(23)] #[pallet::weight(T::WeightInfo::apply_slash())] pub fn apply_slash( @@ -2989,7 +2993,7 @@ pub mod pallet { let who = ensure_signed(origin)?; let member_account = T::Lookup::lookup(member_account)?; - Self::do_apply_slash(&member_account, Some(who))?; + Self::do_apply_slash(&member_account, Some(who), true)?; // If successful, refund the fees. Ok(Pays::No.into()) @@ -3574,15 +3578,21 @@ impl Pallet { fn do_apply_slash( member_account: &T::AccountId, reporter: Option, + enforce_min_slash: bool, ) -> DispatchResult { let member = PoolMembers::::get(member_account).ok_or(Error::::PoolMemberNotFound)?; let pending_slash = Self::member_pending_slash(Member::from(member_account.clone()), member.clone())?; - // if nothing to slash, return error. + // ensure there is something to slash. ensure!(!pending_slash.is_zero(), Error::::NothingToSlash); + if enforce_min_slash { + // ensure slashed amount is at least the minimum balance. + ensure!(pending_slash >= T::Currency::minimum_balance(), Error::::SlashTooLow); + } + T::StakeAdapter::member_slash( Member::from(member_account.clone()), Pool::from(Pallet::::generate_bonded_account(member.pool_id)), @@ -3946,6 +3956,9 @@ impl Pallet { /// Returns the unapplied slash of a member. /// /// Pending slash is only applicable with [`adapter::DelegateStake`] strategy. + /// + /// If pending slash of the member exceeds `ExistentialDeposit`, it can be reported on + /// chain via [`Call::apply_slash`]. 
pub fn api_member_pending_slash(who: T::AccountId) -> BalanceOf { PoolMembers::::get(who.clone()) .map(|pool_member| { diff --git a/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml b/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml index 7940caaff775..70e1591409b8 100644 --- a/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml +++ b/substrate/frame/nomination-pools/test-delegate-stake/Cargo.toml @@ -26,7 +26,7 @@ sp-staking = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } frame-system = { workspace = true, default-features = true } -frame-support = { workspace = true, default-features = true } +frame-support = { features = ["experimental"], workspace = true, default-features = true } frame-election-provider-support = { workspace = true, default-features = true } pallet-timestamp = { workspace = true, default-features = true } diff --git a/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs b/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs index 40025cdbb3cd..cc6335959ab7 100644 --- a/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs +++ b/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs @@ -20,7 +20,7 @@ mod mock; use frame_support::{ - assert_noop, assert_ok, + assert_noop, assert_ok, hypothetically, traits::{fungible::InspectHold, Currency}, }; use mock::*; @@ -537,10 +537,10 @@ fn pool_slash_proportional() { // a typical example where 3 pool members unbond in era 99, 100, and 101, and a slash that // happened in era 100 should only affect the latter two. new_test_ext().execute_with(|| { - ExistentialDeposit::set(1); + ExistentialDeposit::set(2); BondingDuration::set(28); - assert_eq!(Balances::minimum_balance(), 1); - assert_eq!(CurrentEra::::get(), None); + assert_eq!(Balances::minimum_balance(), 2); + assert_eq!(Staking::current_era(), None); // create the pool, we know this has id 1. assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10)); @@ -670,6 +670,34 @@ fn pool_slash_proportional() { // no pending slash yet. assert_eq!(Pools::api_pool_pending_slash(1), 0); + // and therefore applying slash fails + assert_noop!( + Pools::apply_slash(RuntimeOrigin::signed(10), 21), + PoolsError::::NothingToSlash + ); + + hypothetically!({ + // a very small amount is slashed + pallet_staking::slashing::do_slash::( + &POOL1_BONDED, + 3, + &mut Default::default(), + &mut Default::default(), + 100, + ); + + // ensure correct amount is pending to be slashed + assert_eq!(Pools::api_pool_pending_slash(1), 3); + + // 21 has pending slash lower than ED (2) + assert_eq!(Pools::api_member_pending_slash(21), 1); + + // slash fails as minimum pending slash amount not met. + assert_noop!( + Pools::apply_slash(RuntimeOrigin::signed(10), 21), + PoolsError::::SlashTooLow + ); + }); pallet_staking::slashing::do_slash::( &POOL1_BONDED, @@ -909,6 +937,7 @@ fn pool_slash_non_proportional_bonded_pool_and_chunks() { ); }); } + #[test] fn pool_migration_e2e() { new_test_ext().execute_with(|| { diff --git a/substrate/frame/revive/rpc/src/tests.rs b/substrate/frame/revive/rpc/src/tests.rs index eb23bd7583a0..7734c8c57209 100644 --- a/substrate/frame/revive/rpc/src/tests.rs +++ b/substrate/frame/revive/rpc/src/tests.rs @@ -32,9 +32,9 @@ use static_init::dynamic; use std::thread; use substrate_cli_test_utils::*; -/// Create a websocket client with a 30s timeout. +/// Create a websocket client with a 120s timeout. 
async fn ws_client_with_retry(url: &str) -> WsClient { - let timeout = tokio::time::Duration::from_secs(30); + let timeout = tokio::time::Duration::from_secs(120); tokio::time::timeout(timeout, async { loop { if let Ok(client) = WsClientBuilder::default().build(url).await { diff --git a/substrate/frame/scheduler/src/benchmarking.rs b/substrate/frame/scheduler/src/benchmarking.rs index d0a14fc73d64..ff40e8ef8abf 100644 --- a/substrate/frame/scheduler/src/benchmarking.rs +++ b/substrate/frame/scheduler/src/benchmarking.rs @@ -17,25 +17,23 @@ //! Scheduler pallet benchmarking. -use super::*; use alloc::vec; -use frame_benchmarking::v1::{account, benchmarks, BenchmarkError}; +use frame_benchmarking::v2::*; use frame_support::{ ensure, traits::{schedule::Priority, BoundedInline}, weights::WeightMeter, }; -use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; +use frame_system::{EventRecord, RawOrigin}; -use crate::Pallet as Scheduler; -use frame_system::{Call as SystemCall, EventRecord}; +use crate::*; -const SEED: u32 = 0; +type SystemCall = frame_system::Call; +type SystemOrigin = ::RuntimeOrigin; +const SEED: u32 = 0; const BLOCK_NUMBER: u32 = 2; -type SystemOrigin = ::RuntimeOrigin; - fn assert_last_event(generic_event: ::RuntimeEvent) { let events = frame_system::Pallet::::events(); let system_event: ::RuntimeEvent = generic_event.into(); @@ -61,7 +59,7 @@ fn fill_schedule( let call = make_call::(None); let period = Some(((i + 100).into(), 100)); let name = u32_to_name(i); - Scheduler::::do_schedule_named(name, t, period, 0, origin.clone(), call)?; + Pallet::::do_schedule_named(name, t, period, 0, origin.clone(), call)?; } ensure!(Agenda::::get(when).len() == n as usize, "didn't fill schedule"); Ok(()) @@ -134,107 +132,160 @@ fn make_origin(signed: bool) -> ::PalletsOrigin { } } -benchmarks! { +#[benchmarks] +mod benchmarks { + use super::*; + // `service_agendas` when no work is done. - service_agendas_base { - let now = BlockNumberFor::::from(BLOCK_NUMBER); + #[benchmark] + fn service_agendas_base() { + let now = BLOCK_NUMBER.into(); IncompleteSince::::put(now - One::one()); - }: { - Scheduler::::service_agendas(&mut WeightMeter::new(), now, 0); - } verify { + + #[block] + { + Pallet::::service_agendas(&mut WeightMeter::new(), now, 0); + } + assert_eq!(IncompleteSince::::get(), Some(now - One::one())); } // `service_agenda` when no work is done. - service_agenda_base { + #[benchmark] + fn service_agenda_base( + s: Linear<0, { T::MaxScheduledPerBlock::get() }>, + ) -> Result<(), BenchmarkError> { let now = BLOCK_NUMBER.into(); - let s in 0 .. T::MaxScheduledPerBlock::get(); fill_schedule::(now, s)?; let mut executed = 0; - }: { - Scheduler::::service_agenda(&mut WeightMeter::new(), &mut executed, now, now, 0); - } verify { + + #[block] + { + Pallet::::service_agenda(&mut WeightMeter::new(), &mut executed, now, now, 0); + } + assert_eq!(executed, 0); + + Ok(()) } // `service_task` when the task is a non-periodic, non-named, non-fetched call which is not // dispatched (e.g. due to being overweight). - service_task_base { + #[benchmark] + fn service_task_base() { let now = BLOCK_NUMBER.into(); let task = make_task::(false, false, false, None, 0); // prevent any tasks from actually being executed as we only want the surrounding weight. 
let mut counter = WeightMeter::with_limit(Weight::zero()); - }: { - let result = Scheduler::::service_task(&mut counter, now, now, 0, true, task); - } verify { - //assert_eq!(result, Ok(())); + let _result; + + #[block] + { + _result = Pallet::::service_task(&mut counter, now, now, 0, true, task); + } + + // assert!(_result.is_ok()); } // `service_task` when the task is a non-periodic, non-named, fetched call (with a known // preimage length) and which is not dispatched (e.g. due to being overweight). - #[pov_mode = MaxEncodedLen { + #[benchmark(pov_mode = MaxEncodedLen { // Use measured PoV size for the Preimages since we pass in a length witness. Preimage::PreimageFor: Measured - }] - service_task_fetched { - let s in (BoundedInline::bound() as u32) .. (T::Preimages::MAX_LENGTH as u32); + })] + fn service_task_fetched( + s: Linear<{ BoundedInline::bound() as u32 }, { T::Preimages::MAX_LENGTH as u32 }>, + ) { let now = BLOCK_NUMBER.into(); let task = make_task::(false, false, false, Some(s), 0); // prevent any tasks from actually being executed as we only want the surrounding weight. let mut counter = WeightMeter::with_limit(Weight::zero()); - }: { - let result = Scheduler::::service_task(&mut counter, now, now, 0, true, task); - } verify { + let _result; + + #[block] + { + _result = Pallet::::service_task(&mut counter, now, now, 0, true, task); + } + + // assert!(result.is_ok()); } // `service_task` when the task is a non-periodic, named, non-fetched call which is not // dispatched (e.g. due to being overweight). - service_task_named { + #[benchmark] + fn service_task_named() { let now = BLOCK_NUMBER.into(); let task = make_task::(false, true, false, None, 0); // prevent any tasks from actually being executed as we only want the surrounding weight. let mut counter = WeightMeter::with_limit(Weight::zero()); - }: { - let result = Scheduler::::service_task(&mut counter, now, now, 0, true, task); - } verify { + let _result; + + #[block] + { + _result = Pallet::::service_task(&mut counter, now, now, 0, true, task); + } + + // assert!(result.is_ok()); } // `service_task` when the task is a periodic, non-named, non-fetched call which is not // dispatched (e.g. due to being overweight). - service_task_periodic { + #[benchmark] + fn service_task_periodic() { let now = BLOCK_NUMBER.into(); let task = make_task::(true, false, false, None, 0); // prevent any tasks from actually being executed as we only want the surrounding weight. let mut counter = WeightMeter::with_limit(Weight::zero()); - }: { - let result = Scheduler::::service_task(&mut counter, now, now, 0, true, task); - } verify { + let _result; + + #[block] + { + _result = Pallet::::service_task(&mut counter, now, now, 0, true, task); + } + + // assert!(result.is_ok()); } // `execute_dispatch` when the origin is `Signed`, not counting the dispatchable's weight. - execute_dispatch_signed { + #[benchmark] + fn execute_dispatch_signed() -> Result<(), BenchmarkError> { let mut counter = WeightMeter::new(); let origin = make_origin::(true); - let call = T::Preimages::realize(&make_call::(None)).unwrap().0; - }: { - assert!(Scheduler::::execute_dispatch(&mut counter, origin, call).is_ok()); - } - verify { + let call = T::Preimages::realize(&make_call::(None))?.0; + let result; + + #[block] + { + result = Pallet::::execute_dispatch(&mut counter, origin, call); + } + + assert!(result.is_ok()); + + Ok(()) } // `execute_dispatch` when the origin is not `Signed`, not counting the dispatchable's weight. 
-	execute_dispatch_unsigned {
+	#[benchmark]
+	fn execute_dispatch_unsigned() -> Result<(), BenchmarkError> {
 		let mut counter = WeightMeter::new();
 		let origin = make_origin::<T>(false);
-		let call = T::Preimages::realize(&make_call::<T>(None)).unwrap().0;
-	}: {
-		assert!(Scheduler::<T>::execute_dispatch(&mut counter, origin, call).is_ok());
-	}
-	verify {
+		let call = T::Preimages::realize(&make_call::<T>(None))?.0;
+		let result;
+
+		#[block]
+		{
+			result = Pallet::<T>::execute_dispatch(&mut counter, origin, call);
+		}
+
+		assert!(result.is_ok());
+
+		Ok(())
 	}
 
-	schedule {
-		let s in 0 .. (T::MaxScheduledPerBlock::get() - 1);
+	#[benchmark]
+	fn schedule(
+		s: Linear<0, { T::MaxScheduledPerBlock::get() - 1 }>,
+	) -> Result<(), BenchmarkError> {
 		let when = BLOCK_NUMBER.into();
 		let periodic = Some((BlockNumberFor::<T>::one(), 100));
 		let priority = 0;
@@ -242,24 +293,27 @@ benchmarks! {
 		let call = Box::new(SystemCall::set_storage { items: vec![] }.into());
 
 		fill_schedule::<T>(when, s)?;
-	}: _(RawOrigin::Root, when, periodic, priority, call)
-	verify {
-		ensure!(
-			Agenda::<T>::get(when).len() == (s + 1) as usize,
-			"didn't add to schedule"
-		);
+
+		#[extrinsic_call]
+		_(RawOrigin::Root, when, periodic, priority, call);
+
+		ensure!(Agenda::<T>::get(when).len() == s as usize + 1, "didn't add to schedule");
+
+		Ok(())
 	}
 
-	cancel {
-		let s in 1 .. T::MaxScheduledPerBlock::get();
+	#[benchmark]
+	fn cancel(s: Linear<1, { T::MaxScheduledPerBlock::get() }>) -> Result<(), BenchmarkError> {
 		let when = BLOCK_NUMBER.into();
 
 		fill_schedule::<T>(when, s)?;
 		assert_eq!(Agenda::<T>::get(when).len(), s as usize);
 		let schedule_origin =
 			T::ScheduleOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?;
-	}: _<SystemOrigin<T>>(schedule_origin, when, 0)
-	verify {
+
+		#[extrinsic_call]
+		_(schedule_origin as SystemOrigin<T>, when, 0);
+
 		ensure!(
 			s == 1 || Lookup::<T>::get(u32_to_name(0)).is_none(),
 			"didn't remove from lookup if more than 1 task scheduled for `when`"
@@ -273,10 +327,14 @@ benchmarks! {
 			s > 1 || Agenda::<T>::get(when).len() == 0,
 			"remove from schedule if only 1 task scheduled for `when`"
 		);
+
+		Ok(())
 	}
 
-	schedule_named {
-		let s in 0 .. (T::MaxScheduledPerBlock::get() - 1);
+	#[benchmark]
+	fn schedule_named(
+		s: Linear<0, { T::MaxScheduledPerBlock::get() - 1 }>,
+	) -> Result<(), BenchmarkError> {
 		let id = u32_to_name(s);
 		let when = BLOCK_NUMBER.into();
 		let periodic = Some((BlockNumberFor::<T>::one(), 100));
@@ -285,21 +343,26 @@ benchmarks! {
 		let call = Box::new(SystemCall::set_storage { items: vec![] }.into());
 
 		fill_schedule::<T>(when, s)?;
-	}: _(RawOrigin::Root, id, when, periodic, priority, call)
-	verify {
-		ensure!(
-			Agenda::<T>::get(when).len() == (s + 1) as usize,
-			"didn't add to schedule"
-		);
+
+		#[extrinsic_call]
+		_(RawOrigin::Root, id, when, periodic, priority, call);
+
+		ensure!(Agenda::<T>::get(when).len() == s as usize + 1, "didn't add to schedule");
+
+		Ok(())
 	}
 
-	cancel_named {
-		let s in 1 .. T::MaxScheduledPerBlock::get();
+	#[benchmark]
+	fn cancel_named(
+		s: Linear<1, { T::MaxScheduledPerBlock::get() }>,
+	) -> Result<(), BenchmarkError> {
 		let when = BLOCK_NUMBER.into();
 
 		fill_schedule::<T>(when, s)?;
-	}: _(RawOrigin::Root, u32_to_name(0))
-	verify {
+
+		#[extrinsic_call]
+		_(RawOrigin::Root, u32_to_name(0));
+
 		ensure!(
 			s == 1 || Lookup::<T>::get(u32_to_name(0)).is_none(),
 			"didn't remove from lookup if more than 1 task scheduled for `when`"
@@ -313,33 +376,49 @@ benchmarks! {
 			s > 1 || Agenda::<T>::get(when).len() == 0,
 			"remove from schedule if only 1 task scheduled for `when`"
 		);
+
+		Ok(())
 	}
 
-	schedule_retry {
-		let s in 1 .. T::MaxScheduledPerBlock::get();
+	#[benchmark]
+	fn schedule_retry(
+		s: Linear<1, { T::MaxScheduledPerBlock::get() }>,
+	) -> Result<(), BenchmarkError> {
 		let when = BLOCK_NUMBER.into();
 
 		fill_schedule::<T>(when, s)?;
 		let name = u32_to_name(s - 1);
 		let address = Lookup::<T>::get(name).unwrap();
-		let period: BlockNumberFor<T> = 1u32.into();
-		let root: <T as Config>::PalletsOrigin = frame_system::RawOrigin::Root.into();
+		let period: BlockNumberFor<T> = 1_u32.into();
 		let retry_config = RetryConfig { total_retries: 10, remaining: 10, period };
 		Retries::<T>::insert(address, retry_config);
 		let (mut when, index) = address;
 		let task = Agenda::<T>::get(when)[index as usize].clone().unwrap();
 		let mut weight_counter = WeightMeter::with_limit(T::MaximumWeight::get());
-	}: {
-		Scheduler::<T>::schedule_retry(&mut weight_counter, when, when, index, &task, retry_config);
-	} verify {
+
+		#[block]
+		{
+			Pallet::<T>::schedule_retry(
+				&mut weight_counter,
+				when,
+				when,
+				index,
+				&task,
+				retry_config,
+			);
+		}
+
 		when = when + BlockNumberFor::<T>::one();
 		assert_eq!(
 			Retries::<T>::get((when, 0)),
 			Some(RetryConfig { total_retries: 10, remaining: 9, period })
 		);
+
+		Ok(())
 	}
 
-	set_retry {
+	#[benchmark]
+	fn set_retry() -> Result<(), BenchmarkError> {
 		let s = T::MaxScheduledPerBlock::get();
 		let when = BLOCK_NUMBER.into();
 
@@ -348,8 +427,10 @@ benchmarks! {
 		let address = Lookup::<T>::get(name).unwrap();
 		let (when, index) = address;
 		let period = BlockNumberFor::<T>::one();
-	}: _(RawOrigin::Root, (when, index), 10, period)
-	verify {
+
+		#[extrinsic_call]
+		_(RawOrigin::Root, (when, index), 10, period);
+
 		assert_eq!(
 			Retries::<T>::get((when, index)),
 			Some(RetryConfig { total_retries: 10, remaining: 10, period })
@@ -357,9 +438,12 @@ benchmarks! {
 		assert_last_event::<T>(
 			Event::RetrySet { task: address, id: None, period, retries: 10 }.into(),
 		);
+
+		Ok(())
 	}
 
-	set_retry_named {
+	#[benchmark]
+	fn set_retry_named() -> Result<(), BenchmarkError> {
 		let s = T::MaxScheduledPerBlock::get();
 		let when = BLOCK_NUMBER.into();
 
@@ -368,8 +452,10 @@ benchmarks! {
 		let address = Lookup::<T>::get(name).unwrap();
 		let (when, index) = address;
 		let period = BlockNumberFor::<T>::one();
-	}: _(RawOrigin::Root, name, 10, period)
-	verify {
+
+		#[extrinsic_call]
+		_(RawOrigin::Root, name, 10, period);
+
 		assert_eq!(
 			Retries::<T>::get((when, index)),
 			Some(RetryConfig { total_retries: 10, remaining: 10, period })
@@ -377,9 +463,12 @@ benchmarks! {
 		assert_last_event::<T>(
 			Event::RetrySet { task: address, id: Some(name), period, retries: 10 }.into(),
 		);
+
+		Ok(())
 	}
 
-	cancel_retry {
+	#[benchmark]
+	fn cancel_retry() -> Result<(), BenchmarkError> {
 		let s = T::MaxScheduledPerBlock::get();
 		let when = BLOCK_NUMBER.into();
 
@@ -388,16 +477,19 @@ benchmarks! {
 		let address = Lookup::<T>::get(name).unwrap();
 		let (when, index) = address;
 		let period = BlockNumberFor::<T>::one();
-		assert!(Scheduler::<T>::set_retry(RawOrigin::Root.into(), (when, index), 10, period).is_ok());
-	}: _(RawOrigin::Root, (when, index))
-	verify {
+		assert!(Pallet::<T>::set_retry(RawOrigin::Root.into(), (when, index), 10, period).is_ok());
+
+		#[extrinsic_call]
+		_(RawOrigin::Root, (when, index));
+
 		assert!(!Retries::<T>::contains_key((when, index)));
-		assert_last_event::<T>(
-			Event::RetryCancelled { task: address, id: None }.into(),
-		);
+		assert_last_event::<T>(Event::RetryCancelled { task: address, id: None }.into());
+
+		Ok(())
 	}
 
-	cancel_retry_named {
+	#[benchmark]
+	fn cancel_retry_named() -> Result<(), BenchmarkError> {
 		let s = T::MaxScheduledPerBlock::get();
 		let when = BLOCK_NUMBER.into();
 
@@ -406,14 +498,20 @@ benchmarks! {
 		let address = Lookup::<T>::get(name).unwrap();
 		let (when, index) = address;
 		let period = BlockNumberFor::<T>::one();
-		assert!(Scheduler::<T>::set_retry_named(RawOrigin::Root.into(), name, 10, period).is_ok());
-	}: _(RawOrigin::Root, name)
-	verify {
+		assert!(Pallet::<T>::set_retry_named(RawOrigin::Root.into(), name, 10, period).is_ok());
+
+		#[extrinsic_call]
+		_(RawOrigin::Root, name);
+
 		assert!(!Retries::<T>::contains_key((when, index)));
-		assert_last_event::<T>(
-			Event::RetryCancelled { task: address, id: Some(name) }.into(),
-		);
+		assert_last_event::<T>(Event::RetryCancelled { task: address, id: Some(name) }.into());
+
+		Ok(())
 	}
 
-	impl_benchmark_test_suite!(Scheduler, crate::mock::new_test_ext(), crate::mock::Test);
+	impl_benchmark_test_suite! {
+		Pallet,
+		mock::new_test_ext(),
+		mock::Test
+	}
 }
diff --git a/substrate/frame/treasury/src/benchmarking.rs b/substrate/frame/treasury/src/benchmarking.rs
index a03ee149db9b..a11723a27b2c 100644
--- a/substrate/frame/treasury/src/benchmarking.rs
+++ b/substrate/frame/treasury/src/benchmarking.rs
@@ -198,7 +198,7 @@ mod benchmarks {
 			None,
 		);
 
-		let valid_from = frame_system::Pallet::<T>::block_number();
+		let valid_from = T::BlockNumberProvider::current_block_number();
 		let expire_at = valid_from.saturating_add(T::PayoutPeriod::get());
 		assert_last_event::<T, I>(
 			Event::AssetSpendApproved {
diff --git a/substrate/frame/treasury/src/lib.rs b/substrate/frame/treasury/src/lib.rs
index faacda1c0783..281012ffb4c9 100644
--- a/substrate/frame/treasury/src/lib.rs
+++ b/substrate/frame/treasury/src/lib.rs
@@ -106,7 +106,7 @@ use frame_support::{
 	weights::Weight,
 	BoundedVec, PalletId,
 };
-use frame_system::pallet_prelude::BlockNumberFor;
+use frame_system::pallet_prelude::BlockNumberFor as SystemBlockNumberFor;
 
 pub use pallet::*;
 pub use weights::WeightInfo;
@@ -122,6 +122,8 @@ pub type NegativeImbalanceOf<T, I = ()> = <<T as Config<I>>::Currency as Currenc
 >>::NegativeImbalance;
 type AccountIdLookupOf<T> = <<T as frame_system::Config>::Lookup as StaticLookup>::Source;
 type BeneficiaryLookupOf<T, I> = <<T as Config<I>>::BeneficiaryLookup as StaticLookup>::Source;
+pub type BlockNumberFor<T, I = ()> =
+	<<T as Config<I>>::BlockNumberProvider as BlockNumberProvider>::BlockNumber;
 
 /// A trait to allow the Treasury Pallet to spend it's funds for other purposes.
 /// There is an expectation that the implementer of this trait will correctly manage
@@ -202,7 +204,7 @@ pub mod pallet {
 		pallet_prelude::*,
 		traits::tokens::{ConversionFromAssetBalance, PaymentStatus},
 	};
-	use frame_system::pallet_prelude::*;
+	use frame_system::pallet_prelude::{ensure_signed, OriginFor};
 
 	#[pallet::pallet]
 	pub struct Pallet<T, I = ()>(PhantomData<(T, I)>);
@@ -221,7 +223,7 @@ pub mod pallet {
 
 		/// Period between successive spends.
 		#[pallet::constant]
-		type SpendPeriod: Get<BlockNumberFor<Self>>;
+		type SpendPeriod: Get<BlockNumberFor<Self, I>>;
 
 		/// Percentage of spare funds (if any) that are burnt per spend period.
 		#[pallet::constant]
@@ -277,14 +279,14 @@ pub mod pallet {
 
 		/// The period during which an approved treasury spend has to be claimed.
 		#[pallet::constant]
-		type PayoutPeriod: Get<BlockNumberFor<Self>>;
+		type PayoutPeriod: Get<BlockNumberFor<Self, I>>;
 
 		/// Helper type for benchmarks.
 		#[cfg(feature = "runtime-benchmarks")]
 		type BenchmarkHelper: ArgumentsFactory<Self::AssetKind, Self::Beneficiary>;
 
 		/// Provider for the block number. Normally this is the `frame_system` pallet.
-		type BlockNumberProvider: BlockNumberProvider<BlockNumber = BlockNumberFor<Self>>;
+		type BlockNumberProvider: BlockNumberProvider;
 	}
 
 	/// DEPRECATED: associated with `spend_local` call and will be removed in May 2025.
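Aside (not part of the patch): the scheduler hunks above follow the standard frame_benchmarking v2 shape, where `#[extrinsic_call]` measures a dispatchable and `#[block]` measures an internal function. Below is a minimal, self-contained sketch of both styles for a hypothetical pallet; `do_something`, `Value`, `setup_task`, and `service_one_task` are illustrative names that do not appear in this diff.

use frame_benchmarking::v2::*;
use frame_support::ensure;
use frame_system::RawOrigin;

#[benchmarks]
mod benchmarks {
	use super::*;

	// Extrinsic style: setup runs first, `#[extrinsic_call]` is the measured
	// dispatch, and everything after it plays the role of the old `verify` block.
	#[benchmark]
	fn do_something(s: Linear<0, 100>) -> Result<(), BenchmarkError> {
		let caller: T::AccountId = whitelisted_caller();

		#[extrinsic_call]
		_(RawOrigin::Signed(caller), s);

		// Hypothetical storage item used only to show post-condition checks.
		ensure!(Value::<T>::get() == Some(s), "value was not stored");
		Ok(())
	}

	// Internal-function style: only the `#[block]` body is measured, as in the
	// `service_task_*` and `schedule_retry` changes above.
	#[benchmark]
	fn service_one_task() {
		// `setup_task` is a hypothetical helper, analogous to `make_task`.
		let task = setup_task::<T>();

		#[block]
		{
			Pallet::<T>::service_one_task(task);
		}
	}

	impl_benchmark_test_suite!(Pallet, mock::new_test_ext(), mock::Test);
}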
@@ -335,7 +337,7 @@ pub mod pallet {
 			T::AssetKind,
 			AssetBalanceOf<T, I>,
 			T::Beneficiary,
-			BlockNumberFor<T>,
+			BlockNumberFor<T, I>,
 			<T::Paymaster as Pay>::Id,
 		>,
 		OptionQuery,
@@ -343,7 +345,7 @@ pub mod pallet {
 
 	/// The blocknumber for the last triggered spend period.
 	#[pallet::storage]
-	pub(crate) type LastSpendPeriod<T: Config<I>, I: 'static = ()> = StorageValue<_, BlockNumberFor<T>, OptionQuery>;
+	pub(crate) type LastSpendPeriod<T: Config<I>, I: 'static = ()> = StorageValue<_, BlockNumberFor<T, I>, OptionQuery>;
 
 	#[pallet::genesis_config]
 	#[derive(frame_support::DefaultNoBound)]
@@ -391,8 +393,8 @@ pub mod pallet {
 			asset_kind: T::AssetKind,
 			amount: AssetBalanceOf<T, I>,
 			beneficiary: T::Beneficiary,
-			valid_from: BlockNumberFor<T>,
-			expire_at: BlockNumberFor<T>,
+			valid_from: BlockNumberFor<T, I>,
+			expire_at: BlockNumberFor<T, I>,
 		},
 		/// An approved spend was voided.
 		AssetSpendVoided { index: SpendIndex },
@@ -434,10 +436,10 @@ pub mod pallet {
 	}
 
 	#[pallet::hooks]
-	impl<T: Config<I>, I: 'static> Hooks<BlockNumberFor<T>> for Pallet<T, I> {
+	impl<T: Config<I>, I: 'static> Hooks<SystemBlockNumberFor<T>> for Pallet<T, I> {
 		/// ## Complexity
 		/// - `O(A)` where `A` is the number of approvals
-		fn on_initialize(_do_not_use_local_block_number: BlockNumberFor<T>) -> Weight {
+		fn on_initialize(_do_not_use_local_block_number: SystemBlockNumberFor<T>) -> Weight {
 			let block_number = T::BlockNumberProvider::current_block_number();
 			let pot = Self::pot();
 			let deactivated = Deactivated::<T, I>::get();
@@ -458,7 +460,7 @@ pub mod pallet {
 				// empty.
 				.unwrap_or_else(|| Self::update_last_spend_period());
 			let blocks_since_last_spend_period = block_number.saturating_sub(last_spend_period);
-			let safe_spend_period = T::SpendPeriod::get().max(BlockNumberFor::<T>::one());
+			let safe_spend_period = T::SpendPeriod::get().max(BlockNumberFor::<T, I>::one());
 
 			// Safe because of `max(1)` above.
 			let (spend_periods_passed, extra_blocks) = (
@@ -466,7 +468,7 @@ pub mod pallet {
 				blocks_since_last_spend_period % safe_spend_period,
 			);
 			let new_last_spend_period = block_number.saturating_sub(extra_blocks);
-			if spend_periods_passed > BlockNumberFor::<T>::zero() {
+			if spend_periods_passed > BlockNumberFor::<T, I>::zero() {
 				Self::spend_funds(spend_periods_passed, new_last_spend_period)
 			} else {
 				Weight::zero()
@@ -474,7 +476,7 @@ pub mod pallet {
 		}
 
 		#[cfg(feature = "try-runtime")]
-		fn try_state(_: BlockNumberFor<T>) -> Result<(), sp_runtime::TryRuntimeError> {
+		fn try_state(_: SystemBlockNumberFor<T>) -> Result<(), sp_runtime::TryRuntimeError> {
 			Self::do_try_state()?;
 			Ok(())
 		}
@@ -638,7 +640,7 @@ pub mod pallet {
 			asset_kind: Box<T::AssetKind>,
 			#[pallet::compact] amount: AssetBalanceOf<T, I>,
 			beneficiary: Box<BeneficiaryLookupOf<T, I>>,
-			valid_from: Option<BlockNumberFor<T>>,
+			valid_from: Option<BlockNumberFor<T, I>>,
 		) -> DispatchResult {
 			let max_amount = T::SpendOrigin::ensure_origin(origin)?;
 			let beneficiary = T::BeneficiaryLookup::lookup(*beneficiary)?;
@@ -844,9 +846,9 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> {
 	// Backfill the `LastSpendPeriod` storage, assuming that no configuration has changed
 	// since introducing this code. Used specifically for a migration-less switch to populate
 	// `LastSpendPeriod`.
-	fn update_last_spend_period() -> BlockNumberFor<T> {
+	fn update_last_spend_period() -> BlockNumberFor<T, I> {
 		let block_number = T::BlockNumberProvider::current_block_number();
-		let spend_period = T::SpendPeriod::get().max(BlockNumberFor::<T>::one());
+		let spend_period = T::SpendPeriod::get().max(BlockNumberFor::<T, I>::one());
 		let time_since_last_spend = block_number % spend_period;
 		// If it happens that this logic runs directly on a spend period block, we need to backdate
 		// to the last spend period so a spend still occurs this block.
@@ -889,8 +891,8 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> {
 	/// Spend some money! returns number of approvals before spend.
 	pub fn spend_funds(
-		spend_periods_passed: BlockNumberFor<T>,
-		new_last_spend_period: BlockNumberFor<T>,
+		spend_periods_passed: BlockNumberFor<T, I>,
+		new_last_spend_period: BlockNumberFor<T, I>,
 	) -> Weight {
 		LastSpendPeriod::<T, I>::put(new_last_spend_period);
 		let mut total_weight = Weight::zero();
diff --git a/substrate/primitives/runtime/src/traits/mod.rs b/substrate/primitives/runtime/src/traits/mod.rs
index 01bdcca86b6f..02bc7adc8ba5 100644
--- a/substrate/primitives/runtime/src/traits/mod.rs
+++ b/substrate/primitives/runtime/src/traits/mod.rs
@@ -2349,7 +2349,8 @@ pub trait BlockNumberProvider {
 		+ TypeInfo
 		+ Debug
 		+ MaxEncodedLen
-		+ Copy;
+		+ Copy
+		+ EncodeLike;
 
 	/// Returns the current block number.
 	///
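Aside (not part of the patch): the `EncodeLike` bound added to `BlockNumberProvider::BlockNumber` is what lets pallet-treasury store the provider's block number directly, e.g. in `LastSpendPeriod` and the `Spends` map above; FRAME storage values must be `FullCodec`, i.e. `Decode + Encode + EncodeLike`. Any concrete block number type such as `u32` already qualifies. A minimal sketch of a custom provider under the tightened bound follows; the struct name and constant value are illustrative only.

use sp_runtime::traits::BlockNumberProvider;

/// Illustrative provider whose `BlockNumber` (`u32`) satisfies every bound on
/// the associated type, including the newly added `EncodeLike`.
pub struct StaticBlockNumberProvider;

impl BlockNumberProvider for StaticBlockNumberProvider {
	type BlockNumber = u32;

	fn current_block_number() -> Self::BlockNumber {
		// A real runtime would read a local or relay-chain block number here;
		// a constant keeps the sketch self-contained.
		42
	}
}

A runtime that opts out of local block numbers would point the treasury's `type BlockNumberProvider` at such a type, while `frame_system::Pallet<T>` remains the conventional choice.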