diff --git a/.github/actions/build-push-image/action.yml b/.github/actions/build-push-image/action.yml index 7aed5d1891e49..1b880d7bc2678 100644 --- a/.github/actions/build-push-image/action.yml +++ b/.github/actions/build-push-image/action.yml @@ -61,9 +61,11 @@ runs: - name: push to dockerhub shell: bash if: ${{ inputs.username != '' && inputs.password != '' && github.event_name != 'merge_group' }} + env: + GITHUB_PR_HEAD_SHA: ${{ github.event.pull_request.head.sha }} run: | export DOCKER_IMAGES_VERSION=${{ github.event.pull_request.head.sha }} - export DOCKERHUB_TAG=docker.io/paritypr/${{ inputs.image-name }}:${{ github.event.pull_request.number || 'master' }}-${GITHUB_SHA::8} + export DOCKERHUB_TAG=docker.io/paritypr/${{ inputs.image-name }}:${{ github.event.pull_request.number || 'master' }}-${GITHUB_PR_HEAD_SHA::8} docker tag "europe-docker.pkg.dev/parity-ci-2024/temp-images/${{ inputs.image-name }}:$DOCKER_IMAGES_VERSION" $DOCKERHUB_TAG docker push $DOCKERHUB_TAG diff --git a/.github/workflows/cmd-run.yml b/.github/workflows/cmd-run.yml index bf8deb95c88d9..b5da4a4a9190f 100644 --- a/.github/workflows/cmd-run.yml +++ b/.github/workflows/cmd-run.yml @@ -223,7 +223,6 @@ jobs: if: startsWith(github.event.inputs.cmd, 'bench') run: cargo install subweight - # TODO: fix for forks, refs/remotes/origin/master should be replaced with master branch from paritytech/polkadot-sdk - name: Run Subweight for bench id: subweight if: startsWith(github.event.inputs.cmd, 'bench') @@ -242,6 +241,16 @@ jobs: --ignore-errors \ refs/remotes/origin/master $PR_BRANCH) + echo $result + + echo $result > /tmp/cmd/subweight.log + # Though GitHub claims that GITHUB_OUTPUT supports 1048576 bytes, in practice it only supports ~200000 bytes of a multiline string + if [ $(wc -c < "/tmp/cmd/subweight.log") -gt 200000 ]; then + echo "Subweight result is too large, truncating..."
+ echo "Please check subweight.log for the full output" + result="Please check subweight.log for the full output" + fi + echo "Trying to save subweight result to GITHUB_OUTPUT" # Save the multiline result to the output { echo "result<<EOF" echo "$result" echo "EOF" } >> $GITHUB_OUTPUT + - name: Upload Subweight + uses: actions/upload-artifact@v4 + if: startsWith(github.event.inputs.cmd, 'bench') + with: + name: subweight + path: /tmp/cmd/subweight.log + after-cmd: needs: [cmd, before-cmd] env: diff --git a/.github/workflows/reusable-preflight.yml b/.github/workflows/reusable-preflight.yml index fd28f07f38702..3114c83db4e33 100644 --- a/.github/workflows/reusable-preflight.yml +++ b/.github/workflows/reusable-preflight.yml @@ -78,8 +78,8 @@ jobs: preflight: runs-on: ubuntu-latest outputs: - changes_rust: true - changes_currentWorkflow: true + changes_rust: ${{ steps.set_changes.outputs.rust_any_changed || steps.set_changes.outputs.currentWorkflow_any_changed }} + changes_currentWorkflow: ${{ steps.set_changes.outputs.currentWorkflow_any_changed }} IMAGE: ${{ steps.set_image.outputs.IMAGE }} @@ -112,20 +112,19 @@ jobs: echo "currentWorkflowFile=$(echo ${{ github.workflow_ref }} | sed -nE "s/.*(\.github\/workflows\/[a-zA-Z0-9_-]*\.y[a]?ml)@refs.*/\1/p")" >> $GITHUB_OUTPUT echo "currentActionDir=$(echo ${{ github.action_path }} | sed -nE "s/.*(\.github\/actions\/[a-zA-Z0-9_-]*)/\1/p")" >> $GITHUB_OUTPUT - # removed due to https://news.ycombinator.com/item?id=43368870 - #- name: Set changes - # id: set_changes - # uses: tj-actions/changed-files@v45 - # with: - # files_yaml: | - # rust: - # - '**/*' - # - '!.github/**/*' - # - '!prdoc/**/*' - # - '!docs/**/*' - # currentWorkflow: - # - '${{ steps.current_file.outputs.currentWorkflowFile }}' - # - '.github/workflows/reusable-preflight.yml' + - name: Set changes + id: set_changes + uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c #v46.0.5 + with: + files_yaml: | + rust: + - '**/*' + - '!.github/**/*' + - '!prdoc/**/*' + - '!docs/**/*' + currentWorkflow: + - '${{ steps.current_file.outputs.currentWorkflowFile }}' + - '.github/workflows/reusable-preflight.yml' # # Set image @@ -181,6 +180,7 @@ jobs: shell: bash run: | echo "workflow file: ${{ steps.current_file.outputs.currentWorkflowFile }}" + echo "Modified: ${{ steps.set_changes.outputs.modified_keys }}" # # @@ -219,4 +219,4 @@ jobs: # echo "github.ref: ${{ github.ref }}" echo "github.ref_name: ${{ github.ref_name }}" - echo "github.sha: ${{ github.sha }}" \ No newline at end of file + echo "github.sha: ${{ github.sha }}" diff --git a/.github/workflows/zombienet-reusable-preflight.yml b/.github/workflows/zombienet-reusable-preflight.yml index c98a8201e2700..0e05b77028868 100644 --- a/.github/workflows/zombienet-reusable-preflight.yml +++ b/.github/workflows/zombienet-reusable-preflight.yml @@ -117,11 +117,11 @@ jobs: preflight: runs-on: ubuntu-latest outputs: - changes_substrate: true - changes_cumulus: true - changes_polkadot: true - changes_bridges: true - changes_templates: true + changes_substrate: ${{ steps.set_changes.outputs.substrate_any_changed || steps.set_changes.outputs.currentWorkflow_any_changed }} + changes_cumulus: ${{ steps.set_changes.outputs.cumulus_any_changed || steps.set_changes.outputs.currentWorkflow_any_changed }} + changes_polkadot: ${{ steps.set_changes.outputs.polkadot_any_changed || steps.set_changes.outputs.currentWorkflow_any_changed }} + changes_bridges: ${{ steps.set_changes.outputs.bridges_any_changed || steps.set_changes.outputs.currentWorkflow_any_changed }} +
changes_templates: ${{ steps.set_changes.outputs.templates_any_changed || steps.set_changes.outputs.currentWorkflow_any_changed }} CI_IMAGE: ${{ steps.set_vars.outputs.IMAGE }} @@ -157,26 +157,25 @@ jobs: echo "currentWorkflowFile=$(echo ${{ github.workflow_ref }} | sed -nE "s/.*(\.github\/workflows\/[a-zA-Z0-9_-]*\.y[a]?ml)@refs.*/\1/p")" >> $GITHUB_OUTPUT echo "currentActionDir=$(echo ${{ github.action_path }} | sed -nE "s/.*(\.github\/actions\/[a-zA-Z0-9_-]*)/\1/p")" >> $GITHUB_OUTPUT - # removed due to https://news.ycombinator.com/item?id=43368870 - #- name: Set changes - # id: set_changes - # uses: tj-actions/changed-files@v45 - # with: - # files_yaml: | - # substrate: - # - 'substrate/**/*' - # cumulus: - # - 'cumulus/**/*' - # polkadot: - # - 'polkadot/**/*' - # bridges: - # - 'bridges/**/*' - # templates: - # - 'templates/**/*' - # currentWorkflow: - # - '${{ steps.current_file.outputs.currentWorkflowFile }}' - # - '.github/workflows/zombienet-reusable-preflight.yml' - # - '.github/zombienet-env' + - name: Set changes + id: set_changes + uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c #v46.0.5 + with: + files_yaml: | + substrate: + - 'substrate/**/*' + cumulus: + - 'cumulus/**/*' + polkadot: + - 'polkadot/**/*' + bridges: + - 'bridges/**/*' + templates: + - 'templates/**/*' + currentWorkflow: + - '${{ steps.current_file.outputs.currentWorkflowFile }}' + - '.github/workflows/zombienet-reusable-preflight.yml' + - '.github/zombienet-env' # # Set environment vars (including runner/image) @@ -209,6 +208,7 @@ jobs: shell: bash run: | echo "workflow file: ${{ steps.current_file.outputs.currentWorkflowFile }}" + echo "Modified: ${{ steps.set_changes.outputs.modified_keys }}" echo "ZOMBIENET_IMAGE: ${{ steps.set_vars.outputs.ZOMBIENET_IMAGE }}" echo "CI_IMAGE: ${{ steps.set_vars.outputs.IMAGE }}" diff --git a/.github/workflows/zombienet_cumulus.yml b/.github/workflows/zombienet_cumulus.yml index 5b98931e99397..6495ee7f160a4 100644 --- a/.github/workflows/zombienet_cumulus.yml +++ b/.github/workflows/zombienet_cumulus.yml @@ -1,12 +1,13 @@ name: Zombienet Cumulus on: - push: - branches: - - master - pull_request: - types: [opened, synchronize, reopened, ready_for_review] - merge_group: + workflow_dispatch: # Disabled for being flaky + #push: + # branches: + # - master + #pull_request: + # types: [opened, synchronize, reopened, ready_for_review] + #merge_group: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true diff --git a/.github/workflows/zombienet_parachain-template.yml b/.github/workflows/zombienet_parachain-template.yml index 94d1e65ea3f87..c21daaa265483 100644 --- a/.github/workflows/zombienet_parachain-template.yml +++ b/.github/workflows/zombienet_parachain-template.yml @@ -1,12 +1,13 @@ name: Zombienet Parachain Templates on: - push: - branches: - - master - pull_request: - types: [opened, synchronize, reopened, ready_for_review] - merge_group: + workflow_dispatch: # Disabled for being flaky + #push: + # branches: + # - master + #pull_request: + # types: [opened, synchronize, reopened, ready_for_review] + #merge_group: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true diff --git a/.github/workflows/zombienet_polkadot.yml b/.github/workflows/zombienet_polkadot.yml index 7de4f0b350151..be2b0dc64d4e3 100644 --- a/.github/workflows/zombienet_polkadot.yml +++ b/.github/workflows/zombienet_polkadot.yml @@ -1,12 +1,13 @@ 
name: Zombienet Polkadot on: - push: - branches: - - master - pull_request: - types: [opened, synchronize, reopened, ready_for_review] - merge_group: + workflow_dispatch: # Disabled for being flaky + #push: + # branches: + # - master + #pull_request: + # types: [opened, synchronize, reopened, ready_for_review] + #merge_group: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true diff --git a/.github/workflows/zombienet_substrate.yml b/.github/workflows/zombienet_substrate.yml index 7ddc2e613b238..5510762594772 100644 --- a/.github/workflows/zombienet_substrate.yml +++ b/.github/workflows/zombienet_substrate.yml @@ -1,12 +1,13 @@ name: Zombienet Substrate on: - push: - branches: - - master - pull_request: - types: [opened, synchronize, reopened, ready_for_review] - merge_group: + workflow_dispatch: # Disabled for being flaky + #push: + # branches: + # - master + #pull_request: + # types: [opened, synchronize, reopened, ready_for_review] + #merge_group: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true diff --git a/.github/zombienet-env b/.github/zombienet-env index 92a6b2d5b412b..83954bb4af7c8 100644 --- a/.github/zombienet-env +++ b/.github/zombienet-env @@ -8,4 +8,4 @@ RUN_IN_CI=1 KUBERNETES_CPU_REQUEST=512m KUBERNETES_MEMORY_REQUEST=1Gi TEMP_IMAGES_BASE=europe-docker.pkg.dev/parity-ci-2024/temp-images -FLAKY_TESTS="zombienet-polkadot-coretime-revenue, zombienet-polkadot-smoke-0003-deregister-register-validator" +FLAKY_TESTS="zombienet-polkadot-coretime-revenue, zombienet-polkadot-smoke-0003-deregister-register-validator, zombienet-polkadot-elastic-scaling-slot-based-12cores, zombienet-polkadot-elastic-scaling-doesnt-break-parachains, zombienet-polkadot-functional-duplicate-collations, zombienet-polkadot-functional-0002-parachains-disputes, zombienet-polkadot-functional-async-backing-6-seconds-rate, zombienet-polkadot-elastic-scaling-slot-based-3cores, zombienet-polkadot-malus-0001-dispute-valid, zombienet-substrate-0002-validators-warp-sync" diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml deleted file mode 100644 index 359aec73214e8..0000000000000 --- a/.gitlab-ci.yml +++ /dev/null @@ -1,284 +0,0 @@ -# polkadot-sdk | CI definitions (via GitLab CI) -# -# FYI: Pipelines can be triggered manually through the web UI (if you have enough permissions) -# -# Currently, entire CI instructions are split into different subfiles. 
Each CI stage has a corresponding -# file which can be found here: .gitlab/pipeline/<stage>.yml - -stages: - check - test - build - publish - short-benchmarks - zombienet - deploy - notify - -workflow: - rules: - if: $CI_COMMIT_TAG - if: $CI_COMMIT_BRANCH - -variables: - # CI_IMAGE: !reference [ .ci-unified, variables, CI_IMAGE ] - CI_IMAGE: "docker.io/paritytech/ci-unified:bullseye-1.84.1-2025-01-28-v202502131220" - # BUILDAH_IMAGE is defined in group variables - BUILDAH_COMMAND: "buildah --storage-driver overlay2" - RELENG_SCRIPTS_BRANCH: "master" - RUSTY_CACHIER_SINGLE_BRANCH: master - RUSTY_CACHIER_DONT_OPERATE_ON_MAIN_BRANCH: "true" - RUSTY_CACHIER_COMPRESSION_METHOD: zstd - NEXTEST_FAILURE_OUTPUT: immediate-final - NEXTEST_SUCCESS_OUTPUT: final - DOCKER_IMAGES_VERSION: "${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHORT_SHA}" - -default: - retry: - max: 2 - when: - - runner_system_failure - - unknown_failure - - api_failure - cache: {} - interruptible: true - -.collect-artifacts: - artifacts: - name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}" - when: on_success - expire_in: 1 days - paths: - - artifacts/ - -.collect-artifacts-short: - artifacts: - name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}" - when: on_failure - expire_in: 3 hours - paths: - - artifacts/ - -.prepare-env: - before_script: - # $WASM_BUILD_WORKSPACE_HINT enables wasm-builder to find the Cargo.lock from within generated - # packages - - export WASM_BUILD_WORKSPACE_HINT="$PWD" - # ensure that RUSTFLAGS are set correctly - - echo $RUSTFLAGS - -.common-before-script: - before_script: - - !reference [.job-switcher, before_script] - - !reference [.pipeline-stopper-vars, script] - -.job-switcher: - before_script: - - if echo "$CI_DISABLED_JOBS" | grep -xF "$CI_JOB_NAME"; then echo "The job has been cancelled in CI settings"; exit 0; fi - -.kubernetes-env: - image: "${CI_IMAGE}" - before_script: - - !reference [.common-before-script, before_script] - - !reference [.prepare-env, before_script] - tags: - - kubernetes-parity-build - -.rust-info-script: - script: - - rustup show - - cargo --version - - rustup +nightly show - - cargo +nightly --version - -# collecting vars for pipeline stopper -# they will be used if the job fails -.pipeline-stopper-vars: - script: - - echo "Collecting env variables for the cancel-pipeline job" - - echo "FAILED_JOB_URL=${CI_JOB_URL}" > pipeline-stopper.env - - echo "FAILED_JOB_NAME=${CI_JOB_NAME}" >> pipeline-stopper.env - - echo "PR_NUM=${CI_COMMIT_REF_NAME}" >> pipeline-stopper.env - -.pipeline-stopper-artifacts: - artifacts: - reports: - dotenv: pipeline-stopper.env - -.docker-env: - image: "${CI_IMAGE}" - variables: - FL_FORKLIFT_VERSION: !reference [.forklift, variables, FL_FORKLIFT_VERSION] - before_script: - - !reference [.common-before-script, before_script] - - !reference [.prepare-env, before_script] - - !reference [.rust-info-script, script] - - !reference [.forklift-cache, before_script] - tags: - - linux-docker - -# -.forklift-cache: - before_script: - - mkdir ~/.forklift - - cp .forklift/config-gitlab.toml ~/.forklift/config.toml - - cat .forklift/config-gitlab.toml > .forklift/config.toml - - > - if [ "$FORKLIFT_BYPASS" != "true" ]; then - echo "FORKLIFT_BYPASS not set"; - if command -v forklift >/dev/null 2>&1; then - echo "forklift already exists"; - forklift version - else - echo "forklift does not exist, downloading"; - curl --header "PRIVATE-TOKEN: $FL_CI_GROUP_TOKEN" -o forklift -L
"${CI_API_V4_URL}/projects/676/packages/generic/forklift/${FL_FORKLIFT_VERSION}/forklift_${FL_FORKLIFT_VERSION}_linux_amd64"; - chmod +x forklift; - export PATH=$PATH:$(pwd); - echo ${FL_FORKLIFT_VERSION}; - fi - echo "Creating alias cargo='forklift cargo'"; - shopt -s expand_aliases; - alias cargo="forklift cargo"; - fi - # - -.common-refs: - rules: - - if: $CI_PIPELINE_SOURCE == "web" - - if: $CI_PIPELINE_SOURCE == "schedule" - - if: $CI_COMMIT_REF_NAME == "master" - - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues - - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 - -.test-pr-refs: - rules: - - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues - -.publish-gh-pages-refs: - rules: - - if: $CI_PIPELINE_SOURCE == "pipeline" - when: never - - if: $CI_PIPELINE_SOURCE == "web" && $CI_COMMIT_REF_NAME == "master" - - if: $CI_COMMIT_REF_NAME == "master" - -# handle the specific case where benches could store incorrect bench data because of the downstream staging runs -# exclude cargo-check-benches from such runs -.test-refs-check-benches: - rules: - - if: $CI_COMMIT_REF_NAME == "master" && $CI_PIPELINE_SOURCE == "pipeline" && $CI_IMAGE =~ /staging$/ - when: never - - if: $CI_PIPELINE_SOURCE == "web" - - if: $CI_PIPELINE_SOURCE == "schedule" - - if: $CI_COMMIT_REF_NAME == "master" - - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues - - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 - -.test-refs-no-trigger: - rules: - - if: $CI_PIPELINE_SOURCE == "pipeline" - when: never - - if: $CI_PIPELINE_SOURCE == "web" - - if: $CI_PIPELINE_SOURCE == "schedule" - - if: $CI_COMMIT_REF_NAME == "master" - - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues - - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 - - if: $CI_COMMIT_REF_NAME =~ /^ci-release-.*$/ - -.test-refs-no-trigger-prs-only: - rules: - - if: $CI_PIPELINE_SOURCE == "pipeline" - when: never - - if: $CI_PIPELINE_SOURCE == "web" - - if: $CI_PIPELINE_SOURCE == "schedule" - - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues - -.publish-refs: - rules: - - if: $CI_PIPELINE_SOURCE == "pipeline" - when: never - - if: $CI_PIPELINE_SOURCE == "web" - - if: $CI_PIPELINE_SOURCE == "schedule" - - if: $CI_COMMIT_REF_NAME == "master" - - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 - -.build-refs: - # publish-refs + PRs - rules: - - if: $CI_PIPELINE_SOURCE == "pipeline" - when: never - - if: $CI_PIPELINE_SOURCE == "web" - - if: $CI_PIPELINE_SOURCE == "schedule" - - if: $CI_COMMIT_REF_NAME == "master" - - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. 
v1.0, v2.1rc1 - - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues - -include: - # check jobs - - .gitlab/pipeline/check.yml - # test jobs - - .gitlab/pipeline/test.yml - # build jobs - - .gitlab/pipeline/build.yml - # publish jobs - - .gitlab/pipeline/publish.yml - # zombienet jobs - - .gitlab/pipeline/zombienet.yml - # ci image - - project: parity/infrastructure/ci_cd/shared - ref: main - file: /common/ci-unified.yml - - project: parity/infrastructure/ci_cd/shared - ref: main - file: /common/forklift.yml -# This job cancels the whole pipeline if any of provided jobs fail. -# In a DAG, every jobs chain is executed independently of others. The `fail_fast` principle suggests -# to fail the pipeline as soon as possible to shorten the feedback loop. -.cancel-pipeline-template: - stage: .post - rules: - - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - when: on_failure - variables: - PROJECT_ID: "${CI_PROJECT_ID}" - PROJECT_NAME: "${CI_PROJECT_NAME}" - PIPELINE_ID: "${CI_PIPELINE_ID}" - FAILED_JOB_URL: "${FAILED_JOB_URL}" - FAILED_JOB_NAME: "${FAILED_JOB_NAME}" - PR_NUM: "${PR_NUM}" - trigger: - project: "parity/infrastructure/ci_cd/pipeline-stopper" - -remove-cancel-pipeline-message: - stage: .post - rules: - - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - variables: - PROJECT_ID: "${CI_PROJECT_ID}" - PROJECT_NAME: "${CI_PROJECT_NAME}" - PIPELINE_ID: "${CI_PIPELINE_ID}" - FAILED_JOB_URL: "https://gitlab.com" - FAILED_JOB_NAME: "nope" - PR_NUM: "${CI_COMMIT_REF_NAME}" - trigger: - project: "parity/infrastructure/ci_cd/pipeline-stopper" - -cancel-pipeline-build-linux-stable: - extends: .cancel-pipeline-template - needs: - - job: build-linux-stable - -cancel-pipeline-build-linux-stable-cumulus: - extends: .cancel-pipeline-template - needs: - - job: build-linux-stable-cumulus - -cancel-pipeline-build-linux-substrate: - extends: .cancel-pipeline-template - needs: - - job: build-linux-substrate diff --git a/.gitlab/pipeline/build.yml b/.gitlab/pipeline/build.yml deleted file mode 100644 index 0676232028216..0000000000000 --- a/.gitlab/pipeline/build.yml +++ /dev/null @@ -1,229 +0,0 @@ -# This file is part of .gitlab-ci.yml -# Here are all jobs that are executed during "build" stage - -# build jobs from polkadot - -build-linux-stable: - stage: build - extends: - - .docker-env - - .common-refs - - .run-immediately - - .collect-artifacts - variables: - RUST_TOOLCHAIN: stable - # Enable debug assertions since we are running optimized builds for testing - # but still want to have debug assertions. - RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" - # Ensure we run the UI tests. - RUN_UI_TESTS: 1 - script: - - time cargo build --locked --profile testnet --features pyroscope,fast-runtime --bin polkadot --bin polkadot-prepare-worker --bin polkadot-execute-worker - - time ROCOCO_EPOCH_DURATION=10 ./polkadot/scripts/build-only-wasm.sh rococo-runtime $(pwd)/runtimes/rococo-runtime-10/ - - time ROCOCO_EPOCH_DURATION=100 ./polkadot/scripts/build-only-wasm.sh rococo-runtime $(pwd)/runtimes/rococo-runtime-100/ - - time ROCOCO_EPOCH_DURATION=600 ./polkadot/scripts/build-only-wasm.sh rococo-runtime $(pwd)/runtimes/rococo-runtime-600/ - - pwd - - ls -alR runtimes - # pack artifacts - - mkdir -p ./artifacts - - VERSION="${CI_COMMIT_REF_NAME}" # will be tag or branch name - - mv ./target/testnet/polkadot ./artifacts/. - - mv ./target/testnet/polkadot-prepare-worker ./artifacts/. - - mv ./target/testnet/polkadot-execute-worker ./artifacts/. 
- - mv ./runtimes/ ./artifacts/. - - pushd artifacts - - sha256sum polkadot | tee polkadot.sha256 - - shasum -c polkadot.sha256 - - popd - - EXTRATAG="${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHORT_SHA}" - - echo "Polkadot version = ${VERSION} (EXTRATAG = ${EXTRATAG})" - - echo -n ${VERSION} > ./artifacts/VERSION - - echo -n ${EXTRATAG} > ./artifacts/EXTRATAG - - echo -n ${CI_JOB_ID} > ./artifacts/BUILD_LINUX_JOB_ID - - RELEASE_VERSION=$(./artifacts/polkadot -V | awk '{print $2}'| awk -F "-" '{print $1}') - - echo -n "v${RELEASE_VERSION}" > ./artifacts/BUILD_RELEASE_VERSION - - cp -r docker/* ./artifacts - -build-test-collators: - stage: build - extends: - - .docker-env - - .common-refs - - .run-immediately - - .collect-artifacts - script: - - time cargo build --locked --profile testnet -p test-parachain-adder-collator - - time cargo build --locked --profile testnet -p test-parachain-undying-collator - # pack artifacts - - mkdir -p ./artifacts - - mv ./target/testnet/adder-collator ./artifacts/. - - mv ./target/testnet/undying-collator ./artifacts/. - - echo -n "${CI_COMMIT_REF_NAME}" > ./artifacts/VERSION - - echo -n "${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHORT_SHA}" > ./artifacts/EXTRATAG - - echo "adder-collator version = $(cat ./artifacts/VERSION) (EXTRATAG = $(cat ./artifacts/EXTRATAG))" - - echo "undying-collator version = $(cat ./artifacts/VERSION) (EXTRATAG = $(cat ./artifacts/EXTRATAG))" - - cp -r ./docker/* ./artifacts - -build-malus: - stage: build - extends: - - .docker-env - - .common-refs - - .run-immediately - - .collect-artifacts - script: - - time cargo build --locked --profile testnet -p polkadot-test-malus --bin malus --bin polkadot-prepare-worker --bin polkadot-execute-worker - # pack artifacts - - mkdir -p ./artifacts - - mv ./target/testnet/malus ./artifacts/. - - mv ./target/testnet/polkadot-execute-worker ./artifacts/. - - mv ./target/testnet/polkadot-prepare-worker ./artifacts/. - - echo -n "${CI_COMMIT_REF_NAME}" > ./artifacts/VERSION - - echo -n "${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHORT_SHA}" > ./artifacts/EXTRATAG - - echo "polkadot-test-malus = $(cat ./artifacts/VERSION) (EXTRATAG = $(cat ./artifacts/EXTRATAG))" - - cp -r ./docker/* ./artifacts - -build-templates-node: - stage: build - extends: - - .docker-env - - .common-refs - - .run-immediately - - .collect-artifacts - script: - - time cargo build --locked --package parachain-template-node --release - - time cargo build --locked --package minimal-template-node --release - - time cargo build --locked --package solochain-template-node --release - # pack artifacts - - mkdir -p ./artifacts - - mv ./target/release/parachain-template-node ./artifacts/. - - mv ./target/release/minimal-template-node ./artifacts/. - - mv ./target/release/solochain-template-node ./artifacts/. 
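A worked example of the RELEASE_VERSION extraction in build-linux-stable above; the sample version string is hypothetical, everything else mirrors the job's own pipeline:

# Sketch: how the awk pipeline derives BUILD_RELEASE_VERSION from the binary's -V output.
./artifacts/polkadot -V                                               # prints e.g. "polkadot 1.8.0-5c5ab6bf04" (sample value, hypothetical)
./artifacts/polkadot -V | awk '{print $2}'                            # keeps the second field: "1.8.0-5c5ab6bf04"
./artifacts/polkadot -V | awk '{print $2}' | awk -F "-" '{print $1}'  # strips the commit suffix: "1.8.0"
# the job then writes "v1.8.0" into ./artifacts/BUILD_RELEASE_VERSION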
- -build-implementers-guide: - stage: build - extends: - - .kubernetes-env - - .common-refs - - .run-immediately - - .collect-artifacts - # git depth is set on purpose: https://github.com/paritytech/polkadot/issues/6284 - variables: - GIT_STRATEGY: clone - GIT_DEPTH: 0 - CI_IMAGE: paritytech/mdbook-utils:e14aae4a-20221123 - script: - - mdbook build ./polkadot/roadmap/implementers-guide - - mkdir -p artifacts - - mv polkadot/roadmap/implementers-guide/book artifacts/ - -build-polkadot-zombienet-tests: - stage: build - extends: - - .docker-env - - .common-refs - - .run-immediately - - .collect-artifacts - needs: - - job: build-linux-stable - artifacts: true - - job: build-linux-stable-cumulus - artifacts: true - - script: - - cargo nextest --manifest-path polkadot/zombienet-sdk-tests/Cargo.toml archive --features zombie-metadata,zombie-ci --archive-file polkadot-zombienet-tests.tar.zst - - mkdir -p artifacts - - cp polkadot-zombienet-tests.tar.zst ./artifacts - -# build jobs from cumulus - -build-linux-stable-cumulus: - stage: build - extends: - - .docker-env - - .common-refs - - .run-immediately - - .collect-artifacts - variables: - # Enable debug assertions since we are running optimized builds for testing - # but still want to have debug assertions. - RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" - script: - - echo "___Building a binary, please refrain from using it in production since it goes with the debug assertions.___" - - time cargo build --release --locked -p polkadot-parachain-bin --bin polkadot-parachain - - echo "___Packing the artifacts___" - - mkdir -p ./artifacts - - mv ./target/release/polkadot-parachain ./artifacts/. - - echo "___The VERSION is either a tag name or the curent branch if triggered not by a tag___" - - echo ${CI_COMMIT_REF_NAME} | tee ./artifacts/VERSION - -build-test-parachain: - stage: build - extends: - - .docker-env - - .common-refs - - .run-immediately - - .collect-artifacts - variables: - # Enable debug assertions since we are running optimized builds for testing - # but still want to have debug assertions. - RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" - script: - - echo "___Building a binary, please refrain from using it in production since it goes with the debug assertions.___" - - time cargo build --release --locked -p cumulus-test-service --bin test-parachain - - echo "___Packing the artifacts___" - - mkdir -p ./artifacts - - mv ./target/release/test-parachain ./artifacts/. - - mkdir -p ./artifacts/zombienet - - mv ./target/release/wbuild/cumulus-test-runtime/wasm_binary_spec_version_incremented.rs.compact.compressed.wasm ./artifacts/zombienet/. 
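For reference, the archive produced by `cargo nextest archive` in build-polkadot-zombienet-tests above is meant to be executed later from the packaged artifact; a minimal consumption sketch, assuming a machine with cargo-nextest installed (the --no-fail-fast flag is an assumption, the archive name matches the job above):

# Sketch: run the archived zombienet test suite without rebuilding it.
cargo nextest run --archive-file polkadot-zombienet-tests.tar.zst --no-fail-fast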
- -# substrate - -build-linux-substrate: - stage: build - extends: - - .docker-env - - .common-refs - - .collect-artifacts - # DAG - needs: - - job: build-linux-stable - artifacts: false - variables: - # this variable gets overriden by "rusty-cachier environment inject", use the value as default - CARGO_TARGET_DIR: "$CI_PROJECT_DIR/target" - before_script: - - mkdir -p ./artifacts/substrate/ - # tldr: we need to checkout the branch HEAD explicitly because of our dynamic versioning approach while building the substrate binary - # see https://github.com/paritytech/ci_cd/issues/682#issuecomment-1340953589 - - git checkout -B "$CI_COMMIT_REF_NAME" "$CI_COMMIT_SHA" - - !reference [.forklift-cache, before_script] - script: - - time WASM_BUILD_NO_COLOR=1 cargo build --locked --release -p staging-node-cli - - mv $CARGO_TARGET_DIR/release/substrate-node ./artifacts/substrate/substrate - - echo -n "Substrate version = " - - if [ "${CI_COMMIT_TAG}" ]; then - echo "${CI_COMMIT_TAG}" | tee ./artifacts/substrate/VERSION; - else - ./artifacts/substrate/substrate --version | - cut -d ' ' -f 2 | tee ./artifacts/substrate/VERSION; - fi - - sha256sum ./artifacts/substrate/substrate | tee ./artifacts/substrate/substrate.sha256 - - cp -r ./docker/dockerfiles/substrate_injected.Dockerfile ./artifacts/substrate/ - # - printf '\n# building node-template\n\n' - # - ./scripts/ci/node-template-release.sh ./artifacts/substrate/substrate-node-template.tar.gz - -# bridges - -# we need some non-binary artifacts in our bridges+zombienet image -prepare-bridges-zombienet-artifacts: - stage: build - extends: - - .docker-env - - .common-refs - - .run-immediately - - .collect-artifacts - before_script: - - mkdir -p ./artifacts/bridges-polkadot-sdk/bridges - script: - - cp -r bridges/testing ./artifacts/bridges-polkadot-sdk/bridges/testing diff --git a/.gitlab/pipeline/check.yml b/.gitlab/pipeline/check.yml deleted file mode 100644 index 7d1f37dddd513..0000000000000 --- a/.gitlab/pipeline/check.yml +++ /dev/null @@ -1,9 +0,0 @@ -job-starter: - stage: check - image: paritytech/tools:latest - extends: - - .kubernetes-env - - .common-refs - allow_failure: true - script: - - echo ok diff --git a/.gitlab/pipeline/publish.yml b/.gitlab/pipeline/publish.yml deleted file mode 100644 index 92deaea2f6121..0000000000000 --- a/.gitlab/pipeline/publish.yml +++ /dev/null @@ -1,125 +0,0 @@ -# This file is part of .gitlab-ci.yml -# Here are all jobs that are executed during "publish" stage - -# note: images are used not only in zombienet but also in rococo, wococo and versi -.build-push-image: - image: $BUILDAH_IMAGE - extends: - - .zombienet-refs - variables: - DOCKERFILE: "" # docker/path-to.Dockerfile - IMAGE_NAME: "" # docker.io/paritypr/image_name - script: - # Dockertag should differ in a merge queue - - if [[ $CI_COMMIT_REF_NAME == *"gh-readonly-queue"* ]]; then export DOCKER_IMAGES_VERSION="${CI_COMMIT_SHORT_SHA}"; fi - - $BUILDAH_COMMAND build - --format=docker - --build-arg VCS_REF="${CI_COMMIT_SHA}" - --build-arg BUILD_DATE="$(date -u '+%Y-%m-%dT%H:%M:%SZ')" - --build-arg IMAGE_NAME="${IMAGE_NAME}" - --build-arg ZOMBIENET_IMAGE="${ZOMBIENET_IMAGE}" - --tag "$IMAGE_NAME:${DOCKER_IMAGES_VERSION}" - --file ${DOCKERFILE} . 
- - echo "$PARITYPR_PASS" | - buildah login --username "$PARITYPR_USER" --password-stdin docker.io - - $BUILDAH_COMMAND info - - $BUILDAH_COMMAND push --format=v2s2 "$IMAGE_NAME:${DOCKER_IMAGES_VERSION}" - after_script: - - buildah logout --all - -build-push-image-polkadot-parachain-debug: - stage: publish - extends: - - .kubernetes-env - - .common-refs - - .build-push-image - needs: - - job: build-linux-stable-cumulus - artifacts: true - variables: - DOCKERFILE: "docker/dockerfiles/polkadot-parachain/polkadot-parachain-debug_unsigned_injected.Dockerfile" - IMAGE_NAME: "docker.io/paritypr/polkadot-parachain-debug" - -build-push-image-test-parachain: - stage: publish - extends: - - .kubernetes-env - - .common-refs - - .build-push-image - needs: - - job: build-test-parachain - artifacts: true - variables: - DOCKERFILE: "docker/dockerfiles/test-parachain_injected.Dockerfile" - IMAGE_NAME: "docker.io/paritypr/test-parachain" - -build-push-image-polkadot-debug: - stage: publish - extends: - - .kubernetes-env - - .common-refs - - .build-push-image - needs: - - job: build-linux-stable - artifacts: true - variables: - DOCKERFILE: "docker/dockerfiles/polkadot/polkadot_injected_debug.Dockerfile" - IMAGE_NAME: "docker.io/paritypr/polkadot-debug" - -build-push-image-colander: - stage: publish - extends: - - .kubernetes-env - - .common-refs - - .build-push-image - needs: - - job: build-test-collators - artifacts: true - variables: - DOCKERFILE: "docker/dockerfiles/collator_injected.Dockerfile" - IMAGE_NAME: "docker.io/paritypr/colander" - -build-push-image-malus: - stage: publish - extends: - - .kubernetes-env - - .common-refs - - .build-push-image - needs: - - job: build-malus - artifacts: true - variables: - DOCKERFILE: "docker/dockerfiles/malus_injected.Dockerfile" - IMAGE_NAME: "docker.io/paritypr/malus" - -build-push-image-substrate-pr: - stage: publish - extends: - - .kubernetes-env - - .common-refs - - .build-push-image - needs: - - job: build-linux-substrate - artifacts: true - variables: - DOCKERFILE: "docker/dockerfiles/substrate_injected.Dockerfile" - IMAGE_NAME: "docker.io/paritypr/substrate" - -# unlike other images, bridges+zombienet image is based on Zombienet image that pulls required binaries -# from other fresh images (polkadot and cumulus) -build-push-image-bridges-zombienet-tests: - stage: publish - extends: - - .kubernetes-env - - .common-refs - - .build-push-image - needs: - - job: build-linux-stable - artifacts: true - - job: build-linux-stable-cumulus - artifacts: true - - job: prepare-bridges-zombienet-artifacts - artifacts: true - variables: - DOCKERFILE: "docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile" - IMAGE_NAME: "docker.io/paritypr/bridges-zombienet-tests" diff --git a/.gitlab/pipeline/short-benchmarks.yml b/.gitlab/pipeline/short-benchmarks.yml deleted file mode 100644 index ed97d539c095c..0000000000000 --- a/.gitlab/pipeline/short-benchmarks.yml +++ /dev/null @@ -1 +0,0 @@ ---- diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml deleted file mode 100644 index 8e32a3614679a..0000000000000 --- a/.gitlab/pipeline/test.yml +++ /dev/null @@ -1,111 +0,0 @@ -# this is an artificial job dependency, for pipeline optimization using GitLab's DAGs -# the job can be found in check.yml -.run-immediately: - needs: - - job: job-starter - artifacts: false - -# -# -# -.codecov-check: - script: - - > - if command -v codecovcli -h >/dev/null 2>&1; then - codecovcli --version; - else - echo "downloading codecovcli"; - curl -s -o codecovcli 
https://cli.codecov.io/latest/linux/codecov; - chmod +x codecovcli; - mv codecovcli /usr/local/bin/codecovcli; - fi - # - - codecovcli --version - -# -# -# -codecov-start: - stage: test - when: manual - allow_failure: false - extends: - - .kubernetes-env - - .common-refs - - .pipeline-stopper-artifacts - - .run-immediately - script: - - !reference [.codecov-check, script] - - > - if [ "$CI_COMMIT_REF_NAME" != "master" ]; then - codecovcli -v create-commit -t ${CODECOV_TOKEN} -r paritytech/polkadot-sdk --commit-sha ${CI_COMMIT_SHA} --fail-on-error --pr ${CI_COMMIT_REF_NAME} --git-service github; - codecovcli -v create-report -t ${CODECOV_TOKEN} -r paritytech/polkadot-sdk --commit-sha ${CI_COMMIT_SHA} --fail-on-error --pr ${CI_COMMIT_REF_NAME} --git-service github; - else - codecovcli -v create-commit -t ${CODECOV_TOKEN} -r paritytech/polkadot-sdk --commit-sha ${CI_COMMIT_SHA} --fail-on-error --git-service github; - codecovcli -v create-report -t ${CODECOV_TOKEN} -r paritytech/polkadot-sdk --commit-sha ${CI_COMMIT_SHA} --fail-on-error --git-service github; - fi - -# -# -# -codecov-finish: - stage: test - extends: - - .kubernetes-env - - .common-refs - - .pipeline-stopper-artifacts - needs: - - test-linux-stable-codecov - script: - - !reference [.codecov-check, script] - - codecovcli -v create-report-results -t ${CODECOV_TOKEN} -r paritytech/polkadot-sdk --commit-sha ${CI_COMMIT_SHA} --git-service github - - codecovcli -v get-report-results -t ${CODECOV_TOKEN} -r paritytech/polkadot-sdk --commit-sha ${CI_COMMIT_SHA} --git-service github - - codecovcli -v send-notifications -t ${CODECOV_TOKEN} -r paritytech/polkadot-sdk --commit-sha ${CI_COMMIT_SHA} --git-service github - -# -# -# -test-linux-stable-codecov: - stage: test - needs: - - codecov-start - extends: - - .docker-env - - .common-refs - - .pipeline-stopper-artifacts - variables: - CI_IMAGE: europe-docker.pkg.dev/parity-build/ci-images/ci-unified:bullseye-1.77.0 - RUST_TOOLCHAIN: stable - RUSTFLAGS: "-Cdebug-assertions=y -Cinstrument-coverage" - LLVM_PROFILE_FILE: "target/coverage/cargo-test-${CI_NODE_INDEX}-%p-%m.profraw" - CARGO_INCREMENTAL: 0 - FORKLIFT_BYPASS: "true" - parallel: 2 - script: - # tools - - !reference [.codecov-check, script] - - rustup component add llvm-tools-preview - - mkdir -p target/coverage/result/ - # Place real test call here - - > - time cargo nextest run -p polkadot \ - --locked \ - --release \ - --no-fail-fast \ - --partition count:${CI_NODE_INDEX}/${CI_NODE_TOTAL} - # generate and upload reports - - > - grcov \ - target/coverage/ \ - --binary-path ./target/release/ \ - -s . 
\ - -t lcov \ - --branch \ - -o target/coverage/result/report-${CI_NODE_INDEX}.lcov - - ls -l target/coverage/result/ - - > - if [ "$CI_COMMIT_REF_NAME" != "master" ]; then - codecovcli -v do-upload -f target/coverage/result/report-${CI_NODE_INDEX}.lcov --disable-search -t ${CODECOV_TOKEN} -r paritytech/polkadot-sdk --commit-sha ${CI_COMMIT_SHA} --fail-on-error --pr ${CI_COMMIT_REF_NAME} --git-service github; - else - codecovcli -v do-upload -f target/coverage/result/report-${CI_NODE_INDEX}.lcov --disable-search -t ${CODECOV_TOKEN} -r paritytech/polkadot-sdk --commit-sha ${CI_COMMIT_SHA} --fail-on-error --git-service github; - fi diff --git a/.gitlab/pipeline/zombienet.yml b/.gitlab/pipeline/zombienet.yml deleted file mode 100644 index 5c338eb613597..0000000000000 --- a/.gitlab/pipeline/zombienet.yml +++ /dev/null @@ -1,20 +0,0 @@ -.zombienet-refs: - extends: .build-refs - variables: - ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.127" - PUSHGATEWAY_URL: "http://zombienet-prometheus-pushgateway.managed-monitoring:9091/metrics/job/zombie-metrics" - DEBUG: "zombie,zombie::network-node,zombie::kube::client::logs" - ZOMBIE_PROVIDER: "k8s" - RUST_LOG: "info,zombienet_orchestrator=debug" - RUN_IN_CI: "1" - KUBERNETES_CPU_REQUEST: "512m" - KUBERNETES_MEMORY_REQUEST: "1Gi" - timeout: 60m - -include: - # polkadot tests - - .gitlab/pipeline/zombienet/polkadot.yml - # bridges tests - - .gitlab/pipeline/zombienet/bridges.yml - # parachain-template-node tests - - .gitlab/pipeline/zombienet/parachain-template.yml diff --git a/.gitlab/pipeline/zombienet/bridges.yml b/.gitlab/pipeline/zombienet/bridges.yml deleted file mode 100644 index 07711e32a9a3f..0000000000000 --- a/.gitlab/pipeline/zombienet/bridges.yml +++ /dev/null @@ -1,63 +0,0 @@ -# This file is part of .gitlab-ci.yml -# Here are all jobs that are executed during "zombienet" stage for bridges - -# common settings for all zombienet jobs -.zombienet-bridges-common: - extends: - - .kubernetes-env - - .zombienet-refs - rules: - # Docker images have different tag in merge queues - - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ - variables: - DOCKER_IMAGES_VERSION: ${CI_COMMIT_SHORT_SHA} - - !reference [ .build-refs, rules ] - before_script: - - echo "Zombienet Tests Config" - - echo "${ZOMBIENET_IMAGE}" - - echo "${GH_DIR}" - - echo "${LOCAL_DIR}" - - ls "${LOCAL_DIR}" - - export DEBUG=zombie,zombie::network-node - - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${BRIDGES_ZOMBIENET_TESTS_IMAGE}":${BRIDGES_ZOMBIENET_TESTS_IMAGE_TAG} - - echo "${ZOMBIENET_INTEGRATION_TEST_IMAGE}" - stage: zombienet - image: "${BRIDGES_ZOMBIENET_TESTS_IMAGE}:${BRIDGES_ZOMBIENET_TESTS_IMAGE_TAG}" - needs: - - job: build-push-image-bridges-zombienet-tests - artifacts: true - variables: - BRIDGES_ZOMBIENET_TESTS_IMAGE_TAG: ${DOCKER_IMAGES_VERSION} - BRIDGES_ZOMBIENET_TESTS_IMAGE: "docker.io/paritypr/bridges-zombienet-tests" - GH_DIR: "https://github.com/paritytech/polkadot-sdk/tree/${CI_COMMIT_SHA}/bridges/testing" - LOCAL_DIR: "/builds/parity/mirrors/polkadot-sdk/bridges/testing" - FF_DISABLE_UMASK_FOR_DOCKER_EXECUTOR: 1 - RUN_IN_CONTAINER: "1" - artifacts: - name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}_zombienet_bridge_tests" - when: always - expire_in: 2 days - paths: - - ./zombienet-logs - after_script: - - mkdir -p ./zombienet-logs - # copy general logs - - cp -r /tmp/bridges-tests-run-*/logs/* ./zombienet-logs/ - # copy logs of rococo nodes - - cp -r /tmp/bridges-tests-run-*/bridge_hub_rococo_local_network/*.log ./zombienet-logs/ - # copy logs of westend 
nodes - - cp -r /tmp/bridges-tests-run-*/bridge_hub_westend_local_network/*.log ./zombienet-logs/ - -zombienet-bridges-0001-asset-transfer-works: - extends: - - .zombienet-bridges-common - script: - - /home/nonroot/bridges-polkadot-sdk/bridges/testing/run-test.sh 0001-asset-transfer --docker - - echo "Done" - -zombienet-bridges-0002-free-headers-synced-while-idle: - extends: - - .zombienet-bridges-common - script: - - /home/nonroot/bridges-polkadot-sdk/bridges/testing/run-test.sh 0002-free-headers-synced-while-idle --docker - - echo "Done" diff --git a/Cargo.lock b/Cargo.lock index 24f88cf132469..ffa55b3073591 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -179,7 +179,7 @@ dependencies = [ "const-hex", "derive_more 1.0.0", "foldhash", - "hashbrown 0.15.2", + "hashbrown 0.15.3", "hex-literal", "indexmap 2.9.0", "itoa", @@ -215,8 +215,8 @@ dependencies = [ "alloy-sol-macro-expander", "alloy-sol-macro-input", "proc-macro-error2", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -231,8 +231,8 @@ dependencies = [ "heck 0.5.0", "indexmap 2.9.0", "proc-macro-error2", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", "syn-solidity", "tiny-keccak", @@ -247,8 +247,8 @@ dependencies = [ "const-hex", "dunce", "heck 0.5.0", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", "syn-solidity", ] @@ -375,8 +375,8 @@ dependencies = [ "include_dir", "itertools 0.10.5", "proc-macro-error", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -506,7 +506,7 @@ dependencies = [ "ark-std 0.5.0", "educe", "fnv", - "hashbrown 0.15.2", + "hashbrown 0.15.3", "itertools 0.13.0", "num-bigint", "num-integer", @@ -640,7 +640,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "db02d390bf6643fb404d3d22d31aee1c4bc4459600aef9113833d17e786c6e44" dependencies = [ - "quote 1.0.38", + "quote 1.0.40", "syn 1.0.109", ] @@ -650,7 +650,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" dependencies = [ - "quote 1.0.38", + "quote 1.0.40", "syn 1.0.109", ] @@ -660,7 +660,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62945a2f7e6de02a31fe400aa489f0e0f5b2502e69f95f853adb82a96c7a6b60" dependencies = [ - "quote 1.0.38", + "quote 1.0.40", "syn 2.0.98", ] @@ -672,7 +672,7 @@ checksum = "db2fd794a08ccb318058009eefdf15bcaaaaf6f8161eb3345f907222bac38b20" dependencies = [ "num-bigint", "num-traits", - "quote 1.0.38", + "quote 1.0.40", "syn 1.0.109", ] @@ -684,8 +684,8 @@ checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" dependencies = [ "num-bigint", "num-traits", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 1.0.109", ] @@ -697,8 +697,8 @@ checksum = "09be120733ee33f7693ceaa202ca41accd5653b779563608f1234f78ae07c4b3" dependencies = [ "num-bigint", "num-traits", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -740,7 +740,7 @@ dependencies = [ "ark-std 0.5.0", "educe", "fnv", - "hashbrown 0.15.2", + "hashbrown 0.15.3", ] [[package]] @@ -798,8 +798,8 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae3281bc6d0fd7e549af32b52511e1302185bd688fd3359fa36423346ff682ea" 
dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 1.0.109", ] @@ -809,8 +809,8 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "213888f660fddcca0d257e88e54ac05bca01885f258ccdf695bafd77031bb69d" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -958,8 +958,8 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7378575ff571966e99a744addeff0bff98b8ada0dedf1956d59e634db95eaac1" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", "synstructure 0.13.1", ] @@ -970,8 +970,8 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3109e49b1e4909e9db6515a30c633684d68cdeaa252f215214cb4fa1a5bfee2c" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", "synstructure 0.13.1", ] @@ -982,8 +982,8 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -1337,7 +1337,7 @@ version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a3203e79f4dd9bdda415ed03cf14dae5a2bf775c683a00f94e9cd1faf0f596e5" dependencies = [ - "quote 1.0.38", + "quote 1.0.40", "syn 1.0.109", ] @@ -1598,8 +1598,8 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -1615,8 +1615,8 @@ version = "0.1.88" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -1685,8 +1685,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fee3da8ef1276b0bee5dd1c7258010d8fffd31801447323115a25560e1327b89" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 1.0.109", ] @@ -1809,8 +1809,8 @@ dependencies = [ "lazycell", "peeking_take_while", "prettyplease", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "regex", "rustc-hash 1.1.0", "shlex", @@ -2816,9 +2816,9 @@ checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" [[package]] name = "byte-slice-cast" -version = "1.2.2" +version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" +checksum = "7575182f7272186991736b70173b0ea045398f984bf5ebbb3804736ce1330c9d" [[package]] name = "byte-tools" @@ -2883,7 +2883,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "846501f4575cd66766a40bb7ab6d8e960adc7eb49f753c8232bd8e0e09cf6ca2" dependencies = [ - "quote 1.0.38", + "quote 1.0.40", "syn 1.0.109", ] @@ -3187,8 +3187,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0" dependencies = [ "heck 0.5.0", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -3219,8 +3219,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb844bd05be34d91eb67101329aeba9d3337094c04fd8507d821db7ebb488eaf" dependencies = [ "proc-macro-error2", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -3393,8 +3393,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d51beaa537d73d2d1ff34ee70bc095f170420ab2ec5d687ecd3ec2b0d092514b" dependencies = [ "nom", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 1.0.109", ] @@ -3460,8 +3460,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a54b9c40054eb8999c5d1d36fdc90e4e5f7ff0d1d9621706f360b3cbc8beb828" dependencies = [ "convert_case 0.4.0", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 1.0.109", ] @@ -3472,8 +3472,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb5437e327e861081c91270becff184859f706e3e50f5301a9d4dc8eb50752c3" dependencies = [ "convert_case 0.6.0", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 1.0.109", ] @@ -3555,8 +3555,8 @@ version = "0.2.34" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d57c2eccfb16dbac1f4e61e206105db5820c9d26c3c472bc17c774259ef7744" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "unicode-xid 0.2.4", ] @@ -3578,12 +3578,6 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7144d30dcf0fafbce74250a3963025d8d52177934239851c917d29f1df280c2" -[[package]] -name = "constcat" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f272d0c4cf831b4fa80ee529c7707f76585986e910e1fbce1d7921970bc1a241" - [[package]] name = "convert_case" version = "0.4.0" @@ -4263,6 +4257,7 @@ dependencies = [ "sp-runtime 31.0.1", "sp-state-machine 0.35.0", "sp-timestamp", + "sp-tracing 16.0.0", "sp-trie 29.0.0", "substrate-prometheus-endpoint", "tokio", @@ -4523,6 +4518,7 @@ dependencies = [ "frame-support", "frame-system", "futures", + "hashbrown 0.15.3", "hex-literal", "impl-trait-for-tuples", "log", @@ -4555,8 +4551,8 @@ name = "cumulus-pallet-parachain-system-proc-macro" version = "0.6.0" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -4623,6 +4619,7 @@ dependencies = [ name = "cumulus-pallet-xcmp-queue" version = "0.7.1" dependencies = [ + "approx", "bounded-collections", "bp-xcm-bridge-hub-router", "cumulus-pallet-parachain-system", @@ -5083,8 +5080,8 @@ dependencies = [ "log", "parity-scale-codec", "polkadot-primitives", - "subxt 0.38.1", "tokio", + "zombienet-sdk", ] [[package]] @@ -5098,8 +5095,6 @@ dependencies = [ "polkadot-primitives", "serde", "serde_json", - "subxt 0.38.1", - "subxt-signer 0.38.0", "tokio", "zombienet-orchestrator", "zombienet-sdk", @@ -5171,8 +5166,8 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] 
@@ -5210,8 +5205,8 @@ dependencies = [ "cc", "codespan-reporting", "once_cell", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "scratch", "syn 2.0.98", ] @@ -5228,8 +5223,8 @@ version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50c49547d73ba8dcfd4ad7325d64c6d5391ff4224d498fc39a6f3f49825a530d" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -5251,8 +5246,8 @@ checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "strsim", "syn 2.0.98", ] @@ -5264,7 +5259,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", - "quote 1.0.38", + "quote 1.0.40", "syn 2.0.98", ] @@ -5370,8 +5365,8 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 1.0.109", ] @@ -5381,8 +5376,8 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d65d7ce8132b7c0e54497a4d9a55a1c2a0912a0d786cf894472ba818fba45762" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -5392,8 +5387,8 @@ version = "1.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62d671cc41a825ebabc75757b62d3d168c577f9149b2d49ece1dad1f72119d25" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -5403,8 +5398,8 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -5415,8 +5410,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case 0.4.0", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "rustc_version 0.4.0", "syn 1.0.109", ] @@ -5436,8 +5431,8 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", "unicode-xid 0.2.4", ] @@ -5541,8 +5536,8 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -5585,8 +5580,8 @@ dependencies = [ "common-path", "derive-syn-parse", "once_cell", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "regex", "syn 2.0.98", "termcolor", @@ -5659,8 +5654,8 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "558e40ea573c374cf53507fd240b7ee2f5477df7cfebdb97323ec61c719399c5" dependencies = [ - 
"proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 1.0.109", ] @@ -5755,8 +5750,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d7bc049e1bd8cdeb31b68bbd586a9464ecf9f3944af3958a7a9d0f8b9799417" dependencies = [ "enum-ordinalize", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -5822,6 +5817,7 @@ dependencies = [ "sp-runtime 31.0.1", "staging-xcm", "xcm-emulator", + "xcm-simulator", ] [[package]] @@ -5846,8 +5842,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -5866,8 +5862,8 @@ version = "4.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d28318a75d4aead5c4db25382e8ef717932d0346600cacae6357eb5941bc5ff" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -5886,8 +5882,8 @@ version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc4caf64a58d7a6d65ab00639b046ff54399a39f5f2554728895ace4b297cd79" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -5897,8 +5893,8 @@ version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fd000fd6988e73bbe993ea3db9b1aa64906ab88766d654973924340c8cddb42" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -6101,8 +6097,8 @@ dependencies = [ "file-guard", "fs-err", "prettyplease", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -6184,8 +6180,8 @@ dependencies = [ "expander", "indexmap 2.9.0", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -6473,6 +6469,7 @@ dependencies = [ "cumulus-primitives-proof-size-hostfunction", "cumulus-test-runtime", "frame-benchmarking", + "frame-storage-access-test-runtime", "frame-support", "frame-system", "gethostname", @@ -6491,6 +6488,8 @@ dependencies = [ "sc-client-api", "sc-client-db", "sc-executor 0.32.0", + "sc-executor-common 0.29.0", + "sc-executor-wasmtime 0.29.0", "sc-runtime-utilities", "sc-service", "sc-sysinfo", @@ -6515,8 +6514,8 @@ dependencies = [ "sp-version 29.0.0", "sp-wasm-interface 20.0.0", "substrate-test-runtime", - "subxt 0.38.1", - "subxt-signer 0.38.0", + "subxt 0.41.0", + "subxt-signer 0.41.0", "thiserror 1.0.65", "thousands", "westend-runtime", @@ -6551,9 +6550,9 @@ dependencies = [ [[package]] name = "frame-decode" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50c554ce2394e2c04426a070b4cb133c72f6f14c86b665f4e13094addd8e8958" +checksum = "a7cb8796f93fa038f979a014234d632e9688a120e745f936e2635123c77537f7" dependencies = [ "frame-metadata 20.0.0", "parity-scale-codec", @@ -6571,8 +6570,8 @@ dependencies = [ "frame-support", "parity-scale-codec", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "scale-info", "sp-arithmetic 23.0.0", "syn 2.0.98", @@ -6742,6 +6741,19 @@ dependencies = [ "tokio-retry", ] +[[package]] +name = "frame-storage-access-test-runtime" +version = "0.1.0" 
+dependencies = [ + "cumulus-pallet-parachain-system", + "parity-scale-codec", + "sp-core 28.0.0", + "sp-runtime 31.0.1", + "sp-state-machine 0.35.0", + "sp-trie 29.0.0", + "substrate-wasm-builder", +] + [[package]] name = "frame-support" version = "28.0.0" @@ -6805,8 +6817,8 @@ dependencies = [ "parity-scale-codec", "pretty_assertions", "proc-macro-warning", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "regex", "scale-info", "sp-crypto-hashing 0.1.0", @@ -6822,8 +6834,8 @@ version = "10.0.0" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -6831,8 +6843,8 @@ dependencies = [ name = "frame-support-procedural-tools-derive" version = "11.0.0" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -7095,8 +7107,8 @@ version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -7523,11 +7535,12 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.15.2" +version = "0.15.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" +checksum = "84b26c544d002229e640969970a2e74021aadf6e2f96372b9c58eff97de08eb3" dependencies = [ "allocator-api2", + "equivalent", "foldhash", "serde", ] @@ -8069,8 +8082,8 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -8221,8 +8234,8 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -8241,8 +8254,8 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b139284b5cf57ecfa712bcc66950bb635b31aff41c188e8a4cfc758eca374a3f" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", ] [[package]] @@ -8269,7 +8282,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" dependencies = [ "equivalent", - "hashbrown 0.15.2", + "hashbrown 0.15.3", "serde", ] @@ -8642,8 +8655,8 @@ checksum = "c06c01ae0007548e73412c08e2285ffe5d723195bf268bce67b1b77c3bb2a14d" dependencies = [ "heck 0.5.0", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -9385,8 +9398,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "206e0aa0ebe004d778d79fb0966aa0de996c19894e2c0605ba2f8524dd4443d8" dependencies = [ "heck 0.5.0", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -9794,7 +9807,7 @@ checksum = "cc33f9f0351468d26fbc53d9ce00a096c8522ecb42f19b50f34f2c422f76d21d" dependencies = [ "macro_magic_core", "macro_magic_macros", - "quote 1.0.38", + "quote 1.0.40", "syn 
2.0.98", ] @@ -9807,8 +9820,8 @@ dependencies = [ "const-random", "derive-syn-parse", "macro_magic_core_macros", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -9818,8 +9831,8 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b02abfe41815b5bd98dbd4260173db2c116dda171dc0fe7838cb206333b83308" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -9830,7 +9843,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73ea28ee64b88876bf45277ed9a5817c1817df061a74f2b988971a12570e5869" dependencies = [ "macro_magic_core", - "quote 1.0.38", + "quote 1.0.40", "syn 2.0.98", ] @@ -9935,6 +9948,16 @@ dependencies = [ "hash-db", ] +[[package]] +name = "memory-db" +version = "0.33.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6da20dba965bd218a14c3b335b90d3e07c09ede190c7c19b50deb23d418a322" +dependencies = [ + "hash-db", + "hashbrown 0.15.3", +] + [[package]] name = "merkleized-metadata" version = "0.5.0" @@ -10120,8 +10143,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25ca3004c2efe9011bd4e461bd8256445052b9615405b4f7ea43fc8ca5c20898" dependencies = [ "cfg-if", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -10241,8 +10264,8 @@ checksum = "fc076939022111618a5026d3be019fd8b366e76314538ff9a1b59ffbcbf98bcd" dependencies = [ "proc-macro-crate 1.3.1", "proc-macro-error", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 1.0.109", "synstructure 0.12.6", ] @@ -10289,8 +10312,8 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91761aed67d03ad966ef783ae962ef9bbaca728d2dd7ceb7939ec110fffad998" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 1.0.109", ] @@ -10703,8 +10726,8 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -10878,8 +10901,8 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -10935,8 +10958,8 @@ dependencies = [ "itertools 0.11.0", "petgraph", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 1.0.109", ] @@ -11690,8 +11713,8 @@ dependencies = [ name = "pallet-contracts-proc-macro" version = "18.0.0" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -11864,8 +11887,10 @@ version = "27.0.0" dependencies = [ "frame-benchmarking", "frame-election-provider-support", + "frame-remote-externalities", "frame-support", "frame-system", + "hex", "log", "pallet-balances", "parity-scale-codec", @@ -11879,6 +11904,7 @@ dependencies = [ "sp-runtime 31.0.1", "sp-tracing 16.0.0", "strum 0.26.3", + "tokio", ] [[package]] @@ -12742,7 +12768,7 @@ dependencies = [ "staging-xcm", "staging-xcm-builder", "substrate-bn", - "subxt-signer 0.38.0", + "subxt-signer 0.41.0", ] 
[[package]] @@ -12777,8 +12803,8 @@ dependencies = [ "static_init", "substrate-cli-test-utils", "substrate-prometheus-endpoint", - "subxt 0.38.1", - "subxt-signer 0.38.0", + "subxt 0.41.0", + "subxt-signer 0.41.0", "thiserror 1.0.65", "tokio", ] @@ -12829,8 +12855,8 @@ dependencies = [ name = "pallet-revive-proc-macro" version = "0.1.0" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -13372,8 +13398,8 @@ name = "pallet-staking-reward-curve" version = "11.0.0" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "sp-runtime 31.0.1", "syn 2.0.98", ] @@ -13691,6 +13717,7 @@ dependencies = [ "staging-xcm-executor", "tracing", "xcm-runtime-apis", + "xcm-simulator", ] [[package]] @@ -13911,9 +13938,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.7.4" +version = "3.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9fde3d0718baf5bc92f577d652001da0f8d54cd03a7974e118d04fc888dc23d" +checksum = "799781ae679d79a948e13d4824a40970bfa500058d245760dd857301059810fa" dependencies = [ "arrayvec 0.7.4", "bitvec", @@ -13928,13 +13955,13 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.7.4" +version = "3.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "581c837bb6b9541ce7faa9377c20616e4fb7650f6b0f68bc93c827ee504fb7b3" +checksum = "34b4653168b563151153c9e4c08ebed57fb8262bebfa79711552fa983c623e7a" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -14369,8 +14396,8 @@ checksum = "68ca01446f50dbda87c1786af8770d535423fa8a53aec03b8f4e3d7eb10e0929" dependencies = [ "pest", "pest_meta", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -14410,8 +14437,8 @@ version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -15511,7 +15538,7 @@ dependencies = [ "substrate-frame-rpc-system", "substrate-prometheus-endpoint", "substrate-state-trie-migration-rpc", - "subxt-metadata 0.38.0", + "subxt-metadata 0.41.0", "tokio", "wait-timeout", ] @@ -16728,7 +16755,6 @@ dependencies = [ "substrate-build-script-utils", "subwasmlib", "subxt 0.38.1", - "subxt-signer 0.38.0", "tokio", "tokio-util", "zombienet-orchestrator", @@ -16750,28 +16776,28 @@ dependencies = [ [[package]] name = "polkavm" -version = "0.18.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd044ab1d3b11567ab6b98ca71259a992b4034220d5972988a0e96518e5d343d" +checksum = "cfd34e2f74206fff33482ae1718e275f11365ef8c4de7f0e69217f8845303867" dependencies = [ "libc", "log", - "polkavm-assembler 0.18.0", - "polkavm-common 0.18.0", - "polkavm-linux-raw 0.18.0", + "polkavm-assembler 0.21.0", + "polkavm-common 0.21.0", + "polkavm-linux-raw 0.21.0", ] [[package]] name = "polkavm" -version = "0.21.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfd34e2f74206fff33482ae1718e275f11365ef8c4de7f0e69217f8845303867" +checksum = "f2a01db119bb3a86572c0641ba6e7c9786fbd2ac89c25b43b688c4e353787526" dependencies = [ "libc", "log", - "polkavm-assembler 0.21.0", - 
"polkavm-common 0.21.0", - "polkavm-linux-raw 0.21.0", + "polkavm-assembler 0.24.0", + "polkavm-common 0.24.0", + "polkavm-linux-raw 0.24.0", ] [[package]] @@ -16785,18 +16811,18 @@ dependencies = [ [[package]] name = "polkavm-assembler" -version = "0.18.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eaad38dc420bfed79e6f731471c973ce5ff5e47ab403e63cf40358fef8a6368f" +checksum = "f512bc80cb10439391a7c13a9eb2d37cf66b7305e7df0a06d662eff4f5b07625" dependencies = [ "log", ] [[package]] name = "polkavm-assembler" -version = "0.21.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f512bc80cb10439391a7c13a9eb2d37cf66b7305e7df0a06d662eff4f5b07625" +checksum = "eea6105f3f344abe0bf0151d67b3de6f5d24353f2393355ecf3f5f6e06d7fd0b" dependencies = [ "log", ] @@ -16812,23 +16838,23 @@ dependencies = [ [[package]] name = "polkavm-common" -version = "0.18.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31ff33982a807d8567645d4784b9b5d7ab87bcb494f534a57cadd9012688e102" +checksum = "5c16b809cfd398f861261c045a8745e6c78b71ea7e0d3ef6f7cc553eb27bc17e" dependencies = [ + "blake3", "log", - "polkavm-assembler 0.18.0", + "polkavm-assembler 0.21.0", ] [[package]] name = "polkavm-common" -version = "0.21.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c16b809cfd398f861261c045a8745e6c78b71ea7e0d3ef6f7cc553eb27bc17e" +checksum = "d91ed9e5af472f729fcf3b3c1cf17508ddbb3505259dd6e2ee0fb5a29e105d22" dependencies = [ - "blake3", "log", - "polkavm-assembler 0.21.0", + "polkavm-assembler 0.24.0", ] [[package]] @@ -16842,20 +16868,20 @@ dependencies = [ [[package]] name = "polkavm-derive" -version = "0.18.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2eb703f3b6404c13228402e98a5eae063fd16b8f58afe334073ec105ee4117e" +checksum = "47239245f87329541932c0d7fec750a66a75b13aa87dfe4fbfd637bab86ad387" dependencies = [ - "polkavm-derive-impl-macro 0.18.0", + "polkavm-derive-impl-macro 0.21.0", ] [[package]] name = "polkavm-derive" -version = "0.21.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47239245f87329541932c0d7fec750a66a75b13aa87dfe4fbfd637bab86ad387" +checksum = "176144f8661117ea95fa7cf868c9a62d6b143e8a2ebcb7582464c3faade8669a" dependencies = [ - "polkavm-derive-impl-macro 0.21.0", + "polkavm-derive-impl-macro 0.24.0", ] [[package]] @@ -16865,32 +16891,32 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c4fdfc49717fb9a196e74a5d28e0bc764eb394a2c803eb11133a31ac996c60c" dependencies = [ "polkavm-common 0.9.0", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] [[package]] name = "polkavm-derive-impl" -version = "0.18.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12d2840cc62a0550156b1676fed8392271ddf2fab4a00661db56231424674624" +checksum = "24fd6c6215450c3e57511df5c38a82eb4bde208de15ee15046ac33852f3c3eaa" dependencies = [ - "polkavm-common 0.18.0", - "proc-macro2 1.0.93", - "quote 1.0.38", + "polkavm-common 0.21.0", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] [[package]] name = "polkavm-derive-impl" -version = "0.21.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"24fd6c6215450c3e57511df5c38a82eb4bde208de15ee15046ac33852f3c3eaa" +checksum = "c5a21844afdfcc10c92b9ef288ccb926211af27478d1730fcd55e4aec710179d" dependencies = [ - "polkavm-common 0.21.0", - "proc-macro2 1.0.93", - "quote 1.0.38", + "polkavm-common 0.24.0", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -16906,52 +16932,52 @@ dependencies = [ [[package]] name = "polkavm-derive-impl-macro" -version = "0.18.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48c16669ddc7433e34c1007d31080b80901e3e8e523cb9d4b441c3910cf9294b" +checksum = "36837f6b7edfd6f4498f8d25d81da16cf03bd6992c3e56f3d477dfc90f4fefca" dependencies = [ - "polkavm-derive-impl 0.18.0", + "polkavm-derive-impl 0.21.0", "syn 2.0.98", ] [[package]] name = "polkavm-derive-impl-macro" -version = "0.21.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36837f6b7edfd6f4498f8d25d81da16cf03bd6992c3e56f3d477dfc90f4fefca" +checksum = "ba0ef0f17ad81413ea1ca5b1b67553aedf5650c88269b673d3ba015c83bc2651" dependencies = [ - "polkavm-derive-impl 0.21.0", + "polkavm-derive-impl 0.24.0", "syn 2.0.98", ] [[package]] name = "polkavm-linker" -version = "0.18.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9bfe793b094d9ea5c99b7c43ba46e277b0f8f48f4bbfdbabf8d3ebf701a4bd3" +checksum = "23bc764986c4a63f9ab9890c3f4eb9b4c13b6ff80d79685bd48ade147234aab4" dependencies = [ "dirs", "gimli 0.31.1", "hashbrown 0.14.5", "log", "object 0.36.1", - "polkavm-common 0.18.0", + "polkavm-common 0.21.0", "regalloc2 0.9.3", "rustc-demangle", ] [[package]] name = "polkavm-linker" -version = "0.21.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23bc764986c4a63f9ab9890c3f4eb9b4c13b6ff80d79685bd48ade147234aab4" +checksum = "06c95a521a1331024ebe5823ffdfba9ea6df40b934b0804049d5171887579806" dependencies = [ "dirs", "gimli 0.31.1", "hashbrown 0.14.5", "log", "object 0.36.1", - "polkavm-common 0.21.0", + "polkavm-common 0.24.0", "regalloc2 0.9.3", "rustc-demangle", ] @@ -16964,15 +16990,15 @@ checksum = "26e85d3456948e650dff0cfc85603915847faf893ed1e66b020bb82ef4557120" [[package]] name = "polkavm-linux-raw" -version = "0.18.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23eff02c070c70f31878a3d915e88a914ecf3e153741e2fb572dde28cce20fde" +checksum = "be6cd1d48c5e7814d287a3e12a339386a5dfa2f3ac72f932335f4cf56467f1b3" [[package]] name = "polkavm-linux-raw" -version = "0.21.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be6cd1d48c5e7814d287a3e12a339386a5dfa2f3ac72f932335f4cf56467f1b3" +checksum = "4ec0b13e26ec7234dba213ca17118c70c562809bdce0eefe84f92613d5c8da26" [[package]] name = "polling" @@ -17118,7 +17144,7 @@ version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c64d9ba0963cdcea2e1b2230fbae2bab30eb25a174be395c41e764bfb65dd62" dependencies = [ - "proc-macro2 1.0.93", + "proc-macro2 1.0.95", "syn 2.0.98", ] @@ -17192,8 +17218,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 1.0.109", "version_check", ] @@ -17204,8 +17230,8 @@ version = "1.0.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "version_check", ] @@ -17215,8 +17241,8 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", ] [[package]] @@ -17226,8 +17252,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" dependencies = [ "proc-macro-error-attr2", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -17243,8 +17269,8 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b698b0b09d40e9b7c1a47b132d66a8b54bcd20583d9b6d06e4535e383b4405c" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -17259,9 +17285,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.93" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99" +checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" dependencies = [ "unicode-ident", ] @@ -17324,8 +17350,8 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -17420,8 +17446,8 @@ checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", "itertools 0.10.5", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 1.0.109", ] @@ -17433,8 +17459,8 @@ checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" dependencies = [ "anyhow", "itertools 0.12.1", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -17446,8 +17472,8 @@ checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" dependencies = [ "anyhow", "itertools 0.13.0", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -17634,11 +17660,11 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.38" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" dependencies = [ - "proc-macro2 1.0.93", + "proc-macro2 1.0.95", ] [[package]] @@ -17875,8 +17901,8 @@ version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -18451,8 +18477,8 @@ checksum = "d428f8247852f894ee1be110b375111b586d4fa431f6c46e64ba5a0dcccbe605" dependencies = [ "cfg-if", "glob", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "regex", "relative-path", 
"rustc_version 0.4.0", @@ -18988,8 +19014,8 @@ name = "sc-chain-spec-derive" version = "11.0.0" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -19093,6 +19119,7 @@ dependencies = [ "sp-trie 29.0.0", "substrate-prometheus-endpoint", "substrate-test-runtime-client", + "sysinfo", "tempfile", ] @@ -19521,7 +19548,7 @@ dependencies = [ name = "sc-executor-common" version = "0.29.0" dependencies = [ - "polkavm 0.18.0", + "polkavm 0.24.0", "sc-allocator 23.0.0", "sp-maybe-compressed-blob 11.0.0", "sp-wasm-interface 20.0.0", @@ -19548,7 +19575,7 @@ name = "sc-executor-polkavm" version = "0.29.0" dependencies = [ "log", - "polkavm 0.18.0", + "polkavm 0.24.0", "sc-executor-common 0.29.0", "sp-wasm-interface 20.0.0", ] @@ -20067,6 +20094,7 @@ dependencies = [ "sp-rpc", "sp-runtime 31.0.1", "sp-version 29.0.0", + "substrate-prometheus-endpoint", "substrate-test-runtime", "substrate-test-runtime-client", "substrate-test-runtime-transaction-pool", @@ -20101,7 +20129,7 @@ dependencies = [ "sp-state-machine 0.35.0", "sp-version 29.0.0", "sp-wasm-interface 20.0.0", - "subxt 0.38.1", + "subxt 0.41.0", "thiserror 1.0.65", ] @@ -20337,8 +20365,8 @@ name = "sc-tracing-proc-macro" version = "11.0.0" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -20475,8 +20503,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ed9401effa946b493f9f84dc03714cca98119b230497df6f3df6b84a2b03648" dependencies = [ "darling", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -20487,8 +20515,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2f4b54a1211260718b92832b661025d1f1a4b6930fbadd6908e00edd265fa5f7" dependencies = [ "darling", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -20530,8 +20558,8 @@ checksum = "102fbc6236de6c53906c0b262f12c7aa69c2bdc604862c12728f5f4d370bc137" dependencies = [ "darling", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -20543,8 +20571,8 @@ checksum = "78a3993a13b4eafa89350604672c8757b7ea84c7c5947d4b3691e3169c96379b" dependencies = [ "darling", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -20569,8 +20597,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c6630024bf739e2179b91fb424b28898baf819414262c5d376677dbff1fe7ebf" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -20590,8 +20618,8 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0dc4c70c7fea2eef1740f0081d3fe385d8bee1eef11e9272d3bec7dc8e5438e0" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "scale-info", "syn 2.0.98", "thiserror 1.0.65", @@ -20599,12 +20627,12 @@ dependencies = [ [[package]] name = "scale-typegen" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6aebea322734465f39e4ad8100e1f9708c6c6c325d92b8780015d30c44fae791" +checksum = "05c61b6b706a3eaad63b506ab50a1d2319f817ae01cf753adcc3f055f9f0fcd6" dependencies = [ - 
"proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "scale-info", "syn 2.0.98", "thiserror 2.0.12", @@ -20676,8 +20704,8 @@ version = "0.8.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0f696e21e10fa546b7ffb1c9672c6de8fbc7a81acf59524386d8639bf12737" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "serde_derive_internals", "syn 1.0.109", ] @@ -20926,9 +20954,9 @@ checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" [[package]] name = "serde" -version = "1.0.217" +version = "1.0.219" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70" +checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" dependencies = [ "serde_derive", ] @@ -20963,12 +20991,12 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.217" +version = "1.0.219" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" +checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -20978,8 +21006,8 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 1.0.109", ] @@ -22231,8 +22259,8 @@ dependencies = [ "blake2 0.10.6", "expander", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -22246,8 +22274,8 @@ dependencies = [ "blake2 0.10.6", "expander", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -22322,6 +22350,7 @@ dependencies = [ "sp-application-crypto 30.0.0", "sp-core 28.0.0", "sp-keystore 0.34.0", + "sp-tracing 16.0.0", "substrate-test-runtime-client", ] @@ -22559,6 +22588,7 @@ dependencies = [ "secrecy 0.8.0", "serde", "serde_json", + "sha2 0.10.8", "sp-crypto-hashing 0.1.0", "sp-debug-derive 14.0.0", "sp-externalities 0.25.0", @@ -22788,7 +22818,7 @@ dependencies = [ name = "sp-crypto-hashing-proc-macro" version = "0.1.0" dependencies = [ - "quote 1.0.38", + "quote 1.0.40", "sp-crypto-hashing 0.1.0", "syn 2.0.98", ] @@ -22799,7 +22829,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b85d0f1f1e44bd8617eb2a48203ee854981229e3e79e6f468c7175d5fd37489b" dependencies = [ - "quote 1.0.38", + "quote 1.0.40", "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "syn 2.0.98", ] @@ -22816,8 +22846,8 @@ dependencies = [ name = "sp-debug-derive" version = "14.0.0" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -22827,8 +22857,8 @@ version = "14.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48d09fa0a5f7299fb81ee25ae3853d26200f7a348148aed6de76be905c007dbe" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -22897,7 +22927,7 @@ dependencies = [ "libsecp256k1", "log", "parity-scale-codec", - "polkavm-derive 0.18.0", + 
"polkavm-derive 0.24.0", "rustversion", "secp256k1 0.28.2", "sp-core 28.0.0", @@ -23228,7 +23258,7 @@ dependencies = [ "bytes", "impl-trait-for-tuples", "parity-scale-codec", - "polkavm-derive 0.18.0", + "polkavm-derive 0.24.0", "primitive-types 0.13.1", "rustversion", "sp-externalities 0.25.0", @@ -23291,8 +23321,8 @@ dependencies = [ "Inflector", "expander", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -23305,8 +23335,8 @@ dependencies = [ "Inflector", "expander", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -23583,7 +23613,7 @@ dependencies = [ "array-bytes 6.2.2", "criterion", "hash-db", - "memory-db", + "memory-db 0.33.0", "nohash-hasher", "parity-scale-codec", "parking_lot 0.12.3", @@ -23611,7 +23641,7 @@ dependencies = [ "ahash 0.8.11", "hash-db", "lazy_static", - "memory-db", + "memory-db 0.32.0", "nohash-hasher", "parity-scale-codec", "parking_lot 0.12.3", @@ -23635,7 +23665,7 @@ dependencies = [ "ahash 0.8.11", "hash-db", "lazy_static", - "memory-db", + "memory-db 0.32.0", "nohash-hasher", "parity-scale-codec", "parking_lot 0.12.3", @@ -23690,8 +23720,8 @@ version = "13.0.0" dependencies = [ "parity-scale-codec", "proc-macro-warning", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "sp-version 29.0.0", "syn 2.0.98", ] @@ -23703,8 +23733,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5aee8f6730641a65fcf0c8f9b1e448af4b3bb083d08058b47528188bccc7b7a7" dependencies = [ "parity-scale-codec", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -23865,8 +23895,8 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cac0692bcc9de3b073e8d747391827297e075c7710ff6276d9f7a1f3d58c6657" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "sqlx-core", "sqlx-macros-core", "syn 2.0.98", @@ -23883,8 +23913,8 @@ dependencies = [ "heck 0.5.0", "hex", "once_cell", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "serde", "serde_json", "sha2 0.10.8", @@ -24009,8 +24039,8 @@ checksum = "5e6915280e2d0db8911e5032a5c275571af6bdded2916abd691a659be25d3439" dependencies = [ "Inflector", "num-format", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "serde", "serde_json", "unicode-xid 0.2.4", @@ -24034,8 +24064,8 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f07d54c4d01a1713eb363b55ba51595da15f6f1211435b71466460da022aa140" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 1.0.109", ] @@ -24089,7 +24119,7 @@ dependencies = [ "sp-keyring", "staging-node-inspect", "substrate-cli-test-utils", - "subxt-signer 0.38.0", + "subxt-signer 0.41.0", "tempfile", "tokio", "tokio-util", @@ -24180,6 +24210,7 @@ dependencies = [ "staging-xcm", "staging-xcm-executor", "tracing", + "xcm-simulator", ] [[package]] @@ -24230,8 +24261,8 @@ checksum = "70a2595fc3aa78f2d0e45dd425b22282dd863273761cc77780914b2cf3003acf" dependencies = [ "cfg_aliases 0.1.1", "memchr", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 1.0.109", ] @@ -24288,8 +24319,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "rustversion", "syn 1.0.109", ] @@ -24301,8 +24332,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" dependencies = [ "heck 0.5.0", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "rustversion", "syn 2.0.98", ] @@ -24602,6 +24633,7 @@ dependencies = [ "sp-consensus-grandpa", "sp-core 28.0.0", "sp-crypto-hashing 0.1.0", + "sp-debug-derive 14.0.0", "sp-externalities 0.25.0", "sp-genesis-builder", "sp-inherents", @@ -24706,7 +24738,7 @@ dependencies = [ "merkleized-metadata", "parity-scale-codec", "parity-wasm", - "polkavm-linker 0.18.0", + "polkavm-linker 0.24.0", "sc-executor 0.32.0", "shlex", "sp-core 28.0.0", @@ -24774,7 +24806,6 @@ dependencies = [ "async-trait", "derive-where", "either", - "finito", "frame-metadata 17.0.0", "futures", "hex", @@ -24848,8 +24879,8 @@ checksum = "3cfcfb7d9589f3df0ac87c4988661cf3fb370761fcb19f2fd33104cc59daf22a" dependencies = [ "heck 0.5.0", "parity-scale-codec", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "scale-info", "scale-typegen 0.9.0", "subxt-metadata 0.38.0", @@ -24865,10 +24896,10 @@ checksum = "324c52c09919fec8c22a4b572a466878322e99fe14a9e3d50d6c3700a226ec25" dependencies = [ "heck 0.5.0", "parity-scale-codec", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "scale-info", - "scale-typegen 0.11.0", + "scale-typegen 0.11.1", "subxt-metadata 0.41.0", "syn 2.0.98", "thiserror 2.0.12", @@ -24912,7 +24943,7 @@ dependencies = [ "base58", "blake2 0.10.6", "derive-where", - "frame-decode 0.7.0", + "frame-decode 0.7.1", "frame-metadata 20.0.0", "hashbrown 0.14.5", "hex", @@ -24976,7 +25007,7 @@ dependencies = [ "darling", "parity-scale-codec", "proc-macro-error2", - "quote 1.0.38", + "quote 1.0.40", "scale-typegen 0.9.0", "subxt-codegen 0.38.0", "subxt-utils-fetchmetadata 0.38.0", @@ -24992,8 +25023,8 @@ dependencies = [ "darling", "parity-scale-codec", "proc-macro-error2", - "quote 1.0.38", - "scale-typegen 0.11.0", + "quote 1.0.40", + "scale-typegen 0.11.1", "subxt-codegen 0.41.0", "subxt-utils-fetchmetadata 0.41.0", "syn 2.0.98", @@ -25019,7 +25050,7 @@ version = "0.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fff4591673600c4388e21305788282414d26c791b4dee21b7cb0b19c10076f98" dependencies = [ - "frame-decode 0.7.0", + "frame-decode 0.7.1", "frame-metadata 20.0.0", "hashbrown 0.14.5", "parity-scale-codec", @@ -25035,6 +25066,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ba7494d250d65dc3439365ac5e8e0fbb9c3992e6e84b7aa01d69e082249b8b8" dependencies = [ "derive-where", + "finito", "frame-metadata 20.0.0", "futures", "hex", @@ -25047,6 +25079,7 @@ dependencies = [ "subxt-core 0.41.0", "subxt-lightclient 0.41.0", "thiserror 2.0.12", + "tokio", "tokio-util", "tracing", "url", @@ -25059,13 +25092,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e7a336d6a1f86f126100a4a717be58352de4c8214300c4f7807f974494efdb9" dependencies = [ "base64 0.22.1", - "bip32", "bip39", "cfg-if", "crypto_secretbox", "hex", "hmac 0.12.1", - "keccak-hash", "parity-scale-codec", "pbkdf2", "polkadot-sdk 0.7.0", @@ -25241,8 +25272,8 @@ version = "1.0.109" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "unicode-ident", ] @@ -25252,8 +25283,8 @@ version = "2.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "36147f1a48ae0ec2b5b3bc5b537d267457555a10dc06f3dbc8cb11ba3006d3b1" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "unicode-ident", ] @@ -25264,8 +25295,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "219389c1ebe89f8333df8bdfb871f6631c552ff399c23cac02480b6088aad8f0" dependencies = [ "paste", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -25284,8 +25315,8 @@ version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 1.0.109", "unicode-xid 0.2.4", ] @@ -25296,8 +25327,8 @@ version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -25461,8 +25492,8 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5999e24eaa32083191ba4e425deb75cdf25efefabe5aaccb7446dd0d4122a3f5" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -25609,8 +25640,8 @@ version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "10ac1c5050e43014d16b2f94d0d2ce79e65ffdd8b38d8048f9c8f6a8a6da62ac" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 1.0.109", ] @@ -25620,8 +25651,8 @@ version = "1.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae71770322cbd277e69d762a16c444af02aa0575ac0d174f0b9562d3b37f8602" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -25631,8 +25662,8 @@ version = "2.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -25803,8 +25834,8 @@ version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -26067,8 +26098,8 @@ version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -26109,8 +26140,8 @@ dependencies = [ "assert_matches", "expander", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -26148,14 +26179,14 @@ dependencies = [ [[package]] 
name = "trie-bench" -version = "0.40.0" +version = "0.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eaafa99707db4419f193b97e825aca722c6910a9dff31d8f41df304b6091ef17" +checksum = "0445f19cd0e58d9aef1eef590739fc10c4291611722c98f8995b70ce8529f198" dependencies = [ "criterion", "hash-db", "keccak-hasher", - "memory-db", + "memory-db 0.33.0", "parity-scale-codec", "trie-db 0.30.0", "trie-root", @@ -26604,9 +26635,9 @@ checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" [[package]] name = "w3f-bls" -version = "0.1.3" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7335e4c132c28cc43caef6adb339789e599e39adbe78da0c4d547fad48cbc331" +checksum = "e6bfb937b3d12077654a9e43e32a4e9c20177dd9fea0f3aba673e7840bb54f32" dependencies = [ "ark-bls12-377", "ark-bls12-381 0.4.0", @@ -26615,14 +26646,12 @@ dependencies = [ "ark-serialize 0.4.2", "ark-serialize-derive 0.4.2", "arrayref", - "constcat", "digest 0.10.7", "rand 0.8.5", "rand_chacha 0.3.1", "rand_core 0.6.4", "sha2 0.10.8", "sha3 0.10.8", - "thiserror 1.0.65", "zeroize", ] @@ -26749,8 +26778,8 @@ dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", "wasm-bindgen-shared", ] @@ -26773,7 +26802,7 @@ version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56" dependencies = [ - "quote 1.0.38", + "quote 1.0.40", "wasm-bindgen-macro-support", ] @@ -26783,8 +26812,8 @@ version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", "wasm-bindgen-backend", "wasm-bindgen-shared", @@ -27928,6 +27957,7 @@ dependencies = [ "sp-tracing 16.0.0", "staging-xcm", "staging-xcm-executor", + "xcm-simulator", ] [[package]] @@ -27960,8 +27990,8 @@ version = "7.0.0" dependencies = [ "Inflector", "frame-support", - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "staging-xcm", "syn 2.0.98", "trybuild", @@ -27987,6 +28017,7 @@ dependencies = [ "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", + "xcm-simulator", ] [[package]] @@ -28206,8 +28237,8 @@ version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", "synstructure 0.13.1", ] @@ -28236,8 +28267,8 @@ version = "0.7.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -28247,8 +28278,8 @@ version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eea57037071898bf96a6da35fd626f4f27e9cee3ead2a6c703cf09d472b2e700" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -28267,8 +28298,8 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" 
dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", "synstructure 0.13.1", ] @@ -28288,8 +28319,8 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] @@ -28310,8 +28341,8 @@ version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ - "proc-macro2 1.0.93", - "quote 1.0.38", + "proc-macro2 1.0.95", + "quote 1.0.40", "syn 2.0.98", ] diff --git a/Cargo.toml b/Cargo.toml index 3c25cdda2bd27..ec43dd12065f7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -554,6 +554,7 @@ members = [ "substrate/utils/frame/rpc/state-trie-migration-rpc", "substrate/utils/frame/rpc/support", "substrate/utils/frame/rpc/system", + "substrate/utils/frame/storage-access-test-runtime", "substrate/utils/prometheus", "substrate/utils/substrate-bip39", "substrate/utils/wasm-builder", @@ -586,6 +587,7 @@ unexpected_cfgs = { level = "warn", check-cfg = [ 'cfg(build_profile, values("debug", "release"))', 'cfg(enable_alloc_error_handler)', 'cfg(fuzzing)', + 'cfg(ignore_flaky_test)', 'cfg(substrate_runtime)', ] } @@ -707,7 +709,7 @@ clap = { version = "4.5.13" } clap_complete = { version = "4.5.13" } cmd_lib = { version = "1.9.5" } coarsetime = { version = "0.1.22" } -codec = { version = "3.7.4", default-features = false, package = "parity-scale-codec" } +codec = { version = "3.7.5", default-features = false, package = "parity-scale-codec" } collectives-westend-emulated-chain = { path = "cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend" } collectives-westend-runtime = { path = "cumulus/parachains/runtimes/collectives/collectives-westend" } color-eyre = { version = "0.6.3", default-features = false } @@ -806,6 +808,7 @@ frame-election-provider-support = { path = "substrate/frame/election-provider-su frame-executive = { path = "substrate/frame/executive", default-features = false } frame-metadata = { version = "23.0.0", default-features = false } frame-metadata-hash-extension = { path = "substrate/frame/metadata-hash-extension", default-features = false } +frame-storage-access-test-runtime = { path = "substrate/utils/frame/storage-access-test-runtime", default-features = false } frame-support = { path = "substrate/frame/support", default-features = false } frame-support-procedural = { path = "substrate/frame/support/procedural", default-features = false } frame-support-procedural-tools = { path = "substrate/frame/support/procedural/tools", default-features = false } @@ -831,6 +834,7 @@ gum-proc-macro = { path = "polkadot/node/gum/proc-macro", default-features = fal handlebars = { version = "5.1.0" } hash-db = { version = "0.16.0", default-features = false } hash256-std-hasher = { version = "0.15.2", default-features = false } +hashbrown = "0.15.3" hex = { version = "0.4.3", default-features = false } hex-literal = { version = "0.4.1", default-features = false } hkdf = { version = "0.12.0" } @@ -886,7 +890,7 @@ log = { version = "0.4.22", default-features = false } macro_magic = { version = "0.5.1" } maplit = { version = "1.0.2" } memmap2 = { version = "0.9.3" } -memory-db = { version = "0.32.0", default-features = false } +memory-db = { version = "0.33.0", default-features = false } 
merkleized-metadata = { version = "0.5.0" } merlin = { version = "3.0", default-features = false } messages-relay = { path = "bridges/relays/messages" } @@ -1136,9 +1140,9 @@ polkadot-subsystem-bench = { path = "polkadot/node/subsystem-bench" } polkadot-test-client = { path = "polkadot/node/test/client" } polkadot-test-runtime = { path = "polkadot/runtime/test-runtime" } polkadot-test-service = { path = "polkadot/node/test/service" } -polkavm = { version = "0.18.0", default-features = false } -polkavm-derive = "0.18.0" -polkavm-linker = "0.18.0" +polkavm = { version = "0.24.0", default-features = false } +polkavm-derive = "0.24.0" +polkavm-linker = "0.24.0" portpicker = { version = "0.1.1" } pretty_assertions = { version = "1.3.0" } primitive-types = { version = "0.13.1", default-features = false, features = ["num-traits"] } @@ -1376,9 +1380,9 @@ substrate-test-runtime-client = { path = "substrate/test-utils/runtime/client" } substrate-test-runtime-transaction-pool = { path = "substrate/test-utils/runtime/transaction-pool" } substrate-test-utils = { path = "substrate/test-utils" } substrate-wasm-builder = { path = "substrate/utils/wasm-builder", default-features = false } -subxt = { version = "0.38.1", default-features = false } -subxt-metadata = { version = "0.38.0", default-features = false } -subxt-signer = { version = "0.38" } +subxt = { version = "0.41", default-features = false } +subxt-metadata = { version = "0.41", default-features = false } +subxt-signer = { version = "0.41" } syn = { version = "2.0.87" } sysinfo = { version = "0.30" } tar = { version = "0.4" } @@ -1413,7 +1417,7 @@ tracing-futures = { version = "0.2.4" } tracing-log = { version = "0.2.0" } tracing-subscriber = { version = "0.3.18" } tracking-allocator = { path = "polkadot/node/tracking-allocator", default-features = false, package = "staging-tracking-allocator" } -trie-bench = { version = "0.40.0" } +trie-bench = { version = "0.41.0" } trie-db = { version = "0.30.0", default-features = false } trie-root = { version = "0.18.0", default-features = false } trie-standardmap = { version = "0.16.0" } @@ -1425,7 +1429,7 @@ txtesttool = { version = "0.5.0", package = "substrate-txtesttool" } unsigned-varint = { version = "0.7.2" } url = { version = "2.5.4" } void = { version = "1.0.2" } -w3f-bls = { version = "0.1.3", default-features = false } +w3f-bls = { version = "0.1.9", default-features = false } wait-timeout = { version = "0.2" } walkdir = { version = "2.5.0" } wasm-instrument = { version = "0.4", default-features = false } diff --git a/bridges/modules/xcm-bridge-hub/src/dispatcher.rs b/bridges/modules/xcm-bridge-hub/src/dispatcher.rs index 1a6be1f18e6b3..eed82ae9e616e 100644 --- a/bridges/modules/xcm-bridge-hub/src/dispatcher.rs +++ b/bridges/modules/xcm-bridge-hub/src/dispatcher.rs @@ -68,7 +68,7 @@ where fn is_active(lane: Self::LaneId) -> bool { Pallet::::bridge_by_lane_id(&lane) - .and_then(|(_, bridge)| bridge.bridge_origin_relative_location.try_as().cloned().ok()) + .and_then(|(_, bridge)| (*bridge.bridge_origin_relative_location).try_into().ok()) .map(|recipient: Location| !T::LocalXcmChannelManager::is_congested(&recipient)) .unwrap_or(false) } diff --git a/bridges/modules/xcm-bridge-hub/src/exporter.rs b/bridges/modules/xcm-bridge-hub/src/exporter.rs index 6e59f0f8d2625..6fd56f40b7f71 100644 --- a/bridges/modules/xcm-bridge-hub/src/exporter.rs +++ b/bridges/modules/xcm-bridge-hub/src/exporter.rs @@ -228,8 +228,9 @@ impl, I: 'static> Pallet { } // else - suspend the bridge - let 
bridge_origin_relative_location = match bridge.bridge_origin_relative_location.try_as() - { + let result_bridge_origin_relative_location = + (*bridge.bridge_origin_relative_location).clone().try_into(); + let bridge_origin_relative_location = match &result_bridge_origin_relative_location { Ok(bridge_origin_relative_location) => bridge_origin_relative_location, Err(_) => { log::debug!( diff --git a/bridges/modules/xcm-bridge-hub/src/lib.rs b/bridges/modules/xcm-bridge-hub/src/lib.rs index 0262cc426e23a..e45d4a05bae7e 100644 --- a/bridges/modules/xcm-bridge-hub/src/lib.rs +++ b/bridges/modules/xcm-bridge-hub/src/lib.rs @@ -665,15 +665,11 @@ pub mod pallet { ); // check that `locations` are convertible to the `latest` XCM. - let bridge_origin_relative_location_as_latest: &Location = - bridge.bridge_origin_relative_location.try_as().map_err(|_| { - "`bridge.bridge_origin_relative_location` cannot be converted to the `latest` XCM, needs migration!" - })?; - let bridge_origin_universal_location_as_latest: &InteriorLocation = bridge.bridge_origin_universal_location - .try_as() + let bridge_origin_relative_location_as_latest: &Location = &(*bridge.bridge_origin_relative_location).try_into() + .map_err(|_| "`bridge.bridge_origin_relative_location` cannot be converted to the `latest` XCM, needs migration!")?; + let bridge_origin_universal_location_as_latest: &InteriorLocation = &(*bridge.bridge_origin_universal_location).try_into() .map_err(|_| "`bridge.bridge_origin_universal_location` cannot be converted to the `latest` XCM, needs migration!")?; - let bridge_destination_universal_location_as_latest: &InteriorLocation = bridge.bridge_destination_universal_location - .try_as() + let bridge_destination_universal_location_as_latest: &InteriorLocation = &(*bridge.bridge_destination_universal_location).try_into() .map_err(|_| "`bridge.bridge_destination_universal_location` cannot be converted to the `latest` XCM, needs migration!")?; // check `BridgeId` does not change @@ -1649,6 +1645,38 @@ mod tests { ); cleanup(bridge_id, vec![lane_id, lane_id_mismatch]); + // ok state with old XCM version + test_bridge_state( + bridge_id, + Bridge { + bridge_origin_relative_location: Box::new( + VersionedLocation::from(bridge_origin_relative_location.clone()) + .into_version(XCM_VERSION - 1) + .unwrap(), + ), + bridge_origin_universal_location: Box::new( + VersionedInteriorLocation::from(bridge_origin_universal_location.clone()) + .into_version(XCM_VERSION - 1) + .unwrap(), + ), + bridge_destination_universal_location: Box::new( + VersionedInteriorLocation::from( + bridge_destination_universal_location.clone(), + ) + .into_version(XCM_VERSION - 1) + .unwrap(), + ), + state: BridgeState::Opened, + bridge_owner_account: bridge_owner_account.clone(), + deposit: Zero::zero(), + lane_id, + }, + (lane_id, bridge_id), + (lane_id, lane_id), + None, + ); + cleanup(bridge_id, vec![lane_id]); + // missing bridge for inbound lane let lanes_manager = LanesManagerOf::::new(); assert!(lanes_manager.create_inbound_lane(lane_id).is_ok()); diff --git a/bridges/snowbridge/pallets/inbound-queue-v2/src/mock.rs b/bridges/snowbridge/pallets/inbound-queue-v2/src/mock.rs index 3650f5ff2cbb7..b1ea349b4a081 100644 --- a/bridges/snowbridge/pallets/inbound-queue-v2/src/mock.rs +++ b/bridges/snowbridge/pallets/inbound-queue-v2/src/mock.rs @@ -12,7 +12,7 @@ use snowbridge_core::TokenId; use snowbridge_inbound_queue_primitives::{v2::MessageToXcm, Log, Proof, VerificationError}; use sp_core::H160; use sp_runtime::{ - traits::{IdentityLookup, 
MaybeEquivalence}, + traits::{IdentityLookup, MaybeConvert}, BuildStorage, }; use sp_std::{convert::From, default::Default, marker::PhantomData}; @@ -75,13 +75,10 @@ impl BenchmarkHelper for Test { } pub struct MockTokenIdConvert; -impl MaybeEquivalence for MockTokenIdConvert { - fn convert(_id: &TokenId) -> Option { +impl MaybeConvert for MockTokenIdConvert { + fn maybe_convert(_id: TokenId) -> Option { Some(Location::parent()) } - fn convert_back(_loc: &Location) -> Option { - None - } } pub struct MockAccountLocationConverter(PhantomData); diff --git a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs index 1cad021f8eb7a..4a81e4243c12d 100644 --- a/bridges/snowbridge/pallets/inbound-queue/src/mock.rs +++ b/bridges/snowbridge/pallets/inbound-queue/src/mock.rs @@ -13,7 +13,7 @@ use snowbridge_core::{ use snowbridge_inbound_queue_primitives::{v1::MessageToXcm, Log, Proof, VerificationError}; use sp_core::{H160, H256}; use sp_runtime::{ - traits::{IdentifyAccount, IdentityLookup, MaybeEquivalence, Verify}, + traits::{IdentifyAccount, IdentityLookup, MaybeConvert, Verify}, BuildStorage, FixedU128, MultiSignature, }; use sp_std::{convert::From, default::Default}; @@ -214,13 +214,10 @@ impl TransactAsset for SuccessfulTransactor { } pub struct MockTokenIdConvert; -impl MaybeEquivalence for MockTokenIdConvert { - fn convert(_id: &TokenId) -> Option { +impl MaybeConvert for MockTokenIdConvert { + fn maybe_convert(_id: TokenId) -> Option { Some(Location::parent()) } - fn convert_back(_loc: &Location) -> Option { - None - } } impl inbound_queue::Config for Test { diff --git a/bridges/snowbridge/pallets/system-v2/src/lib.rs b/bridges/snowbridge/pallets/system-v2/src/lib.rs index cf600c6c5ef4c..100727cae752c 100644 --- a/bridges/snowbridge/pallets/system-v2/src/lib.rs +++ b/bridges/snowbridge/pallets/system-v2/src/lib.rs @@ -43,10 +43,10 @@ use snowbridge_outbound_queue_primitives::{ v2::{Command, Initializer, Message, SendMessage}, OperatingMode, SendError, }; -use snowbridge_pallet_system::{ForeignToNativeId, NativeToForeignId}; +use snowbridge_pallet_system::ForeignToNativeId; use sp_core::{H160, H256}; use sp_io::hashing::blake2_256; -use sp_runtime::traits::MaybeEquivalence; +use sp_runtime::traits::MaybeConvert; use sp_std::prelude::*; use xcm::prelude::*; use xcm_executor::traits::ConvertLocation; @@ -226,7 +226,6 @@ pub mod pallet { .ok_or(Error::::LocationConversionFailed)?; if !ForeignToNativeId::::contains_key(token_id) { - NativeToForeignId::::insert(location.clone(), token_id); ForeignToNativeId::::insert(token_id, location.clone()); } @@ -313,12 +312,9 @@ pub mod pallet { } } - impl MaybeEquivalence for Pallet { - fn convert(foreign_id: &TokenId) -> Option { - ForeignToNativeId::::get(foreign_id) - } - fn convert_back(location: &Location) -> Option { - NativeToForeignId::::get(location) + impl MaybeConvert for Pallet { + fn maybe_convert(foreign_id: TokenId) -> Option { + snowbridge_pallet_system::Pallet::::maybe_convert(foreign_id) } } } diff --git a/bridges/snowbridge/pallets/system-v2/src/tests.rs b/bridges/snowbridge/pallets/system-v2/src/tests.rs index 569bc614bcf71..6edf9e2243708 100644 --- a/bridges/snowbridge/pallets/system-v2/src/tests.rs +++ b/bridges/snowbridge/pallets/system-v2/src/tests.rs @@ -136,10 +136,6 @@ fn register_all_tokens_succeeds() { let foreign_token_id = EthereumSystemV2::location_to_message_origin(tc.native.clone()).unwrap(); - assert_eq!( - NativeToForeignId::::get(reanchored_location.clone()), - 
Some(foreign_token_id) - ); assert_eq!( ForeignToNativeId::::get(foreign_token_id), Some(reanchored_location.clone()) diff --git a/bridges/snowbridge/pallets/system/src/lib.rs b/bridges/snowbridge/pallets/system/src/lib.rs index 7d043d80eb3e5..d277186bd5b9d 100644 --- a/bridges/snowbridge/pallets/system/src/lib.rs +++ b/bridges/snowbridge/pallets/system/src/lib.rs @@ -52,7 +52,7 @@ use snowbridge_outbound_queue_primitives::{ }; use sp_core::{RuntimeDebug, H160, H256}; use sp_io::hashing::blake2_256; -use sp_runtime::{traits::MaybeEquivalence, DispatchError, SaturatedConversion}; +use sp_runtime::{traits::MaybeConvert, DispatchError, SaturatedConversion}; use sp_std::prelude::*; use xcm::prelude::*; use xcm_executor::traits::ConvertLocation; @@ -231,12 +231,7 @@ pub mod pallet { /// Lookup table for foreign token ID to native location relative to ethereum #[pallet::storage] pub type ForeignToNativeId = - StorageMap<_, Blake2_128Concat, TokenId, xcm::v5::Location, OptionQuery>; - - /// Lookup table for native location relative to ethereum to foreign token ID - #[pallet::storage] - pub type NativeToForeignId = - StorageMap<_, Blake2_128Concat, xcm::v5::Location, TokenId, OptionQuery>; + StorageMap<_, Blake2_128Concat, TokenId, Location, OptionQuery>; #[pallet::genesis_config] #[derive(frame_support::DefaultNoBound)] @@ -494,7 +489,6 @@ pub mod pallet { .ok_or(Error::::LocationConversionFailed)?; if !ForeignToNativeId::::contains_key(token_id) { - NativeToForeignId::::insert(location.clone(), token_id); ForeignToNativeId::::insert(token_id, location.clone()); } @@ -535,12 +529,9 @@ pub mod pallet { } } - impl MaybeEquivalence for Pallet { - fn convert(foreign_id: &TokenId) -> Option { + impl MaybeConvert for Pallet { + fn maybe_convert(foreign_id: TokenId) -> Option { ForeignToNativeId::::get(foreign_id) } - fn convert_back(location: &Location) -> Option { - NativeToForeignId::::get(location) - } } } diff --git a/bridges/snowbridge/pallets/system/src/tests.rs b/bridges/snowbridge/pallets/system/src/tests.rs index 93a1fba38d584..f41f0533fa60a 100644 --- a/bridges/snowbridge/pallets/system/src/tests.rs +++ b/bridges/snowbridge/pallets/system/src/tests.rs @@ -269,7 +269,6 @@ fn register_all_tokens_succeeds() { Default::default() )); - assert_eq!(NativeToForeignId::::get(tc.reanchored.clone()), Some(tc.foreign)); assert_eq!(ForeignToNativeId::::get(tc.foreign), Some(tc.reanchored.clone())); System::assert_last_event(RuntimeEvent::EthereumSystem(Event::::RegisterToken { @@ -301,3 +300,71 @@ fn register_ethereum_native_token_fails() { ); }); } + +#[test] +fn check_pna_token_id_compatibility() { + let test_cases = vec![ + // DOT + RegisterTokenTestCase { + native: Location::parent(), + reanchored: Location::new(1, GlobalConsensus(Polkadot)), + foreign: hex!("4e241583d94b5d48a27a22064cd49b2ed6f5231d2d950e432f9b7c2e0ade52b2") + .into(), + }, + // GLMR (Some Polkadot parachain currency) + RegisterTokenTestCase { + native: Location::new(1, [Parachain(2004)]), + reanchored: Location::new(1, [GlobalConsensus(Polkadot), Parachain(2004)]), + foreign: hex!("34c08fc90409b6924f0e8eabb7c2aaa0c749e23e31adad9f6d217b577737fafb") + .into(), + }, + // USDT + RegisterTokenTestCase { + native: Location::new(1, [Parachain(1000), PalletInstance(50), GeneralIndex(1984)]), + reanchored: Location::new( + 1, + [ + GlobalConsensus(Polkadot), + Parachain(1000), + PalletInstance(50), + GeneralIndex(1984), + ], + ), + foreign: hex!("14b0579be12d7d7f9971f1d4b41f0e88384b9b74799b0150d4aa6cd01afb4444") + .into(), + }, + // KSM + 
RegisterTokenTestCase { + native: Location::new(2, [GlobalConsensus(Kusama)]), + reanchored: Location::new(1, [GlobalConsensus(Kusama)]), + foreign: hex!("03b6054d0c576dd8391e34e1609cf398f68050c23009d19ce93c000922bcd852") + .into(), + }, + // KAR (Some Kusama parachain currency) + RegisterTokenTestCase { + native: Location::new(2, [GlobalConsensus(Kusama), Parachain(2000)]), + reanchored: Location::new(1, [GlobalConsensus(Kusama), Parachain(2000)]), + foreign: hex!("d3e39ad6ea4cee68c9741181e94098823b2ea34a467577d0875c036f0fce5be0") + .into(), + }, + ]; + for tc in test_cases.iter() { + new_test_ext(true).execute_with(|| { + let origin = RuntimeOrigin::root(); + let versioned_location: VersionedLocation = tc.native.clone().into(); + + assert_ok!(EthereumSystem::register_token( + origin, + Box::new(versioned_location), + Default::default() + )); + + assert_eq!(ForeignToNativeId::::get(tc.foreign), Some(tc.reanchored.clone())); + + System::assert_last_event(RuntimeEvent::EthereumSystem(Event::::RegisterToken { + location: tc.reanchored.clone().into(), + foreign_token_id: tc.foreign, + })); + }); + } +} diff --git a/bridges/snowbridge/primitives/inbound-queue/src/v1.rs b/bridges/snowbridge/primitives/inbound-queue/src/v1.rs index cc0edda11913b..6eba682f72f74 100644 --- a/bridges/snowbridge/primitives/inbound-queue/src/v1.rs +++ b/bridges/snowbridge/primitives/inbound-queue/src/v1.rs @@ -9,7 +9,7 @@ use frame_support::{traits::tokens::Balance as BalanceT, PalletError}; use scale_info::TypeInfo; use snowbridge_core::TokenId; use sp_core::{Get, RuntimeDebug, H160, H256}; -use sp_runtime::{traits::MaybeEquivalence, MultiAddress}; +use sp_runtime::{traits::MaybeConvert, MultiAddress}; use sp_std::prelude::*; use xcm::prelude::{Junction::AccountKey20, *}; @@ -104,7 +104,7 @@ pub struct MessageToXcm< CreateAssetCall: Get, CreateAssetDeposit: Get, Balance: BalanceT, - ConvertAssetId: MaybeEquivalence, + ConvertAssetId: MaybeConvert, EthereumUniversalLocation: Get, GlobalAssetHubLocation: Get, { @@ -171,7 +171,7 @@ where InboundQueuePalletInstance: Get, Balance: BalanceT + From, AccountId: Into<[u8; 32]>, - ConvertAssetId: MaybeEquivalence, + ConvertAssetId: MaybeConvert, EthereumUniversalLocation: Get, GlobalAssetHubLocation: Get, { @@ -230,7 +230,7 @@ where InboundQueuePalletInstance: Get, Balance: BalanceT + From, AccountId: Into<[u8; 32]>, - ConvertAssetId: MaybeEquivalence, + ConvertAssetId: MaybeConvert, EthereumUniversalLocation: Get, GlobalAssetHubLocation: Get, { @@ -426,7 +426,7 @@ where let total_fee_asset: Asset = (Location::parent(), asset_hub_fee).into(); let asset_loc = - ConvertAssetId::convert(&token_id).ok_or(ConvertMessageError::InvalidToken)?; + ConvertAssetId::maybe_convert(token_id).ok_or(ConvertMessageError::InvalidToken)?; let mut reanchored_asset_loc = asset_loc.clone(); reanchored_asset_loc diff --git a/bridges/snowbridge/primitives/inbound-queue/src/v2/converter.rs b/bridges/snowbridge/primitives/inbound-queue/src/v2/converter.rs index 6b0e9b6efb2f2..43c086e2f7448 100644 --- a/bridges/snowbridge/primitives/inbound-queue/src/v2/converter.rs +++ b/bridges/snowbridge/primitives/inbound-queue/src/v2/converter.rs @@ -10,7 +10,7 @@ use frame_support::ensure; use snowbridge_core::TokenId; use sp_core::{Get, RuntimeDebug, H160}; use sp_io::hashing::blake2_256; -use sp_runtime::{traits::MaybeEquivalence, MultiAddress}; +use sp_runtime::{traits::MaybeConvert, MultiAddress}; use sp_std::prelude::*; use xcm::{ prelude::{Junction::*, *}, @@ -103,7 +103,7 @@ where CreateAssetDeposit: Get, 
EthereumNetwork: Get, InboundQueueLocation: Get, - ConvertAssetId: MaybeEquivalence, + ConvertAssetId: MaybeConvert, GatewayProxyAddress: Get, EthereumUniversalLocation: Get, AssetHubFromEthereum: Get, @@ -163,7 +163,7 @@ where assets.push(AssetTransfer::ReserveDeposit(asset)); }, EthereumAsset::ForeignTokenERC20 { token_id, value } => { - let asset_loc = ConvertAssetId::convert(&token_id) + let asset_loc = ConvertAssetId::maybe_convert(*token_id) .ok_or(ConvertMessageError::InvalidAsset)?; let reanchored_asset_loc = asset_loc .reanchored(&AssetHubFromEthereum::get(), &EthereumUniversalLocation::get()) @@ -329,7 +329,7 @@ where CreateAssetDeposit: Get, EthereumNetwork: Get, InboundQueueLocation: Get, - ConvertAssetId: MaybeEquivalence, + ConvertAssetId: MaybeConvert, GatewayProxyAddress: Get, EthereumUniversalLocation: Get, AssetHubFromEthereum: Get, @@ -402,7 +402,6 @@ mod tests { add_location_override, reanchor_to_ethereum, LocationIdConvert, }; use sp_core::{H160, H256}; - use sp_runtime::traits::MaybeEquivalence; const GATEWAY_ADDRESS: [u8; 20] = hex!["eda338e4dc46038493b885327842fd3e301cab39"]; parameter_types! { @@ -420,11 +419,8 @@ mod tests { } pub struct MockFailedTokenConvert; - impl MaybeEquivalence for MockFailedTokenConvert { - fn convert(_id: &TokenId) -> Option { - None - } - fn convert_back(_loc: &Location) -> Option { + impl MaybeConvert for MockFailedTokenConvert { + fn maybe_convert(_id: TokenId) -> Option { None } } diff --git a/bridges/snowbridge/primitives/outbound-queue/src/v1/converter/mod.rs b/bridges/snowbridge/primitives/outbound-queue/src/v1/converter/mod.rs index 5a9631acfb5d9..7f6275fa331e7 100644 --- a/bridges/snowbridge/primitives/outbound-queue/src/v1/converter/mod.rs +++ b/bridges/snowbridge/primitives/outbound-queue/src/v1/converter/mod.rs @@ -13,7 +13,7 @@ use super::message::{Command, Message, SendMessage}; use frame_support::{ensure, traits::Get}; use snowbridge_core::{AgentId, ChannelId, ParaId, TokenId, TokenIdOf}; use sp_core::{H160, H256}; -use sp_runtime::traits::MaybeEquivalence; +use sp_runtime::traits::MaybeConvert; use sp_std::{iter::Peekable, marker::PhantomData, prelude::*}; use xcm::prelude::*; use xcm_executor::traits::{ConvertLocation, ExportXcm}; @@ -48,7 +48,7 @@ where EthereumNetwork: Get, OutboundQueue: SendMessage, AgentHashedDescription: ConvertLocation, - ConvertAssetId: MaybeEquivalence, + ConvertAssetId: MaybeConvert, { type Ticket = (Vec, XcmHash); @@ -194,7 +194,7 @@ struct XcmConverter<'a, ConvertAssetId, Call> { } impl<'a, ConvertAssetId, Call> XcmConverter<'a, ConvertAssetId, Call> where - ConvertAssetId: MaybeEquivalence, + ConvertAssetId: MaybeConvert, { fn new(message: &'a Xcm, ethereum_network: NetworkId, agent_id: AgentId) -> Self { Self { @@ -413,9 +413,7 @@ where let token_id = TokenIdOf::convert_location(&asset_id).ok_or(InvalidAsset)?; - let expected_asset_id = ConvertAssetId::convert(&token_id).ok_or(InvalidAsset)?; - - ensure!(asset_id == expected_asset_id, InvalidAsset); + ConvertAssetId::maybe_convert(token_id).ok_or(InvalidAsset)?; // Check if there is a SetTopic and skip over it if found. 
let topic_id = match_expression!(self.next()?, SetTopic(id), id).ok_or(SetTopicExpected)?; diff --git a/bridges/snowbridge/primitives/outbound-queue/src/v1/converter/tests.rs b/bridges/snowbridge/primitives/outbound-queue/src/v1/converter/tests.rs index b2b6e3c1d9954..9ce5bb8b29d30 100644 --- a/bridges/snowbridge/primitives/outbound-queue/src/v1/converter/tests.rs +++ b/bridges/snowbridge/primitives/outbound-queue/src/v1/converter/tests.rs @@ -62,13 +62,10 @@ impl SendMessageFeeProvider for MockErrOutboundQueue { } pub struct MockTokenIdConvert; -impl MaybeEquivalence for MockTokenIdConvert { - fn convert(_id: &TokenId) -> Option { +impl MaybeConvert for MockTokenIdConvert { + fn maybe_convert(_id: TokenId) -> Option { Some(Location::new(1, [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))])) } - fn convert_back(_loc: &Location) -> Option { - None - } } #[test] diff --git a/bridges/snowbridge/primitives/outbound-queue/src/v2/converter/convert.rs b/bridges/snowbridge/primitives/outbound-queue/src/v2/converter/convert.rs index e8b38c4e698f4..f64554e42756a 100644 --- a/bridges/snowbridge/primitives/outbound-queue/src/v2/converter/convert.rs +++ b/bridges/snowbridge/primitives/outbound-queue/src/v2/converter/convert.rs @@ -14,7 +14,7 @@ use crate::v2::{ use crate::v2::convert::XcmConverterError::{AssetResolutionFailed, FilterDoesNotConsumeAllAssets}; use sp_core::H160; -use sp_runtime::traits::MaybeEquivalence; +use sp_runtime::traits::MaybeConvert; use sp_std::{iter::Peekable, marker::PhantomData, prelude::*}; use xcm::prelude::*; use xcm_executor::traits::ConvertLocation; @@ -64,7 +64,7 @@ pub struct XcmConverter<'a, ConvertAssetId, Call> { } impl<'a, ConvertAssetId, Call> XcmConverter<'a, ConvertAssetId, Call> where - ConvertAssetId: MaybeEquivalence, + ConvertAssetId: MaybeConvert, { pub fn new(message: &'a Xcm, ethereum_network: NetworkId) -> Self { Self { @@ -174,8 +174,7 @@ where // Ensure PNA already registered let token_id = TokenIdOf::convert_location(&asset_id).ok_or(InvalidAsset)?; - let expected_asset_id = ConvertAssetId::convert(&token_id).ok_or(InvalidAsset)?; - ensure!(asset_id == expected_asset_id, InvalidAsset); + ConvertAssetId::maybe_convert(token_id).ok_or(InvalidAsset)?; commands.push(Command::MintForeignToken { token_id, recipient, amount }); } diff --git a/bridges/snowbridge/primitives/outbound-queue/src/v2/converter/mod.rs b/bridges/snowbridge/primitives/outbound-queue/src/v2/converter/mod.rs index 5da68a01626ca..c4459a05c7c51 100644 --- a/bridges/snowbridge/primitives/outbound-queue/src/v2/converter/mod.rs +++ b/bridges/snowbridge/primitives/outbound-queue/src/v2/converter/mod.rs @@ -15,7 +15,7 @@ use frame_support::{ traits::{Contains, Get, ProcessMessageError}, }; use snowbridge_core::{ParaId, TokenId}; -use sp_runtime::traits::MaybeEquivalence; +use sp_runtime::traits::MaybeConvert; use sp_std::{marker::PhantomData, ops::ControlFlow, prelude::*}; use xcm::prelude::*; use xcm_builder::{CreateMatcher, ExporterFor, MatchXcm}; @@ -53,7 +53,7 @@ where UniversalLocation: Get, EthereumNetwork: Get, OutboundQueue: SendMessage, - ConvertAssetId: MaybeEquivalence, + ConvertAssetId: MaybeConvert, AssetHubParaId: Get, { type Ticket = (Vec, XcmHash); diff --git a/bridges/snowbridge/primitives/outbound-queue/src/v2/converter/tests.rs b/bridges/snowbridge/primitives/outbound-queue/src/v2/converter/tests.rs index 58501066743bf..bb5a88c73c809 100644 --- a/bridges/snowbridge/primitives/outbound-queue/src/v2/converter/tests.rs +++ 
b/bridges/snowbridge/primitives/outbound-queue/src/v2/converter/tests.rs @@ -62,13 +62,10 @@ impl SendMessageFeeProvider for MockErrOutboundQueue { } pub struct MockTokenIdConvert; -impl MaybeEquivalence for MockTokenIdConvert { - fn convert(_id: &TokenId) -> Option { +impl MaybeConvert for MockTokenIdConvert { + fn maybe_convert(_id: TokenId) -> Option { Some(Location::new(1, [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))])) } - fn convert_back(_loc: &Location) -> Option { - None - } } #[test] diff --git a/bridges/snowbridge/test-utils/src/mock_converter.rs b/bridges/snowbridge/test-utils/src/mock_converter.rs index 333460814ec99..d40e3cf97bd49 100644 --- a/bridges/snowbridge/test-utils/src/mock_converter.rs +++ b/bridges/snowbridge/test-utils/src/mock_converter.rs @@ -2,7 +2,7 @@ // SPDX-FileCopyrightText: 2023 Snowfork use codec::Encode; -use frame_support::sp_runtime::traits::MaybeEquivalence; +use frame_support::sp_runtime::traits::MaybeConvert; use snowbridge_core::TokenIdOf; use sp_core::H256; use std::{cell::RefCell, collections::HashMap}; @@ -35,12 +35,8 @@ pub fn reanchor_to_ethereum( } pub struct LocationIdConvert; -impl MaybeEquivalence for LocationIdConvert { - fn convert(id: &H256) -> Option { - IDENTIFIER_TO_LOCATION.with(|b| b.borrow().get(id).and_then(|l| Option::from(l.clone()))) - } - fn convert_back(lol: &Location) -> Option { - LOCATION_TO_IDENTIFIER - .with(|b| b.borrow().get(&lol.encode()).and_then(|id| Option::from(*id))) +impl MaybeConvert for LocationIdConvert { + fn maybe_convert(id: H256) -> Option { + IDENTIFIER_TO_LOCATION.with(|b| b.borrow().get(&id).and_then(|l| Option::from(l.clone()))) } } diff --git a/cumulus/client/consensus/aura/Cargo.toml b/cumulus/client/consensus/aura/Cargo.toml index bd94982e7e542..09a6d71089949 100644 --- a/cumulus/client/consensus/aura/Cargo.toml +++ b/cumulus/client/consensus/aura/Cargo.toml @@ -64,3 +64,4 @@ cumulus-test-client = { workspace = true } cumulus-test-relay-sproof-builder = { workspace = true } rstest = { workspace = true } sp-keyring = { workspace = true } +sp-tracing = { workspace = true } diff --git a/cumulus/client/consensus/aura/src/equivocation_import_queue.rs b/cumulus/client/consensus/aura/src/equivocation_import_queue.rs index ab0e62ac591d1..748e11bc45d03 100644 --- a/cumulus/client/consensus/aura/src/equivocation_import_queue.rs +++ b/cumulus/client/consensus/aura/src/equivocation_import_queue.rs @@ -23,43 +23,45 @@ use codec::Codec; use cumulus_client_consensus_common::ParachainBlockImportMarker; use parking_lot::Mutex; -use schnellru::{ByLength, LruMap}; - +use polkadot_primitives::Hash as RHash; use sc_consensus::{ import_queue::{BasicQueue, Verifier as VerifierT}, BlockImport, BlockImportParams, ForkChoiceStrategy, }; use sc_consensus_aura::standalone as aura_internal; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_TRACE}; +use schnellru::{ByLength, LruMap}; use sp_api::ProvideRuntimeApi; use sp_block_builder::BlockBuilder as BlockBuilderApi; -use sp_consensus::error::Error as ConsensusError; +use sp_consensus::{error::Error as ConsensusError, BlockOrigin}; use sp_consensus_aura::{AuraApi, Slot, SlotDuration}; use sp_core::crypto::Pair; use sp_inherents::{CreateInherentDataProviders, InherentDataProvider}; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; use std::{fmt::Debug, sync::Arc}; -const LRU_WINDOW: u32 = 256; +const LRU_WINDOW: u32 = 512; const EQUIVOCATION_LIMIT: usize = 16; 
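The defender introduced below is, at heart, a bounded counter map: each verification bumps a per-key counter, and verification fails once the counter exceeds `EQUIVOCATION_LIMIT`. A standalone sketch of the same counting scheme (same `schnellru` API as the code below; the key type is simplified to plain tuples, so this is illustrative, not the PR's exact types):

use schnellru::{ByLength, LruMap};

const LRU_WINDOW: u32 = 512;
const EQUIVOCATION_LIMIT: usize = 16;

// Simplified key: (slot, block number, relay parent hash).
type Key = (u64, u32, [u8; 32]);

struct Defender {
	cache: LruMap<Key, usize>,
}

impl Default for Defender {
	fn default() -> Self {
		Defender { cache: LruMap::new(ByLength::new(LRU_WINDOW)) }
	}
}

impl Defender {
	/// Returns `true` once more than `EQUIVOCATION_LIMIT` seals have been
	/// seen for the same key.
	fn insert_and_check(&mut self, key: Key) -> bool {
		let count = self
			.cache
			.get_or_insert(key, || 0)
			.expect("insertion with `ByLength` limiter always succeeds; qed");
		if *count == EQUIVOCATION_LIMIT {
			true
		} else {
			*count += 1;
			false
		}
	}
}

Keying by `(slot, block number, relay parent)` instead of just the slot (as the diff below does) means that honest re-proposals on new relay parents no longer count against the same budget as true equivocations within one slot.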
-struct NaiveEquivocationDefender {
-	cache: LruMap<u64, usize>,
+struct NaiveEquivocationDefender<N> {
+	/// We distinguish blocks by `(Slot, BlockNumber, RelayParent)`.
+	cache: LruMap<(u64, N, RHash), usize>,
 }
 
-impl Default for NaiveEquivocationDefender {
+impl<N> Default for NaiveEquivocationDefender<N> {
 	fn default() -> Self {
 		NaiveEquivocationDefender { cache: LruMap::new(ByLength::new(LRU_WINDOW)) }
 	}
 }
 
-impl NaiveEquivocationDefender {
-	// return `true` if equivocation is beyond the limit.
-	fn insert_and_check(&mut self, slot: Slot) -> bool {
+impl<N: PartialEq + Eq + core::hash::Hash> NaiveEquivocationDefender<N> {
+	// Returns `true` if equivocation is beyond the limit.
+	fn insert_and_check(&mut self, slot: Slot, block_number: N, relay_chain_parent: RHash) -> bool {
 		let val = self
 			.cache
-			.get_or_insert(*slot, || 0)
+			.get_or_insert((*slot, block_number, relay_chain_parent), || 0)
 			.expect("insertion with ByLength limiter always succeeds; qed");
+
 		if *val == EQUIVOCATION_LIMIT {
 			true
 		} else {
@@ -70,10 +72,10 @@ impl NaiveEquivocationDefender {
 }
 
 /// A parachain block import verifier that checks for equivocation limits within each slot.
-pub struct Verifier<P, Client, Block, CIDP> {
+pub struct Verifier<P, Client, Block: BlockT, CIDP> {
 	client: Arc<Client>,
 	create_inherent_data_providers: CIDP,
-	defender: Mutex<NaiveEquivocationDefender>,
+	defender: Mutex<NaiveEquivocationDefender<NumberFor<Block>>>,
 	telemetry: Option<TelemetryHandle>,
 	_phantom: std::marker::PhantomData<fn() -> (Block, P)>,
 }
@@ -163,13 +165,33 @@ where
 					"pre_header" => ?pre_header,
 				);
 
+				// We need some kind of identifier for the relay parent; in the worst case,
+				// we fall back to the all-zero hash.
+				let relay_parent =
+					cumulus_primitives_core::rpsr_digest::extract_relay_parent_storage_root(
+						pre_header.digest(),
+					)
+					.map(|r| r.0)
+					.unwrap_or_else(|| {
+						cumulus_primitives_core::extract_relay_parent(pre_header.digest())
+							.unwrap_or_default()
+					});
+
 				block_params.header = pre_header;
 				block_params.post_digests.push(seal_digest);
 				block_params.fork_choice = Some(ForkChoiceStrategy::LongestChain);
 				block_params.post_hash = Some(post_hash);
 
 				// Check for and reject egregious amounts of equivocations.
-				if self.defender.lock().insert_and_check(slot) {
+				//
+				// If the `origin` is `ConsensusBroadcast`, we ignore the result of the
+				// equivocation check. This `origin` is, for example, used by pov-recovery.
+				if self.defender.lock().insert_and_check(
+					slot,
+					*block_params.header.number(),
+					relay_parent,
+				) && !matches!(block_params.origin, BlockOrigin::ConsensusBroadcast)
+				{
 					return Err(format!(
 						"Rejecting block {:?} due to excessive equivocations at slot",
 						post_hash,
@@ -199,7 +221,7 @@ where
 			}
 		}
 
-		// check inherents.
+		// Check inherents.
 		if let Some(body) = block_params.body.clone() {
 			let block = Block::new(block_params.header.clone(), body);
 			let create_inherent_data_providers = self
@@ -282,3 +304,93 @@ where
 
 	BasicQueue::new(verifier, Box::new(block_import), None, spawner, registry)
 }
+
+#[cfg(test)]
+mod test {
+	use super::*;
+	use codec::Encode;
+	use cumulus_test_client::{
+		runtime::Block, seal_block, Client, InitBlockBuilder, TestClientBuilder,
+		TestClientBuilderExt,
+	};
+	use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder;
+	use futures::FutureExt;
+	use polkadot_primitives::{HeadData, PersistedValidationData};
+	use sc_client_api::HeaderBackend;
+	use sp_consensus_aura::sr25519;
+	use sp_tracing::try_init_simple;
+	use std::{collections::HashSet, sync::Arc};
+
+	#[test]
+	fn import_equivocated_blocks_from_recovery() {
+		try_init_simple();
+
+		let client = Arc::new(TestClientBuilder::default().build());
+
+		let verifier = Verifier::<sr25519::AuthorityPair, _, _, _> {
+			client: client.clone(),
+			create_inherent_data_providers: |_, _| async move {
+				Ok(sp_timestamp::InherentDataProvider::from_system_time())
+			},
+			defender: Mutex::new(NaiveEquivocationDefender::default()),
+			telemetry: None,
+			_phantom: std::marker::PhantomData,
+		};
+
+		let genesis = client.info().best_hash;
+		let mut sproof = RelayStateSproofBuilder::default();
+		sproof.included_para_head = Some(HeadData(client.header(genesis).unwrap().encode()));
+		sproof.para_id = cumulus_test_client::runtime::PARACHAIN_ID.into();
+
+		let validation_data = PersistedValidationData {
+			relay_parent_number: 1,
+			parent_head: client.header(genesis).unwrap().encode().into(),
+			..Default::default()
+		};
+
+		let block_builder = client.init_block_builder(Some(validation_data), sproof);
+		let block = block_builder.block_builder.build().unwrap();
+
+		let mut blocks = Vec::new();
+		for _ in 0..EQUIVOCATION_LIMIT + 1 {
+			blocks.push(seal_block(block.block.clone(), &client))
+		}
+
+		// sr25519 generates a different signature every time you sign something, and thus
+		// all blocks get a different hash (even if they are the same block).
+		assert_eq!(blocks.iter().map(|b| b.hash()).collect::<HashSet<_>>().len(), blocks.len());
+
+		blocks.iter().take(EQUIVOCATION_LIMIT).for_each(|block| {
+			let mut params =
+				BlockImportParams::new(BlockOrigin::NetworkBroadcast, block.header().clone());
+			params.body = Some(block.extrinsics().to_vec());
+			verifier.verify(params).now_or_never().unwrap().unwrap();
+		});
+
+		// Now let's try a previously verified block and a block we have not verified yet.
+		//
+		// Verify should fail, because we are above the limit. However, when we change the
+		// origin to `ConsensusBroadcast`, it should work.
+		let extra_blocks =
+			vec![blocks[EQUIVOCATION_LIMIT / 2].clone(), blocks.last().unwrap().clone()];
+
+		extra_blocks.into_iter().for_each(|block| {
+			let mut params =
+				BlockImportParams::new(BlockOrigin::NetworkBroadcast, block.header().clone());
+			params.body = Some(block.extrinsics().to_vec());
+			assert!(verifier
+				.verify(params)
+				.now_or_never()
+				.unwrap()
+				.map(drop)
+				.unwrap_err()
+				.contains("excessive equivocations at slot"));
+
+			// When it comes from `pov-recovery`, we will accept it.
+			let mut params =
+				BlockImportParams::new(BlockOrigin::ConsensusBroadcast, block.header().clone());
+			params.body = Some(block.extrinsics().to_vec());
+			assert!(verifier.verify(params).now_or_never().unwrap().is_ok());
+		});
+	}
+}
diff --git a/cumulus/client/pov-recovery/src/lib.rs b/cumulus/client/pov-recovery/src/lib.rs
index 3d093ee05db07..5ddc990861527 100644
--- a/cumulus/client/pov-recovery/src/lib.rs
+++ b/cumulus/client/pov-recovery/src/lib.rs
@@ -530,6 +530,8 @@ where
 		}
 
 		self.parachain_import_queue
+			// Use `ConsensusBroadcast` to inform the import pipeline that this block needs
+			// to be imported.
 			.import_blocks(BlockOrigin::ConsensusBroadcast, incoming_blocks);
 	}
diff --git a/cumulus/client/service/src/lib.rs b/cumulus/client/service/src/lib.rs
index 431b8e9401d79..4a719c66bbf00 100644
--- a/cumulus/client/service/src/lib.rs
+++ b/cumulus/client/service/src/lib.rs
@@ -740,10 +740,13 @@ impl ParachainInformantMetrics {
 		))?;
 		prometheus_registry.register(Box::new(parachain_block_authorship_duration.clone()))?;
 
-		let unincluded_segment_size = Histogram::with_opts(HistogramOpts::new(
-			"parachain_unincluded_segment_size",
-			"Number of blocks between best block and last included block",
-		))?;
+		let unincluded_segment_size = Histogram::with_opts(
+			HistogramOpts::new(
+				"parachain_unincluded_segment_size",
+				"Number of blocks between best block and last included block",
+			)
+			.buckets((0..=24).into_iter().map(|i| i as f64).collect()),
+		)?;
 		prometheus_registry.register(Box::new(unincluded_segment_size.clone()))?;
 
 		Ok(Self {
diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml
index 9752abe2914ea..fa8a0c11af921 100644
--- a/cumulus/pallets/parachain-system/Cargo.toml
+++ b/cumulus/pallets/parachain-system/Cargo.toml
@@ -15,6 +15,7 @@ workspace = true
 bytes = { workspace = true }
 codec = { features = ["derive"], workspace = true }
 environmental = { workspace = true }
+hashbrown = { workspace = true }
 impl-trait-for-tuples = { workspace = true }
 log = { workspace = true }
 scale-info = { features = ["derive"], workspace = true }
diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs
index 3f65de06a6534..c2349b7dd59e1 100644
--- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs
+++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs
@@ -31,13 +31,15 @@
 use codec::{Decode, Encode};
 use cumulus_primitives_core::relay_chain::vstaging::{UMPSignal, UMP_SEPARATOR};
 use frame_support::{
-	traits::{ExecuteBlock, ExtrinsicCall, Get, IsSubType},
+	traits::{ExecuteBlock, Get, IsSubType},
 	BoundedVec,
 };
 use sp_core::storage::{ChildInfo, StateVersion};
 use sp_externalities::{set_and_run_with_externalities, Externalities};
 use sp_io::KillStorageResult;
-use sp_runtime::traits::{Block as BlockT, ExtrinsicLike, HashingFor, Header as HeaderT};
+use sp_runtime::traits::{
+	Block as BlockT, ExtrinsicCall, ExtrinsicLike,
HashingFor, Header as HeaderT, +}; use sp_state_machine::OverlayedChanges; use sp_trie::ProofSizeProvider; use trie_recorder::SizeOnlyRecorderProvider; diff --git a/cumulus/pallets/parachain-system/src/validate_block/mod.rs b/cumulus/pallets/parachain-system/src/validate_block/mod.rs index 8b10d7ca4e50c..28b744f7eb6b5 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/mod.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/mod.rs @@ -24,11 +24,11 @@ mod tests; #[cfg(not(feature = "std"))] #[doc(hidden)] -mod trie_cache; +pub mod trie_cache; #[cfg(any(test, not(feature = "std")))] #[doc(hidden)] -mod trie_recorder; +pub mod trie_recorder; #[cfg(not(feature = "std"))] #[doc(hidden)] diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index 4d9abcc2b39f1..028bd64566d25 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -22,7 +22,7 @@ use cumulus_test_client::{ runtime::{ self as test_runtime, Block, Hash, Header, TestPalletCall, UncheckedExtrinsic, WASM_BINARY, }, - seal_block, transfer, BlockData, BlockOrigin, BuildParachainBlockData, Client, + seal_parachain_block_data, transfer, BlockData, BlockOrigin, BuildParachainBlockData, Client, ClientBlockImportExt, DefaultTestClientBuilderExt, HeadData, InitBlockBuilder, Sr25519Keyring::{Alice, Bob, Charlie}, TestClientBuilder, TestClientBuilderExt, ValidationParams, @@ -208,7 +208,7 @@ fn validate_block_works() { let TestBlockData { block, validation_data } = build_block_with_witness(&client, Vec::new(), parent_head.clone(), Default::default()); - let block = seal_block(block, &client); + let block = seal_parachain_block_data(block, &client); let header = block.blocks()[0].header().clone(); let res_header = call_validate_block(parent_head, block, validation_data.relay_parent_storage_root) @@ -225,7 +225,7 @@ fn validate_multiple_blocks_work() { let TestBlockData { block, validation_data } = build_multiple_blocks_with_witness(&client, parent_head.clone(), Default::default(), 4); - let block = seal_block(block, &client); + let block = seal_parachain_block_data(block, &client); let header = block.blocks().last().unwrap().header().clone(); let res_header = call_validate_block_elastic_scaling( parent_head, @@ -253,7 +253,7 @@ fn validate_block_with_extra_extrinsics() { parent_head.clone(), Default::default(), ); - let block = seal_block(block, &client); + let block = seal_parachain_block_data(block, &client); let header = block.blocks()[0].header().clone(); let res_header = @@ -290,7 +290,7 @@ fn validate_block_returns_custom_head_data() { let header = block.blocks()[0].header().clone(); assert_ne!(expected_header, header.encode()); - let block = seal_block(block, &client); + let block = seal_parachain_block_data(block, &client); let res_header = call_validate_block_validation_result( WASM_BINARY.expect("You need to build the WASM binaries to run the tests!"), parent_head, @@ -443,7 +443,7 @@ fn validate_block_works_with_child_tries() { Default::default(), ); - let block = seal_block(block, &client); + let block = seal_parachain_block_data(block, &client); let header = block.blocks()[0].header().clone(); let res_header = call_validate_block(parent_head, block, validation_data.relay_parent_storage_root) @@ -467,7 +467,7 @@ fn validate_block_handles_ump_signal() { Default::default(), ); - let block = seal_block(block, &client); + let block = 
seal_parachain_block_data(block, &client); let upward_messages = call_validate_block_validation_result( test_runtime::elastic_scaling::WASM_BINARY .expect("You need to build the WASM binaries to run the tests!"), diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_cache.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_cache.rs index 9590af993e9f9..0931a2f0c4935 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/trie_cache.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_cache.rs @@ -15,11 +15,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -use alloc::{ - boxed::Box, - collections::btree_map::{BTreeMap, Entry}, -}; +use alloc::boxed::Box; + use core::cell::{RefCell, RefMut}; +use hashbrown::{hash_map::Entry, HashMap}; use sp_state_machine::TrieCacheProvider; use sp_trie::NodeCodec; use trie_db::{node::NodeOwned, Hasher}; @@ -27,9 +26,9 @@ use trie_db::{node::NodeOwned, Hasher}; /// Special purpose trie cache implementation that is able to cache an unlimited number /// of values. To be used in `validate_block` to serve values and nodes that /// have already been loaded and decoded from the storage proof. -pub(crate) struct TrieCache<'a, H: Hasher> { - node_cache: RefMut<'a, BTreeMap>>, - value_cache: Option, trie_db::CachedValue>>>, +pub struct TrieCache<'a, H: Hasher> { + node_cache: RefMut<'a, HashMap>>, + value_cache: Option, trie_db::CachedValue>>>, } impl<'a, H: Hasher> trie_db::TrieCache> for TrieCache<'a, H> { @@ -65,15 +64,15 @@ impl<'a, H: Hasher> trie_db::TrieCache> for TrieCache<'a, H> { } /// Provider of [`TrieCache`] instances. -pub(crate) struct CacheProvider { - node_cache: RefCell>>, +pub struct CacheProvider { + node_cache: RefCell>>, /// Cache: `storage_root` => `storage_key` => `value`. /// /// One `block` can for example use multiple tries (child tries) and we need to distinguish the /// cached (`storage_key`, `value`) between them. For this we are using the `storage_root` to /// distinguish them (even if the storage root is the same for two child tries, it just means /// that both are exactly the same trie and there would happen no collision). - value_cache: RefCell, trie_db::CachedValue>>>, + value_cache: RefCell, trie_db::CachedValue>>>, } impl CacheProvider { diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs index c164cebd351f1..dc70aae10163c 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs @@ -22,11 +22,10 @@ use codec::Encode; -use alloc::{ - collections::{btree_map::BTreeMap, btree_set::BTreeSet}, - rc::Rc, -}; +use alloc::rc::Rc; + use core::cell::{RefCell, RefMut}; +use hashbrown::{HashMap, HashSet}; use sp_trie::{NodeCodec, ProofSizeProvider, StorageProof}; use trie_db::{Hasher, RecordedForKey, TrieAccess}; @@ -34,10 +33,10 @@ use trie_db::{Hasher, RecordedForKey, TrieAccess}; /// /// The internal size counting logic should align /// with ['sp_trie::recorder::Recorder']. 
-pub(crate) struct SizeOnlyRecorder<'a, H: Hasher> { - seen_nodes: RefMut<'a, BTreeSet>, +pub struct SizeOnlyRecorder<'a, H: Hasher> { + seen_nodes: RefMut<'a, HashSet>, encoded_size: RefMut<'a, usize>, - recorded_keys: RefMut<'a, BTreeMap, RecordedForKey>>, + recorded_keys: RefMut<'a, HashMap, RecordedForKey>>, } impl<'a, H: trie_db::Hasher> trie_db::TrieRecorder for SizeOnlyRecorder<'a, H> { @@ -90,10 +89,10 @@ impl<'a, H: trie_db::Hasher> trie_db::TrieRecorder for SizeOnlyRecorder< } #[derive(Clone)] -pub(crate) struct SizeOnlyRecorderProvider { - seen_nodes: Rc>>, +pub struct SizeOnlyRecorderProvider { + seen_nodes: Rc>>, encoded_size: Rc>, - recorded_keys: Rc, RecordedForKey>>>, + recorded_keys: Rc, RecordedForKey>>>, } impl Default for SizeOnlyRecorderProvider { diff --git a/cumulus/pallets/xcmp-queue/Cargo.toml b/cumulus/pallets/xcmp-queue/Cargo.toml index 47dd3b3b9ddbe..7a8b13c5db7cb 100644 --- a/cumulus/pallets/xcmp-queue/Cargo.toml +++ b/cumulus/pallets/xcmp-queue/Cargo.toml @@ -34,6 +34,8 @@ xcm-executor = { workspace = true } # Cumulus cumulus-primitives-core = { workspace = true } +# Optional import for weight accuracy testing +approx = { workspace = true } # Optional import for benchmarking bounded-collections = { workspace = true } frame-benchmarking = { optional = true, workspace = true } @@ -53,6 +55,7 @@ cumulus-pallet-parachain-system = { workspace = true, default-features = true } [features] default = ["std"] std = [ + "approx/std", "bounded-collections/std", "bp-xcm-bridge-hub-router?/std", "codec/std", diff --git a/cumulus/pallets/xcmp-queue/src/benchmarking.rs b/cumulus/pallets/xcmp-queue/src/benchmarking.rs index ba4b3ac7801f0..55916c5513b57 100644 --- a/cumulus/pallets/xcmp-queue/src/benchmarking.rs +++ b/cumulus/pallets/xcmp-queue/src/benchmarking.rs @@ -15,7 +15,7 @@ //! Benchmarking setup for cumulus-pallet-xcmp-queue -use crate::*; +use crate::{weights_ext::get_average_page_pos, *}; use alloc::vec; use codec::DecodeAll; @@ -107,6 +107,40 @@ mod benchmarks { } } + /// Add an XCMP message of 0 bytes to the message queue at the provided position + /// on an existing page. + #[benchmark] + fn enqueue_empty_xcmp_message_at( + n: Linear<0, { crate::MaxXcmpMessageLenOf::::get() - 10 }>, + ) { + #[cfg(test)] + { + mock::EnqueuedMessages::set(vec![]); + } + + assert_ok!(Pallet::::enqueue_xcmp_messages( + 0.into(), + &[BoundedVec::try_from(vec![0; n as usize]).unwrap()], + &mut WeightMeter::new() + )); + + #[cfg(not(test))] + let fp_before = T::XcmpQueue::footprint(0.into()); + #[block] + { + assert_ok!(Pallet::::enqueue_xcmp_messages( + 0.into(), + &[Default::default()], + &mut WeightMeter::new() + )); + } + #[cfg(not(test))] + { + let fp_after = T::XcmpQueue::footprint(0.into()); + assert_eq!(fp_after.ready_pages, fp_before.ready_pages); + } + } + /// Add `n` pages to the message queue. /// /// We add one page by enqueueing a maximal size message which fills it. 
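For intuition on the new benchmark above: the autogenerated weights further down in this diff model `enqueue_empty_xcmp_message_at(n)` as roughly linear in `n`, the byte position inside the first page at which the message lands. A toy reading of those generated constants (values are from the benchmark machine recorded in `weights.rs` below, not guarantees):

// Approximate ref_time in picoseconds, per the autogenerated weights below:
// a ~11µs base plus ~911ps per byte already occupying the page.
fn enqueue_empty_xcmp_message_at_ref_time(n: u64) -> u64 {
	11_015_940 + 911 * n
}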
@@ -157,6 +191,17 @@ mod benchmarks {
 		});
 	}
 
+	assert_ok!(Pallet::<T>::enqueue_xcmp_messages(
+		0.into(),
+		&[BoundedVec::try_from(vec![
+			0;
+			get_average_page_pos(MaxXcmpMessageLenOf::<T>::get())
+				as usize
+		])
+		.unwrap()],
+		&mut WeightMeter::new()
+	));
+
 	let mut msgs = vec![];
 	for _i in 0..1000 {
 		msgs.push(BoundedVec::try_from(vec![0; 3]).unwrap());
 	}
@@ -175,7 +220,7 @@
 	#[cfg(not(test))]
 	{
 		let fp_after = T::XcmpQueue::footprint(0.into());
-		assert_eq!(fp_after.ready_pages, fp_before.ready_pages + 1);
+		assert_eq!(fp_after.ready_pages, fp_before.ready_pages);
 	}
 }
diff --git a/cumulus/pallets/xcmp-queue/src/lib.rs b/cumulus/pallets/xcmp-queue/src/lib.rs
index f25df746aefc7..b703ae7fc944c 100644
--- a/cumulus/pallets/xcmp-queue/src/lib.rs
+++ b/cumulus/pallets/xcmp-queue/src/lib.rs
@@ -51,8 +51,6 @@ pub mod weights;
 pub mod weights_ext;
 
 pub use weights::WeightInfo;
-#[cfg(feature = "std")]
-pub use weights_ext::check_weight_info_ext_accuracy;
 pub use weights_ext::WeightInfoExt;
 
 extern crate alloc;
@@ -68,8 +66,8 @@ use cumulus_primitives_core::{
 use frame_support::{
 	defensive, defensive_assert,
 	traits::{
-		BatchFootprint, Defensive, EnqueueMessage, EnsureOrigin, Get, QueueFootprint,
-		QueueFootprintQuery, QueuePausedQuery,
+		Defensive, EnqueueMessage, EnsureOrigin, Get, QueueFootprint, QueueFootprintQuery,
+		QueuePausedQuery,
 	},
 	weights::{Weight, WeightMeter},
 	BoundedVec,
@@ -79,7 +77,7 @@ use polkadot_runtime_common::xcm_sender::PriceForMessageDelivery;
 use polkadot_runtime_parachains::{FeeTracker, GetMinFeeFactor};
 use scale_info::TypeInfo;
 use sp_core::MAX_POSSIBLE_ALLOCATION;
-use sp_runtime::{FixedU128, RuntimeDebug, WeakBoundedVec};
+use sp_runtime::{FixedU128, RuntimeDebug, SaturatedConversion, WeakBoundedVec};
 use xcm::{latest::prelude::*, VersionedLocation, VersionedXcm, WrapVersion, MAX_XCM_DECODE_DEPTH};
 use xcm_builder::InspectMessageQueues;
 use xcm_executor::traits::ConvertOrigin;
@@ -264,9 +262,13 @@ pub mod pallet {
 	#[pallet::hooks]
 	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
 		fn integrity_test() {
+			assert!(!T::MaxPageSize::get().is_zero(), "MaxPageSize too low");
+
 			let w = Self::on_idle_weight();
 			assert!(w != Weight::zero());
 			assert!(w.all_lte(T::BlockWeights::get().max_block));
+
+			<T::WeightInfo as WeightInfoExt>::check_accuracy::<MaxXcmpMessageLenOf<T>>(0.15);
 		}
 
 		fn on_idle(_block: BlockNumberFor<T>, limit: Weight) -> Weight {
@@ -643,13 +645,10 @@ impl<T: Config> Pallet<T> {
 			drop_threshold,
 		);
 
-		// `batches_footprints[n]` contains the footprint of the batch `xcms[0..n]`,
-		// so as `n` increases `batches_footprints[n]` contains the footprint of a bigger batch.
-		let best_batch_idx = batches_footprints.binary_search_by(|batch_info| {
+		let best_batch_footprint = batches_footprints.search_best_by(|batch_info| {
			let required_weight = T::WeightInfo::enqueue_xcmp_messages(
-				batch_info.new_pages_count,
-				batch_info.msgs_count,
-				batch_info.size_in_bytes,
+				batches_footprints.first_page_pos.saturated_into(),
+				batch_info,
			);
 
 			match meter.can_consume(required_weight) {
@@ -657,25 +656,10 @@ impl<T: Config> Pallet<T> {
 				false => core::cmp::Ordering::Greater,
 			}
 		});
-		let best_batch_idx = match best_batch_idx {
-			Ok(last_ok_idx) => {
-				// We should never reach this branch since we never return `Ordering::Equal`.
- defensive!("Unexpected best_batch_idx found: Ok({})", last_ok_idx); - Some(last_ok_idx) - }, - Err(first_err_idx) => first_err_idx.checked_sub(1), - }; - let best_batch_footprint = match best_batch_idx { - Some(best_batch_idx) => batches_footprints.get(best_batch_idx).ok_or_else(|| { - defensive!("Invalid best_batch_idx: {}", best_batch_idx); - })?, - None => &BatchFootprint { msgs_count: 0, size_in_bytes: 0, new_pages_count: 0 }, - }; meter.consume(T::WeightInfo::enqueue_xcmp_messages( - best_batch_footprint.new_pages_count, - best_batch_footprint.msgs_count, - best_batch_footprint.size_in_bytes, + batches_footprints.first_page_pos.saturated_into(), + best_batch_footprint, )); T::XcmpQueue::enqueue_messages( xcms.iter() diff --git a/cumulus/pallets/xcmp-queue/src/mock.rs b/cumulus/pallets/xcmp-queue/src/mock.rs index 7c8bdad50fa2c..f975c50918153 100644 --- a/cumulus/pallets/xcmp-queue/src/mock.rs +++ b/cumulus/pallets/xcmp-queue/src/mock.rs @@ -20,7 +20,7 @@ use cumulus_pallet_parachain_system::AnyRelayNumber; use cumulus_primitives_core::{ChannelInfo, IsSystem, ParaId}; use frame_support::{ derive_impl, parameter_types, - traits::{ConstU32, Everything, OriginTrait}, + traits::{BatchesFootprints, ConstU32, Everything, OriginTrait}, BoundedSlice, }; use frame_system::EnsureRoot; @@ -193,24 +193,16 @@ impl> QueueFootprintQuery for EnqueueToLocalSt origin: ParaId, msgs: impl Iterator>, total_pages_limit: u32, - ) -> Vec { + ) -> BatchesFootprints { // Let's consider that we add one message per page let footprint = Self::footprint(origin); - let mut batches_footprints = vec![]; - let mut new_pages_count = 0; - let mut total_size = 0; + let mut batches_footprints = BatchesFootprints::default(); for (idx, msg) in msgs.enumerate() { - new_pages_count += 1; - if footprint.pages + new_pages_count > total_pages_limit { + if footprint.pages + idx as u32 + 1 > total_pages_limit { break; } - total_size += msg.len(); - batches_footprints.push(BatchFootprint { - msgs_count: idx + 1, - size_in_bytes: total_size, - new_pages_count, - }) + batches_footprints.push(msg.into(), true); } batches_footprints } diff --git a/cumulus/pallets/xcmp-queue/src/tests.rs b/cumulus/pallets/xcmp-queue/src/tests.rs index a87bf6c1dbbe5..cefbe401df1d7 100644 --- a/cumulus/pallets/xcmp-queue/src/tests.rs +++ b/cumulus/pallets/xcmp-queue/src/tests.rs @@ -22,7 +22,8 @@ use XcmpMessageFormat::*; use codec::Input; use cumulus_primitives_core::{ParaId, XcmpMessageHandler}; use frame_support::{ - assert_err, assert_noop, assert_ok, assert_storage_noop, hypothetically, traits::Hooks, + assert_err, assert_noop, assert_ok, assert_storage_noop, hypothetically, + traits::{BatchFootprint, Hooks}, StorageNoopGuard, }; use mock::{new_test_ext, ParachainSystem, RuntimeOrigin as Origin, Test, XcmpQueue}; @@ -210,9 +211,12 @@ fn xcm_enqueueing_starts_dropping_on_out_of_weight() { total_size += xcm.len(); let required_weight = <::WeightInfo>::enqueue_xcmp_messages( - idx as u32 + 1, - idx + 1, - total_size, + 0, + &BatchFootprint { + msgs_count: idx + 1, + size_in_bytes: total_size, + new_pages_count: idx as u32 + 1, + }, ); let mut weight_meter = WeightMeter::with_limit(required_weight); diff --git a/cumulus/pallets/xcmp-queue/src/weights.rs b/cumulus/pallets/xcmp-queue/src/weights.rs index 6a199ede60866..5ae717a164bbb 100644 --- a/cumulus/pallets/xcmp-queue/src/weights.rs +++ b/cumulus/pallets/xcmp-queue/src/weights.rs @@ -16,9 +16,9 @@ //! Autogenerated weights for `cumulus_pallet_xcmp_queue` //! //! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2025-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2025-04-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `serban-ROG-Zephyrus`, CPU: `AMD Ryzen 9 7945HX with Radeon Graphics` +//! HOSTNAME: `Serbans-MacBook-Pro.local`, CPU: `` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: `1024` // Executed Command: @@ -50,6 +50,7 @@ pub trait WeightInfo { fn set_config_with_u32() -> Weight; fn enqueue_n_bytes_xcmp_message(n: u32, ) -> Weight; fn enqueue_n_empty_xcmp_messages(n: u32, ) -> Weight; + fn enqueue_empty_xcmp_message_at(n: u32, ) -> Weight; fn enqueue_n_full_pages(n: u32, ) -> Weight; fn enqueue_1000_small_xcmp_messages() -> Weight; fn suspend_channel() -> Weight; @@ -68,8 +69,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `109` // Estimated: `1497` - // Minimum execution time: 3_562_000 picoseconds. - Weight::from_parts(4_749_000, 1497) + // Minimum execution time: 2_000_000 picoseconds. + Weight::from_parts(3_000_000, 1497) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -88,10 +89,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `151` // Estimated: `5487` - // Minimum execution time: 10_685_000 picoseconds. - Weight::from_parts(8_747_868, 5487) - // Standard Error: 6 - .saturating_add(Weight::from_parts(696, 0).saturating_mul(n.into())) + // Minimum execution time: 8_000_000 picoseconds. + Weight::from_parts(9_115_841, 5487) + // Standard Error: 1 + .saturating_add(Weight::from_parts(155, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -110,10 +111,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `151` // Estimated: `5487` - // Minimum execution time: 9_498_000 picoseconds. - Weight::from_parts(14_113_868, 5487) - // Standard Error: 298 - .saturating_add(Weight::from_parts(109_652, 0).saturating_mul(n.into())) + // Minimum execution time: 7_000_000 picoseconds. + Weight::from_parts(8_987_577, 5487) + // Standard Error: 313 + .saturating_add(Weight::from_parts(94_980, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -121,6 +122,26 @@ impl WeightInfo for SubstrateWeight { /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 105457]`. + fn enqueue_empty_xcmp_message_at(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `334 + n * (1 ±0)` + // Estimated: `108986` + // Minimum execution time: 12_000_000 picoseconds. 
+ Weight::from_parts(11_015_940, 108986) + // Standard Error: 32 + .saturating_add(Weight::from_parts(911, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) @@ -132,10 +153,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `186` // Estimated: `5487` - // Minimum execution time: 10_685_000 picoseconds. - Weight::from_parts(10_686_000, 5487) - // Standard Error: 51_426 - .saturating_add(Weight::from_parts(64_394_224, 0).saturating_mul(n.into())) + // Minimum execution time: 7_000_000 picoseconds. + Weight::from_parts(7_000_000, 5487) + // Standard Error: 20_150 + .saturating_add(Weight::from_parts(20_690_483, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -144,20 +165,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn enqueue_1000_small_xcmp_messages() -> Weight { // Proof Size summary in bytes: - // Measured: `151` - // Estimated: `5487` - // Minimum execution time: 118_730_000 picoseconds. - Weight::from_parts(123_480_000, 5487) + // Measured: `53067` + // Estimated: `108986` + // Minimum execution time: 139_000_000 picoseconds. + Weight::from_parts(148_000_000, 108986) .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) @@ -165,8 +184,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `109` // Estimated: `2767` - // Minimum execution time: 2_374_000 picoseconds. - Weight::from_parts(3_562_000, 2767) + // Minimum execution time: 1_000_000 picoseconds. 
+ Weight::from_parts(2_000_000, 2767) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -176,8 +195,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `144` // Estimated: `2767` - // Minimum execution time: 3_561_000 picoseconds. - Weight::from_parts(4_749_000, 2767) + // Minimum execution time: 2_000_000 picoseconds. + Weight::from_parts(3_000_000, 2767) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -185,8 +204,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_749_000 picoseconds. - Weight::from_parts(5_937_000, 0) + // Minimum execution time: 6_000_000 picoseconds. + Weight::from_parts(7_000_000, 0) } /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) @@ -206,8 +225,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `105716` // Estimated: `109181` - // Minimum execution time: 131_791_000 picoseconds. - Weight::from_parts(134_166_000, 109181) + // Minimum execution time: 66_000_000 picoseconds. + Weight::from_parts(71_000_000, 109181) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -229,8 +248,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `65785` // Estimated: `69250` - // Minimum execution time: 80_737_000 picoseconds. - Weight::from_parts(83_111_000, 69250) + // Minimum execution time: 39_000_000 picoseconds. + Weight::from_parts(45_000_000, 69250) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -244,8 +263,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `109` // Estimated: `1497` - // Minimum execution time: 3_562_000 picoseconds. - Weight::from_parts(4_749_000, 1497) + // Minimum execution time: 2_000_000 picoseconds. + Weight::from_parts(3_000_000, 1497) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -264,10 +283,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `151` // Estimated: `5487` - // Minimum execution time: 10_685_000 picoseconds. - Weight::from_parts(8_747_868, 5487) - // Standard Error: 6 - .saturating_add(Weight::from_parts(696, 0).saturating_mul(n.into())) + // Minimum execution time: 8_000_000 picoseconds. + Weight::from_parts(9_115_841, 5487) + // Standard Error: 1 + .saturating_add(Weight::from_parts(155, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -286,10 +305,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `151` // Estimated: `5487` - // Minimum execution time: 9_498_000 picoseconds. - Weight::from_parts(14_113_868, 5487) - // Standard Error: 298 - .saturating_add(Weight::from_parts(109_652, 0).saturating_mul(n.into())) + // Minimum execution time: 7_000_000 picoseconds. 
+ Weight::from_parts(8_987_577, 5487) + // Standard Error: 313 + .saturating_add(Weight::from_parts(94_980, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -297,6 +316,26 @@ impl WeightInfo for () { /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 105457]`. + fn enqueue_empty_xcmp_message_at(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `334 + n * (1 ±0)` + // Estimated: `108986` + // Minimum execution time: 12_000_000 picoseconds. + Weight::from_parts(11_015_940, 108986) + // Standard Error: 32 + .saturating_add(Weight::from_parts(911, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) @@ -308,10 +347,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `186` // Estimated: `5487` - // Minimum execution time: 10_685_000 picoseconds. - Weight::from_parts(10_686_000, 5487) - // Standard Error: 51_426 - .saturating_add(Weight::from_parts(64_394_224, 0).saturating_mul(n.into())) + // Minimum execution time: 7_000_000 picoseconds. 
+ Weight::from_parts(7_000_000, 5487) + // Standard Error: 20_150 + .saturating_add(Weight::from_parts(20_690_483, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -320,20 +359,18 @@ impl WeightInfo for () { /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn enqueue_1000_small_xcmp_messages() -> Weight { // Proof Size summary in bytes: - // Measured: `151` - // Estimated: `5487` - // Minimum execution time: 118_730_000 picoseconds. - Weight::from_parts(123_480_000, 5487) + // Measured: `53067` + // Estimated: `108986` + // Minimum execution time: 139_000_000 picoseconds. + Weight::from_parts(148_000_000, 108986) .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) @@ -341,8 +378,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `109` // Estimated: `2767` - // Minimum execution time: 2_374_000 picoseconds. - Weight::from_parts(3_562_000, 2767) + // Minimum execution time: 1_000_000 picoseconds. + Weight::from_parts(2_000_000, 2767) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -352,8 +389,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `144` // Estimated: `2767` - // Minimum execution time: 3_561_000 picoseconds. - Weight::from_parts(4_749_000, 2767) + // Minimum execution time: 2_000_000 picoseconds. + Weight::from_parts(3_000_000, 2767) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -361,8 +398,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_749_000 picoseconds. - Weight::from_parts(5_937_000, 0) + // Minimum execution time: 6_000_000 picoseconds. + Weight::from_parts(7_000_000, 0) } /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) @@ -382,8 +419,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `105716` // Estimated: `109181` - // Minimum execution time: 131_791_000 picoseconds. 
- Weight::from_parts(134_166_000, 109181) + // Minimum execution time: 66_000_000 picoseconds. + Weight::from_parts(71_000_000, 109181) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -405,8 +442,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `65785` // Estimated: `69250` - // Minimum execution time: 80_737_000 picoseconds. - Weight::from_parts(83_111_000, 69250) + // Minimum execution time: 39_000_000 picoseconds. + Weight::from_parts(45_000_000, 69250) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } diff --git a/cumulus/pallets/xcmp-queue/src/weights_ext.rs b/cumulus/pallets/xcmp-queue/src/weights_ext.rs index 707b9ff3af252..15b9e3eaf1d64 100644 --- a/cumulus/pallets/xcmp-queue/src/weights_ext.rs +++ b/cumulus/pallets/xcmp-queue/src/weights_ext.rs @@ -17,31 +17,31 @@ use crate::weights::WeightInfo; -use frame_support::weights::Weight; +use frame_support::{traits::BatchFootprint, weights::Weight}; use sp_runtime::SaturatedConversion; +pub(crate) fn get_average_page_pos(max_message_len: u32) -> u32 { + max_message_len / 2 +} + /// Extended weight info. pub trait WeightInfoExt: WeightInfo { fn uncached_enqueue_xcmp_messages() -> Weight { Self::enqueue_n_full_pages(0) } - fn enqueue_xcmp_messages( - new_pages_count: u32, - message_count: usize, - size_in_bytes: usize, - ) -> Weight { - let message_count = message_count.saturated_into(); - let size_in_bytes = size_in_bytes.saturated_into(); + fn enqueue_xcmp_messages(first_page_pos: u32, batch_footprint: &BatchFootprint) -> Weight { + let message_count = batch_footprint.msgs_count.saturated_into(); + let size_in_bytes = batch_footprint.size_in_bytes.saturated_into(); // The cost of adding `n` empty pages on the message queue. let pages_overhead = { let full_message_overhead = Self::enqueue_n_full_pages(1) .saturating_sub(Self::enqueue_n_empty_xcmp_messages(1)); let n_full_messages_overhead = - full_message_overhead.saturating_mul(new_pages_count as u64); + full_message_overhead.saturating_mul(batch_footprint.new_pages_count as u64); - Self::enqueue_n_full_pages(new_pages_count) + Self::enqueue_n_full_pages(batch_footprint.new_pages_count) .saturating_sub(Self::enqueue_n_full_pages(0)) .saturating_sub(n_full_messages_overhead) }; @@ -58,36 +58,36 @@ pub trait WeightInfoExt: WeightInfo { .saturating_sub(Self::enqueue_n_bytes_xcmp_message(0)) }; - pages_overhead.saturating_add(messages_overhead).saturating_add(bytes_overhead) - } -} - -impl WeightInfoExt for T {} - -#[cfg(feature = "std")] -pub fn check_weight_info_ext_accuracy(err_margin: u8) { - assert!(err_margin < 100); - let err_margin = err_margin as u64; - - let estimated_weight = - T::uncached_enqueue_xcmp_messages().saturating_add(T::enqueue_xcmp_messages(1, 1000, 3000)); - let actual_weight = T::enqueue_1000_small_xcmp_messages(); - - // Check that the ref_time diff is less than {err_margin}% - let diff_ref_time = estimated_weight.ref_time().abs_diff(actual_weight.ref_time()); - assert!(diff_ref_time < estimated_weight.ref_time() * err_margin / 100); - assert!(diff_ref_time < actual_weight.ref_time() * err_margin / 100); - - // The proof sizes should be the same - assert_eq!(estimated_weight.proof_size(), actual_weight.proof_size()); -} + // If the messages are not added to the beginning of the first page, the page will be + // decoded and re-encoded once. Let's account for this. 
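+		// (Illustrative, not part of the original comment: per the autogenerated
+		// weights above, this re-encoding overhead comes to roughly 911 ps per byte
+		// already sitting on the page, measured relative to inserting at position 0.)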
+ let pos_overhead = {
+ Self::enqueue_empty_xcmp_message_at(first_page_pos)
+ .saturating_sub(Self::enqueue_empty_xcmp_message_at(0))
+ };
-#[cfg(test)]
-mod tests {
- use super::*;
+ pages_overhead
+ .saturating_add(messages_overhead)
+ .saturating_add(bytes_overhead)
+ .saturating_add(pos_overhead)
+ }
- #[test]
- fn weight_info_ext_accuracy_is_high() {
- check_weight_info_ext_accuracy::<()>(5);
+ fn check_accuracy<MaxMessageLen: Get<u32>>(err_margin: f64) {
+ assert!(err_margin < 1f64);
+
+ let estimated_weight =
+ Self::uncached_enqueue_xcmp_messages().saturating_add(Self::enqueue_xcmp_messages(
+ get_average_page_pos(MaxMessageLen::get()),
+ &BatchFootprint { msgs_count: 1000, size_in_bytes: 3000, new_pages_count: 0 },
+ ));
+ let actual_weight = Self::enqueue_1000_small_xcmp_messages();
+
+ // Check that the ref_time diff is less than err_margin
+ approx::assert_relative_eq!(
+ estimated_weight.ref_time() as f64,
+ actual_weight.ref_time() as f64,
+ max_relative = err_margin
+ );
 }
 }
+
+impl<T: WeightInfo> WeightInfoExt for T {}
diff --git a/cumulus/parachains/integration-tests/emulated/common/Cargo.toml b/cumulus/parachains/integration-tests/emulated/common/Cargo.toml
index f0d49b400bea2..43cfe6c66f393 100644
--- a/cumulus/parachains/integration-tests/emulated/common/Cargo.toml
+++ b/cumulus/parachains/integration-tests/emulated/common/Cargo.toml
@@ -30,11 +30,12 @@ sp-keyring = { workspace = true, default-features = true }
 sp-runtime = { workspace = true, default-features = true }
 # Polkadot
-pallet-xcm = { workspace = true, default-features = true }
+pallet-xcm = { features = ["test-utils"], workspace = true, default-features = true }
 polkadot-parachain-primitives = { workspace = true, default-features = true }
 polkadot-primitives = { workspace = true, default-features = true }
 polkadot-runtime-parachains = { workspace = true, default-features = true }
 xcm = { workspace = true, default-features = true }
+xcm-simulator = { workspace = true, default-features = true }
 # Cumulus
 asset-test-utils = { workspace = true, default-features = true }
diff --git a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs
index 83c85d4e03393..e6907049e82b1 100644
--- a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs
+++ b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs
@@ -18,6 +18,7 @@
 pub mod macros;
 pub mod xcm_helpers;
 pub use xcm_emulator;
+pub use xcm_simulator;
 // Substrate
 use frame_support::parameter_types;
diff --git a/cumulus/parachains/integration-tests/emulated/common/src/xcm_helpers.rs b/cumulus/parachains/integration-tests/emulated/common/src/xcm_helpers.rs
index a7bcb4ad90438..309a62ec1e12f 100644
--- a/cumulus/parachains/integration-tests/emulated/common/src/xcm_helpers.rs
+++ b/cumulus/parachains/integration-tests/emulated/common/src/xcm_helpers.rs
@@ -17,7 +17,9 @@
 use parachains_common::AccountId;
 // Polkadot
+use sp_core::H256;
 use xcm::{prelude::*, DoubleEncoded};
+use xcm_emulator::Chain;
 /// Helper method to build a XCM with a `Transact` instruction and paying for its execution
 pub fn xcm_transact_paid_execution(
@@ -84,3 +86,29 @@ pub fn get_amount_from_versioned_assets(assets: VersionedAssets) -> u128 {
 };
 amount
 }
+
+/// Helper method to find the ID of the first `Event::Processed` event in the chain's events.
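+/// Scans `C::events()` and returns `None` if no `MessageQueue::Processed` event was emitted.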
+pub fn find_mq_processed_id<C: Chain>() -> Option<H256>
+where
+ <C as Chain>::Runtime: pallet_message_queue::Config,
+ C::RuntimeEvent: TryInto<pallet_message_queue::Event<<C as Chain>::Runtime>>,
+{
+ C::events().into_iter().find_map(|event| {
+ if let Ok(pallet_message_queue::Event::Processed { id, .. }) = event.try_into() {
+ Some(id)
+ } else {
+ None
+ }
+ })
+}
+
+/// Helper method to find the message ID of the first `Event::Sent` event in the chain's events.
+pub fn find_xcm_sent_message_id<
+ C: Chain<RuntimeEvent = <<C as Chain>::Runtime as pallet_xcm::Config>::RuntimeEvent>,
+>() -> Option<XcmHash>
+where
+ C::Runtime: pallet_xcm::Config,
+ C::RuntimeEvent: TryInto<pallet_xcm::Event<C::Runtime>>,
+{
+ pallet_xcm::xcm_helpers::find_xcm_sent_message_id::<<C as Chain>::Runtime>(C::events())
+}
diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/hybrid_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/hybrid_transfers.rs
index 76f7229031dd5..101090a939517 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/hybrid_transfers.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/hybrid_transfers.rs
@@ -13,6 +13,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
+use emulated_integration_tests_common::xcm_helpers::{
+ find_mq_processed_id, find_xcm_sent_message_id,
+};
 use westend_system_emulated_network::westend_emulated_chain::westend_runtime::Dmp;
 use super::reserve_transfer::*;
@@ -21,7 +24,7 @@ use crate::{
 tests::teleport::do_bidirectional_teleport_foreign_assets_between_para_and_asset_hub_using_xt,
 };
-fn para_to_para_assethub_hop_assertions(t: ParaToParaThroughAHTest) {
+fn para_to_para_assethub_hop_assertions(mut t: ParaToParaThroughAHTest) {
 type RuntimeEvent = <AssetHubWestend as Chain>::RuntimeEvent;
 let sov_penpal_a_on_ah = AssetHubWestend::sovereign_account_id_of(
 AssetHubWestend::sibling_location_of(PenpalA::para_id()),
@@ -51,6 +54,11 @@ fn para_to_para_assethub_hop_assertions(t: ParaToParaThroughAHTest) {
 ) => {},
 ]
 );
+
+ let mq_prc_id = find_mq_processed_id::<AssetHubWestend>().expect("Missing Processed Event");
+ t.insert_unique_topic_id("AssetHubWestend", mq_prc_id);
+ let msg_sent_id = find_xcm_sent_message_id::<AssetHubWestend>().expect("Missing Sent Event");
+ t.insert_unique_topic_id("AssetHubWestend", msg_sent_id.into());
 }
 fn ah_to_para_transfer_assets(t: SystemParaToParaTest) -> DispatchResult {
@@ -99,7 +107,7 @@ fn para_to_para_transfer_assets_through_ah(t: ParaToParaThroughAHTest) -> Dispat
 assets: Wild(AllCounted(t.args.assets.len() as u32)),
 beneficiary: t.args.beneficiary,
 }]);
- <PenpalA as PenpalAPallet>::PolkadotXcm::transfer_assets_using_type_and_then(
+ let result = <PenpalA as PenpalAPallet>::PolkadotXcm::transfer_assets_using_type_and_then(
 t.signed_origin,
 bx!(t.args.dest.into()),
 bx!(t.args.assets.into()),
@@ -108,7 +116,15 @@ fn para_to_para_transfer_assets_through_ah(t: ParaToParaThroughAHTest) -> Dispat
 bx!(TransferType::RemoteReserve(asset_hub_location.into())),
 bx!(VersionedXcm::from(custom_xcm_on_dest)),
 t.args.weight_limit,
- )
+ );
+
+ let msg_sent_id = find_xcm_sent_message_id::<PenpalA>().expect("Missing Sent Event");
+ t.topic_id_tracker
+ .lock()
+ .unwrap()
+ .insert_and_assert_unique("PenpalA", msg_sent_id.into());
+
+ result
 }
 fn para_to_asset_hub_teleport_foreign_assets(t: ParaToSystemParaTest) -> DispatchResult {
@@ -588,6 +604,9 @@ fn transfer_foreign_assets_from_para_to_para_through_asset_hub() {
 test.set_dispatchable::<PenpalA>(para_to_para_transfer_assets_through_ah);
 test.assert();
+ // assert unique topic across all chains
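+ // (the same topic ID must have been recorded on PenpalA, AssetHubWestend and PenpalB)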
+ test.assert_unique_topic_id();
+
 // Query final balances
 let sender_wnds_after = PenpalA::execute_with(|| {
 type ForeignAssets = <PenpalA as PenpalAPallet>::ForeignAssets;
diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs
index d95cd24e97215..666ca83bf47a3 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs
@@ -14,6 +14,9 @@
 // limitations under the License.
 use crate::{create_pool_with_wnd_on, foreign_balance_on, imports::*};
+use emulated_integration_tests_common::xcm_helpers::{
+ find_mq_processed_id, find_xcm_sent_message_id,
+};
 use sp_core::{crypto::get_public_from_string_or_panic, sr25519};
 use westend_system_emulated_network::westend_emulated_chain::westend_runtime::Dmp;
@@ -283,7 +286,7 @@ pub fn para_to_system_para_receiver_assertions(t: ParaToSystemParaTest) {
 fn system_para_to_para_assets_sender_assertions(t: SystemParaToParaTest) {
 type RuntimeEvent = <AssetHubWestend as Chain>::RuntimeEvent;
 AssetHubWestend::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts(
- 864_610_000,
+ 487_426_000,
 8799,
 )));
 assert_expected_events!(
@@ -413,10 +416,13 @@ fn relay_to_para_assets_receiver_assertions(t: RelayToParaTest) {
 );
 }
-pub fn para_to_para_through_hop_sender_assertions<Hop: Clone>(t: Test<PenpalA, PenpalB, Hop>) {
+pub fn para_to_para_through_hop_sender_assertions<Hop: Clone>(mut t: Test<PenpalA, PenpalB, Hop>) {
 type RuntimeEvent = <PenpalA as Chain>::RuntimeEvent;
 PenpalA::assert_xcm_pallet_attempted_complete(None);
+ let msg_sent_id = find_xcm_sent_message_id::<PenpalA>().expect("Missing Sent Event");
+ t.insert_unique_topic_id("PenpalA", msg_sent_id.into());
+
 for asset in t.args.assets.into_inner() {
 let expected_id = asset.id.0.clone().try_into().unwrap();
 let amount = if let Fungible(a) = asset.fun { Some(a) } else { None }.unwrap();
@@ -491,10 +497,16 @@ fn para_to_para_asset_hub_hop_assertions(t: ParaToParaThroughAHTest) {
 );
 }
-pub fn para_to_para_through_hop_receiver_assertions<Hop: Clone>(t: Test<PenpalA, PenpalB, Hop>) {
+pub fn para_to_para_through_hop_receiver_assertions<Hop: Clone>(
+ mut t: Test<PenpalA, PenpalB, Hop>,
+) {
 type RuntimeEvent = <PenpalB as Chain>::RuntimeEvent;
 PenpalB::assert_xcmp_queue_success(None);
+
+ let mq_prc_id = find_mq_processed_id::<PenpalB>().expect("Missing Processed Event");
+ t.insert_unique_topic_id("PenpalB", mq_prc_id);
+
 for asset in t.args.assets.into_inner().into_iter() {
 let expected_id = asset.id.0.try_into().unwrap();
 assert_expected_events!(
diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs
index 0dd3177dff4c5..70d1758ec8f87 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs
@@ -24,7 +24,7 @@ fn send_transact_as_superuser_from_relay_to_asset_hub_works() {
 ASSET_MIN_BALANCE,
 true,
 AssetHubWestendSender::get().into(),
- Some(Weight::from_parts(144_759_000, 3675)),
+ Some(Weight::from_parts(78_628_000, 3675)),
 )
 }
@@ -121,7 +121,7 @@ fn send_xcm_from_para_to_asset_hub_paying_fee_with_sufficient_asset() {
 ASSET_MIN_BALANCE,
 true,
 para_sovereign_account.clone(),
- Some(Weight::from_parts(144_759_000, 3675)),
+ Some(Weight::from_parts(78_628_000, 3675)),
 ASSET_MIN_BALANCE * 1000000000,
 );
diff --git
a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/set_xcm_versions.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/set_xcm_versions.rs
index 4405ed2988a97..c21489022c942 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/set_xcm_versions.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/set_xcm_versions.rs
@@ -67,7 +67,7 @@ fn system_para_sets_relay_xcm_supported_version() {
 AssetHubWestend::execute_with(|| {
 type RuntimeEvent = <AssetHubWestend as Chain>::RuntimeEvent;
- AssetHubWestend::assert_dmp_queue_complete(Some(Weight::from_parts(115_688_000, 0)));
+ AssetHubWestend::assert_dmp_queue_complete(Some(Weight::from_parts(47_887_000, 0)));
 assert_expected_events!(
 AssetHubWestend,
diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs
index 06bfe75983a34..28718cce1997a 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs
@@ -27,7 +27,7 @@ fn para_origin_assertions(t: SystemParaToRelayTest) {
 type RuntimeEvent = <AssetHubWestend as Chain>::RuntimeEvent;
 AssetHubWestend::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts(
- 730_053_000,
+ 302_568_000,
 4_000,
 )));
diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs
index 4001e124745d9..fe4ed3d662107 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs
@@ -14,7 +14,11 @@
 // limitations under the License.
 use crate::tests::{snowbridge_common::snowbridge_sovereign, *};
-use emulated_integration_tests_common::macros::Dmp;
+use emulated_integration_tests_common::{
+ macros::Dmp,
+ xcm_helpers::{find_mq_processed_id, find_xcm_sent_message_id},
+ xcm_simulator::helpers::TopicIdTracker,
+};
 use xcm::latest::AssetTransferFilter;
 fn send_assets_over_bridge<F: FnOnce()>(send_fn: F) {
@@ -1028,6 +1032,7 @@ fn send_back_rocs_from_penpal_westend_through_asset_hub_westend_to_asset_hub_roc
 let amount = ASSET_HUB_WESTEND_ED * 10_000_000;
 let sender = PenpalBSender::get();
 let receiver = RococoReceiver::get();
+ let mut topic_id_tracker = TopicIdTracker::new();
 // set up ROCs for transfer
 let penpal_location = AssetHubWestend::sibling_location_of(PenpalB::para_id());
@@ -1132,11 +1137,17 @@ fn send_back_rocs_from_penpal_westend_through_asset_hub_westend_to_asset_hub_roc
 // send message over bridge
 assert_ok!(PenpalB::execute_with(|| {
 let signed_origin = <PenpalB as Chain>::RuntimeOrigin::signed(sender.clone());
- <PenpalB as PenpalBPallet>::PolkadotXcm::execute(
+ let result = <PenpalB as PenpalBPallet>::PolkadotXcm::execute(
 signed_origin,
 bx!(xcm::VersionedXcm::V5(xcm.into())),
 Weight::MAX,
- )
+ );
+
+ let msg_sent_id =
+ find_xcm_sent_message_id::<PenpalB>().expect("Missing Sent Event");
+ topic_id_tracker.insert("PenpalB", msg_sent_id.into());
+
+ result
 }));
 AssetHubWestend::execute_with(|| {
 type RuntimeEvent = <AssetHubWestend as Chain>::RuntimeEvent;
@@ -1159,6 +1170,9 @@ fn send_back_rocs_from_penpal_westend_through_asset_hub_westend_to_asset_hub_roc
 ) => {},
 ]
 );
+ let mq_prc_id =
+ find_mq_processed_id::<AssetHubWestend>().expect("Missing Processed Event");
+ topic_id_tracker.insert("AssetHubWestend", mq_prc_id);
 });
 });
 }
@@ -1185,7 +1199,10 @@
 ) => {},
 ]
 );
+ let mq_prc_id = find_mq_processed_id::<AssetHubRococo>().expect("Missing Processed Event");
+ topic_id_tracker.insert("AssetHubRococo", mq_prc_id);
 });
+ topic_id_tracker.assert_unique();
 let sender_rocs_after = PenpalB::execute_with(|| {
 type ForeignAssets = <PenpalB as PenpalBPallet>::ForeignAssets;
@@ -1210,6 +1227,7 @@ fn dry_run_transfer_to_rococo_sends_xcm_to_bridge_hub() {
 }
 fn do_send_pens_and_wnds_from_penpal_westend_via_ahw_to_asset_hub_rococo(
+ topic_id_tracker: &mut TopicIdTracker,
 wnds: (Location, u128),
 pens: (Location, u128),
 ) {
@@ -1301,14 +1319,25 @@ fn do_send_pens_and_wnds_from_penpal_westend_via_ahw_to_asset_hub_rococo(
 },
 ]);
- <PenpalB as PenpalBPallet>::PolkadotXcm::execute(
+ let result = <PenpalB as PenpalBPallet>::PolkadotXcm::execute(
 signed_origin,
 bx!(xcm::VersionedXcm::V5(xcm.into())),
 Weight::MAX,
- )
+ );
+
+ let msg_sent_id = find_xcm_sent_message_id::<PenpalB>().expect("Missing Sent Event");
+ topic_id_tracker.insert_and_assert_unique("PenpalB", msg_sent_id.into());
+
+ result
 }));
 AssetHubWestend::execute_with(|| {
 type RuntimeEvent = <AssetHubWestend as Chain>::RuntimeEvent;
+ let mq_prc_id =
+ find_mq_processed_id::<AssetHubWestend>().expect("Missing Processed Event");
+ topic_id_tracker.insert_and_assert_unique("AssetHubWestend", mq_prc_id);
+ let msg_sent_id =
+ find_xcm_sent_message_id::<AssetHubWestend>().expect("Missing Sent Event");
+ topic_id_tracker.insert_and_assert_unique("AssetHubWestend", msg_sent_id.into());
 assert_expected_events!(
 AssetHubWestend,
 vec![
@@ -1329,6 +1358,12 @@ fn do_send_pens_and_wnds_from_penpal_westend_via_ahw_to_asset_hub_rococo(
 ]
 );
 });
+
+ BridgeHubWestend::ext_wrapper(|| {
+ let mq_prc_id =
+ find_mq_processed_id::<BridgeHubWestend>().expect("Missing Processed Event");
+ topic_id_tracker.insert_and_assert_unique("BridgeHubWestend", mq_prc_id);
+ });
 });
 }
@@ -1433,8 +1468,12 @@ fn send_pens_and_wnds_from_penpal_westend_via_ahw_to_ahr() {
 )
 });
+ // init topic ID tracker
+ let mut topic_id_tracker = TopicIdTracker::new();
+
 // transfer assets
 do_send_pens_and_wnds_from_penpal_westend_via_ahw_to_asset_hub_rococo(
+ &mut topic_id_tracker,
 (wnd_at_westend_parachains.clone(), wnds_to_send),
 (pens_location_on_penpal.try_into().unwrap(), pens_to_send),
 );
@@ -1442,6 +1481,8 @@
 let wnd = Location::new(2, [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))]);
 AssetHubRococo::execute_with(|| {
 type RuntimeEvent = <AssetHubRococo as Chain>::RuntimeEvent;
+ let mq_prc_id = find_mq_processed_id::<AssetHubRococo>().expect("Missing Processed Event");
+ topic_id_tracker.insert_and_assert_unique("AssetHubRococo", mq_prc_id);
 assert_expected_events!(
 AssetHubRococo,
 vec![
@@ -1458,6 +1499,9 @@
 );
 });
+ // assert unique topic across all chains
+ topic_id_tracker.assert_unique();
+
 // account balances after
 let sender_wnds_after = PenpalB::execute_with(|| {
 type ForeignAssets = <PenpalB as PenpalBPallet>::ForeignAssets;
diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs
index 1383a5513e183..8545984038182 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs
@@ -185,7 +185,7 @@ pub(crate) fn assert_bridge_hub_westend_message_accepted(expected_processed: boo
 ]
 );
 }
- });
+ })
 }
 pub(crate) fn assert_bridge_hub_rococo_message_received() {
diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs
index e655f06a0f01c..512ab4e1c6d76 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs
@@ -13,7 +13,11 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
+use emulated_integration_tests_common::xcm_helpers::{
+ find_mq_processed_id, find_xcm_sent_message_id,
+};
 use rococo_westend_system_emulated_network::westend_emulated_chain::westend_runtime::Dmp;
+use std::collections::HashMap;
 use crate::tests::*;
@@ -144,3 +148,50 @@ fn send_xcm_through_opened_lane_with_different_xcm_version_on_hops_works() {
 );
 });
 }
+
+#[test]
+fn xcm_persists_set_topic_across_hops() {
+ for test_topic_id in [Some([42; 32]), None] {
+ // Reset tracked topic state before each run
+ let mut tracked_topic_ids = HashMap::new();
+
+ // Prepare test input
+ let sudo_origin = <Westend as Chain>::RuntimeOrigin::root();
+ let destination = Westend::child_location_of(BridgeHubWestend::para_id()).into();
+ let weight_limit = Unlimited;
+ let check_origin = None;
+
+ // Construct XCM with optional SetTopic
+ let mut message = vec![UnpaidExecution { weight_limit, check_origin }, ClearOrigin];
+ if let Some(topic_id) = test_topic_id {
+ message.push(SetTopic(topic_id));
+ }
+ let xcm = VersionedXcm::from(Xcm(message));
+
+ // Send XCM from Westend to BridgeHubWestend
+ Westend::execute_with(|| {
+ Dmp::make_parachain_reachable(BridgeHubWestend::para_id());
+ assert_ok!(<Westend as WestendPallet>::XcmPallet::send(
+ sudo_origin.clone(),
+ bx!(destination),
+ bx!(xcm),
+ ));
+
+ let msg_sent_id = find_xcm_sent_message_id::<Westend>().expect("Missing Sent Event");
+ tracked_topic_ids.insert("Westend", msg_sent_id.into());
+ });
+
+ BridgeHubWestend::execute_with(|| {
+ let mq_prc_id =
+ find_mq_processed_id::<BridgeHubWestend>().expect("Missing Processed Event");
+ tracked_topic_ids.insert("BridgeHubWestend", mq_prc_id);
+ });
+
+ // Assert exactly one consistent topic ID across all hops
+ let topic_id = tracked_topic_ids.get("Westend");
+ assert_eq!(tracked_topic_ids.get("BridgeHubWestend"), topic_id);
+ if let Some(expected) = test_topic_id {
+ assert_eq!(topic_id, Some(&expected.into()));
+ }
+ }
+}
diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs
index b6ba1257a64a6..fdad733aa5fcb 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs
@@ -25,7 +25,8 @@ use crate::{
 create_foreign_on_ah_rococo,
 penpal_emulated_chain::penpal_runtime,
 snowbridge_common::{
- bridged_roc_at_ah_westend, ethereum, register_roc_on_bh, snowbridge_sovereign,
+ bridge_hub, bridged_roc_at_ah_westend, ethereum, register_roc_on_bh,
+ snowbridge_sovereign,
 },
 snowbridge_v2_outbound_from_rococo::create_foreign_on_ah_westend,
 },
@@ -2224,3 +2225,108 @@ fn transfer_roc_from_ah_with_transfer_and_then() {
 );
 });
 }
+
+#[test]
+fn register_pna_in_v5_while_transfer_in_v4_should_work() {
+ let assethub_sovereign = BridgeHubWestend::sovereign_account_id_of(
+ BridgeHubWestend::sibling_location_of(AssetHubWestend::para_id()),
+ );
+ BridgeHubWestend::fund_accounts(vec![(assethub_sovereign.clone(), INITIAL_FUND)]);
+
+ let asset_id: Location = Location { parents: 1, interior: [].into() };
+ let expected_asset_id: Location = Location {
+ parents: 1,
+ interior: [GlobalConsensus(ByGenesis(WESTEND_GENESIS_HASH))].into(),
+ };
+
+ let _expected_token_id = TokenIdOf::convert_location(&expected_asset_id).unwrap();
+
+ let ethereum_sovereign: AccountId = snowbridge_sovereign();
+
+ // Register token in V5
+ BridgeHubWestend::execute_with(|| {
+ type RuntimeOrigin = <BridgeHubWestend as Chain>::RuntimeOrigin;
+ type RuntimeEvent = <BridgeHubWestend as Chain>::RuntimeEvent;
+
+ assert_ok!(<BridgeHubWestend as BridgeHubWestendPallet>::Balances::force_set_balance(
+ RuntimeOrigin::root(),
+ sp_runtime::MultiAddress::Id(BridgeHubWestendSender::get()),
+ INITIAL_FUND * 10,
+ ));
+
+ assert_ok!(<BridgeHubWestend as BridgeHubWestendPallet>::EthereumSystem::register_token(
+ RuntimeOrigin::root(),
+ Box::new(VersionedLocation::from(asset_id.clone())),
+ AssetMetadata {
+ name: "wnd".as_bytes().to_vec().try_into().unwrap(),
+ symbol: "wnd".as_bytes().to_vec().try_into().unwrap(),
+ decimals: 12,
+ },
+ ));
+ // Check that a message was sent to Ethereum to create the agent
+ assert_expected_events!(
+ BridgeHubWestend,
+ vec![RuntimeEvent::EthereumSystem(snowbridge_pallet_system::Event::RegisterToken { .. }) => {},]
+ );
+ });
+
+ AssetHubWestend::force_xcm_version(bridge_hub(), 4);
+ AssetHubWestend::force_xcm_version(ethereum(), 4);
+ AssetHubWestend::force_default_xcm_version(Some(4));
+ BridgeHubWestend::force_default_xcm_version(Some(4));
+
+ // Send the token to Ethereum in V4 format
+ AssetHubWestend::execute_with(|| {
+ // LTS is V4
+ use xcm::lts::{Junction::*, NetworkId::*, *};
+ type RuntimeOrigin = <AssetHubWestend as Chain>::RuntimeOrigin;
+ type RuntimeEvent = <AssetHubWestend as Chain>::RuntimeEvent;
+
+ let assets = vec![Asset {
+ id: AssetId(Location::parent()),
+ fun: Fungibility::try_from(Fungible(TOKEN_AMOUNT)).unwrap(),
+ }];
+ let versioned_assets = VersionedAssets::V4(Assets::from(assets));
+
+ let destination = VersionedLocation::V4(Location::new(
+ 2,
+ [GlobalConsensus(Ethereum { chain_id: SEPOLIA_ID })],
+ ));
+
+ let beneficiary = VersionedLocation::V4(Location::new(
+ 0,
+ [AccountKey20 { network: None, key: ETHEREUM_DESTINATION_ADDRESS.into() }],
+ ));
+
+ assert_ok!(<AssetHubWestend as AssetHubWestendPallet>::PolkadotXcm::limited_reserve_transfer_assets(
+ RuntimeOrigin::signed(AssetHubWestendSender::get()),
+ Box::new(destination),
+ Box::new(beneficiary),
+ Box::new(versioned_assets),
+ 0,
+ Unlimited,
+ ));
+
+ let events = AssetHubWestend::events();
+ // Check that the native asset was transferred to the reserve account (the sovereign account of Ethereum)
+ assert!(
+ events.iter().any(|event| matches!(
+ event,
+ RuntimeEvent::Balances(pallet_balances::Event::Transfer { amount, to, ..})
+ if *amount == TOKEN_AMOUNT && *to == ethereum_sovereign.clone(),
+ )),
+ "native token reserved to Ethereum sovereign account."
+ );
+ });
+
+ // Check that the message transferring the token back to Ethereum was queued in the Ethereum
+ // Outbound Queue
+ BridgeHubWestend::execute_with(|| {
+ type RuntimeEvent = <BridgeHubWestend as Chain>::RuntimeEvent;
+
+ assert_expected_events!(
+ BridgeHubWestend,
+ vec![RuntimeEvent::EthereumOutboundQueue(snowbridge_pallet_outbound_queue::Event::MessageQueued{ .. }) => {},]
+ );
+ });
+}
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/cumulus_pallet_xcmp_queue.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/cumulus_pallet_xcmp_queue.rs
index 7f46872794d27..ddc76fe451ad6 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/cumulus_pallet_xcmp_queue.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/cumulus_pallet_xcmp_queue.rs
@@ -16,9 +16,9 @@
 //! Autogenerated weights for `cumulus_pallet_xcmp_queue`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2025-04-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2025-05-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `341e66d5356e`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//!
HOSTNAME: `16d5a52ef0dc`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 // Executed Command: @@ -56,8 +56,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `109` // Estimated: `1497` - // Minimum execution time: 5_097_000 picoseconds. - Weight::from_parts(5_350_000, 0) + // Minimum execution time: 4_908_000 picoseconds. + Weight::from_parts(5_133_000, 0) .saturating_add(Weight::from_parts(0, 1497)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -77,11 +77,11 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `151` // Estimated: `5487` - // Minimum execution time: 13_341_000 picoseconds. - Weight::from_parts(9_919_192, 0) + // Minimum execution time: 13_653_000 picoseconds. + Weight::from_parts(9_457_298, 0) .saturating_add(Weight::from_parts(0, 5487)) // Standard Error: 6 - .saturating_add(Weight::from_parts(957, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_016, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -100,11 +100,11 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `151` // Estimated: `5487` - // Minimum execution time: 11_576_000 picoseconds. - Weight::from_parts(15_601_069, 0) + // Minimum execution time: 11_593_000 picoseconds. + Weight::from_parts(15_263_900, 0) .saturating_add(Weight::from_parts(0, 5487)) - // Standard Error: 140 - .saturating_add(Weight::from_parts(113_132, 0).saturating_mul(n.into())) + // Standard Error: 239 + .saturating_add(Weight::from_parts(136_065, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -112,6 +112,27 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 105457]`. + fn enqueue_empty_xcmp_message_at(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `334 + n * (1 ±0)` + // Estimated: `108986` + // Minimum execution time: 20_217_000 picoseconds. 
+ Weight::from_parts(20_647_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) + // Standard Error: 12 + .saturating_add(Weight::from_parts(2_576, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) @@ -123,11 +144,11 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `186` // Estimated: `5487` - // Minimum execution time: 13_047_000 picoseconds. - Weight::from_parts(13_317_000, 0) + // Minimum execution time: 13_262_000 picoseconds. + Weight::from_parts(13_670_000, 0) .saturating_add(Weight::from_parts(0, 5487)) - // Standard Error: 88_289 - .saturating_add(Weight::from_parts(92_149_747, 0).saturating_mul(n.into())) + // Standard Error: 81_154 + .saturating_add(Weight::from_parts(105_979_285, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -136,21 +157,19 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn enqueue_1000_small_xcmp_messages() -> Weight { // Proof Size summary in bytes: - // Measured: `151` - // Estimated: `5487` - // Minimum execution time: 129_728_000 picoseconds. - Weight::from_parts(130_337_000, 0) - .saturating_add(Weight::from_parts(0, 5487)) + // Measured: `53067` + // Estimated: `108986` + // Minimum execution time: 289_304_000 picoseconds. 
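+ // NOTE: higher than in the previous run because the benchmark now appends to an existing
+ // page (`MessageQueue::Pages` is read and written once, see the storage annotations above)
+ // instead of always starting a fresh page.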
+ Weight::from_parts(299_215_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) @@ -158,8 +177,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `109` // Estimated: `2767` - // Minimum execution time: 3_209_000 picoseconds. - Weight::from_parts(3_333_000, 0) + // Minimum execution time: 3_121_000 picoseconds. + Weight::from_parts(3_254_000, 0) .saturating_add(Weight::from_parts(0, 2767)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -170,8 +189,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `144` // Estimated: `2767` - // Minimum execution time: 4_438_000 picoseconds. - Weight::from_parts(4_553_000, 0) + // Minimum execution time: 4_409_000 picoseconds. + Weight::from_parts(4_555_000, 0) .saturating_add(Weight::from_parts(0, 2767)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -180,8 +199,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_198_000 picoseconds. - Weight::from_parts(5_520_000, 0) + // Minimum execution time: 5_368_000 picoseconds. + Weight::from_parts(5_614_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) @@ -202,8 +221,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `105716` // Estimated: `109181` - // Minimum execution time: 205_600_000 picoseconds. - Weight::from_parts(210_403_000, 0) + // Minimum execution time: 231_957_000 picoseconds. + Weight::from_parts(242_676_000, 0) .saturating_add(Weight::from_parts(0, 109181)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) @@ -226,8 +245,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `65785` // Estimated: `69250` - // Minimum execution time: 125_352_000 picoseconds. - Weight::from_parts(129_052_000, 0) + // Minimum execution time: 134_722_000 picoseconds. 
+ Weight::from_parts(138_495_000, 0)
 .saturating_add(Weight::from_parts(0, 69250))
 .saturating_add(T::DbWeight::get().reads(6))
 .saturating_add(T::DbWeight::get().writes(5))
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
index 1a91b6a434e07..b337813d79558 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
@@ -42,7 +42,7 @@ use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases;
 use cumulus_primitives_core::{AggregateMessageOrigin, ClaimQueueOffset, CoreSelector, ParaId};
 use frame_support::{
 construct_runtime, derive_impl,
- dispatch::{DispatchClass, DispatchInfo},
+ dispatch::DispatchClass,
 genesis_builder_helper::{build_state, get_preset},
 ord_parameter_types, parameter_types,
 traits::{
@@ -62,7 +62,7 @@ use frame_system::{
 };
 use pallet_asset_conversion_tx_payment::SwapAssetAdapter;
 use pallet_nfts::{DestroyWitness, PalletFeatures};
-use pallet_revive::{evm::runtime::EthExtra, AddressMapper};
+use pallet_revive::evm::runtime::EthExtra;
 use pallet_xcm::EnsureXcm;
 use parachains_common::{
 impls::DealWithFees, message_queue::*, AccountId, AssetIdForTrustBackedAssets, AuraId, Balance,
@@ -70,12 +70,10 @@ use parachains_common::{
 NORMAL_DISPATCH_RATIO,
 };
 use sp_api::impl_runtime_apis;
-use sp_core::{crypto::KeyTypeId, OpaqueMetadata, H160, U256};
+use sp_core::{crypto::KeyTypeId, OpaqueMetadata};
 use sp_runtime::{
 generic, impl_opaque_keys,
- traits::{
- AccountIdConversion, BlakeTwo256, Block as BlockT, Saturating, TransactionExtension, Verify,
- },
+ traits::{AccountIdConversion, BlakeTwo256, Block as BlockT, Saturating, Verify},
 transaction_validity::{TransactionSource, TransactionValidity},
 ApplyExtrinsicResult, Perbill, Permill, RuntimeDebug,
 };
@@ -99,7 +97,7 @@ use assets_common::{
 matching::{FromNetwork, FromSiblingParachain},
 };
 use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate};
-use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight};
+use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, InMemoryDbWeight};
 use xcm::{
 latest::prelude::AssetId,
 prelude::{VersionedAsset, VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm},
@@ -182,7 +180,7 @@ impl frame_system::Config for Runtime {
 type Hash = Hash;
 type Block = Block;
 type BlockHashCount = BlockHashCount;
- type DbWeight = RocksDbWeight;
+ type DbWeight = InMemoryDbWeight;
 type Version = Version;
 type AccountData = pallet_balances::AccountData<Balance>;
 type SystemWeightInfo = weights::frame_system::WeightInfo<Runtime>;
@@ -1094,17 +1092,6 @@ impl pallet_revive::Config for Runtime {
 type FindAuthor = <Runtime as pallet_authorship::Config>::FindAuthor;
 }
-impl TryFrom<RuntimeCall> for pallet_revive::Call<Runtime> {
- type Error = ();
-
- fn try_from(value: RuntimeCall) -> Result<Self, Self::Error> {
- match value {
- RuntimeCall::Revive(call) => Ok(call),
- _ => Err(()),
- }
- }
-}
-
 parameter_types! {
 pub MbmServiceWeight: Weight = Perbill::from_percent(80) * RuntimeBlockWeights::get().max_block;
 }
@@ -1493,7 +1480,11 @@ mod benches {
 );
 }
-impl_runtime_apis! {
+pallet_revive::impl_runtime_apis_plus_revive!(
+ Runtime,
+ Executive,
+ EthExtraImpl,
+
 impl sp_consensus_aura::AuraApi<Block, AuraId> for Runtime {
 fn slot_duration() -> sp_consensus_aura::SlotDuration {
 sp_consensus_aura::SlotDuration::from_millis(SLOT_DURATION)
@@ -2299,184 +2290,7 @@ impl_runtime_apis! {
 genesis_config_presets::preset_names()
 }
 }
-
- impl pallet_revive::ReviveApi<Block, AccountId, Balance, Nonce, BlockNumber> for Runtime
- {
- fn balance(address: H160) -> U256 {
- Revive::evm_balance(&address)
- }
-
- fn block_gas_limit() -> U256 {
- Revive::evm_block_gas_limit()
- }
-
- fn gas_price() -> U256 {
- Revive::evm_gas_price()
- }
-
- fn nonce(address: H160) -> Nonce {
- let account = <Runtime as pallet_revive::Config>::AddressMapper::to_account_id(&address);
- System::account_nonce(account)
- }
-
- fn eth_transact(tx: pallet_revive::evm::GenericTransaction) -> Result<Vec<u8>, pallet_revive::EthTransactError>
- {
- let blockweights: BlockWeights = <Runtime as frame_system::Config>::BlockWeights::get();
- let tx_fee = |pallet_call, mut dispatch_info: DispatchInfo| {
- let call = RuntimeCall::Revive(pallet_call);
- dispatch_info.extension_weight = EthExtraImpl::get_eth_extension(0, 0u32.into()).weight(&call);
- let uxt: UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic::new_bare(call).into();
-
- pallet_transaction_payment::Pallet::<Runtime>::compute_fee(
- uxt.encoded_size() as u32,
- &dispatch_info,
- 0u32.into(),
- )
- };
-
- Revive::bare_eth_transact(tx, blockweights.max_block, tx_fee)
- }
-
- fn call(
- origin: AccountId,
- dest: H160,
- value: Balance,
- gas_limit: Option<Weight>,
- storage_deposit_limit: Option<Balance>,
- input_data: Vec<u8>,
- ) -> pallet_revive::ContractResult<pallet_revive::ExecReturnValue, Balance> {
- let blockweights = <Runtime as frame_system::Config>::BlockWeights::get();
- Revive::bare_call(
- RuntimeOrigin::signed(origin),
- dest,
- value,
- gas_limit.unwrap_or(blockweights.max_block),
- pallet_revive::DepositLimit::Balance(storage_deposit_limit.unwrap_or(u128::MAX)),
- input_data,
- )
- }
-
- fn instantiate(
- origin: AccountId,
- value: Balance,
- gas_limit: Option<Weight>,
- storage_deposit_limit: Option<Balance>,
- code: pallet_revive::Code,
- data: Vec<u8>,
- salt: Option<[u8; 32]>,
- ) -> pallet_revive::ContractResult<pallet_revive::InstantiateReturnValue, Balance>
- {
- let blockweights = <Runtime as frame_system::Config>::BlockWeights::get();
- Revive::bare_instantiate(
- RuntimeOrigin::signed(origin),
- value,
- gas_limit.unwrap_or(blockweights.max_block),
- pallet_revive::DepositLimit::Balance(storage_deposit_limit.unwrap_or(u128::MAX)),
- code,
- data,
- salt,
- )
- }
-
- fn upload_code(
- origin: AccountId,
- code: Vec<u8>,
- storage_deposit_limit: Option<Balance>,
- ) -> pallet_revive::CodeUploadResult<Balance>
- {
- Revive::bare_upload_code(
- RuntimeOrigin::signed(origin),
- code,
- storage_deposit_limit.unwrap_or(u128::MAX),
- )
- }
-
- fn get_storage(
- address: H160,
- key: [u8; 32],
- ) -> pallet_revive::GetStorageResult {
- Revive::get_storage(
- address,
- key
- )
- }
-
- fn get_storage_var_key(
- address: H160,
- key: Vec<u8>,
- ) -> pallet_revive::GetStorageResult {
- Revive::get_storage_var_key(
- address,
- key
- )
- }
-
- fn trace_block(
- block: Block,
- tracer_type: pallet_revive::evm::TracerType,
- ) -> Vec<(u32, pallet_revive::evm::Trace)> {
- use pallet_revive::tracing::trace;
- let mut tracer = Revive::evm_tracer(tracer_type);
- let mut traces = vec![];
- let (header, extrinsics) = block.deconstruct();
- Executive::initialize_block(&header);
- for (index, ext) in extrinsics.into_iter().enumerate() {
- trace(tracer.as_tracing(), || {
- let _ = Executive::apply_extrinsic(ext);
- });
-
- if let Some(tx_trace) = tracer.collect_trace() {
- traces.push((index as u32, tx_trace));
- }
- }
-
- traces
- }
-
- fn trace_tx(
- block: Block,
- tx_index: u32,
- tracer_type: pallet_revive::evm::TracerType,
- ) -> Option<pallet_revive::evm::Trace> {
- use pallet_revive::tracing::trace;
- let mut tracer = Revive::evm_tracer(tracer_type);
- let (header, extrinsics) = block.deconstruct();
-
- Executive::initialize_block(&header);
- for (index, ext) in extrinsics.into_iter().enumerate() {
- if index as u32 == tx_index {
- trace(tracer.as_tracing(), || {
- let _ = Executive::apply_extrinsic(ext);
- });
- break;
- } else {
- let _ = Executive::apply_extrinsic(ext);
- }
- }
-
- tracer.collect_trace()
- }
-
- fn trace_call(
- tx: pallet_revive::evm::GenericTransaction,
- tracer_type: pallet_revive::evm::TracerType,
- )
- -> Result<pallet_revive::evm::Trace, pallet_revive::EthTransactError>
- {
- use pallet_revive::tracing::trace;
- let mut tracer = Revive::evm_tracer(tracer_type);
- let result = trace(tracer.as_tracing(), || Self::eth_transact(tx));
-
- if let Some(trace) = tracer.collect_trace() {
- Ok(trace)
- } else if let Err(err) = result {
- Err(err)
- } else {
- Ok(tracer.empty_trace())
- }
- }
-}
+);
 cumulus_pallet_parachain_system::register_validate_block! {
 Runtime = Runtime,
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_xcmp_queue.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_xcmp_queue.rs
index d327ed420b844..a3e0129468d65 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_xcmp_queue.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_xcmp_queue.rs
@@ -16,9 +16,9 @@
 //! Autogenerated weights for `cumulus_pallet_xcmp_queue`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2025-04-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2025-05-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `341e66d5356e`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `16d5a52ef0dc`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024
 // Executed Command:
@@ -56,8 +56,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn
 // Proof Size summary in bytes:
 // Measured: `109`
 // Estimated: `1497`
- // Minimum execution time: 5_079_000 picoseconds.
- Weight::from_parts(5_370_000, 0)
+ // Minimum execution time: 4_883_000 picoseconds.
+ Weight::from_parts(5_223_000, 0)
 .saturating_add(Weight::from_parts(0, 1497))
 .saturating_add(T::DbWeight::get().reads(1))
 .saturating_add(T::DbWeight::get().writes(1))
@@ -77,11 +77,11 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn
 // Proof Size summary in bytes:
 // Measured: `151`
 // Estimated: `5487`
- // Minimum execution time: 13_631_000 picoseconds.
- Weight::from_parts(9_312_672, 0)
+ // Minimum execution time: 13_840_000 picoseconds.
+ Weight::from_parts(8_982_973, 0)
 .saturating_add(Weight::from_parts(0, 5487))
 // Standard Error: 6
- .saturating_add(Weight::from_parts(1_016, 0).saturating_mul(n.into()))
+ .saturating_add(Weight::from_parts(995, 0).saturating_mul(n.into()))
 .saturating_add(T::DbWeight::get().reads(4))
 .saturating_add(T::DbWeight::get().writes(3))
 }
@@ -100,11 +100,11 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn
 // Proof Size summary in bytes:
 // Measured: `151`
 // Estimated: `5487`
- // Minimum execution time: 11_412_000 picoseconds.
- Weight::from_parts(15_650_229, 0)
+ // Minimum execution time: 12_129_000 picoseconds.
+ Weight::from_parts(15_937_292, 0) .saturating_add(Weight::from_parts(0, 5487)) - // Standard Error: 221 - .saturating_add(Weight::from_parts(114_484, 0).saturating_mul(n.into())) + // Standard Error: 225 + .saturating_add(Weight::from_parts(115_324, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -112,6 +112,27 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 105457]`. + fn enqueue_empty_xcmp_message_at(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `334 + n * (1 ±0)` + // Estimated: `108986` + // Minimum execution time: 20_484_000 picoseconds. + Weight::from_parts(20_648_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) + // Standard Error: 11 + .saturating_add(Weight::from_parts(2_365, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) @@ -123,11 +144,11 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `186` // Estimated: `5487` - // Minimum execution time: 12_757_000 picoseconds. - Weight::from_parts(1_315_914, 0) + // Minimum execution time: 12_963_000 picoseconds. 
+ Weight::from_parts(13_277_000, 0) .saturating_add(Weight::from_parts(0, 5487)) - // Standard Error: 85_840 - .saturating_add(Weight::from_parts(94_451_417, 0).saturating_mul(n.into())) + // Standard Error: 63_306 + .saturating_add(Weight::from_parts(104_765_134, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -136,21 +157,19 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn enqueue_1000_small_xcmp_messages() -> Weight { // Proof Size summary in bytes: - // Measured: `151` - // Estimated: `5487` - // Minimum execution time: 129_219_000 picoseconds. - Weight::from_parts(130_403_000, 0) - .saturating_add(Weight::from_parts(0, 5487)) + // Measured: `53067` + // Estimated: `108986` + // Minimum execution time: 255_661_000 picoseconds. + Weight::from_parts(264_825_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) @@ -158,8 +177,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `109` // Estimated: `2767` - // Minimum execution time: 3_134_000 picoseconds. - Weight::from_parts(3_280_000, 0) + // Minimum execution time: 3_092_000 picoseconds. + Weight::from_parts(3_339_000, 0) .saturating_add(Weight::from_parts(0, 2767)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -170,8 +189,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `144` // Estimated: `2767` - // Minimum execution time: 4_413_000 picoseconds. - Weight::from_parts(4_663_000, 0) + // Minimum execution time: 4_352_000 picoseconds. + Weight::from_parts(4_577_000, 0) .saturating_add(Weight::from_parts(0, 2767)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -180,8 +199,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_388_000 picoseconds. - Weight::from_parts(5_509_000, 0) + // Minimum execution time: 5_050_000 picoseconds. 
+ Weight::from_parts(5_270_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) @@ -202,8 +221,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `105716` // Estimated: `109181` - // Minimum execution time: 223_167_000 picoseconds. - Weight::from_parts(225_720_000, 0) + // Minimum execution time: 210_820_000 picoseconds. + Weight::from_parts(221_925_000, 0) .saturating_add(Weight::from_parts(0, 109181)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) @@ -226,8 +245,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `65785` // Estimated: `69250` - // Minimum execution time: 131_073_000 picoseconds. - Weight::from_parts(132_760_000, 0) + // Minimum execution time: 127_555_000 picoseconds. + Weight::from_parts(130_147_000, 0) .saturating_add(Weight::from_parts(0, 69250)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/inmemorydb_weights.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/inmemorydb_weights.rs new file mode 100644 index 0000000000000..fb94a7c3ca641 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/inmemorydb_weights.rs @@ -0,0 +1,122 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-05-25 (Y/M/D) +//! HOSTNAME: `versi-developer-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! +//! DATABASE: `InMemoryDb`, RUNTIME: `Polkadot Asset Hub` +//! BLOCK-NUM: `BlockId::Number(8404035)` +//! SKIP-WRITE: `false`, SKIP-READ: `false`, WARMUPS: `1` +//! STATE-VERSION: `V1`, STATE-CACHE-SIZE: `` +//! WEIGHT-PATH: `` +//! METRIC: `Average`, WEIGHT-MUL: `1.0`, WEIGHT-ADD: `0` + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// storage +// --warmups +// 1 +// --state-version +// 1 +// --base-path +// /opt/local-ssd/polkadot-asset-hub +// --chain +// cumulus/polkadot-parachain/chain-specs/asset-hub-polkadot.json +// --detailed-log-output +// --enable-trie-cache +// --trie-cache-size +// 10737418240 +// --batch-size +// 10000 +// --mode +// validate-block +// --validate-block-rounds +// 100 + +/// Storage DB weights for the `Polkadot Asset Hub` runtime and `InMemoryDb`. +pub mod constants { + use frame_support::weights::{constants, RuntimeDbWeight}; + use sp_core::parameter_types; + + parameter_types! { + /// `InMemoryDb` weights are measured in the context of the validation functions. + /// To avoid submitting overweight blocks to the relay chain this is the configuration + /// parachains should use. 
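+ /// The values below were produced by the `benchmark storage --mode validate-block`
+ /// run listed in the header above, so they reflect state access costs as measured
+ /// inside the validation context.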
+ pub const InMemoryDbWeight: RuntimeDbWeight = RuntimeDbWeight { + // Time to read one storage item. + // Calculated by multiplying the *Average* of all values with `1.0` and adding `0`. + // + // Stats nanoseconds: + // Min, Max: 12_883, 13_516 + // Average: 13_036 + // Median: 13_031 + // Std-Dev: 69.49 + // + // Percentiles nanoseconds: + // 99th: 13_242 + // 95th: 13_152 + // 75th: 13_070 + read: 13_036 * constants::WEIGHT_REF_TIME_PER_NANOS, + + // Time to write one storage item. + // Calculated by multiplying the *Average* of all values with `1.0` and adding `0`. + // + // Stats nanoseconds: + // Min, Max: 28_998, 32_249 + // Average: 31_215 + // Median: 31_667 + // Std-Dev: 1047.8 + // + // Percentiles nanoseconds: + // 99th: 32_195 + // 95th: 32_114 + // 75th: 31_852 + write: 31_215 * constants::WEIGHT_REF_TIME_PER_NANOS, + }; + } + + #[cfg(test)] + mod test_db_weights { + use super::InMemoryDbWeight as W; + use frame_support::weights::constants; + + /// Checks that all weights exist and have sane values. + // NOTE: If this test fails but you are sure that the generated values are fine, + // you can delete it. + #[test] + fn bound() { + // At least 1 µs. + assert!( + W::get().reads(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, + "Read weight should be at least 1 µs." + ); + assert!( + W::get().writes(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, + "Write weight should be at least 1 µs." + ); + // At most 1 ms. + assert!( + W::get().reads(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, + "Read weight should be at most 1 ms." + ); + assert!( + W::get().writes(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, + "Write weight should be at most 1 ms." + ); + } + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs index 13364d206a57b..039b10aebd29a 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs @@ -20,6 +20,7 @@ pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; pub mod frame_system_extensions; +pub mod inmemorydb_weights; pub mod pallet_asset_conversion; pub mod pallet_asset_conversion_ops; pub mod pallet_asset_conversion_tx_payment; @@ -50,4 +51,4 @@ pub mod xcm; pub use block_weights::constants::BlockExecutionWeight; pub use extrinsic_weights::constants::ExtrinsicBaseWeight; -pub use rocksdb_weights::constants::RocksDbWeight; +pub use inmemorydb_weights::constants::InMemoryDbWeight; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_xcmp_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_xcmp_queue.rs index 1ffad7087da69..40f13c7090b47 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_xcmp_queue.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_xcmp_queue.rs @@ -16,9 +16,9 @@ //! Autogenerated weights for `cumulus_pallet_xcmp_queue` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2025-04-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2025-05-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `341e66d5356e`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_xcmp_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_xcmp_queue.rs
index 1ffad7087da69..40f13c7090b47 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_xcmp_queue.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_xcmp_queue.rs
@@ -16,9 +16,9 @@
 //! Autogenerated weights for `cumulus_pallet_xcmp_queue`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2025-04-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2025-05-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `341e66d5356e`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `16d5a52ef0dc`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024
 
 // Executed Command:
@@ -56,8 +56,8 @@ impl<T: frame_system::Config> cumulus_pallet_xcmp_queue::WeightInfo for WeightInfo<T>
 		// Proof Size summary in bytes:
 		//  Measured:  `4`
 		//  Estimated: `1497`
-		// Minimum execution time: 4_386_000 picoseconds.
-		Weight::from_parts(4_636_000, 0)
+		// Minimum execution time: 4_326_000 picoseconds.
+		Weight::from_parts(4_616_000, 0)
 			.saturating_add(Weight::from_parts(0, 1497))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
@@ -77,11 +77,11 @@ impl<T: frame_system::Config> cumulus_pallet_xcmp_queue::WeightInfo for WeightInfo<T>
 		// Proof Size summary in bytes:
 		//  Measured:  `80`
 		//  Estimated: `5487`
-		// Minimum execution time: 13_325_000 picoseconds.
-		Weight::from_parts(9_602_752, 0)
+		// Minimum execution time: 13_374_000 picoseconds.
+		Weight::from_parts(9_220_505, 0)
 			.saturating_add(Weight::from_parts(0, 5487))
 			// Standard Error: 6
-			.saturating_add(Weight::from_parts(948, 0).saturating_mul(n.into()))
+			.saturating_add(Weight::from_parts(989, 0).saturating_mul(n.into()))
 			.saturating_add(T::DbWeight::get().reads(4))
 			.saturating_add(T::DbWeight::get().writes(3))
 	}
@@ -100,11 +100,11 @@ impl<T: frame_system::Config> cumulus_pallet_xcmp_queue::WeightInfo for WeightInfo<T>
 		// Proof Size summary in bytes:
 		//  Measured:  `80`
 		//  Estimated: `5487`
-		// Minimum execution time: 11_096_000 picoseconds.
-		Weight::from_parts(15_516_116, 0)
+		// Minimum execution time: 11_436_000 picoseconds.
+		Weight::from_parts(15_588_934, 0)
 			.saturating_add(Weight::from_parts(0, 5487))
-			// Standard Error: 320
-			.saturating_add(Weight::from_parts(116_120, 0).saturating_mul(n.into()))
+			// Standard Error: 215
+			.saturating_add(Weight::from_parts(121_183, 0).saturating_mul(n.into()))
 			.saturating_add(T::DbWeight::get().reads(4))
 			.saturating_add(T::DbWeight::get().writes(3))
 	}
@@ -112,6 +112,27 @@ impl<T: frame_system::Config> cumulus_pallet_xcmp_queue::WeightInfo for WeightInfo<T>
 	/// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`)
 	/// Storage: `MessageQueue::BookStateFor` (r:1 w:1)
 	/// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(136), added: 2611, mode: `MaxEncodedLen`)
+	/// Storage: `MessageQueue::Pages` (r:1 w:1)
+	/// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105549), added: 108024, mode: `MaxEncodedLen`)
+	/// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0)
+	/// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`)
+	/// The range of component `n` is `[0, 105457]`.
+	fn enqueue_empty_xcmp_message_at(n: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		//  Measured:  `263 + n * (1 ±0)`
+		//  Estimated: `109014`
+		// Minimum execution time: 21_929_000 picoseconds.
+		Weight::from_parts(1_150_129, 0)
+			.saturating_add(Weight::from_parts(0, 109014))
+			// Standard Error: 16
+			.saturating_add(Weight::from_parts(2_654, 0).saturating_mul(n.into()))
+			.saturating_add(T::DbWeight::get().reads(4))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
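`enqueue_empty_xcmp_message_at` above is one of the benchmarks newly added in this run: its ref-time is a fitted line, the intercept `1_150_129` plus `2_654` per unit of the component `n`, while the quoted minimum execution time is the measured floor rather than the fitted base. A small sketch (illustrative only) evaluating the fit at the top of the component range:

// Evaluate `base + slope * n` for the fitted line above.
fn fitted_ref_time_ps(n: u64) -> u64 {
	let base: u64 = 1_150_129; // intercept, picoseconds
	let slope: u64 = 2_654; // per-unit cost, picoseconds
	base.saturating_add(slope.saturating_mul(n))
}

fn main() {
	// At n = 105_457 (the upper bound of the range) the fit gives
	// 281_033_007 ps, i.e. ~281 µs before the DB-weight terms are added.
	println!("{} ps", fitted_ref_time_ps(105_457));
}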
+	/// Storage: `XcmpQueue::QueueConfig` (r:1 w:0)
+	/// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`)
+	/// Storage: `MessageQueue::BookStateFor` (r:1 w:1)
+	/// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(136), added: 2611, mode: `MaxEncodedLen`)
 	/// Storage: `MessageQueue::ServiceHead` (r:1 w:1)
 	/// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`)
 	/// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0)
@@ -123,11 +144,11 @@ impl<T: frame_system::Config> cumulus_pallet_xcmp_queue::WeightInfo for WeightInfo<T>
 		// Proof Size summary in bytes:
 		//  Measured:  `115`
 		//  Estimated: `5487`
-		// Minimum execution time: 12_507_000 picoseconds.
-		Weight::from_parts(12_840_000, 0)
+		// Minimum execution time: 12_994_000 picoseconds.
+		Weight::from_parts(13_450_000, 0)
 			.saturating_add(Weight::from_parts(0, 5487))
-			// Standard Error: 70_751
-			.saturating_add(Weight::from_parts(93_977_850, 0).saturating_mul(n.into()))
+			// Standard Error: 59_142
+			.saturating_add(Weight::from_parts(101_994_623, 0).saturating_mul(n.into()))
 			.saturating_add(T::DbWeight::get().reads(4))
 			.saturating_add(T::DbWeight::get().writes(2))
 			.saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into())))
@@ -136,21 +157,19 @@ impl<T: frame_system::Config> cumulus_pallet_xcmp_queue::WeightInfo for WeightInfo<T>
 	/// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`)
 	/// Storage: `MessageQueue::BookStateFor` (r:1 w:1)
 	/// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(136), added: 2611, mode: `MaxEncodedLen`)
-	/// Storage: `MessageQueue::ServiceHead` (r:1 w:1)
-	/// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`)
+	/// Storage: `MessageQueue::Pages` (r:1 w:1)
+	/// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105549), added: 108024, mode: `MaxEncodedLen`)
 	/// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0)
 	/// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`)
-	/// Storage: `MessageQueue::Pages` (r:0 w:1)
-	/// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105549), added: 108024, mode: `MaxEncodedLen`)
 	fn enqueue_1000_small_xcmp_messages() -> Weight {
 		// Proof Size summary in bytes:
-		//  Measured:  `80`
-		//  Estimated: `5487`
-		// Minimum execution time: 130_861_000 picoseconds.
-		Weight::from_parts(131_705_000, 0)
-			.saturating_add(Weight::from_parts(0, 5487))
+		//  Measured:  `52996`
+		//  Estimated: `109014`
+		// Minimum execution time: 266_450_000 picoseconds.
+ Weight::from_parts(274_887_000, 0) + .saturating_add(Weight::from_parts(0, 109014)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) @@ -158,8 +177,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `4` // Estimated: `2767` - // Minimum execution time: 2_413_000 picoseconds. - Weight::from_parts(2_570_000, 0) + // Minimum execution time: 2_388_000 picoseconds. + Weight::from_parts(2_520_000, 0) .saturating_add(Weight::from_parts(0, 2767)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -171,7 +190,7 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Measured: `39` // Estimated: `2767` // Minimum execution time: 3_782_000 picoseconds. - Weight::from_parts(3_967_000, 0) + Weight::from_parts(3_940_000, 0) .saturating_add(Weight::from_parts(0, 2767)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -180,8 +199,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_149_000 picoseconds. - Weight::from_parts(5_402_000, 0) + // Minimum execution time: 5_418_000 picoseconds. + Weight::from_parts(5_613_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) @@ -202,8 +221,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `105645` // Estimated: `109110` - // Minimum execution time: 207_444_000 picoseconds. - Weight::from_parts(216_987_000, 0) + // Minimum execution time: 210_814_000 picoseconds. + Weight::from_parts(217_016_000, 0) .saturating_add(Weight::from_parts(0, 109110)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) @@ -226,8 +245,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `65714` // Estimated: `69179` - // Minimum execution time: 124_071_000 picoseconds. - Weight::from_parts(127_720_000, 0) + // Minimum execution time: 126_881_000 picoseconds. 
+ Weight::from_parts(131_302_000, 0) .saturating_add(Weight::from_parts(0, 69179)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs index 351d0ff4749da..dc14c050f60f7 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_ethereum_config.rs @@ -374,7 +374,6 @@ pub mod benchmark_helpers { } pub(crate) mod migrations { - use alloc::vec::Vec; use frame_support::pallet_prelude::*; use snowbridge_core::TokenId; @@ -401,18 +400,6 @@ pub(crate) mod migrations { }; snowbridge_pallet_system::ForeignToNativeId::::translate_values(translate_westend); - let old_keys = OldNativeToForeignId::::iter_keys().collect::>(); - for old_key in old_keys { - if let Some(old_val) = OldNativeToForeignId::::get(&old_key) { - snowbridge_pallet_system::NativeToForeignId::::insert( - &xcm::v5::Location::try_from(old_key.clone()).expect("valid location"), - old_val, - ); - } - OldNativeToForeignId::::remove(old_key); - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); - } - weight } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/cumulus_pallet_xcmp_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/cumulus_pallet_xcmp_queue.rs index e17d6a18e0ed0..f8a44aa4779c9 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/cumulus_pallet_xcmp_queue.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/cumulus_pallet_xcmp_queue.rs @@ -16,9 +16,9 @@ //! Autogenerated weights for `cumulus_pallet_xcmp_queue` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2025-04-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2025-05-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `341e66d5356e`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `16d5a52ef0dc`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 // Executed Command: @@ -56,8 +56,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `142` // Estimated: `1497` - // Minimum execution time: 5_273_000 picoseconds. - Weight::from_parts(5_418_000, 0) + // Minimum execution time: 5_195_000 picoseconds. + Weight::from_parts(5_568_000, 0) .saturating_add(Weight::from_parts(0, 1497)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -77,11 +77,11 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `148` // Estimated: `5487` - // Minimum execution time: 13_320_000 picoseconds. - Weight::from_parts(9_541_133, 0) + // Minimum execution time: 13_430_000 picoseconds. 
+ Weight::from_parts(9_574_197, 0) .saturating_add(Weight::from_parts(0, 5487)) // Standard Error: 6 - .saturating_add(Weight::from_parts(1_002, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(973, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -100,11 +100,11 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `148` // Estimated: `5487` - // Minimum execution time: 11_398_000 picoseconds. - Weight::from_parts(15_643_041, 0) + // Minimum execution time: 11_564_000 picoseconds. + Weight::from_parts(15_506_334, 0) .saturating_add(Weight::from_parts(0, 5487)) - // Standard Error: 172 - .saturating_add(Weight::from_parts(113_850, 0).saturating_mul(n.into())) + // Standard Error: 205 + .saturating_add(Weight::from_parts(114_494, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -112,6 +112,27 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(136), added: 2611, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105549), added: 108024, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 105457]`. + fn enqueue_empty_xcmp_message_at(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `330 + n * (1 ±0)` + // Estimated: `109014` + // Minimum execution time: 19_772_000 picoseconds. + Weight::from_parts(20_089_000, 0) + .saturating_add(Weight::from_parts(0, 109014)) + // Standard Error: 11 + .saturating_add(Weight::from_parts(2_427, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(136), added: 2611, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) @@ -123,11 +144,11 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `183` // Estimated: `5487` - // Minimum execution time: 12_456_000 picoseconds. - Weight::from_parts(12_898_000, 0) + // Minimum execution time: 12_651_000 picoseconds. 
+ Weight::from_parts(13_268_000, 0) .saturating_add(Weight::from_parts(0, 5487)) - // Standard Error: 98_955 - .saturating_add(Weight::from_parts(96_236_732, 0).saturating_mul(n.into())) + // Standard Error: 55_621 + .saturating_add(Weight::from_parts(102_763_421, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -136,21 +157,19 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(136), added: 2611, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105549), added: 108024, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105549), added: 108024, mode: `MaxEncodedLen`) fn enqueue_1000_small_xcmp_messages() -> Weight { // Proof Size summary in bytes: - // Measured: `148` - // Estimated: `5487` - // Minimum execution time: 130_025_000 picoseconds. - Weight::from_parts(131_796_000, 0) - .saturating_add(Weight::from_parts(0, 5487)) + // Measured: `53063` + // Estimated: `109014` + // Minimum execution time: 252_294_000 picoseconds. + Weight::from_parts(260_540_000, 0) + .saturating_add(Weight::from_parts(0, 109014)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) @@ -158,8 +177,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `142` // Estimated: `2767` - // Minimum execution time: 3_333_000 picoseconds. - Weight::from_parts(3_554_000, 0) + // Minimum execution time: 3_337_000 picoseconds. + Weight::from_parts(3_498_000, 0) .saturating_add(Weight::from_parts(0, 2767)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -170,8 +189,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `177` // Estimated: `2767` - // Minimum execution time: 4_631_000 picoseconds. - Weight::from_parts(4_844_000, 0) + // Minimum execution time: 4_674_000 picoseconds. + Weight::from_parts(4_822_000, 0) .saturating_add(Weight::from_parts(0, 2767)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -180,8 +199,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_371_000 picoseconds. - Weight::from_parts(5_597_000, 0) + // Minimum execution time: 5_045_000 picoseconds. 
+ Weight::from_parts(5_399_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) @@ -202,8 +221,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `105713` // Estimated: `109178` - // Minimum execution time: 221_896_000 picoseconds. - Weight::from_parts(227_177_000, 0) + // Minimum execution time: 211_435_000 picoseconds. + Weight::from_parts(222_691_000, 0) .saturating_add(Weight::from_parts(0, 109178)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) @@ -226,8 +245,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `65782` // Estimated: `69247` - // Minimum execution time: 131_406_000 picoseconds. - Weight::from_parts(132_324_000, 0) + // Minimum execution time: 126_438_000 picoseconds. + Weight::from_parts(129_423_000, 0) .saturating_add(Weight::from_parts(0, 69247)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_xcmp_queue.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_xcmp_queue.rs index 56c443cfd725e..bed529edcea33 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_xcmp_queue.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_xcmp_queue.rs @@ -16,9 +16,9 @@ //! Autogenerated weights for `cumulus_pallet_xcmp_queue` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2025-04-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2025-05-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `341e66d5356e`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `16d5a52ef0dc`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 // Executed Command: @@ -56,8 +56,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `142` // Estimated: `1497` - // Minimum execution time: 5_120_000 picoseconds. - Weight::from_parts(5_379_000, 0) + // Minimum execution time: 4_975_000 picoseconds. + Weight::from_parts(5_353_000, 0) .saturating_add(Weight::from_parts(0, 1497)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -77,11 +77,11 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `148` // Estimated: `5487` - // Minimum execution time: 13_413_000 picoseconds. - Weight::from_parts(9_072_883, 0) + // Minimum execution time: 13_084_000 picoseconds. + Weight::from_parts(8_989_453, 0) .saturating_add(Weight::from_parts(0, 5487)) - // Standard Error: 5 - .saturating_add(Weight::from_parts(1_003, 0).saturating_mul(n.into())) + // Standard Error: 6 + .saturating_add(Weight::from_parts(1_026, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -100,11 +100,11 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `148` // Estimated: `5487` - // Minimum execution time: 11_097_000 picoseconds. 
- Weight::from_parts(14_667_192, 0) + // Minimum execution time: 10_913_000 picoseconds. + Weight::from_parts(14_988_541, 0) .saturating_add(Weight::from_parts(0, 5487)) - // Standard Error: 286 - .saturating_add(Weight::from_parts(121_019, 0).saturating_mul(n.into())) + // Standard Error: 235 + .saturating_add(Weight::from_parts(130_851, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -112,6 +112,27 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 105457]`. + fn enqueue_empty_xcmp_message_at(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `330 + n * (1 ±0)` + // Estimated: `108986` + // Minimum execution time: 19_799_000 picoseconds. + Weight::from_parts(20_095_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) + // Standard Error: 12 + .saturating_add(Weight::from_parts(2_584, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) @@ -123,11 +144,11 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `183` // Estimated: `5487` - // Minimum execution time: 12_579_000 picoseconds. - Weight::from_parts(12_746_000, 0) + // Minimum execution time: 12_421_000 picoseconds. 
+ Weight::from_parts(12_780_000, 0) .saturating_add(Weight::from_parts(0, 5487)) - // Standard Error: 80_865 - .saturating_add(Weight::from_parts(95_971_763, 0).saturating_mul(n.into())) + // Standard Error: 84_412 + .saturating_add(Weight::from_parts(106_740_006, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -136,21 +157,19 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn enqueue_1000_small_xcmp_messages() -> Weight { // Proof Size summary in bytes: - // Measured: `148` - // Estimated: `5487` - // Minimum execution time: 134_592_000 picoseconds. - Weight::from_parts(135_210_000, 0) - .saturating_add(Weight::from_parts(0, 5487)) + // Measured: `53063` + // Estimated: `108986` + // Minimum execution time: 279_435_000 picoseconds. + Weight::from_parts(285_825_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) @@ -158,8 +177,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `142` // Estimated: `2767` - // Minimum execution time: 3_204_000 picoseconds. - Weight::from_parts(3_438_000, 0) + // Minimum execution time: 3_187_000 picoseconds. + Weight::from_parts(3_390_000, 0) .saturating_add(Weight::from_parts(0, 2767)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -170,8 +189,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `177` // Estimated: `2767` - // Minimum execution time: 4_521_000 picoseconds. - Weight::from_parts(4_676_000, 0) + // Minimum execution time: 4_450_000 picoseconds. + Weight::from_parts(4_688_000, 0) .saturating_add(Weight::from_parts(0, 2767)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -180,8 +199,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_152_000 picoseconds. - Weight::from_parts(5_737_000, 0) + // Minimum execution time: 5_199_000 picoseconds. 
+ Weight::from_parts(5_368_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) @@ -202,8 +221,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `105713` // Estimated: `109178` - // Minimum execution time: 219_716_000 picoseconds. - Weight::from_parts(223_748_000, 0) + // Minimum execution time: 229_715_000 picoseconds. + Weight::from_parts(232_586_000, 0) .saturating_add(Weight::from_parts(0, 109178)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) @@ -226,8 +245,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `65782` // Estimated: `69247` - // Minimum execution time: 129_234_000 picoseconds. - Weight::from_parts(131_014_000, 0) + // Minimum execution time: 133_351_000 picoseconds. + Weight::from_parts(135_787_000, 0) .saturating_add(Weight::from_parts(0, 69247)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/cumulus_pallet_xcmp_queue.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/cumulus_pallet_xcmp_queue.rs index db54a4ce59c67..5051a8c8e96b2 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/cumulus_pallet_xcmp_queue.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/cumulus_pallet_xcmp_queue.rs @@ -16,9 +16,9 @@ //! Autogenerated weights for `cumulus_pallet_xcmp_queue` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2025-04-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2025-05-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `341e66d5356e`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `16d5a52ef0dc`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 // Executed Command: @@ -56,8 +56,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `76` // Estimated: `1497` - // Minimum execution time: 4_837_000 picoseconds. - Weight::from_parts(5_170_000, 0) + // Minimum execution time: 4_788_000 picoseconds. + Weight::from_parts(5_033_000, 0) .saturating_add(Weight::from_parts(0, 1497)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -77,11 +77,11 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `82` // Estimated: `5487` - // Minimum execution time: 13_084_000 picoseconds. - Weight::from_parts(8_739_344, 0) + // Minimum execution time: 12_649_000 picoseconds. + Weight::from_parts(8_733_180, 0) .saturating_add(Weight::from_parts(0, 5487)) // Standard Error: 6 - .saturating_add(Weight::from_parts(1_000, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(982, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -100,11 +100,11 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `82` // Estimated: `5487` - // Minimum execution time: 10_994_000 picoseconds. - Weight::from_parts(14_949_012, 0) + // Minimum execution time: 10_604_000 picoseconds. 
+ Weight::from_parts(14_570_286, 0) .saturating_add(Weight::from_parts(0, 5487)) - // Standard Error: 148 - .saturating_add(Weight::from_parts(119_649, 0).saturating_mul(n.into())) + // Standard Error: 193 + .saturating_add(Weight::from_parts(117_011, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -112,6 +112,27 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 105457]`. + fn enqueue_empty_xcmp_message_at(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `264 + n * (1 ±0)` + // Estimated: `108986` + // Minimum execution time: 19_467_000 picoseconds. + Weight::from_parts(19_689_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) + // Standard Error: 11 + .saturating_add(Weight::from_parts(2_365, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) @@ -123,11 +144,11 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `117` // Estimated: `5487` - // Minimum execution time: 12_418_000 picoseconds. - Weight::from_parts(12_567_000, 0) + // Minimum execution time: 12_234_000 picoseconds. 
+ Weight::from_parts(12_623_000, 0) .saturating_add(Weight::from_parts(0, 5487)) - // Standard Error: 68_698 - .saturating_add(Weight::from_parts(94_530_242, 0).saturating_mul(n.into())) + // Standard Error: 62_233 + .saturating_add(Weight::from_parts(102_929_572, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -136,21 +157,19 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn enqueue_1000_small_xcmp_messages() -> Weight { // Proof Size summary in bytes: - // Measured: `82` - // Estimated: `5487` - // Minimum execution time: 135_051_000 picoseconds. - Weight::from_parts(136_224_000, 0) - .saturating_add(Weight::from_parts(0, 5487)) + // Measured: `52997` + // Estimated: `108986` + // Minimum execution time: 254_750_000 picoseconds. + Weight::from_parts(262_602_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) @@ -158,8 +177,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `76` // Estimated: `2767` - // Minimum execution time: 3_018_000 picoseconds. - Weight::from_parts(3_160_000, 0) + // Minimum execution time: 2_920_000 picoseconds. + Weight::from_parts(3_228_000, 0) .saturating_add(Weight::from_parts(0, 2767)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -170,8 +189,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `111` // Estimated: `2767` - // Minimum execution time: 4_298_000 picoseconds. - Weight::from_parts(4_504_000, 0) + // Minimum execution time: 4_301_000 picoseconds. + Weight::from_parts(4_501_000, 0) .saturating_add(Weight::from_parts(0, 2767)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -180,8 +199,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_091_000 picoseconds. - Weight::from_parts(5_326_000, 0) + // Minimum execution time: 5_274_000 picoseconds. 
+ Weight::from_parts(5_575_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) @@ -202,8 +221,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `105647` // Estimated: `109112` - // Minimum execution time: 220_697_000 picoseconds. - Weight::from_parts(228_625_000, 0) + // Minimum execution time: 212_251_000 picoseconds. + Weight::from_parts(220_677_000, 0) .saturating_add(Weight::from_parts(0, 109112)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) @@ -226,8 +245,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `65716` // Estimated: `69181` - // Minimum execution time: 128_465_000 picoseconds. - Weight::from_parts(130_685_000, 0) + // Minimum execution time: 125_236_000 picoseconds. + Weight::from_parts(127_502_000, 0) .saturating_add(Weight::from_parts(0, 69181)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/cumulus_pallet_xcmp_queue.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/cumulus_pallet_xcmp_queue.rs index 9fca22f9e78ca..7aa9c8a0896c2 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/cumulus_pallet_xcmp_queue.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/cumulus_pallet_xcmp_queue.rs @@ -16,9 +16,9 @@ //! Autogenerated weights for `cumulus_pallet_xcmp_queue` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2025-04-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2025-05-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `341e66d5356e`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `16d5a52ef0dc`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 // Executed Command: @@ -56,8 +56,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `76` // Estimated: `1497` - // Minimum execution time: 4_897_000 picoseconds. - Weight::from_parts(5_245_000, 0) + // Minimum execution time: 4_864_000 picoseconds. + Weight::from_parts(5_179_000, 0) .saturating_add(Weight::from_parts(0, 1497)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -77,11 +77,11 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `82` // Estimated: `5487` - // Minimum execution time: 12_793_000 picoseconds. - Weight::from_parts(8_346_709, 0) + // Minimum execution time: 12_962_000 picoseconds. + Weight::from_parts(8_522_179, 0) .saturating_add(Weight::from_parts(0, 5487)) // Standard Error: 6 - .saturating_add(Weight::from_parts(1_006, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_028, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -100,11 +100,11 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `82` // Estimated: `5487` - // Minimum execution time: 10_859_000 picoseconds. - Weight::from_parts(14_746_768, 0) + // Minimum execution time: 10_903_000 picoseconds. 
+ Weight::from_parts(14_761_815, 0) .saturating_add(Weight::from_parts(0, 5487)) - // Standard Error: 140 - .saturating_add(Weight::from_parts(114_347, 0).saturating_mul(n.into())) + // Standard Error: 200 + .saturating_add(Weight::from_parts(117_611, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -112,6 +112,27 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 105457]`. + fn enqueue_empty_xcmp_message_at(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `264 + n * (1 ±0)` + // Estimated: `108986` + // Minimum execution time: 19_215_000 picoseconds. + Weight::from_parts(19_684_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) + // Standard Error: 12 + .saturating_add(Weight::from_parts(2_568, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) @@ -123,11 +144,11 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `117` // Estimated: `5487` - // Minimum execution time: 12_393_000 picoseconds. - Weight::from_parts(12_544_000, 0) + // Minimum execution time: 12_217_000 picoseconds. 
+ Weight::from_parts(12_477_000, 0) .saturating_add(Weight::from_parts(0, 5487)) - // Standard Error: 64_574 - .saturating_add(Weight::from_parts(94_393_450, 0).saturating_mul(n.into())) + // Standard Error: 55_582 + .saturating_add(Weight::from_parts(106_437_894, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -136,21 +157,19 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn enqueue_1000_small_xcmp_messages() -> Weight { // Proof Size summary in bytes: - // Measured: `82` - // Estimated: `5487` - // Minimum execution time: 129_834_000 picoseconds. - Weight::from_parts(131_193_000, 0) - .saturating_add(Weight::from_parts(0, 5487)) + // Measured: `52997` + // Estimated: `108986` + // Minimum execution time: 264_971_000 picoseconds. + Weight::from_parts(275_172_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) @@ -158,8 +177,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `76` // Estimated: `2767` - // Minimum execution time: 3_004_000 picoseconds. - Weight::from_parts(3_387_000, 0) + // Minimum execution time: 2_996_000 picoseconds. + Weight::from_parts(3_216_000, 0) .saturating_add(Weight::from_parts(0, 2767)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -170,8 +189,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `111` // Estimated: `2767` - // Minimum execution time: 4_366_000 picoseconds. - Weight::from_parts(4_522_000, 0) + // Minimum execution time: 4_320_000 picoseconds. + Weight::from_parts(4_499_000, 0) .saturating_add(Weight::from_parts(0, 2767)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -180,8 +199,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_076_000 picoseconds. - Weight::from_parts(5_281_000, 0) + // Minimum execution time: 5_044_000 picoseconds. 
+ Weight::from_parts(5_145_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) @@ -202,8 +221,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `105647` // Estimated: `109112` - // Minimum execution time: 220_997_000 picoseconds. - Weight::from_parts(229_128_000, 0) + // Minimum execution time: 223_930_000 picoseconds. + Weight::from_parts(234_241_000, 0) .saturating_add(Weight::from_parts(0, 109112)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) @@ -226,8 +245,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `65716` // Estimated: `69181` - // Minimum execution time: 129_492_000 picoseconds. - Weight::from_parts(131_679_000, 0) + // Minimum execution time: 132_117_000 picoseconds. + Weight::from_parts(134_663_000, 0) .saturating_add(Weight::from_parts(0, 69181)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/cumulus_pallet_xcmp_queue.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/cumulus_pallet_xcmp_queue.rs index b3d1aeadb2edd..32ae0f05dadba 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/cumulus_pallet_xcmp_queue.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/cumulus_pallet_xcmp_queue.rs @@ -16,9 +16,9 @@ //! Autogenerated weights for `cumulus_pallet_xcmp_queue` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2025-04-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2025-05-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `341e66d5356e`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `16d5a52ef0dc`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 // Executed Command: @@ -56,8 +56,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `76` // Estimated: `1497` - // Minimum execution time: 4_884_000 picoseconds. - Weight::from_parts(5_133_000, 0) + // Minimum execution time: 4_894_000 picoseconds. + Weight::from_parts(5_140_000, 0) .saturating_add(Weight::from_parts(0, 1497)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -77,11 +77,11 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `82` // Estimated: `5487` - // Minimum execution time: 12_631_000 picoseconds. - Weight::from_parts(8_912_789, 0) + // Minimum execution time: 13_062_000 picoseconds. + Weight::from_parts(8_854_073, 0) .saturating_add(Weight::from_parts(0, 5487)) - // Standard Error: 5 - .saturating_add(Weight::from_parts(943, 0).saturating_mul(n.into())) + // Standard Error: 6 + .saturating_add(Weight::from_parts(985, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -100,11 +100,11 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `82` // Estimated: `5487` - // Minimum execution time: 10_775_000 picoseconds. - Weight::from_parts(14_656_859, 0) + // Minimum execution time: 10_915_000 picoseconds. 
+ Weight::from_parts(14_766_724, 0) .saturating_add(Weight::from_parts(0, 5487)) - // Standard Error: 150 - .saturating_add(Weight::from_parts(115_219, 0).saturating_mul(n.into())) + // Standard Error: 228 + .saturating_add(Weight::from_parts(117_905, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -112,6 +112,27 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 105457]`. + fn enqueue_empty_xcmp_message_at(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `264 + n * (1 ±0)` + // Estimated: `108986` + // Minimum execution time: 19_506_000 picoseconds. + Weight::from_parts(19_645_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) + // Standard Error: 12 + .saturating_add(Weight::from_parts(2_407, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) @@ -123,11 +144,11 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `117` // Estimated: `5487` - // Minimum execution time: 12_029_000 picoseconds. - Weight::from_parts(12_430_000, 0) + // Minimum execution time: 12_218_000 picoseconds. 
+ Weight::from_parts(12_586_000, 0) .saturating_add(Weight::from_parts(0, 5487)) - // Standard Error: 71_234 - .saturating_add(Weight::from_parts(92_348_672, 0).saturating_mul(n.into())) + // Standard Error: 105_927 + .saturating_add(Weight::from_parts(101_388_985, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -136,21 +157,19 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn enqueue_1000_small_xcmp_messages() -> Weight { // Proof Size summary in bytes: - // Measured: `82` - // Estimated: `5487` - // Minimum execution time: 130_253_000 picoseconds. - Weight::from_parts(132_148_000, 0) - .saturating_add(Weight::from_parts(0, 5487)) + // Measured: `52997` + // Estimated: `108986` + // Minimum execution time: 253_077_000 picoseconds. + Weight::from_parts(257_994_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) @@ -158,8 +177,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `76` // Estimated: `2767` - // Minimum execution time: 3_011_000 picoseconds. - Weight::from_parts(3_207_000, 0) + // Minimum execution time: 3_102_000 picoseconds. + Weight::from_parts(3_289_000, 0) .saturating_add(Weight::from_parts(0, 2767)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -170,8 +189,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `111` // Estimated: `2767` - // Minimum execution time: 4_312_000 picoseconds. - Weight::from_parts(4_561_000, 0) + // Minimum execution time: 4_386_000 picoseconds. + Weight::from_parts(4_658_000, 0) .saturating_add(Weight::from_parts(0, 2767)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -180,8 +199,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_246_000 picoseconds. - Weight::from_parts(5_385_000, 0) + // Minimum execution time: 5_154_000 picoseconds. 
+ Weight::from_parts(5_368_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) @@ -202,8 +221,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `105647` // Estimated: `109112` - // Minimum execution time: 203_231_000 picoseconds. - Weight::from_parts(212_877_000, 0) + // Minimum execution time: 209_858_000 picoseconds. + Weight::from_parts(220_504_000, 0) .saturating_add(Weight::from_parts(0, 109112)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) @@ -226,8 +245,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `65716` // Estimated: `69181` - // Minimum execution time: 123_434_000 picoseconds. - Weight::from_parts(125_488_000, 0) + // Minimum execution time: 127_163_000 picoseconds. + Weight::from_parts(132_512_000, 0) .saturating_add(Weight::from_parts(0, 69181)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/cumulus_pallet_xcmp_queue.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/cumulus_pallet_xcmp_queue.rs index 34c4ffc1103b4..7159ed0809bd9 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/weights/cumulus_pallet_xcmp_queue.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/cumulus_pallet_xcmp_queue.rs @@ -16,9 +16,9 @@ //! Autogenerated weights for `cumulus_pallet_xcmp_queue` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2025-04-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2025-05-05, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `341e66d5356e`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `16d5a52ef0dc`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 // Executed Command: @@ -56,8 +56,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `76` // Estimated: `1497` - // Minimum execution time: 5_118_000 picoseconds. - Weight::from_parts(5_317_000, 0) + // Minimum execution time: 4_910_000 picoseconds. + Weight::from_parts(5_170_000, 0) .saturating_add(Weight::from_parts(0, 1497)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -77,11 +77,11 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `82` // Estimated: `5487` - // Minimum execution time: 12_843_000 picoseconds. - Weight::from_parts(9_077_837, 0) + // Minimum execution time: 12_705_000 picoseconds. + Weight::from_parts(8_172_546, 0) .saturating_add(Weight::from_parts(0, 5487)) - // Standard Error: 7 - .saturating_add(Weight::from_parts(1_009, 0).saturating_mul(n.into())) + // Standard Error: 6 + .saturating_add(Weight::from_parts(1_028, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -100,11 +100,11 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `82` // Estimated: `5487` - // Minimum execution time: 10_809_000 picoseconds. - Weight::from_parts(14_865_726, 0) + // Minimum execution time: 10_949_000 picoseconds. 
+ Weight::from_parts(14_462_029, 0) .saturating_add(Weight::from_parts(0, 5487)) - // Standard Error: 164 - .saturating_add(Weight::from_parts(114_420, 0).saturating_mul(n.into())) + // Standard Error: 201 + .saturating_add(Weight::from_parts(119_824, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -112,6 +112,27 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 105457]`. + fn enqueue_empty_xcmp_message_at(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `264 + n * (1 ±0)` + // Estimated: `108986` + // Minimum execution time: 19_543_000 picoseconds. + Weight::from_parts(19_802_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) + // Standard Error: 12 + .saturating_add(Weight::from_parts(2_565, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) @@ -123,11 +144,11 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `117` // Estimated: `5487` - // Minimum execution time: 12_268_000 picoseconds. - Weight::from_parts(12_507_000, 0) + // Minimum execution time: 12_500_000 picoseconds. 
+ Weight::from_parts(12_672_000, 0) .saturating_add(Weight::from_parts(0, 5487)) - // Standard Error: 49_300 - .saturating_add(Weight::from_parts(95_127_663, 0).saturating_mul(n.into())) + // Standard Error: 221_916 + .saturating_add(Weight::from_parts(106_306_015, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) @@ -136,21 +157,19 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn enqueue_1000_small_xcmp_messages() -> Weight { // Proof Size summary in bytes: - // Measured: `82` - // Estimated: `5487` - // Minimum execution time: 129_028_000 picoseconds. - Weight::from_parts(130_841_000, 0) - .saturating_add(Weight::from_parts(0, 5487)) + // Measured: `52997` + // Estimated: `108986` + // Minimum execution time: 266_767_000 picoseconds. + Weight::from_parts(275_163_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) @@ -158,8 +177,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `76` // Estimated: `2767` - // Minimum execution time: 3_064_000 picoseconds. - Weight::from_parts(3_390_000, 0) + // Minimum execution time: 2_950_000 picoseconds. + Weight::from_parts(3_180_000, 0) .saturating_add(Weight::from_parts(0, 2767)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -170,8 +189,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `111` // Estimated: `2767` - // Minimum execution time: 4_341_000 picoseconds. - Weight::from_parts(4_593_000, 0) + // Minimum execution time: 4_530_000 picoseconds. + Weight::from_parts(4_798_000, 0) .saturating_add(Weight::from_parts(0, 2767)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -180,8 +199,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_245_000 picoseconds. - Weight::from_parts(5_368_000, 0) + // Minimum execution time: 5_231_000 picoseconds. 
+ Weight::from_parts(5_399_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) @@ -202,8 +221,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `105647` // Estimated: `109112` - // Minimum execution time: 220_348_000 picoseconds. - Weight::from_parts(225_256_000, 0) + // Minimum execution time: 227_766_000 picoseconds. + Weight::from_parts(236_600_000, 0) .saturating_add(Weight::from_parts(0, 109112)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) @@ -226,8 +245,8 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn // Proof Size summary in bytes: // Measured: `65716` // Estimated: `69181` - // Minimum execution time: 130_008_000 picoseconds. - Weight::from_parts(131_252_000, 0) + // Minimum execution time: 132_541_000 picoseconds. + Weight::from_parts(135_781_000, 0) .saturating_add(Weight::from_parts(0, 69181)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index 29061d8953af0..4fa910f0e6c24 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -860,17 +860,6 @@ impl pallet_revive::Config for Runtime { type FindAuthor = ::FindAuthor; } -impl TryFrom for pallet_revive::Call { - type Error = (); - - fn try_from(value: RuntimeCall) -> Result { - match value { - RuntimeCall::Revive(call) => Ok(call), - _ => Err(()), - } - } -} - impl pallet_sudo::Config for Runtime { type RuntimeEvent = RuntimeEvent; type RuntimeCall = RuntimeCall; diff --git a/cumulus/test/client/src/lib.rs b/cumulus/test/client/src/lib.rs index 288d694f2adbc..4579051f77705 100644 --- a/cumulus/test/client/src/lib.rs +++ b/cumulus/test/client/src/lib.rs @@ -238,36 +238,37 @@ fn get_keystore() -> sp_keystore::KeystorePtr { Arc::new(keystore) } -/// Given parachain block data and a slot, seal the block with an aura seal. Assumes that the -/// authorities of the test runtime are present in the keyring. -pub fn seal_block(block: ParachainBlockData, client: &Client) -> ParachainBlockData { +/// Seals the given block with an AURA seal. +/// +/// Assumes that the authorities of the test runtime are present in the keyring. +pub fn seal_block(mut block: Block, client: &Client) -> Block { + let parachain_slot = + find_pre_digest::::Signature>(&block.header).unwrap(); + let parent_hash = block.header.parent_hash; + let authorities = client.runtime_api().authorities(parent_hash).unwrap(); + let expected_author = slot_author::<::Pair>(parachain_slot, &authorities) + .expect("Should be able to find author"); + + let keystore = get_keystore(); + let seal_digest = seal::<_, sp_consensus_aura::sr25519::AuthorityPair>( + &block.header.hash(), + expected_author, + &keystore, + ) + .expect("Should be able to create seal"); + block.header.digest_mut().push(seal_digest); + + block +} + +/// Seals all the blocks in the given [`ParachainBlockData`] with an AURA seal. +/// +/// Assumes that the authorities of the test runtime are present in the keyring. 
+pub fn seal_parachain_block_data(block: ParachainBlockData, client: &Client) -> ParachainBlockData { let (blocks, proof) = block.into_inner(); ParachainBlockData::new( - blocks - .into_iter() - .map(|mut block| { - let parachain_slot = - find_pre_digest::::Signature>(&block.header) - .unwrap(); - let parent_hash = block.header.parent_hash; - let authorities = client.runtime_api().authorities(parent_hash).unwrap(); - let expected_author = - slot_author::<::Pair>(parachain_slot, &authorities) - .expect("Should be able to find author"); - - let keystore = get_keystore(); - let seal_digest = seal::<_, sp_consensus_aura::sr25519::AuthorityPair>( - &block.header.hash(), - expected_author, - &keystore, - ) - .expect("Should be able to create seal"); - block.header.digest_mut().push(seal_digest); - - block - }) - .collect::>(), + blocks.into_iter().map(|block| seal_block(block, &client)).collect::>(), proof, ) } diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index 7d69f3fc8dabe..e6a5995c5003b 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -876,6 +876,7 @@ pub fn node_config( keystore: KeystoreConfig::InMemory, database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 }, trie_cache_maximum_size: Some(64 * 1024 * 1024), + warm_up_trie_cache: None, state_pruning: Some(PruningMode::ArchiveAll), blocks_pruning: BlocksPruning::KeepAll, chain_spec: spec, diff --git a/cumulus/xcm/xcm-emulator/Cargo.toml b/cumulus/xcm/xcm-emulator/Cargo.toml index 102d5056ad2ad..a57df58b03ec6 100644 --- a/cumulus/xcm/xcm-emulator/Cargo.toml +++ b/cumulus/xcm/xcm-emulator/Cargo.toml @@ -44,3 +44,4 @@ polkadot-primitives = { workspace = true, default-features = true } polkadot-runtime-parachains = { workspace = true, default-features = true } xcm = { workspace = true, default-features = true } xcm-executor = { workspace = true, default-features = true } +xcm-simulator = { workspace = true, default-features = true } diff --git a/cumulus/xcm/xcm-emulator/src/lib.rs b/cumulus/xcm/xcm-emulator/src/lib.rs index 54e2e11ff2332..373c78fe1ef35 100644 --- a/cumulus/xcm/xcm-emulator/src/lib.rs +++ b/cumulus/xcm/xcm-emulator/src/lib.rs @@ -27,7 +27,7 @@ pub use std::{ fmt, marker::PhantomData, ops::Deref, - sync::{LazyLock, Mutex}, + sync::{Arc, LazyLock, Mutex}, }; // Substrate @@ -79,12 +79,13 @@ pub use polkadot_runtime_parachains::inclusion::{AggregateMessageOrigin, UmpQueu // Polkadot pub use polkadot_parachain_primitives::primitives::RelayChainBlockNumber; -use sp_core::crypto::AccountId32; +use sp_core::{crypto::AccountId32, H256}; pub use xcm::latest::prelude::{ AccountId32 as AccountId32Junction, Ancestor, Assets, Here, Location, Parachain as ParachainJunction, Parent, WeightLimit, XcmHash, }; pub use xcm_executor::traits::ConvertLocation; +use xcm_simulator::helpers::TopicIdTracker; pub type AccountIdOf = ::AccountId; @@ -1583,11 +1584,30 @@ where pub hops_dispatchable: HashMap DispatchResult>, pub hops_calls: HashMap, pub args: Args, + pub topic_id_tracker: Arc>, _marker: PhantomData<(Destination, Hops)>, } /// `Test` implementation. impl Test +where + Args: Clone, + Origin: Chain + Clone, + Destination: Chain + Clone, + Origin::RuntimeOrigin: OriginTrait> + Clone, + Destination::RuntimeOrigin: OriginTrait> + Clone, + Hops: Clone, +{ + /// Asserts that a single unique topic ID exists across all chains. 
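Before the accessors below, a rough, self-contained model of the invariant the wrapped tracker enforces. The real `TopicIdTracker` lives in `xcm_simulator::helpers` and its exact API may differ; this stand-in uses a plain byte array instead of `H256`:

```rust
// Simplified model: all chains in a test are expected to observe one and the
// same XCM topic ID; any divergence is a test failure.
use std::collections::HashMap;

type TopicId = [u8; 32]; // stand-in for H256

#[derive(Default)]
struct UniqueTopicTracker {
    // chain name -> topic ID observed on that chain
    ids: HashMap<String, TopicId>,
}

impl UniqueTopicTracker {
    /// Record the topic ID seen on `chain` and panic if it conflicts with an
    /// ID already recorded for another chain.
    fn insert_and_assert_unique(&mut self, chain: &str, id: TopicId) {
        for (other, existing) in &self.ids {
            assert_eq!(*existing, id, "topic ID on {chain} differs from the one on {other}");
        }
        self.ids.insert(chain.to_string(), id);
    }

    /// Assert that exactly one distinct topic ID was observed overall.
    fn assert_unique(&self) {
        let mut iter = self.ids.values();
        if let Some(first) = iter.next() {
            assert!(iter.all(|id| id == first), "multiple distinct topic IDs observed");
        }
    }
}
```

The `Arc<Mutex<...>>` around the field above lets the same tracker instance be shared by the per-chain assertion closures of a single test.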
+ pub fn assert_unique_topic_id(&self) { + self.topic_id_tracker.lock().unwrap().assert_unique(); + } + /// Inserts a topic ID for a specific chain and asserts it remains globally unique. + pub fn insert_unique_topic_id(&mut self, chain: &str, id: H256) { + self.topic_id_tracker.lock().unwrap().insert_and_assert_unique(chain, id); + } +} +impl Test where Args: Clone, Origin: Chain + Clone + CheckAssertion, @@ -1613,6 +1633,7 @@ where hops_dispatchable: Default::default(), hops_calls: Default::default(), args: test_args.args, + topic_id_tracker: Arc::new(Mutex::new(TopicIdTracker::new())), _marker: Default::default(), } } diff --git a/cumulus/zombienet/zombienet-sdk-helpers/Cargo.toml b/cumulus/zombienet/zombienet-sdk-helpers/Cargo.toml index b015c32868689..e5a0291697069 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/Cargo.toml +++ b/cumulus/zombienet/zombienet-sdk-helpers/Cargo.toml @@ -12,6 +12,5 @@ anyhow = { workspace = true, default-features = true } codec = { workspace = true, features = ["derive"] } log = { workspace = true } polkadot-primitives = { workspace = true, default-features = true } -subxt = { workspace = true, features = ["native"] } tokio = { workspace = true, features = ["rt-multi-thread", "macros", "time"] } - +zombienet-sdk = { workspace = true } diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index 0f572c4b0619d..be78a2f159050 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -8,14 +8,14 @@ use std::{ collections::{HashMap, HashSet}, ops::Range, }; -use subxt::{ - blocks::Block, events::Events, ext::scale_value::value, tx::DynamicPayload, utils::H256, - OnlineClient, PolkadotConfig, -}; use tokio::{ join, time::{sleep, Duration}, }; +use zombienet_sdk::subxt::{ + blocks::Block, events::Events, ext::scale_value::value, tx::DynamicPayload, utils::H256, + OnlineClient, PolkadotConfig, +}; // Maximum number of blocks to wait for a session change. // If it does not arrive for whatever reason, we should not wait forever. @@ -30,7 +30,7 @@ pub fn create_assign_core_call(core_and_para: &[(u32, u32)]) -> DynamicPayload { }); } - subxt::tx::dynamic( + zombienet_sdk::subxt::tx::dynamic( "Sudo", "sudo", vec![value! { @@ -238,7 +238,7 @@ pub async fn assert_para_throughput( /// /// The session change is detected by inspecting the events in the block. pub async fn wait_for_first_session_change( - blocks_sub: &mut subxt::backend::StreamOfResults< + blocks_sub: &mut zombienet_sdk::subxt::backend::StreamOfResults< Block>, >, ) -> Result<(), anyhow::Error> { @@ -249,7 +249,7 @@ pub async fn wait_for_first_session_change( /// /// The session change is detected by inspecting the events in the block. 
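For context on the dependency shuffle above: the test crates now reach subxt through the `zombienet_sdk` re-export, presumably so the subxt version stays pinned by zombienet-sdk itself instead of drifting in a second direct dependency. A minimal sketch of the resulting usage (the endpoint URL is a placeholder; in the tests the client comes from the spawned network, and helpers such as `wait_for_nth_session_change` below build on the same types):

```rust
// Sketch, assuming the subxt re-exported by zombienet-sdk.
use zombienet_sdk::subxt::{OnlineClient, PolkadotConfig};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Placeholder endpoint; real tests obtain it from the zombienet network.
    let client = OnlineClient::<PolkadotConfig>::from_url("ws://127.0.0.1:9944").await?;
    let block = client.blocks().at_latest().await?;
    println!("latest block #{} ({:?})", block.number(), block.hash());
    Ok(())
}
```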
pub async fn wait_for_nth_session_change( - blocks_sub: &mut subxt::backend::StreamOfResults< + blocks_sub: &mut zombienet_sdk::subxt::backend::StreamOfResults< Block>, >, mut sessions_to_wait: u32, diff --git a/cumulus/zombienet/zombienet-sdk/Cargo.toml b/cumulus/zombienet/zombienet-sdk/Cargo.toml index ba1f30f5763c3..546bda1293b4c 100644 --- a/cumulus/zombienet/zombienet-sdk/Cargo.toml +++ b/cumulus/zombienet/zombienet-sdk/Cargo.toml @@ -14,8 +14,6 @@ log = { workspace = true } polkadot-primitives = { workspace = true, default-features = true } serde = { workspace = true } serde_json = { workspace = true } -subxt = { workspace = true } -subxt-signer = { workspace = true } tokio = { workspace = true, features = ["rt-multi-thread"] } zombienet-sdk = { workspace = true } zombienet-orchestrator = { workspace = true } diff --git a/cumulus/zombienet/zombienet-sdk/tests/bootnodes/mod.rs b/cumulus/zombienet/zombienet-sdk/tests/bootnodes/mod.rs index 6b1529f1b0243..7ee1ae45038a6 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/bootnodes/mod.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/bootnodes/mod.rs @@ -5,9 +5,11 @@ use anyhow::anyhow; use tokio::time::Duration; use cumulus_zombienet_sdk_helpers::wait_for_nth_session_change; -use subxt::{OnlineClient, PolkadotConfig}; use zombienet_orchestrator::network::node::LogLineCountOptions; -use zombienet_sdk::{NetworkConfig, NetworkConfigBuilder}; +use zombienet_sdk::{ + subxt::{OnlineClient, PolkadotConfig}, + NetworkConfig, NetworkConfigBuilder, +}; async fn build_network_config() -> Result { let images = zombienet_sdk::environment::get_images_from_env(); diff --git a/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/elastic_scaling_multiple_blocks_per_slot.rs b/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/elastic_scaling_multiple_blocks_per_slot.rs index 660440fb7998c..4612e4ff142f5 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/elastic_scaling_multiple_blocks_per_slot.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/elastic_scaling/elastic_scaling_multiple_blocks_per_slot.rs @@ -8,9 +8,11 @@ use cumulus_zombienet_sdk_helpers::{ }; use polkadot_primitives::Id as ParaId; use serde_json::json; -use subxt::{OnlineClient, PolkadotConfig}; -use subxt_signer::sr25519::dev; -use zombienet_sdk::{NetworkConfig, NetworkConfigBuilder}; +use zombienet_sdk::{ + subxt::{OnlineClient, PolkadotConfig}, + subxt_signer::sr25519::dev, + NetworkConfig, NetworkConfigBuilder, +}; const PARA_ID: u32 = 2400; diff --git a/cumulus/zombienet/zombienet-sdk/tests/sync_blocks/sync_blocks_from_tip_without_connected_collator.rs b/cumulus/zombienet/zombienet-sdk/tests/sync_blocks/sync_blocks_from_tip_without_connected_collator.rs index 916c9e29672c8..e9f1690380bb1 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/sync_blocks/sync_blocks_from_tip_without_connected_collator.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/sync_blocks/sync_blocks_from_tip_without_connected_collator.rs @@ -5,8 +5,10 @@ use anyhow::anyhow; use cumulus_zombienet_sdk_helpers::assert_para_throughput; use polkadot_primitives::Id as ParaId; -use subxt::{OnlineClient, PolkadotConfig}; -use zombienet_sdk::{LocalFileSystem, Network, NetworkConfigBuilder}; +use zombienet_sdk::{ + subxt::{OnlineClient, PolkadotConfig}, + LocalFileSystem, Network, NetworkConfigBuilder, +}; const PARA_ID: u32 = 2000; const BEST_BLOCK_METRIC: &str = "block_height{status=\"best\"}"; diff --git a/docs/sdk/src/guides/your_first_node.rs b/docs/sdk/src/guides/your_first_node.rs index 
476c8ccbf35d4..899162fa60369 100644 --- a/docs/sdk/src/guides/your_first_node.rs +++ b/docs/sdk/src/guides/your_first_node.rs @@ -267,6 +267,7 @@ mod tests { } #[test] + #[ignore = "is flaky"] fn works_with_different_block_times() { test_runtime_preset(PARA_RUNTIME, 100, Some(DEV_RUNTIME_PRESET.into())); test_runtime_preset(PARA_RUNTIME, 3000, Some(DEV_RUNTIME_PRESET.into())); diff --git a/polkadot/node/network/bridge/src/rx/mod.rs b/polkadot/node/network/bridge/src/rx/mod.rs index f9ea26a7756c7..516f85271af74 100644 --- a/polkadot/node/network/bridge/src/rx/mod.rs +++ b/polkadot/node/network/bridge/src/rx/mod.rs @@ -973,6 +973,13 @@ fn update_our_view( finalized_number, ); + gum::debug!( + target: LOG_TARGET, + live_head_count = ?live_heads.len(), + "Our view updated, current view: {:?}", + our_view, + ); + dispatch_validation_event_to_all_unbounded( NetworkBridgeEvent::OurViewChange(our_view.clone()), ctx.sender(), diff --git a/polkadot/node/network/collator-protocol/src/collator_side/collation.rs b/polkadot/node/network/collator-protocol/src/collator_side/collation.rs index aa2fc52e90c52..bc5cec43eae94 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/collation.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/collation.rs @@ -31,6 +31,7 @@ use polkadot_primitives::{ }; /// The status of a collation as seen from the collator. +#[derive(Clone, Debug, PartialEq)] pub enum CollationStatus { /// The collation was created, but we did not advertise it to any validator. Created, @@ -54,6 +55,15 @@ impl CollationStatus { pub fn advance_to_requested(&mut self) { *self = Self::Requested; } + + /// Return label for metrics. + pub fn label(&self) -> &'static str { + match self { + CollationStatus::Created => "created", + CollationStatus::Advertised => "advertised", + CollationStatus::Requested => "requested", + } + } } /// A collation built by the collator. diff --git a/polkadot/node/network/collator-protocol/src/collator_side/metrics.rs b/polkadot/node/network/collator-protocol/src/collator_side/metrics.rs index 589c19b4f90d8..1781284fbeb71 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/metrics.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/metrics.rs @@ -14,12 +14,36 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . +use std::{ + collections::{HashMap, HashSet}, + time::{Duration, Instant}, +}; + +use polkadot_node_subsystem::prometheus::prometheus::HistogramTimer; use polkadot_node_subsystem_util::metrics::{self, prometheus}; +use polkadot_primitives::{vstaging::CandidateReceiptV2 as CandidateReceipt, BlockNumber, Hash}; +use sp_core::H256; + +use super::collation::CollationStatus; #[derive(Clone, Default)] pub struct Metrics(Option); impl Metrics { + /// Record the time a collation took to be backed. + pub fn on_collation_backed(&self, latency: f64) { + if let Some(metrics) = &self.0 { + metrics.collation_backing_latency.observe(latency); + } + } + + /// Record the time a collation took to be included. 
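The latency helpers here follow the subsystem's usual `Metrics(Option<MetricsInner>)` pattern: every recording method is a no-op unless a Prometheus registry was supplied. A compact, self-contained model of the same histogram-plus-timer idiom, using the plain `prometheus` crate rather than the substrate wrapper (metric name and buckets are illustrative):

```rust
use prometheus::{Histogram, HistogramOpts, Registry};

fn main() -> Result<(), prometheus::Error> {
    let registry = Registry::new();
    // Histogram with explicit buckets, like the collation latency metrics above.
    let latency = Histogram::with_opts(
        HistogramOpts::new(
            "collation_fetch_latency_seconds",
            "How long collations wait to be fetched",
        )
        .buckets(vec![0.001, 0.01, 0.05, 0.1, 0.25, 0.5, 1.0, 2.0, 5.0]),
    )?;
    registry.register(Box::new(latency.clone()))?;

    // Same start/stop pattern as the subsystem code.
    let timer = latency.start_timer();
    std::thread::sleep(std::time::Duration::from_millis(5));
    timer.stop_and_record(); // or stop_and_discard() to drop the sample
    Ok(())
}
```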
+ pub fn on_collation_included(&self, latency: f64) { + if let Some(metrics) = &self.0 { + metrics.collation_inclusion_latency.observe(latency); + } + } + pub fn on_advertisement_made(&self) { if let Some(metrics) = &self.0 { metrics.advertisements_made.inc(); @@ -52,6 +76,26 @@ impl Metrics { metrics.collation_distribution_time.with_label_values(&[label]).start_timer() }) } + + /// Create a timer to measure how much time collations spend before being fetched. + pub fn time_collation_fetch_latency(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.collation_fetch_latency.start_timer()) + } + + /// Create a timer to measure how much time it takes for fetched collations to be backed. + pub fn time_collation_backing_latency(&self) -> Option { + self.0 + .as_ref() + .map(|metrics| metrics.collation_backing_latency_time.start_timer()) + } + + /// Record the time a collation took before expiring. + /// Collations can expire in the following states: "advertised, fetched or backed" + pub fn on_collation_expired(&self, latency: f64, state: &'static str) { + if let Some(metrics) = &self.0 { + metrics.collation_expired_total.with_label_values(&[state]).observe(latency); + } + } } #[derive(Clone)] @@ -61,6 +105,11 @@ struct MetricsInner { collations_send_requested: prometheus::Counter, process_msg: prometheus::Histogram, collation_distribution_time: prometheus::HistogramVec, + collation_fetch_latency: prometheus::Histogram, + collation_backing_latency_time: prometheus::Histogram, + collation_backing_latency: prometheus::Histogram, + collation_inclusion_latency: prometheus::Histogram, + collation_expired_total: prometheus::HistogramVec, } impl metrics::Metrics for Metrics { @@ -116,8 +165,323 @@ impl metrics::Metrics for Metrics { )?, registry, )?, + collation_fetch_latency: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "polkadot_parachain_collation_fetch_latency", + "How much time collations spend waiting to be fetched", + ) + .buckets(vec![ + 0.001, 0.01, 0.025, 0.05, 0.1, 0.15, 0.25, 0.35, 0.5, 0.75, 1.0, 2.0, 5.0, + ]), + )?, + registry, + )?, + collation_backing_latency_time: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "polkadot_parachain_collation_backing_latency_time", + "How much time it takes for a fetched collation to be backed", + ) + .buckets(vec![ + 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 8.0, 10.0, 12.0, 15.0, 18.0, 24.0, 30.0, + ]), + )?, + registry, + )?, + collation_backing_latency: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "polkadot_parachain_collation_backing_latency", + "How many blocks away from the relay parent are collations backed", + ) + .buckets(vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]), + )?, + registry, + )?, + collation_inclusion_latency: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "polkadot_parachain_collation_inclusion_latency", + "How many blocks it takes for a backed collation to be included", + ) + .buckets(vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]), + )?, + registry, + )?, + collation_expired_total: prometheus::register( + prometheus::HistogramVec::new( + prometheus::HistogramOpts::new( + "polkadot_parachain_collation_expired", + "How many collations expired (not backed or not included)", + ) + .buckets(vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]), + &["state"], + )?, + registry, + )?, }; Ok(Metrics(Some(metrics))) } } + +// Equal to claim 
queue length. +pub(crate) const MAX_BACKING_DELAY: BlockNumber = 3; +// Paras availability period. In practice, candidates time out in exceptional situations. +pub(crate) const MAX_AVAILABILITY_DELAY: BlockNumber = 10; + +// Collations are kept in the tracker until they are included or expire. +#[derive(Default)] +pub(crate) struct CollationTracker { + // Keep track of collation expiration block number. + expire: HashMap>, + // All un-expired collation entries. + entries: HashMap, +} + +impl CollationTracker { + // Mark a tracked collation as backed and return the stats. + // After this call, the collation is no longer tracked. To measure + // inclusion time, call `track` again with the returned stats. + // + // Block built on top of N is earliest backed at N + 1. + // Returns `None` if the collation is not tracked. + pub fn collation_backed( + &mut self, + block_number: BlockNumber, + leaf: H256, + receipt: CandidateReceipt, + metrics: &Metrics, + ) -> Option { + let head = receipt.descriptor.para_head(); + + self.entries.remove(&head).map(|mut entry| { + let para_id = receipt.descriptor.para_id(); + let relay_parent = receipt.descriptor.relay_parent(); + + entry.backed_at = Some(block_number); + + // Observe the backing latency since the collation was fetched. + let maybe_latency = + entry.backed_latency_metric.take().map(|metric| metric.stop_and_record()); + + gum::debug!( + target: crate::LOG_TARGET_STATS, + latency_blocks = ?entry.backed(), + latency_time = ?maybe_latency, + relay_block = ?leaf, + ?relay_parent, + ?para_id, + ?head, + "A fetched collation was backed on relay chain", + ); + + metrics.on_collation_backed( + (block_number.saturating_sub(entry.relay_parent_number)) as f64, + ); + + entry + }) + } + + // Mark a previously backed collation as included and return the stats. + // After this call, the collation is no longer trackable. + // + // Block built on top of N is earliest backed at N + 1. + // Returns `None` if the collation is not in the tracker. + pub fn collation_included( + &mut self, + block_number: BlockNumber, + leaf: H256, + receipt: CandidateReceipt, + metrics: &Metrics, + ) -> Option { + let head = receipt.descriptor.para_head(); + + self.entries.remove(&head).map(|mut entry| { + entry.included_at = Some(block_number); + + if let Some(latency) = entry.included() { + metrics.on_collation_included(latency as f64); + + let para_id = receipt.descriptor.para_id(); + let relay_parent = receipt.descriptor.relay_parent(); + + gum::debug!( + target: crate::LOG_TARGET_STATS, + ?latency, + relay_block = ?leaf, + ?relay_parent, + ?para_id, + head = ?receipt.descriptor.para_head(), + "Collation included on relay chain", + ); + } + + entry + }) + } + + // Returns all the collations that have expired at `block_number`. + pub fn drain_expired(&mut self, block_number: BlockNumber) -> Vec { + let Some(expired) = self.expire.remove(&block_number) else { + // No collations built on all seen relay parents at height `block_number` + return Vec::new() + }; + + expired + .iter() + .filter_map(|head| self.entries.remove(head)) + .map(|mut entry| { + entry.expired_at = Some(block_number); + entry + }) + .collect::>() + } + + // Track a collation for a given period of time (TTL). The TTL depends + // on the collation state. + // The collation is evicted after it expires. + pub fn track(&mut self, mut stats: CollationStats) { + // Check the state of the collation to compute the TTL. + let ttl = if stats.fetch_latency().is_none() { + // Disable the fetch timer to prevent a bogus observe on drop.
+ if let Some(fetch_latency_metric) = stats.fetch_latency_metric.take() { + fetch_latency_metric.stop_and_discard(); + } + // Collation was never fetched, expires ASAP + 0 + } else if stats.backed().is_none() { + MAX_BACKING_DELAY + } else if stats.included().is_none() { + // Set expiration date relative to relay parent block. + stats.backed().unwrap_or_default() + MAX_AVAILABILITY_DELAY + } else { + // If the block is included, there is no reason to track it. + return + }; + + self.expire + .entry(stats.relay_parent_number + ttl) + .and_modify(|heads| { + heads.insert(stats.head); + }) + .or_insert_with(|| HashSet::from_iter(vec![stats.head].into_iter())); + self.entries.insert(stats.head, stats); + } +} + +// Information about how collations live their lives. +pub(crate) struct CollationStats { + // The pre-backing collation status information. + pre_backing_status: CollationStatus, + // The block header hash. + head: Hash, + // The relay parent on top of which the collation was built. + relay_parent_number: BlockNumber, + // The expiration block number if expired. + expired_at: Option, + // The backed block number. + backed_at: Option, + // The included block number if backed. + included_at: Option, + // The collation fetch time. + fetched_at: Option, + // Advertisement time. + advertised_at: Instant, + // The collation fetch latency (seconds). + fetch_latency_metric: Option, + // The collation backing latency (seconds). Duration since the collation was fetched + // until the import of the relay chain block where the collation is backed. + backed_latency_metric: Option, +} + +impl CollationStats { + /// Create a new empty instance. + pub fn new(head: Hash, relay_parent_number: BlockNumber, metrics: &Metrics) -> Self { + Self { + pre_backing_status: CollationStatus::Created, + head, + relay_parent_number, + advertised_at: std::time::Instant::now(), + backed_at: None, + expired_at: None, + fetched_at: None, + included_at: None, + fetch_latency_metric: metrics.time_collation_fetch_latency(), + backed_latency_metric: None, + } + } + + /// Returns the age at which the collation expired. + pub fn expired(&self) -> Option { + let expired_at = self.expired_at?; + Some(expired_at.saturating_sub(self.relay_parent_number)) + } + + /// Returns the age of the collation at the moment of backing. + pub fn backed(&self) -> Option { + let backed_at = self.backed_at?; + Some(backed_at.saturating_sub(self.relay_parent_number)) + } + + /// Returns the age of the collation at the moment of inclusion. + pub fn included(&self) -> Option { + let included_at = self.included_at?; + let backed_at = self.backed_at?; + Some(included_at.saturating_sub(backed_at)) + } + + /// Returns the time the collation waited to be fetched. + pub fn fetch_latency(&self) -> Option { + let fetched_at = self.fetched_at?; + Some(fetched_at - self.advertised_at) + } + + /// Get the parachain block header hash. + pub fn head(&self) -> H256 { + self.head + } + + /// Set the timestamp at which the collation was fetched. + pub fn set_fetched_at(&mut self, fetched_at: Instant) { + self.fetched_at = Some(fetched_at); + } + + /// Sets the pre-backing status of the collation. + pub fn set_pre_backing_status(&mut self, status: CollationStatus) { + self.pre_backing_status = status; + } + + /// Returns the pre-backing status of the collation. + pub fn pre_backing_status(&self) -> &CollationStatus { + &self.pre_backing_status + } + + /// Take the fetch latency metric timer.
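A simplified, runnable model of the bookkeeping `CollationTracker` performs above: an expiry index keyed by block height plus a map of live entries, drained as blocks are imported. The types are stand-ins for `CollationStats` and the real hashes:

```rust
use std::collections::{HashMap, HashSet};

type BlockNumber = u32;
type Head = u64; // stand-in for the para head hash

#[derive(Default)]
struct Tracker {
    // block height -> heads expiring at that height
    expire: HashMap<BlockNumber, HashSet<Head>>,
    // head -> relay parent number of the live entry
    entries: HashMap<Head, BlockNumber>,
}

impl Tracker {
    /// Track `head` until `relay_parent + ttl`, after which it expires.
    fn track(&mut self, head: Head, relay_parent: BlockNumber, ttl: BlockNumber) {
        self.expire.entry(relay_parent + ttl).or_default().insert(head);
        self.entries.insert(head, relay_parent);
    }

    /// Remove and return everything that expires exactly at `block_number`.
    fn drain_expired(&mut self, block_number: BlockNumber) -> Vec<Head> {
        let Some(expired) = self.expire.remove(&block_number) else { return Vec::new() };
        expired.into_iter().filter(|head| self.entries.remove(head).is_some()).collect()
    }
}

fn main() {
    let mut t = Tracker::default();
    t.track(42, 100, 3); // expires at block 103
    assert!(t.drain_expired(102).is_empty());
    assert_eq!(t.drain_expired(103), vec![42]);
}
```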
+ pub fn take_fetch_latency_metric(&mut self) -> Option { + self.fetch_latency_metric.take() + } + + /// Set the backing latency metric timer. + pub fn set_backed_latency_metric(&mut self, timer: Option) { + self.backed_latency_metric = timer; + } +} + +impl Drop for CollationStats { + fn drop(&mut self) { + if let Some(fetch_latency_metric) = self.fetch_latency_metric.take() { + // This metric is only observed when the collation was sent fully to the validator. + // + // If `fetch_latency_metric` is still `Some`, the metric was not observed yet. + // We don't want it to be observed on drop and report a higher value at a later point in time. + fetch_latency_metric.stop_and_discard(); + } + // If the timer still exists, discard it. It is measured in `collation_backed`. + if let Some(backed_latency_metric) = self.backed_latency_metric.take() { + backed_latency_metric.stop_and_discard(); + } + } +} diff --git a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs index 277b9edf07bf0..11905e432fa3d 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs @@ -23,6 +23,7 @@ use bitvec::{bitvec, vec::BitVec}; use futures::{ channel::oneshot, future::Fuse, pin_mut, select, stream::FuturesUnordered, FutureExt, StreamExt, }; +use metrics::{CollationStats, CollationTracker}; use schnellru::{ByLength, LruMap}; use sp_core::Pair; @@ -46,15 +47,19 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_util::{ backing_implicit_view::View as ImplicitView, reputation::{ReputationAggregator, REPUTATION_CHANGE_INTERVAL}, - runtime::{fetch_claim_queue, get_group_rotation_info, ClaimQueueSnapshot, RuntimeInfo}, + runtime::{ + fetch_claim_queue, get_candidate_events, get_group_rotation_info, ClaimQueueSnapshot, + RuntimeInfo, + }, TimeoutExt, }; use polkadot_primitives::{ - vstaging::CandidateReceiptV2 as CandidateReceipt, AuthorityDiscoveryId, CandidateHash, - CollatorPair, CoreIndex, GroupIndex, Hash, HeadData, Id as ParaId, SessionIndex, + vstaging::{CandidateEvent, CandidateReceiptV2 as CandidateReceipt}, + AuthorityDiscoveryId, BlockNumber, CandidateHash, CollatorPair, CoreIndex, GroupIndex, Hash, + HeadData, Id as ParaId, SessionIndex, }; -use crate::{modify_reputation, LOG_TARGET}; +use crate::{modify_reputation, LOG_TARGET, LOG_TARGET_STATS}; mod collation; mod error; @@ -199,23 +204,32 @@ struct PeerData { unknown_heads: LruMap, } -/// A type wrapping a collation and it's designated core index. -struct CollationWithCoreIndex(Collation, CoreIndex); +/// A type wrapping a collation, its designated core index and stats. +struct CollationData { collation: Collation, core_index: CoreIndex, stats: Option, } -impl CollationWithCoreIndex { +impl CollationData { /// Returns inner collation ref. pub fn collation(&self) -> &Collation { - &self.0 + &self.collation } /// Returns inner collation mut ref. pub fn collation_mut(&mut self) -> &mut Collation { - &mut self.0 + &mut self.collation } /// Returns inner core index. pub fn core_index(&self) -> &CoreIndex { - &self.1 + &self.core_index + } + + /// Takes the stats and returns them. + pub fn take_stats(&mut self) -> Option { + self.stats.take() } } @@ -224,13 +238,19 @@ struct PerRelayParent { /// on top of this relay parent. validator_group: HashMap, /// Distributed collations.
- collations: HashMap, + collations: HashMap, /// Number of assignments per core assignments: HashMap, + /// The relay parent block number. + block_number: Option, } impl PerRelayParent { - fn new(para_id: ParaId, claim_queue: ClaimQueueSnapshot) -> Self { + fn new( + para_id: ParaId, + claim_queue: ClaimQueueSnapshot, + block_number: Option, + ) -> Self { let assignments = claim_queue.iter_all_claims().fold(HashMap::new(), |mut acc, (core, claims)| { let n_claims = claims.iter().filter(|para| para == &&para_id).count(); @@ -240,7 +260,12 @@ impl PerRelayParent { acc }); - Self { validator_group: HashMap::default(), collations: HashMap::new(), assignments } + Self { + validator_group: HashMap::default(), + collations: HashMap::new(), + assignments, + block_number, + } } } @@ -306,6 +331,9 @@ struct State { /// Aggregated reputation change reputation: ReputationAggregator, + + /// A utility for tracking all collations produced by the collator. + collation_tracker: CollationTracker, } impl State { @@ -333,6 +361,7 @@ impl State { active_collation_fetches: Default::default(), advertisement_timeouts: Default::default(), reputation, + collation_tracker: Default::default(), } } } @@ -485,12 +514,21 @@ async fn distribute_collation( ParentHeadData::OnlyHash(parent_head_data_hash) }; + let para_head = receipt.descriptor.para_head(); per_relay_parent.collations.insert( candidate_hash, - CollationWithCoreIndex( - Collation { receipt, pov, parent_head_data, status: CollationStatus::Created }, + CollationData { + collation: Collation { + receipt, + pov, + parent_head_data, + status: CollationStatus::Created, + }, core_index, - ), + stats: per_relay_parent + .block_number + .map(|n| CollationStats::new(para_head, n, &state.metrics)), + }, ); // The leaf should be present in the allowed ancestry of some leaf. @@ -1060,6 +1098,7 @@ async fn handle_incoming_request( waiting.collation_fetch_active = true; // Obtain a timer for sending collation let _ = state.metrics.time_collation_distribution("send"); + send_collation(state, req, receipt, pov, parent_head_data).await; } }, @@ -1239,6 +1278,60 @@ async fn handle_network_msg( Ok(()) } +/// Update collation tracker with the backed and included candidates. +#[overseer::contextbounds(CollatorProtocol, prefix = crate::overseer)] +async fn process_block_events( + ctx: &mut Context, + collation_tracker: &mut CollationTracker, + leaf: Hash, + maybe_block_number: Option, + para_id: ParaId, + metrics: &Metrics, +) { + if let Ok(events) = get_candidate_events(ctx.sender(), leaf).await { + let Some(block_number) = maybe_block_number else { + // This should not happen. If it does, this log message explains why + // metrics and logs are missing for the candidates under this block. + gum::debug!( + target: crate::LOG_TARGET_STATS, + relay_block = ?leaf, + ?para_id, + "Failed to get relay chain block number", + ); + return + }; + + for ev in events { + match ev { + CandidateEvent::CandidateIncluded(receipt, _, _, _) => { + if receipt.descriptor.para_id() != para_id { + continue + } + collation_tracker.collation_included(block_number, leaf, receipt, metrics); + }, + CandidateEvent::CandidateBacked(receipt, _, _, _) => { + if receipt.descriptor.para_id() != para_id { + continue + } + + let Some(block_number) = maybe_block_number else { continue }; + let Some(stats) = + collation_tracker.collation_backed(block_number, leaf, receipt, metrics) + else { + continue + }; + + // Continue measuring inclusion latency.
+ collation_tracker.track(stats); + }, + _ => { + // We do not care about other events. + }, + } + } + } +} + /// Handles our view changes. #[overseer::contextbounds(CollatorProtocol, prefix = crate::overseer)] async fn handle_our_view_change( @@ -1254,14 +1347,27 @@ async fn handle_our_view_change( let added: Vec<_> = view.iter().filter(|h| !implicit_view.contains_leaf(h)).collect(); for leaf in added { - let claim_queue = fetch_claim_queue(ctx.sender(), *leaf).await?; - state.per_relay_parent.insert(*leaf, PerRelayParent::new(para_id, claim_queue)); + let claim_queue: ClaimQueueSnapshot = fetch_claim_queue(ctx.sender(), *leaf).await?; implicit_view .activate_leaf(ctx.sender(), *leaf) .await .map_err(Error::ImplicitViewFetchError)?; + let block_number = implicit_view.block_number(leaf); + state + .per_relay_parent + .insert(*leaf, PerRelayParent::new(para_id, claim_queue, block_number)); + + process_block_events( + ctx, + &mut state.collation_tracker, + *leaf, + block_number, + para_id, + &state.metrics, + ) + .await; let allowed_ancestry = implicit_view .known_allowed_relay_parents_under(leaf, state.collating_on) .unwrap_or_default(); @@ -1275,11 +1381,13 @@ async fn handle_our_view_change( .collect::>(); for block_hash in allowed_ancestry { + let block_number = implicit_view.block_number(block_hash); + if state.per_relay_parent.get(block_hash).is_none() { let claim_queue = fetch_claim_queue(ctx.sender(), *block_hash).await?; state .per_relay_parent - .insert(*block_hash, PerRelayParent::new(para_id, claim_queue)); + .insert(*block_hash, PerRelayParent::new(para_id, claim_queue, block_number)); } let per_relay_parent = @@ -1305,50 +1413,115 @@ async fn handle_our_view_change( // If the leaf is deactivated it still may stay in the view as a part // of implicit ancestry. Only update the state after the hash is actually // pruned from the block info storage. + let maybe_block_number = implicit_view.block_number(&leaf); let pruned = implicit_view.deactivate_leaf(leaf); for removed in &pruned { gum::debug!(target: LOG_TARGET, relay_parent = ?removed, "Removing relay parent because our view changed."); + if let Some(block_number) = maybe_block_number { + let expired_collations = state.collation_tracker.drain_expired(block_number); + process_expired_collations(expired_collations, *removed, para_id, &state.metrics); + } + + // Get all the collations built on top of the removed leaf.
let collations = state .per_relay_parent .remove(removed) .map(|per_relay_parent| per_relay_parent.collations) .unwrap_or_default(); + for collation_with_core in collations.into_values() { let collation = collation_with_core.collation(); + let candidate_hash: CandidateHash = collation.receipt.hash(); - let candidate_hash = collation.receipt.hash(); state.collation_result_senders.remove(&candidate_hash); state.validator_groups_buf.remove_candidate(&candidate_hash); - match collation.status { - CollationStatus::Created => gum::warn!( - target: LOG_TARGET, - candidate_hash = ?collation.receipt.hash(), - pov_hash = ?collation.pov.hash(), - "Collation wasn't advertised to any validator.", - ), - CollationStatus::Advertised => gum::debug!( - target: LOG_TARGET, - candidate_hash = ?collation.receipt.hash(), - pov_hash = ?collation.pov.hash(), - "Collation was advertised but not requested by any validator.", - ), - CollationStatus::Requested => gum::debug!( - target: LOG_TARGET, - candidate_hash = ?collation.receipt.hash(), - pov_hash = ?collation.pov.hash(), - "Collation was requested.", - ), - } + process_out_of_view_collation(&mut state.collation_tracker, collation_with_core); } + state.waiting_collation_fetches.remove(removed); } } Ok(()) } +fn process_out_of_view_collation( + collation_tracker: &mut CollationTracker, + mut collation_with_core: CollationData, +) { + let collation = collation_with_core.collation(); + let candidate_hash: CandidateHash = collation.receipt.hash(); + + match collation.status { + CollationStatus::Created => gum::warn!( + target: LOG_TARGET, + ?candidate_hash, + pov_hash = ?collation.pov.hash(), + "Collation wasn't advertised to any validator.", + ), + CollationStatus::Advertised => gum::debug!( + target: LOG_TARGET, + ?candidate_hash, + pov_hash = ?collation.pov.hash(), + "Collation was advertised but not requested by any validator.", + ), + CollationStatus::Requested => { + gum::debug!( + target: LOG_TARGET, + ?candidate_hash, + pov_hash = ?collation.pov.hash(), + "Collation was requested.", + ); + }, + } + + let collation_status = collation.status.clone(); + let Some(mut stats) = collation_with_core.take_stats() else { return }; + + // If the collation stats are still available, it means it was never + // successfully fetched, even if a fetch request was received but did not succeed. + // + // Will expire in its current state at the next block import. + stats.set_pre_backing_status(collation_status); + collation_tracker.track(stats); +} + +fn process_expired_collations( + expired_collations: Vec, + removed: Hash, + para_id: ParaId, + metrics: &Metrics, +) { + for expired_collation in expired_collations { + let collation_state = if expired_collation.fetch_latency().is_none() { + // If the collation was not fetched, we rely on the status provided + // by the collator protocol. + expired_collation.pre_backing_status().label() + } else if expired_collation.backed().is_none() { + "fetched" + } else if expired_collation.included().is_none() { + "backed" + } else { + "none" + }; + + let age = expired_collation.expired().unwrap_or_default(); + gum::debug!( + target: crate::LOG_TARGET_STATS, + ?age, + ?collation_state, + relay_parent = ?removed, + ?para_id, + head = ?expired_collation.head(), + "Collation expired", + ); + + metrics.on_collation_expired(age as f64, collation_state); + } +} + /// The collator protocol collator side main loop.
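The `process_expired_collations` decision chain above distills to a small state-labeling function; a stand-alone sketch with a placeholder `Stats` type in place of `CollationStats`:

```rust
// Minimal model of the expired-state labeling above.
struct Stats {
    fetched: bool,
    backed: bool,
    included: bool,
    pre_backing_label: &'static str, // "created" | "advertised" | "requested"
}

fn expired_state_label(s: &Stats) -> &'static str {
    if !s.fetched {
        // Never fetched: report the last status the collator protocol saw.
        s.pre_backing_label
    } else if !s.backed {
        "fetched"
    } else if !s.included {
        "backed"
    } else {
        "none" // fully included collations are not expected to expire
    }
}

fn main() {
    let s = Stats { fetched: true, backed: false, included: false, pre_backing_label: "requested" };
    assert_eq!(expired_state_label(&s), "fetched");
}
```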
#[overseer::contextbounds(CollatorProtocol, prefix = crate::overseer)] pub(crate) async fn run( @@ -1385,7 +1558,7 @@ async fn run_inner( let new_reputation_delay = || futures_timer::Delay::new(reputation_interval).fuse(); let mut reputation_delay = new_reputation_delay(); - let mut state = State::new(local_peer_id, collator_pair, metrics, reputation); + let mut state = State::new(local_peer_id, collator_pair, metrics.clone(), reputation); let mut runtime = RuntimeInfo::new(None); loop { @@ -1416,10 +1589,11 @@ async fn run_inner( }, CollationSendResult { relay_parent, candidate_hash, peer_id, timed_out } = state.active_collation_fetches.select_next_some() => { + let next = if let Some(waiting) = state.waiting_collation_fetches.get_mut(&relay_parent) { if timed_out { gum::debug!( - target: LOG_TARGET, + target: LOG_TARGET_STATS, ?relay_parent, ?peer_id, ?candidate_hash, @@ -1435,6 +1609,37 @@ async fn run_inner( state.validator_groups_buf.reset_validator_interest(candidate_hash, authority_id); } waiting.waiting_peers.remove(&(peer_id, candidate_hash)); + + // Update collation status to fetched. + if let Some(per_relay_parent) = state.per_relay_parent.get_mut(&relay_parent) { + if let Some(collation_with_core) = per_relay_parent.collations.get_mut(&candidate_hash) { + let maybe_stats = collation_with_core.take_stats(); + let our_para_id = collation_with_core.collation().receipt.descriptor.para_id(); + + if let Some(mut stats) = maybe_stats { + // Update the timestamp when the collation has been sent (from the subsystem's perspective). + stats.set_fetched_at(std::time::Instant::now()); + gum::debug!( + target: LOG_TARGET_STATS, + para_head = ?stats.head(), + %our_para_id, + "Collation fetch latency is {}ms", + stats.fetch_latency().unwrap_or_default().as_millis(), + ); + + // Update the pre-backing status. It should be `Requested` at this point. + stats.set_pre_backing_status(collation_with_core.collation().status.clone()); + debug_assert_eq!(collation_with_core.collation().status, CollationStatus::Requested); + + // Observe fetch latency metric. + stats.take_fetch_latency_metric(); + stats.set_backed_latency_metric(metrics.time_collation_backing_latency()); + + // Next step is to measure backing latency.
+ state.collation_tracker.track(stats); + } + } + } } if let Some(next) = waiting.req_queue.pop_front() { diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs index 08e5f733c12db..997711f076302 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/tests/mod.rs @@ -1460,6 +1460,7 @@ fn connect_to_buffered_groups() { 1, ) .await; + test_state.group_rotation_info = test_state.group_rotation_info.bump_rotation(); let head_b = test_state.relay_parent; diff --git a/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs b/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs index 236a4965dfded..8be5c4e876b5b 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs @@ -181,32 +181,46 @@ pub(super) async fn update_view( } for _ in ancestry_iter { - let Some(msg) = + while let Some(msg) = overseer_peek_with_timeout(virtual_overseer, Duration::from_millis(50)).await - else { - return - }; - - if !matches!( - &msg, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _, - RuntimeApiRequest::ClaimQueue(_) - )) - ) { - // Claim queue has already been fetched for this leaf. - break - } + { + if !matches!( + &msg, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::ClaimQueue(_), + )) + ) && !matches!( + &msg, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::CandidateEvents(_), + )) + ) { + break + } - assert_matches!( - overseer_recv_with_timeout(virtual_overseer, Duration::from_millis(50)).await.unwrap(), - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _, - RuntimeApiRequest::ClaimQueue(tx), - )) => { - tx.send(Ok(test_state.claim_queue.clone())).unwrap(); + match overseer_recv_with_timeout(virtual_overseer, Duration::from_millis(50)) + .await + .unwrap() + { + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::ClaimQueue(tx), + )) => { + tx.send(Ok(test_state.claim_queue.clone())).unwrap(); + }, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + .., + RuntimeApiRequest::CandidateEvents(tx), + )) => { + tx.send(Ok(vec![])).unwrap(); + }, + _ => { + unimplemented!() + }, } - ); + } } } } @@ -681,6 +695,7 @@ fn advertise_and_send_collation_by_hash() { (candidate, pov) }) .collect(); + for (candidate, pov) in &candidates { distribute_collation_with_receipt( &mut virtual_overseer, diff --git a/polkadot/node/network/collator-protocol/src/lib.rs b/polkadot/node/network/collator-protocol/src/lib.rs index b8111931a48a7..b3a508285e1ee 100644 --- a/polkadot/node/network/collator-protocol/src/lib.rs +++ b/polkadot/node/network/collator-protocol/src/lib.rs @@ -45,6 +45,7 @@ mod validator_side; mod validator_side_experimental; const LOG_TARGET: &'static str = "parachain::collator-protocol"; +const LOG_TARGET_STATS: &'static str = "parachain::collator-protocol-stats"; /// A collator eviction policy - how fast to evict collators which are inactive. 
#[derive(Debug, Clone, Copy)] diff --git a/polkadot/node/subsystem-util/src/backing_implicit_view.rs b/polkadot/node/subsystem-util/src/backing_implicit_view.rs index d8e242109955a..4af42d80b4735 100644 --- a/polkadot/node/subsystem-util/src/backing_implicit_view.rs +++ b/polkadot/node/subsystem-util/src/backing_implicit_view.rs @@ -154,6 +154,12 @@ impl View { self.leaves.contains_key(leaf_hash) } + /// Get the block number of a leaf in the current view. + /// Returns `None` if leaf is not in the view. + pub fn block_number(&self, leaf_hash: &Hash) -> Option { + self.block_info_storage.get(leaf_hash).map(|block_info| block_info.block_number) + } + /// Activate a leaf in the view. /// This will request the minimum relay parents the leaf and will load headers in the /// ancestry of the leaf as needed. These are the 'implicit ancestors' of the leaf. diff --git a/polkadot/node/test/service/src/lib.rs b/polkadot/node/test/service/src/lib.rs index c2b66e8be9cc5..7e0e4c9f2daae 100644 --- a/polkadot/node/test/service/src/lib.rs +++ b/polkadot/node/test/service/src/lib.rs @@ -205,6 +205,7 @@ pub fn node_config( keystore: KeystoreConfig::InMemory, database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 }, trie_cache_maximum_size: Some(64 * 1024 * 1024), + warm_up_trie_cache: None, state_pruning: Default::default(), blocks_pruning: BlocksPruning::KeepFinalized, chain_spec: Box::new(spec), diff --git a/polkadot/runtime/common/src/assigned_slots/mod.rs b/polkadot/runtime/common/src/assigned_slots/mod.rs index 71bd85ec20a7d..6e96904a54b17 100644 --- a/polkadot/runtime/common/src/assigned_slots/mod.rs +++ b/polkadot/runtime/common/src/assigned_slots/mod.rs @@ -676,11 +676,11 @@ mod tests { type RuntimeCall = RuntimeCall; } - impl frame_system::offchain::CreateInherent for Test + impl frame_system::offchain::CreateBare for Test where RuntimeCall: From, { - fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + fn create_bare(call: Self::RuntimeCall) -> Self::Extrinsic { UncheckedExtrinsic::new_bare(call) } } diff --git a/polkadot/runtime/common/src/integration_tests.rs b/polkadot/runtime/common/src/integration_tests.rs index 8c45a3b58d594..a477ef05f59ac 100644 --- a/polkadot/runtime/common/src/integration_tests.rs +++ b/polkadot/runtime/common/src/integration_tests.rs @@ -106,11 +106,11 @@ where type RuntimeCall = RuntimeCall; } -impl frame_system::offchain::CreateInherent for Test +impl frame_system::offchain::CreateBare for Test where RuntimeCall: From, { - fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + fn create_bare(call: Self::RuntimeCall) -> Self::Extrinsic { UncheckedExtrinsic::new_bare(call) } } diff --git a/polkadot/runtime/common/src/paras_registrar/mock.rs b/polkadot/runtime/common/src/paras_registrar/mock.rs index c1c2618b3c13c..44b3a7b679e74 100644 --- a/polkadot/runtime/common/src/paras_registrar/mock.rs +++ b/polkadot/runtime/common/src/paras_registrar/mock.rs @@ -57,11 +57,11 @@ where type RuntimeCall = RuntimeCall; } -impl frame_system::offchain::CreateInherent for Test +impl frame_system::offchain::CreateBare for Test where RuntimeCall: From, { - fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + fn create_bare(call: Self::RuntimeCall) -> Self::Extrinsic { UncheckedExtrinsic::new_bare(call) } } diff --git a/polkadot/runtime/parachains/src/disputes/slashing.rs b/polkadot/runtime/parachains/src/disputes/slashing.rs index 95dbf2ba42bb9..59c4e9f5f926d 100644 --- a/polkadot/runtime/parachains/src/disputes/slashing.rs +++ 
b/polkadot/runtime/parachains/src/disputes/slashing.rs @@ -653,7 +653,7 @@ impl Default for SlashingReportHandler { impl HandleReports for SlashingReportHandler where - T: Config + frame_system::offchain::CreateInherent>, + T: Config + frame_system::offchain::CreateBare>, R: ReportOffence< T::AccountId, T::KeyOwnerIdentification, @@ -685,7 +685,7 @@ where dispute_proof: DisputeProof, key_owner_proof: ::KeyOwnerProof, ) -> Result<(), sp_runtime::TryRuntimeError> { - use frame_system::offchain::{CreateInherent, SubmitTransaction}; + use frame_system::offchain::{CreateBare, SubmitTransaction}; let session_index = dispute_proof.time_slot.session_index; let validator_index = dispute_proof.validator_index.0; @@ -696,7 +696,7 @@ where key_owner_proof, }; - let xt = >>::create_inherent(call.into()); + let xt = >>::create_bare(call.into()); match SubmitTransaction::>::submit_transaction(xt) { Ok(()) => { log::info!( diff --git a/polkadot/runtime/parachains/src/mock.rs b/polkadot/runtime/parachains/src/mock.rs index 74c17d690310a..60606242679bc 100644 --- a/polkadot/runtime/parachains/src/mock.rs +++ b/polkadot/runtime/parachains/src/mock.rs @@ -99,11 +99,11 @@ where type RuntimeCall = RuntimeCall; } -impl frame_system::offchain::CreateInherent for Test +impl frame_system::offchain::CreateBare for Test where RuntimeCall: From, { - fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + fn create_bare(call: Self::RuntimeCall) -> Self::Extrinsic { UncheckedExtrinsic::new_bare(call) } } diff --git a/polkadot/runtime/parachains/src/paras/mod.rs b/polkadot/runtime/parachains/src/paras/mod.rs index b31eb5d9967df..c747d02d1d605 100644 --- a/polkadot/runtime/parachains/src/paras/mod.rs +++ b/polkadot/runtime/parachains/src/paras/mod.rs @@ -636,7 +636,7 @@ pub mod pallet { frame_system::Config + configuration::Config + shared::Config - + frame_system::offchain::CreateInherent> + + frame_system::offchain::CreateBare> { #[allow(deprecated)] type RuntimeEvent: From + IsType<::RuntimeEvent>; @@ -2279,7 +2279,7 @@ impl Pallet { ) { use frame_system::offchain::SubmitTransaction; - let xt = T::create_inherent(Call::include_pvf_check_statement { stmt, signature }.into()); + let xt = T::create_bare(Call::include_pvf_check_statement { stmt, signature }.into()); if let Err(e) = SubmitTransaction::>::submit_transaction(xt) { log::error!(target: LOG_TARGET, "Error submitting pvf check statement: {:?}", e,); } diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 68d1b9f1710aa..3362be0c071ea 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -707,11 +707,11 @@ where } } -impl frame_system::offchain::CreateInherent for Runtime +impl frame_system::offchain::CreateBare for Runtime where RuntimeCall: From, { - fn create_inherent(call: RuntimeCall) -> UncheckedExtrinsic { + fn create_bare(call: RuntimeCall) -> UncheckedExtrinsic { UncheckedExtrinsic::new_bare(call) } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_bounties.rs b/polkadot/runtime/rococo/src/weights/pallet_bounties.rs index ee903a2f4e33a..141b9626f5152 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_bounties.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_bounties.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_bounties` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2025-02-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
DATE: 2025-04-30, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `d3a9aad6f7a3`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `b5f9d80cc353`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 // Executed Command: @@ -64,11 +64,11 @@ impl pallet_bounties::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `210` // Estimated: `3593` - // Minimum execution time: 27_405_000 picoseconds. - Weight::from_parts(28_960_668, 0) + // Minimum execution time: 26_840_000 picoseconds. + Weight::from_parts(28_494_084, 0) .saturating_add(Weight::from_parts(0, 3593)) - // Standard Error: 6 - .saturating_add(Weight::from_parts(607, 0).saturating_mul(d.into())) + // Standard Error: 5 + .saturating_add(Weight::from_parts(644, 0).saturating_mul(d.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -80,8 +80,8 @@ impl pallet_bounties::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `302` // Estimated: `3642` - // Minimum execution time: 14_322_000 picoseconds. - Weight::from_parts(14_993_000, 0) + // Minimum execution time: 14_096_000 picoseconds. + Weight::from_parts(14_743_000, 0) .saturating_add(Weight::from_parts(0, 3642)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -92,8 +92,8 @@ impl pallet_bounties::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `322` // Estimated: `3642` - // Minimum execution time: 14_227_000 picoseconds. - Weight::from_parts(14_967_000, 0) + // Minimum execution time: 13_949_000 picoseconds. + Weight::from_parts(14_522_000, 0) .saturating_add(Weight::from_parts(0, 3642)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -106,8 +106,8 @@ impl pallet_bounties::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `302` // Estimated: `3642` - // Minimum execution time: 16_980_000 picoseconds. - Weight::from_parts(18_167_000, 0) + // Minimum execution time: 17_266_000 picoseconds. + Weight::from_parts(17_968_000, 0) .saturating_add(Weight::from_parts(0, 3642)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -120,8 +120,8 @@ impl pallet_bounties::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `498` // Estimated: `3642` - // Minimum execution time: 41_196_000 picoseconds. - Weight::from_parts(42_588_000, 0) + // Minimum execution time: 41_346_000 picoseconds. + Weight::from_parts(42_379_000, 0) .saturating_add(Weight::from_parts(0, 3642)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -134,8 +134,8 @@ impl pallet_bounties::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `494` // Estimated: `3642` - // Minimum execution time: 31_821_000 picoseconds. - Weight::from_parts(32_823_000, 0) + // Minimum execution time: 31_182_000 picoseconds. + Weight::from_parts(32_520_000, 0) .saturating_add(Weight::from_parts(0, 3642)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -148,8 +148,8 @@ impl pallet_bounties::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `400` // Estimated: `3642` - // Minimum execution time: 18_795_000 picoseconds. - Weight::from_parts(19_758_000, 0) + // Minimum execution time: 17_660_000 picoseconds. 
+ Weight::from_parts(18_922_000, 0) .saturating_add(Weight::from_parts(0, 3642)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -170,8 +170,8 @@ impl pallet_bounties::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `764` // Estimated: `8799` - // Minimum execution time: 116_017_000 picoseconds. - Weight::from_parts(117_965_000, 0) + // Minimum execution time: 114_245_000 picoseconds. + Weight::from_parts(119_907_000, 0) .saturating_add(Weight::from_parts(0, 8799)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(8)) @@ -188,8 +188,8 @@ impl pallet_bounties::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `444` // Estimated: `3642` - // Minimum execution time: 42_519_000 picoseconds. - Weight::from_parts(44_526_000, 0) + // Minimum execution time: 43_088_000 picoseconds. + Weight::from_parts(44_290_000, 0) .saturating_add(Weight::from_parts(0, 3642)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) @@ -208,8 +208,8 @@ impl pallet_bounties::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `680` // Estimated: `6196` - // Minimum execution time: 80_800_000 picoseconds. - Weight::from_parts(82_838_000, 0) + // Minimum execution time: 81_575_000 picoseconds. + Weight::from_parts(82_919_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(6)) @@ -220,8 +220,8 @@ impl pallet_bounties::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `358` // Estimated: `3642` - // Minimum execution time: 14_685_000 picoseconds. - Weight::from_parts(15_756_000, 0) + // Minimum execution time: 14_680_000 picoseconds. + Weight::from_parts(15_096_000, 0) .saturating_add(Weight::from_parts(0, 3642)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -237,15 +237,31 @@ impl pallet_bounties::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0 + b * (297 ±0)` // Estimated: `1887 + b * (5206 ±0)` - // Minimum execution time: 3_160_000 picoseconds. - Weight::from_parts(3_311_000, 0) + // Minimum execution time: 3_023_000 picoseconds. 
+ Weight::from_parts(3_260_000, 0) .saturating_add(Weight::from_parts(0, 1887)) - // Standard Error: 13_041 - .saturating_add(Weight::from_parts(37_231_411, 0).saturating_mul(b.into())) + // Standard Error: 8_928 + .saturating_add(Weight::from_parts(36_833_581, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(1)) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(b.into()))) .saturating_add(Weight::from_parts(0, 5206).saturating_mul(b.into())) } + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyDescriptions` (r:1 w:0) + /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(16400), added: 18875, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn poke_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `16795` + // Estimated: `19865` + // Minimum execution time: 38_100_000 picoseconds. + Weight::from_parts(39_566_000, 0) + .saturating_add(Weight::from_parts(0, 19865)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } } diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index 4d32146c215c1..5aef006ee3d5b 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -179,11 +179,11 @@ where type Extrinsic = UncheckedExtrinsic; } -impl frame_system::offchain::CreateInherent for Runtime +impl frame_system::offchain::CreateBare for Runtime where RuntimeCall: From, { - fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + fn create_bare(call: Self::RuntimeCall) -> Self::Extrinsic { UncheckedExtrinsic::new_bare(call) } } diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 07b63441298f1..86be11d5335cc 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -1065,11 +1065,11 @@ where } } -impl frame_system::offchain::CreateInherent for Runtime +impl frame_system::offchain::CreateBare for Runtime where RuntimeCall: From, { - fn create_inherent(call: RuntimeCall) -> UncheckedExtrinsic { + fn create_bare(call: RuntimeCall) -> UncheckedExtrinsic { UncheckedExtrinsic::new_bare(call) } } diff --git a/polkadot/xcm/pallet-xcm/Cargo.toml b/polkadot/xcm/pallet-xcm/Cargo.toml index c22bda0094f6a..7f57a75096aac 100644 --- a/polkadot/xcm/pallet-xcm/Cargo.toml +++ b/polkadot/xcm/pallet-xcm/Cargo.toml @@ -38,6 +38,7 @@ pallet-assets = { workspace = true, default-features = true } polkadot-parachain-primitives = { workspace = true, default-features = true } polkadot-runtime-parachains = { workspace = true, default-features = true } sp-tracing = { features = ["test-utils"], workspace = true, default-features = true } +xcm-simulator = { workspace = true, default-features = true } [features] default = ["std"] @@ -73,6 +74,7 @@ runtime-benchmarks = [ "xcm-runtime-apis/runtime-benchmarks", "xcm/runtime-benchmarks", ] +test-utils = ["std"] try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs index e83fefc3231cd..e018b5ec211f7 
100644 --- a/polkadot/xcm/pallet-xcm/src/lib.rs +++ b/polkadot/xcm/pallet-xcm/src/lib.rs @@ -26,6 +26,8 @@ mod mock; mod tests; pub mod migration; +#[cfg(any(test, feature = "test-utils"))] +pub mod xcm_helpers; extern crate alloc; diff --git a/polkadot/xcm/pallet-xcm/src/mock.rs b/polkadot/xcm/pallet-xcm/src/mock.rs index c37d0bf1c77e3..4ee64e8b85a5c 100644 --- a/polkadot/xcm/pallet-xcm/src/mock.rs +++ b/polkadot/xcm/pallet-xcm/src/mock.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use codec::Encode; pub use core::cell::RefCell; use frame_support::{ construct_runtime, derive_impl, parameter_types, @@ -46,6 +45,7 @@ use xcm_executor::{ traits::{Identity, JustTry}, XcmExecutor, }; +use xcm_simulator::helpers::derive_topic_id; use crate::{self as pallet_xcm, TestWeightInfo}; @@ -190,7 +190,7 @@ impl SendXcm for TestSendXcm { { return Err(SendError::Transport("Intentional deliver failure used in tests".into())); } - let hash = fake_message_hash(&message); + let hash = derive_topic_id(&message); SENT_XCM.with(|q| q.borrow_mut().push(pair)); Ok(hash) } @@ -211,7 +211,7 @@ impl SendXcm for TestSendXcmErrX8 { } } fn deliver(pair: (Location, Xcm<()>)) -> Result { - let hash = fake_message_hash(&pair.1); + let hash = derive_topic_id(&pair.1); SENT_XCM.with(|q| q.borrow_mut().push(pair)); Ok(hash) } @@ -243,7 +243,7 @@ impl SendXcm for TestPaidForPara3000SendXcm { Ok((pair, Para3000PaymentAssets::get())) } fn deliver(pair: (Location, Xcm<()>)) -> Result { - let hash = fake_message_hash(&pair.1); + let hash = derive_topic_id(&pair.1); SENT_XCM.with(|q| q.borrow_mut().push(pair)); Ok(hash) } @@ -690,12 +690,18 @@ impl super::benchmarking::Config for Test { } } -pub(crate) fn last_event() -> RuntimeEvent { - System::events().pop().expect("RuntimeEvent expected").event +pub(crate) fn all_events() -> Vec { + System::events().into_iter().map(|e| e.event).collect() } pub(crate) fn last_events(n: usize) -> Vec { - System::events().into_iter().map(|e| e.event).rev().take(n).rev().collect() + let all_events = all_events(); + let split_idx = all_events.len().saturating_sub(n); + all_events.split_at(split_idx).1.to_vec() +} + +pub(crate) fn last_event() -> RuntimeEvent { + last_events(1).pop().expect("RuntimeEvent expected") } pub(crate) fn buy_execution(fees: impl Into) -> Instruction { @@ -739,7 +745,3 @@ pub(crate) fn new_test_ext_with_balances_and_xcm_version( ext.execute_with(|| System::set_block_number(1)); ext } - -pub(crate) fn fake_message_hash(message: &Xcm) -> XcmHash { - message.using_encoded(sp_io::hashing::blake2_256) -} diff --git a/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs b/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs index 00ebbb2010e36..5b2ac0f44492c 100644 --- a/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs +++ b/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs @@ -19,6 +19,7 @@ use crate::{ mock::*, tests::{ALICE, BOB, FEE_AMOUNT, INITIAL_BALANCE, SEND_AMOUNT}, + xcm_helpers::find_xcm_sent_message_id, DispatchResult, OriginFor, }; use frame_support::{ @@ -1434,6 +1435,8 @@ fn remote_asset_reserve_and_remote_fee_reserve_call( assert_eq!(AssetsPallet::active_issuance(usdc_id_location.clone()), expected_usdc_issuance); // Verify sent XCM program + let expected_hash = + find_xcm_sent_message_id::(all_events()).expect("Missing XcmPallet::Sent event"); assert_eq!( sent_xcm(), vec![( @@ -1452,7 +1455,8 @@ fn remote_asset_reserve_and_remote_fee_reserve_call( 
buy_limited_execution(expected_fee_on_dest, Unlimited), DepositAsset { assets: AllCounted(1).into(), beneficiary } ]) - } + }, + SetTopic(expected_hash), ]) )], ); @@ -2526,6 +2530,7 @@ fn remote_asset_reserve_and_remote_fee_reserve_paid_call( let foreign_id_location_reanchored = foreign_asset_id_location.clone().reanchored(&dest, &context).unwrap(); let dest_reanchored = dest.reanchored(&reserve_location, &context).unwrap(); + let sent_msg_id = find_xcm_sent_message_id::(all_events()).unwrap(); let sent_message = Xcm(vec![ WithdrawAsset((Location::here(), SEND_AMOUNT).into()), ClearOrigin, @@ -2540,8 +2545,8 @@ fn remote_asset_reserve_and_remote_fee_reserve_paid_call( DepositAsset { assets: AllCounted(1).into(), beneficiary }, ]), }, + SetTopic(sent_msg_id), ]); - let sent_msg_id = fake_message_hash(&sent_message); let mut last_events = last_events(7).into_iter(); // asset events diff --git a/polkadot/xcm/pallet-xcm/src/tests/mod.rs b/polkadot/xcm/pallet-xcm/src/tests/mod.rs index 8897fa1763631..0094309e5fa67 100644 --- a/polkadot/xcm/pallet-xcm/src/tests/mod.rs +++ b/polkadot/xcm/pallet-xcm/src/tests/mod.rs @@ -23,6 +23,7 @@ use crate::{ migration::data::NeedsMigration, mock::*, pallet::{LockedFungibles, RemoteLockedFungibles, SupportedVersion}, + xcm_helpers::find_xcm_sent_message_id, AssetTraps, AuthorizedAliasers, Config, CurrentMigration, Error, ExecuteControllerWeightInfo, LatestVersionedLocation, MaxAuthorizedAliases, Pallet, Queries, QueryStatus, RecordedXcm, RemoteLockedFungibleRecord, ShouldRecordXcm, VersionDiscoveryQueue, VersionMigrationStage, @@ -45,6 +46,7 @@ use xcm_executor::{ traits::{Properties, QueryHandler, QueryResponseStatus, ShouldExecute}, XcmExecutor, }; +use xcm_simulator::fake_message_hash; const ALICE: AccountId = AccountId::new([0u8; 32]); const BOB: AccountId = AccountId::new([1u8; 32]); @@ -1679,17 +1681,18 @@ fn execute_initiate_transfer_and_check_sent_event() { ); assert_ok!(result); + let sent_msg_id = find_xcm_sent_message_id::(all_events()).unwrap(); let sent_message: Xcm<()> = Xcm(vec![ WithdrawAsset(Assets::new()), ClearOrigin, BuyExecution { fees: fee_asset.clone(), weight_limit: Unlimited }, DepositAsset { assets: All.into(), beneficiary: beneficiary.clone() }, + SetTopic(sent_msg_id), ]); assert!(log_capture .contains(format!("xcm::send: Sending msg msg={:?}", sent_message).as_str())); let origin: Location = AccountId32 { network: None, id: ALICE.into() }.into(); - let sent_msg_id = fake_message_hash(&sent_message); assert_eq!( last_events(2), vec![ @@ -1738,7 +1741,7 @@ fn deliver_failure_with_expect_error() { assert!(result.is_err()); // Check logs for send attempt and failure - assert!(log_capture.contains("xcm::send: Sending msg msg=Xcm([WithdrawAsset(Assets([])), ClearOrigin, ExpectError(Some((1, Unimplemented)))])")); + assert!(log_capture.contains("xcm::send: Sending msg msg=Xcm([WithdrawAsset(Assets([])), ClearOrigin, ExpectError(Some((1, Unimplemented))), SetTopic(")); assert!(log_capture.contains("xcm::send: XCM failed to deliver with error error=Transport(\"Intentional deliver failure used in tests\")")); }) }); diff --git a/substrate/frame/revive/fixtures/contracts/set_empty_storage.rs b/polkadot/xcm/pallet-xcm/src/xcm_helpers.rs similarity index 57% rename from substrate/frame/revive/fixtures/contracts/set_empty_storage.rs rename to polkadot/xcm/pallet-xcm/src/xcm_helpers.rs index 1d2773278bd3a..80f8c6ebad0ef 100644 --- a/substrate/frame/revive/fixtures/contracts/set_empty_storage.rs +++ b/polkadot/xcm/pallet-xcm/src/xcm_helpers.rs @@ 
-1,5 +1,3 @@ -// This file is part of Substrate. - // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 @@ -15,18 +13,21 @@ // See the License for the specific language governing permissions and // limitations under the License. -#![no_std] -#![no_main] -include!("../panic_handler.rs"); - -use uapi::{HostFn, HostFnImpl as api, StorageFlags}; - -#[no_mangle] -#[polkavm_derive::polkavm_export] -pub extern "C" fn deploy() {} +use xcm::latest::XcmHash; -#[no_mangle] -#[polkavm_derive::polkavm_export] -pub extern "C" fn call() { - api::set_storage(StorageFlags::empty(), &[0u8; 32], &[0u8; 4]); +/// Finds the message ID of the first `XcmPallet::Sent` event in the given events. +pub fn find_xcm_sent_message_id( + events: impl IntoIterator::RuntimeEvent>, +) -> Option +where + T: crate::Config, + ::RuntimeEvent: TryInto>, +{ + events.into_iter().find_map(|event| { + if let Ok(crate::Event::Sent { message_id, .. }) = event.try_into() { + Some(message_id) + } else { + None + } + }) } diff --git a/polkadot/xcm/src/v5/mod.rs b/polkadot/xcm/src/v5/mod.rs index 18fa3a9d6b63f..28be606287280 100644 --- a/polkadot/xcm/src/v5/mod.rs +++ b/polkadot/xcm/src/v5/mod.rs @@ -357,6 +357,15 @@ impl XcmContext { pub fn with_message_id(message_id: XcmHash) -> XcmContext { XcmContext { origin: None, message_id, topic: None } } + + /// Returns the topic if set, otherwise the message_id. + pub fn topic_or_message_id(&self) -> XcmHash { + if let Some(id) = self.topic { + id.into() + } else { + self.message_id + } + } } /// Cross-Consensus Message: A message from one consensus system to another. diff --git a/polkadot/xcm/xcm-builder/Cargo.toml b/polkadot/xcm/xcm-builder/Cargo.toml index 31cdb3a6ec16d..92a3f0203261e 100644 --- a/polkadot/xcm/xcm-builder/Cargo.toml +++ b/polkadot/xcm/xcm-builder/Cargo.toml @@ -41,6 +41,7 @@ polkadot-primitives = { workspace = true, default-features = true } polkadot-runtime-parachains = { workspace = true, default-features = true } polkadot-test-runtime = { workspace = true } primitive-types = { features = ["codec", "num-traits", "scale-info"], workspace = true } +xcm-simulator = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/polkadot/xcm/xcm-builder/src/tests/assets.rs b/polkadot/xcm/xcm-builder/src/tests/assets.rs index b510eab8df53e..51ca5a16ea9c6 100644 --- a/polkadot/xcm/xcm-builder/src/tests/assets.rs +++ b/polkadot/xcm/xcm-builder/src/tests/assets.rs @@ -191,10 +191,10 @@ fn reserve_transfer_should_work() { ReserveAssetDeposited((Parent, 100u128).into()), ClearOrigin, DepositAsset { assets: AllCounted(1).into(), beneficiary: three }, + SetTopic(hash), ]); - let expected_hash = fake_message_hash(&expected_msg); assert_eq!(asset_list(Parachain(2)), vec![(Here, 100).into()]); - assert_eq!(sent_xcm(), vec![(Parachain(2).into(), expected_msg, expected_hash)]); + assert_eq!(sent_xcm(), vec![(Parachain(2).into(), expected_msg, hash)]); } #[test] diff --git a/polkadot/xcm/xcm-builder/src/tests/bridging/mod.rs b/polkadot/xcm/xcm-builder/src/tests/bridging/mod.rs index 90ad9921d65a1..d260f60109ad0 100644 --- a/polkadot/xcm/xcm-builder/src/tests/bridging/mod.rs +++ b/polkadot/xcm/xcm-builder/src/tests/bridging/mod.rs @@ -73,7 +73,7 @@ fn test_weight(mut count: u64) -> Weight { fn maybe_forward_id_for(topic: &XcmHash) -> XcmHash { match UsingTopic::get() { - true => forward_id_for(topic), + true => *topic, false => fake_id(), } } diff --git a/polkadot/xcm/xcm-builder/src/tests/mock.rs 
b/polkadot/xcm/xcm-builder/src/tests/mock.rs index d81044bfffbbe..b932aaee6fcf8 100644 --- a/polkadot/xcm/xcm-builder/src/tests/mock.rs +++ b/polkadot/xcm/xcm-builder/src/tests/mock.rs @@ -49,6 +49,7 @@ pub use xcm_executor::{ }, AssetsInHolding, Config, }; +pub use xcm_simulator::helpers::derive_topic_id; #[derive(Debug)] pub enum TestOrigin { @@ -177,7 +178,7 @@ impl SendXcm for TestMessageSenderImpl { msg: &mut Option>, ) -> SendResult<(Location, Xcm<()>, XcmHash)> { let msg = msg.take().unwrap(); - let hash = fake_message_hash(&msg); + let hash = derive_topic_id(&msg); let triplet = (dest.take().unwrap(), msg, hash); Ok((triplet, SEND_PRICE.with(|l| l.borrow().clone()))) } @@ -207,7 +208,7 @@ impl ExportXcm for TestMessageExporter { Ok(Assets::new()) } }); - let h = fake_message_hash(&m); + let h = derive_topic_id(&m); match r { Ok(price) => Ok(((network, channel, s, d, m, h), price)), Err(e) => { diff --git a/polkadot/xcm/xcm-builder/src/tests/querying.rs b/polkadot/xcm/xcm-builder/src/tests/querying.rs index 062b508368f77..b07c1b389eb3b 100644 --- a/polkadot/xcm/xcm-builder/src/tests/querying.rs +++ b/polkadot/xcm/xcm-builder/src/tests/querying.rs @@ -39,14 +39,16 @@ fn pallet_query_should_work() { ); assert_eq!(r, Outcome::Complete { used: Weight::from_parts(10, 10) }); - let expected_msg = Xcm::<()>(vec![QueryResponse { - query_id: 1, - max_weight: Weight::from_parts(50, 50), - response: Response::PalletsInfo(Default::default()), - querier: Some(Here.into()), - }]); - let expected_hash = fake_message_hash(&expected_msg); - assert_eq!(sent_xcm(), vec![(Parachain(1).into(), expected_msg, expected_hash)]); + let expected_msg = Xcm::<()>(vec![ + QueryResponse { + query_id: 1, + max_weight: Weight::from_parts(50, 50), + response: Response::PalletsInfo(Default::default()), + querier: Some(Here.into()), + }, + SetTopic(hash), + ]); + assert_eq!(sent_xcm(), vec![(Parachain(1).into(), expected_msg, hash)]); } #[test] @@ -72,26 +74,28 @@ fn pallet_query_with_results_should_work() { ); assert_eq!(r, Outcome::Complete { used: Weight::from_parts(10, 10) }); - let expected_msg = Xcm::<()>(vec![QueryResponse { - query_id: 1, - max_weight: Weight::from_parts(50, 50), - response: Response::PalletsInfo( - vec![PalletInfo::new( - 1, - b"Balances".as_ref().into(), - b"pallet_balances".as_ref().into(), - 1, - 42, - 69, - ) - .unwrap()] - .try_into() - .unwrap(), - ), - querier: Some(Here.into()), - }]); - let expected_hash = fake_message_hash(&expected_msg); - assert_eq!(sent_xcm(), vec![(Parachain(1).into(), expected_msg, expected_hash)]); + let expected_msg = Xcm::<()>(vec![ + QueryResponse { + query_id: 1, + max_weight: Weight::from_parts(50, 50), + response: Response::PalletsInfo( + vec![PalletInfo::new( + 1, + b"Balances".as_ref().into(), + b"pallet_balances".as_ref().into(), + 1, + 42, + 69, + ) + .unwrap()] + .try_into() + .unwrap(), + ), + querier: Some(Here.into()), + }, + SetTopic(hash), + ]); + assert_eq!(sent_xcm(), vec![(Parachain(1).into(), expected_msg, hash)]); } #[test] diff --git a/polkadot/xcm/xcm-builder/src/tests/transacting.rs b/polkadot/xcm/xcm-builder/src/tests/transacting.rs index ba932beaeb3d9..9d4509c21000d 100644 --- a/polkadot/xcm/xcm-builder/src/tests/transacting.rs +++ b/polkadot/xcm/xcm-builder/src/tests/transacting.rs @@ -146,14 +146,16 @@ fn report_successful_transact_status_should_work() { Weight::zero(), ); assert_eq!(r, Outcome::Complete { used: Weight::from_parts(70, 70) }); - let expected_msg = Xcm(vec![QueryResponse { - response: 
Response::DispatchResult(MaybeErrorCode::Success), - query_id: 42, - max_weight: Weight::from_parts(5000, 5000), - querier: Some(Here.into()), - }]); - let expected_hash = fake_message_hash(&expected_msg); - assert_eq!(sent_xcm(), vec![(Parent.into(), expected_msg, expected_hash)]); + let expected_msg = Xcm(vec![ + QueryResponse { + response: Response::DispatchResult(MaybeErrorCode::Success), + query_id: 42, + max_weight: Weight::from_parts(5000, 5000), + querier: Some(Here.into()), + }, + SetTopic(hash), + ]); + assert_eq!(sent_xcm(), vec![(Parent.into(), expected_msg, hash)]); } #[test] @@ -182,14 +184,16 @@ fn report_failed_transact_status_should_work() { Weight::zero(), ); assert_eq!(r, Outcome::Complete { used: Weight::from_parts(70, 70) }); - let expected_msg = Xcm(vec![QueryResponse { - response: Response::DispatchResult(vec![2].into()), - query_id: 42, - max_weight: Weight::from_parts(5000, 5000), - querier: Some(Here.into()), - }]); - let expected_hash = fake_message_hash(&expected_msg); - assert_eq!(sent_xcm(), vec![(Parent.into(), expected_msg, expected_hash)]); + let expected_msg = Xcm(vec![ + QueryResponse { + response: Response::DispatchResult(vec![2].into()), + query_id: 42, + max_weight: Weight::from_parts(5000, 5000), + querier: Some(Here.into()), + }, + SetTopic(hash), + ]); + assert_eq!(sent_xcm(), vec![(Parent.into(), expected_msg, hash)]); } #[test] @@ -311,12 +315,14 @@ fn clear_transact_status_should_work() { Weight::zero(), ); assert_eq!(r, Outcome::Complete { used: Weight::from_parts(80, 80) }); - let expected_msg = Xcm(vec![QueryResponse { - response: Response::DispatchResult(MaybeErrorCode::Success), - query_id: 42, - max_weight: Weight::from_parts(5000, 5000), - querier: Some(Here.into()), - }]); - let expected_hash = fake_message_hash(&expected_msg); - assert_eq!(sent_xcm(), vec![(Parent.into(), expected_msg, expected_hash)]); + let expected_msg = Xcm(vec![ + QueryResponse { + response: Response::DispatchResult(MaybeErrorCode::Success), + query_id: 42, + max_weight: Weight::from_parts(5000, 5000), + querier: Some(Here.into()), + }, + SetTopic(hash), + ]); + assert_eq!(sent_xcm(), vec![(Parent.into(), expected_msg, hash)]); } diff --git a/polkadot/xcm/xcm-builder/src/universal_exports.rs b/polkadot/xcm/xcm-builder/src/universal_exports.rs index fc85640c757fb..da718ea1b6ed3 100644 --- a/polkadot/xcm/xcm-builder/src/universal_exports.rs +++ b/polkadot/xcm/xcm-builder/src/universal_exports.rs @@ -233,10 +233,6 @@ impl>> ExporterFor for NetworkExportTable } } -pub fn forward_id_for(original_id: &XcmHash) -> XcmHash { - (b"forward_id_for", original_id).using_encoded(sp_io::hashing::blake2_256) -} - /// Implementation of `SendXcm` which wraps the message inside an `ExportMessage` instruction /// and sends it to a destination known to be able to handle it. /// @@ -287,7 +283,7 @@ impl Some(forward_id_for(t)), + Some(SetTopic(t)) => Some(*t), _ => None, }; @@ -368,7 +364,7 @@ impl Some(forward_id_for(t)), + Some(SetTopic(t)) => Some(*t), _ => None, }; diff --git a/polkadot/xcm/xcm-builder/tests/mock/mod.rs b/polkadot/xcm/xcm-builder/tests/mock/mod.rs index 4a12ea257d2ef..7a2eb8cc55adf 100644 --- a/polkadot/xcm/xcm-builder/tests/mock/mod.rs +++ b/polkadot/xcm/xcm-builder/tests/mock/mod.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
-use codec::Encode; use core::cell::RefCell; use frame_support::{ construct_runtime, derive_impl, parameter_types, @@ -39,6 +38,7 @@ use xcm_builder::{ IsChildSystemParachain, IsConcrete, MintLocation, RespectSuspension, SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, }; +use xcm_simulator::helpers::derive_topic_id; pub type AccountId = AccountId32; pub type Balance = u128; @@ -57,7 +57,7 @@ impl SendXcm for TestSendXcm { msg: &mut Option>, ) -> SendResult<(Location, Xcm<()>, XcmHash)> { let msg = msg.take().unwrap(); - let hash = fake_message_hash(&msg); + let hash = derive_topic_id(&msg); let triplet = (dest.take().unwrap(), msg, hash); Ok((triplet, Assets::new())) } @@ -256,7 +256,3 @@ pub fn kusama_like_with_balances(balances: Vec<(AccountId, Balance)>) -> sp_io:: ext.execute_with(|| System::set_block_number(1)); ext } - -pub fn fake_message_hash(message: &Xcm) -> XcmHash { - message.using_encoded(sp_io::hashing::blake2_256) -} diff --git a/polkadot/xcm/xcm-builder/tests/scenarios.rs b/polkadot/xcm/xcm-builder/tests/scenarios.rs index e3a1924a19d7a..80f3672206269 100644 --- a/polkadot/xcm/xcm-builder/tests/scenarios.rs +++ b/polkadot/xcm/xcm-builder/tests/scenarios.rs @@ -17,13 +17,14 @@ mod mock; use mock::{ - fake_message_hash, kusama_like_with_balances, AccountId, Balance, Balances, BaseXcmWeight, - System, XcmConfig, CENTS, + kusama_like_with_balances, AccountId, Balance, Balances, BaseXcmWeight, System, XcmConfig, + CENTS, }; use polkadot_parachain_primitives::primitives::Id as ParaId; use sp_runtime::traits::AccountIdConversion; use xcm::latest::{prelude::*, Error::UntrustedTeleportLocation}; use xcm_executor::XcmExecutor; +use xcm_simulator::fake_message_hash; pub const ALICE: AccountId = AccountId::new([0u8; 32]); pub const PARA_ID: u32 = 2000; @@ -177,17 +178,16 @@ fn report_holding_works() { let other_para_acc: AccountId = ParaId::from(other_para_id).into_account_truncating(); assert_eq!(Balances::free_balance(other_para_acc), amount); assert_eq!(Balances::free_balance(para_acc), INITIAL_BALANCE - 2 * amount); - let expected_msg = Xcm(vec![QueryResponse { - query_id: response_info.query_id, - response: Response::Assets(vec![].into()), - max_weight: response_info.max_weight, - querier: Some(Here.into()), - }]); - let expected_hash = fake_message_hash(&expected_msg); - assert_eq!( - mock::sent_xcm(), - vec![(Parachain(PARA_ID).into(), expected_msg, expected_hash,)] - ); + let expected_msg = Xcm(vec![ + QueryResponse { + query_id: response_info.query_id, + response: Response::Assets(vec![].into()), + max_weight: response_info.max_weight, + querier: Some(Here.into()), + }, + SetTopic(hash.into()), + ]); + assert_eq!(mock::sent_xcm(), vec![(Parachain(PARA_ID).into(), expected_msg, hash,)]); }); } @@ -261,12 +261,9 @@ fn teleport_to_asset_hub_works() { let expected_msg = Xcm(vec![ReceiveTeleportedAsset((Parent, amount).into()), ClearOrigin] .into_iter() .chain(teleport_effects.clone().into_iter()) + .chain([SetTopic(hash.into())]) .collect()); - let expected_hash = fake_message_hash(&expected_msg); - assert_eq!( - mock::sent_xcm(), - vec![(Parachain(asset_hub_id).into(), expected_msg, expected_hash,)] - ); + assert_eq!(mock::sent_xcm(), vec![(Parachain(asset_hub_id).into(), expected_msg, hash,)]); }); } @@ -314,12 +311,9 @@ fn reserve_based_transfer_works() { let expected_msg = Xcm(vec![ReserveAssetDeposited((Parent, amount).into()), ClearOrigin] .into_iter() .chain(transfer_effects.into_iter()) + .chain([SetTopic(hash.into())]) 
.collect()); - let expected_hash = fake_message_hash(&expected_msg); - assert_eq!( - mock::sent_xcm(), - vec![(Parachain(other_para_id).into(), expected_msg, expected_hash,)] - ); + assert_eq!(mock::sent_xcm(), vec![(Parachain(other_para_id).into(), expected_msg, hash,)]); }); } diff --git a/polkadot/xcm/xcm-executor/src/lib.rs b/polkadot/xcm/xcm-executor/src/lib.rs index cf0b7887fd93b..a14c417217634 100644 --- a/polkadot/xcm/xcm-executor/src/lib.rs +++ b/polkadot/xcm/xcm-executor/src/lib.rs @@ -247,6 +247,7 @@ impl ExecuteXcm for XcmExecutor XcmExecutor { msg: Xcm<()>, reason: FeeReason, ) -> Result { + let mut msg = msg; + // Only the last `SetTopic` instruction is considered relevant. If the message does not end + // with it, a `topic_or_message_id()` from the context is appended to it. This behaviour is + // then consistent with `WithUniqueTopic`. + if !matches!(msg.last(), Some(SetTopic(_))) { + let topic_id = self.context.topic_or_message_id(); + msg.0.push(SetTopic(topic_id.into())); + } tracing::trace!( target: "xcm::send", ?msg, @@ -461,7 +470,7 @@ impl XcmExecutor { self.original_origin.clone(), dest, error.clone(), - self.context.message_id, + self.context.topic_or_message_id(), ); Err(error.into()) }, @@ -856,7 +865,7 @@ impl XcmExecutor { Config::XcmEventEmitter::emit_process_failure_event( self.original_origin.clone(), error, - self.context.message_id, + self.context.topic_or_message_id(), ); *r = Err(ExecutorError { index: i as u32, diff --git a/polkadot/xcm/xcm-executor/src/tests/initiate_transfer.rs b/polkadot/xcm/xcm-executor/src/tests/initiate_transfer.rs index 9395b047c3448..eb9fa27454457 100644 --- a/polkadot/xcm/xcm-executor/src/tests/initiate_transfer.rs +++ b/polkadot/xcm/xcm-executor/src/tests/initiate_transfer.rs @@ -59,7 +59,7 @@ fn clears_origin() { let (dest, sent_message) = sent_xcm().pop().unwrap(); assert_eq!(dest, Parent.into()); - assert_eq!(sent_message.len(), 5); + assert_eq!(sent_message.len(), 6); let mut instr = sent_message.inner().iter(); assert!(matches!(instr.next().unwrap(), ReserveAssetDeposited(..))); assert!(matches!(instr.next().unwrap(), PayFees { .. })); @@ -96,7 +96,7 @@ fn preserves_origin() { let (dest, sent_message) = sent_xcm().pop().unwrap(); assert_eq!(dest, Parent.into()); - assert_eq!(sent_message.len(), 5); + assert_eq!(sent_message.len(), 6); let mut instr = sent_message.inner().iter(); assert!(matches!(instr.next().unwrap(), ReserveAssetDeposited(..))); assert!(matches!(instr.next().unwrap(), PayFees { .. })); @@ -137,7 +137,7 @@ fn unpaid_execution_goes_after_origin_alteration() { let (destination, sent_message) = sent_xcm().pop().unwrap(); assert_eq!(destination, Parent.into()); - assert_eq!(sent_message.len(), 5); + assert_eq!(sent_message.len(), 6); let mut instructions = sent_message.inner().iter(); assert!(matches!(instructions.next().unwrap(), ReserveAssetDeposited(..))); assert!(matches!( @@ -178,7 +178,7 @@ fn no_alias_origin_if_root() { let (destination, sent_message) = sent_xcm().pop().unwrap(); assert_eq!(destination, Parent.into()); - assert_eq!(sent_message.len(), 4); + assert_eq!(sent_message.len(), 5); let mut instructions = sent_message.inner().iter(); assert!(matches!(instructions.next().unwrap(), ReserveAssetDeposited(..))); assert!(matches!(instructions.next().unwrap(), UnpaidExecution { .. 
})); @@ -214,7 +214,7 @@ fn unpaid_transact() { let (destination, sent_message) = sent_xcm().pop().unwrap(); assert_eq!(destination, to_another_system_para); - assert_eq!(sent_message.len(), 2); + assert_eq!(sent_message.len(), 3); let mut instructions = sent_message.inner().iter(); assert!(matches!(instructions.next().unwrap(), UnpaidExecution { .. })); assert!(matches!(instructions.next().unwrap(), Transact { .. })); diff --git a/polkadot/xcm/xcm-executor/src/tests/pay_fees.rs b/polkadot/xcm/xcm-executor/src/tests/pay_fees.rs index d5b21d3bc601f..504b06966ad5c 100644 --- a/polkadot/xcm/xcm-executor/src/tests/pay_fees.rs +++ b/polkadot/xcm/xcm-executor/src/tests/pay_fees.rs @@ -103,12 +103,15 @@ fn works_for_delivery_fees() { let querier: Location = (Parachain(1000), AccountId32 { id: SENDER.into(), network: None }).into(); - let sent_message = Xcm(vec![QueryResponse { - query_id: 0, - response: Response::ExecutionResult(None), - max_weight: Weight::zero(), - querier: Some(querier), - }]); + let sent_message = Xcm(vec![ + QueryResponse { + query_id: 0, + response: Response::ExecutionResult(None), + max_weight: Weight::zero(), + querier: Some(querier), + }, + SetTopic(vm.context.topic_or_message_id()), + ]); // The messages were "sent" successfully. assert_eq!( diff --git a/polkadot/xcm/xcm-runtime-apis/Cargo.toml b/polkadot/xcm/xcm-runtime-apis/Cargo.toml index f0c30535c02ff..abfc833b745b5 100644 --- a/polkadot/xcm/xcm-runtime-apis/Cargo.toml +++ b/polkadot/xcm/xcm-runtime-apis/Cargo.toml @@ -31,6 +31,7 @@ sp-io = { workspace = true } sp-tracing = { workspace = true, default-features = true } xcm-builder = { workspace = true } xcm-executor = { workspace = true } +xcm-simulator = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/polkadot/xcm/xcm-runtime-apis/tests/fee_estimation.rs b/polkadot/xcm/xcm-runtime-apis/tests/fee_estimation.rs index c9683e0268290..1307c7adaa9cb 100644 --- a/polkadot/xcm/xcm-runtime-apis/tests/fee_estimation.rs +++ b/polkadot/xcm/xcm-runtime-apis/tests/fee_estimation.rs @@ -27,10 +27,10 @@ use xcm_runtime_apis::{ mod mock; use mock::{ - fake_message_hash, new_test_ext_with_balances, new_test_ext_with_balances_and_assets, - DeliveryFees, ExistentialDeposit, HereLocation, OriginCaller, RuntimeCall, RuntimeEvent, - TestClient, + new_test_ext_with_balances, new_test_ext_with_balances_and_assets, DeliveryFees, + ExistentialDeposit, HereLocation, OriginCaller, RuntimeCall, RuntimeEvent, TestClient, }; +use xcm_simulator::fake_message_hash; // Scenario: User `1` in the local chain (id 2000) wants to transfer assets to account `[0u8; 32]` // on "AssetHub". 
He wants to make sure he has enough for fees, so before he calls the @@ -377,6 +377,7 @@ fn dry_run_xcm_common(xcm_version: XcmVersion) { .buy_execution((Here, execution_fees), Unlimited) .deposit_reserve_asset(AllCounted(1), (Parent, Parachain(2100)), inner_xcm.clone()) .build(); + let expected_msg_id = fake_message_hash(&xcm); let balances = vec![( who, transfer_amount + execution_fees + DeliveryFees::get() + ExistentialDeposit::get(), @@ -400,8 +401,8 @@ fn dry_run_xcm_common(xcm_version: XcmVersion) { .clear_origin() .buy_execution((Here, 1u128), Unlimited) .deposit_asset(AllCounted(1), [0u8; 32]) + .set_topic(expected_msg_id) .build(); - let expected_msg_id = fake_message_hash(&expected_xcms); assert_eq!( dry_run_effects.forwarded_xcms, vec![( diff --git a/polkadot/xcm/xcm-runtime-apis/tests/mock.rs b/polkadot/xcm/xcm-runtime-apis/tests/mock.rs index 05c5dcbbef251..e3276cce157d8 100644 --- a/polkadot/xcm/xcm-runtime-apis/tests/mock.rs +++ b/polkadot/xcm/xcm-runtime-apis/tests/mock.rs @@ -17,7 +17,6 @@ //! Mock runtime for tests. //! Implements both runtime APIs for fee estimation and getting the messages for transfers. -use codec::Encode; use core::{cell::RefCell, marker::PhantomData}; use frame_support::{ construct_runtime, derive_impl, parameter_types, sp_runtime, @@ -50,6 +49,7 @@ use xcm_runtime_apis::{ fees::{Error as XcmPaymentApiError, XcmPaymentApi}, trusted_query::{Error as TrustedQueryApiError, TrustedQueryApi}, }; +use xcm_simulator::helpers::derive_topic_id; construct_runtime! { pub enum TestRuntime { @@ -124,7 +124,7 @@ impl SendXcm for TestXcmSender { Ok((ticket, fees)) } fn deliver(ticket: Self::Ticket) -> Result { - let hash = fake_message_hash(&ticket.1); + let hash = derive_topic_id(&ticket.1); SENT_XCM.with(|q| q.borrow_mut().push(ticket)); Ok(hash) } @@ -150,10 +150,6 @@ impl InspectMessageQueues for TestXcmSender { } } -pub(crate) fn fake_message_hash(message: &Xcm) -> XcmHash { - message.using_encoded(sp_io::hashing::blake2_256) -} - pub type XcmRouter = TestXcmSender; parameter_types! 
{ diff --git a/polkadot/xcm/xcm-simulator/example/src/tests.rs b/polkadot/xcm/xcm-simulator/example/src/tests.rs index e6a996e4e5589..ede4ad70fa100 100644 --- a/polkadot/xcm/xcm-simulator/example/src/tests.rs +++ b/polkadot/xcm/xcm-simulator/example/src/tests.rs @@ -527,7 +527,7 @@ fn query_holding() { let query_id_set = 1234; // Send a message which fully succeeds on the relay chain - ParaA::execute_with(|| { + let expected_hash = ParaA::execute_with(|| { let message = Xcm(vec![ WithdrawAsset((Here, send_amount).into()), buy_execution((Here, send_amount)), @@ -543,6 +543,8 @@ fn query_holding() { ]); // Send withdraw and deposit with query holding assert_ok!(ParachainPalletXcm::send_xcm(Here, Parent, message.clone(),)); + + VersionedXcm::from(message).using_encoded(sp_core::blake2_256) }); // Check that transfer was executed @@ -563,12 +565,15 @@ fn query_holding() { ParaA::execute_with(|| { assert_eq!( ReceivedDmp::::get(), - vec![Xcm(vec![QueryResponse { - query_id: query_id_set, - response: Response::Assets(Assets::new()), - max_weight: Weight::from_parts(1_000_000_000, 1024 * 1024), - querier: Some(Here.into()), - }])], + vec![Xcm(vec![ + QueryResponse { + query_id: query_id_set, + response: Response::Assets(Assets::new()), + max_weight: Weight::from_parts(1_000_000_000, 1024 * 1024), + querier: Some(Here.into()), + }, + SetTopic(expected_hash), + ])], ); }); } diff --git a/polkadot/xcm/xcm-simulator/src/lib.rs b/polkadot/xcm/xcm-simulator/src/lib.rs index 59df394406ea0..d494c64daacec 100644 --- a/polkadot/xcm/xcm-simulator/src/lib.rs +++ b/polkadot/xcm/xcm-simulator/src/lib.rs @@ -413,7 +413,7 @@ macro_rules! decl_test_network { fn deliver( triple: ($crate::ParaId, $crate::Location, $crate::Xcm<()>), ) -> Result<$crate::XcmHash, $crate::SendError> { - let hash = $crate::fake_message_hash(&triple.2); + let hash = $crate::helpers::derive_topic_id(&triple.2); $crate::PARA_MESSAGE_BUS.with(|b| b.borrow_mut().push_back(triple)); Ok(hash) } @@ -445,10 +445,68 @@ macro_rules! decl_test_network { fn deliver( pair: ($crate::Location, $crate::Xcm<()>), ) -> Result<$crate::XcmHash, $crate::SendError> { - let hash = $crate::fake_message_hash(&pair.1); + let hash = $crate::helpers::derive_topic_id(&pair.1); $crate::RELAY_MESSAGE_BUS.with(|b| b.borrow_mut().push_back(pair)); Ok(hash) } } }; } + +pub mod helpers { + use super::*; + use sp_runtime::testing::H256; + use std::collections::{HashMap, HashSet}; + + /// Derives a topic ID for an XCM in tests. + pub fn derive_topic_id(message: &Xcm) -> XcmHash { + if let Some(SetTopic(topic_id)) = message.last() { + *topic_id + } else { + fake_message_hash(message) + } + } + + /// A test utility for tracking XCM topic IDs + #[derive(Clone)] + pub struct TopicIdTracker { + ids: HashMap, + } + impl TopicIdTracker { + /// Initialises a new, empty topic ID tracker. + pub fn new() -> Self { + TopicIdTracker { ids: HashMap::new() } + } + + /// Asserts that exactly one unique topic ID is present across all captured entries. + pub fn assert_unique(&self) { + let unique_ids: HashSet<_> = self.ids.values().collect(); + assert_eq!( + unique_ids.len(), + 1, + "Expected exactly one topic ID, found {}: {:?}", + unique_ids.len(), + unique_ids + ); + } + + /// Inserts a topic ID with the given chain name in the captor. + pub fn insert(&mut self, chain: &str, id: H256) { + self.ids.insert(chain.to_string(), id); + } + + /// Inserts a topic ID for a given chain and then asserts global uniqueness. 
+ pub fn insert_and_assert_unique(&mut self, chain: &str, id: H256) { + if let Some(existing_id) = self.ids.get(chain) { + assert_eq!( + id, *existing_id, + "Topic ID mismatch for chain '{}': expected {:?}, got {:?}", + chain, existing_id, id + ); + } else { + self.insert(chain, id); + } + self.assert_unique(); + } + } +} diff --git a/polkadot/zombienet-sdk-tests/Cargo.toml b/polkadot/zombienet-sdk-tests/Cargo.toml index 8a2ca05c105d5..2a7c76f824682 100644 --- a/polkadot/zombienet-sdk-tests/Cargo.toml +++ b/polkadot/zombienet-sdk-tests/Cargo.toml @@ -16,8 +16,7 @@ log = { workspace = true } polkadot-primitives = { workspace = true, default-features = true } serde = { workspace = true } serde_json = { workspace = true } -subxt = { workspace = true } -subxt-signer = { workspace = true } +subxt = { version = "0.38.1", default-features = false } tokio = { workspace = true, features = ["rt-multi-thread"] } tokio-util = { workspace = true, features = ["time"] } zombienet-orchestrator = { workspace = true } diff --git a/polkadot/zombienet-sdk-tests/tests/disabling/slashing.rs b/polkadot/zombienet-sdk-tests/tests/disabling/slashing.rs index 8d66d9430b5ec..372b42b35c4d9 100644 --- a/polkadot/zombienet-sdk-tests/tests/disabling/slashing.rs +++ b/polkadot/zombienet-sdk-tests/tests/disabling/slashing.rs @@ -12,11 +12,13 @@ use cumulus_zombienet_sdk_helpers::{ }; use polkadot_primitives::{BlockNumber, CandidateHash, DisputeState, Id as ParaId, SessionIndex}; use serde_json::json; -use subxt::{OnlineClient, PolkadotConfig}; use tokio::time::Duration; use tokio_util::time::FutureExt; use zombienet_orchestrator::network::node::LogLineCountOptions; -use zombienet_sdk::NetworkConfigBuilder; +use zombienet_sdk::{ + subxt::{OnlineClient, PolkadotConfig}, + NetworkConfigBuilder, +}; #[tokio::test(flavor = "multi_thread")] async fn dispute_past_session_slashing() -> Result<(), anyhow::Error> { diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/basic_3cores.rs b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/basic_3cores.rs index ceb27551ad454..92a59d6ded8d0 100644 --- a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/basic_3cores.rs +++ b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/basic_3cores.rs @@ -8,9 +8,11 @@ use anyhow::anyhow; use cumulus_zombienet_sdk_helpers::{assert_finalized_para_throughput, create_assign_core_call}; use polkadot_primitives::Id as ParaId; use serde_json::json; -use subxt::{OnlineClient, PolkadotConfig}; -use subxt_signer::sr25519::dev; -use zombienet_sdk::NetworkConfigBuilder; +use zombienet_sdk::{ + subxt::{OnlineClient, PolkadotConfig}, + subxt_signer::sr25519::dev, + NetworkConfigBuilder, +}; #[tokio::test(flavor = "multi_thread")] async fn basic_3cores_test() -> Result<(), anyhow::Error> { diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/doesnt_break_parachains.rs b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/doesnt_break_parachains.rs index 1a0bdcd215eb9..340bad10bed70 100644 --- a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/doesnt_break_parachains.rs +++ b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/doesnt_break_parachains.rs @@ -11,9 +11,11 @@ use cumulus_zombienet_sdk_helpers::{ use polkadot_primitives::{CoreIndex, Id as ParaId}; use serde_json::json; use std::collections::{BTreeMap, VecDeque}; -use subxt::{OnlineClient, PolkadotConfig}; -use subxt_signer::sr25519::dev; -use zombienet_sdk::NetworkConfigBuilder; +use zombienet_sdk::{ + subxt::{OnlineClient, PolkadotConfig}, + subxt_signer::sr25519::dev, +
NetworkConfigBuilder, +}; #[tokio::test(flavor = "multi_thread")] async fn doesnt_break_parachains_test() -> Result<(), anyhow::Error> { diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_12cores.rs b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_12cores.rs index ed9bad3034011..959966de08d05 100644 --- a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_12cores.rs +++ b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_12cores.rs @@ -11,9 +11,11 @@ use cumulus_zombienet_sdk_helpers::{ }; use polkadot_primitives::Id as ParaId; use serde_json::json; -use subxt::{OnlineClient, PolkadotConfig}; -use subxt_signer::sr25519::dev; -use zombienet_sdk::NetworkConfigBuilder; +use zombienet_sdk::{ + subxt::{OnlineClient, PolkadotConfig}, + subxt_signer::sr25519::dev, + NetworkConfigBuilder, +}; #[tokio::test(flavor = "multi_thread")] async fn slot_based_12cores_test() -> Result<(), anyhow::Error> { diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_3cores.rs b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_3cores.rs index 422feb4d20cf4..c626fdc00d91c 100644 --- a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_3cores.rs +++ b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_3cores.rs @@ -11,9 +11,11 @@ use cumulus_zombienet_sdk_helpers::{ }; use polkadot_primitives::Id as ParaId; use serde_json::json; -use subxt::{OnlineClient, PolkadotConfig}; -use subxt_signer::sr25519::dev; -use zombienet_sdk::NetworkConfigBuilder; +use zombienet_sdk::{ + subxt::{OnlineClient, PolkadotConfig}, + subxt_signer::sr25519::dev, + NetworkConfigBuilder, +}; #[tokio::test(flavor = "multi_thread")] async fn slot_based_3cores_test() -> Result<(), anyhow::Error> { diff --git a/polkadot/zombienet-sdk-tests/tests/functional/approval_voting_coalescing.rs b/polkadot/zombienet-sdk-tests/tests/functional/approval_voting_coalescing.rs index 717328f759a48..1402f32eb2d4e 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/approval_voting_coalescing.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/approval_voting_coalescing.rs @@ -8,8 +8,10 @@ use anyhow::anyhow; use cumulus_zombienet_sdk_helpers::{assert_finality_lag, assert_para_throughput}; use polkadot_primitives::Id as ParaId; use serde_json::json; -use subxt::{OnlineClient, PolkadotConfig}; -use zombienet_sdk::NetworkConfigBuilder; +use zombienet_sdk::{ + subxt::{OnlineClient, PolkadotConfig}, + NetworkConfigBuilder, +}; #[tokio::test(flavor = "multi_thread")] async fn approval_voting_coalescing_test() -> Result<(), anyhow::Error> { diff --git a/polkadot/zombienet-sdk-tests/tests/functional/approved_peer_mixed_validators.rs b/polkadot/zombienet-sdk-tests/tests/functional/approved_peer_mixed_validators.rs index 0da219711e7eb..9052cf33a9274 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/approved_peer_mixed_validators.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/approved_peer_mixed_validators.rs @@ -12,9 +12,11 @@ use tokio::time::Duration; use cumulus_zombienet_sdk_helpers::{assert_finality_lag, assert_finalized_para_throughput}; use polkadot_primitives::Id as ParaId; use serde_json::json; -use subxt::{OnlineClient, PolkadotConfig}; use zombienet_orchestrator::network::node::LogLineCountOptions; -use zombienet_sdk::NetworkConfigBuilder; +use zombienet_sdk::{ + subxt::{OnlineClient, PolkadotConfig}, + NetworkConfigBuilder, +}; #[tokio::test(flavor = "multi_thread")] async fn approved_peer_mixed_validators_test() -> 
Result<(), anyhow::Error> { diff --git a/polkadot/zombienet-sdk-tests/tests/functional/async_backing_6_seconds_rate.rs b/polkadot/zombienet-sdk-tests/tests/functional/async_backing_6_seconds_rate.rs index 09c3b2b4d8b24..6dec2339be47e 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/async_backing_6_seconds_rate.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/async_backing_6_seconds_rate.rs @@ -8,8 +8,10 @@ use anyhow::anyhow; use cumulus_zombienet_sdk_helpers::{assert_finality_lag, assert_finalized_para_throughput}; use polkadot_primitives::Id as ParaId; use serde_json::json; -use subxt::{OnlineClient, PolkadotConfig}; -use zombienet_sdk::NetworkConfigBuilder; +use zombienet_sdk::{ + subxt::{OnlineClient, PolkadotConfig}, + NetworkConfigBuilder, +}; #[tokio::test(flavor = "multi_thread")] async fn async_backing_6_seconds_rate_test() -> Result<(), anyhow::Error> { diff --git a/polkadot/zombienet-sdk-tests/tests/functional/duplicate_collations.rs b/polkadot/zombienet-sdk-tests/tests/functional/duplicate_collations.rs index 13cd2114ca848..39cd1383fb54c 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/duplicate_collations.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/duplicate_collations.rs @@ -10,10 +10,12 @@ use tokio::time::Duration; use cumulus_zombienet_sdk_helpers::{assert_finalized_para_throughput, create_assign_core_call}; use polkadot_primitives::Id as ParaId; use serde_json::json; -use subxt::{OnlineClient, PolkadotConfig}; -use subxt_signer::sr25519::dev; use zombienet_orchestrator::network::node::LogLineCountOptions; -use zombienet_sdk::NetworkConfigBuilder; +use zombienet_sdk::{ + subxt::{OnlineClient, PolkadotConfig}, + subxt_signer::sr25519::dev, + NetworkConfigBuilder, +}; const VALIDATOR_COUNT: u8 = 3; diff --git a/polkadot/zombienet-sdk-tests/tests/functional/spam_statement_distribution_requests.rs b/polkadot/zombienet-sdk-tests/tests/functional/spam_statement_distribution_requests.rs index a3a62e1e2f92a..0e346f658d38b 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/spam_statement_distribution_requests.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/spam_statement_distribution_requests.rs @@ -9,9 +9,11 @@ use tokio::time::Duration; use cumulus_zombienet_sdk_helpers::assert_para_throughput; use polkadot_primitives::Id as ParaId; use serde_json::json; -use subxt::{OnlineClient, PolkadotConfig}; use zombienet_orchestrator::network::node::LogLineCountOptions; -use zombienet_sdk::NetworkConfigBuilder; +use zombienet_sdk::{ + subxt::{OnlineClient, PolkadotConfig}, + NetworkConfigBuilder, +}; #[tokio::test(flavor = "multi_thread")] async fn spam_statement_distribution_requests_test() -> Result<(), anyhow::Error> { diff --git a/polkadot/zombienet-sdk-tests/tests/functional/sync_backing.rs b/polkadot/zombienet-sdk-tests/tests/functional/sync_backing.rs index 0d2de9024c8d0..2e8ec0a10a122 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/sync_backing.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/sync_backing.rs @@ -8,8 +8,10 @@ use anyhow::anyhow; use cumulus_zombienet_sdk_helpers::{assert_finality_lag, assert_finalized_para_throughput}; use polkadot_primitives::Id as ParaId; use serde_json::json; -use subxt::{OnlineClient, PolkadotConfig}; -use zombienet_sdk::NetworkConfigBuilder; +use zombienet_sdk::{ + subxt::{OnlineClient, PolkadotConfig}, + NetworkConfigBuilder, +}; #[tokio::test(flavor = "multi_thread")] async fn sync_backing_test() -> Result<(), anyhow::Error> { diff --git 
a/polkadot/zombienet-sdk-tests/tests/smoke/coretime_revenue.rs b/polkadot/zombienet-sdk-tests/tests/smoke/coretime_revenue.rs index 5105bdf156d8a..501b1b20a51eb 100644 --- a/polkadot/zombienet-sdk-tests/tests/smoke/coretime_revenue.rs +++ b/polkadot/zombienet-sdk-tests/tests/smoke/coretime_revenue.rs @@ -11,10 +11,10 @@ use anyhow::anyhow; -#[subxt::subxt(runtime_metadata_path = "metadata-files/coretime-rococo-local.scale")] +#[zombienet_sdk::subxt::subxt(runtime_metadata_path = "metadata-files/coretime-rococo-local.scale")] mod coretime_rococo {} -#[subxt::subxt(runtime_metadata_path = "metadata-files/rococo-local.scale")] +#[zombienet_sdk::subxt::subxt(runtime_metadata_path = "metadata-files/rococo-local.scale")] mod rococo {} use rococo::runtime_types::{ @@ -30,10 +30,12 @@ use rococo::runtime_types::{ use serde_json::json; use std::{fmt::Display, sync::Arc}; -use subxt::{events::StaticEvent, utils::AccountId32, OnlineClient, PolkadotConfig}; -use subxt_signer::sr25519::dev; use tokio::sync::RwLock; -use zombienet_sdk::NetworkConfigBuilder; +use zombienet_sdk::{ + subxt::{events::StaticEvent, utils::AccountId32, OnlineClient, PolkadotConfig}, + subxt_signer::sr25519::dev, + NetworkConfigBuilder, +}; use coretime_rococo::{ self as coretime_api, @@ -88,7 +90,7 @@ async fn assert_total_issuance( assert_eq!(ti, actual_ti); } -type EventOf = Arc)>>>; +type EventOf = Arc)>>>; macro_rules! trace_event { ($event:ident : $mod:ident => $($ev:ident),*) => { @@ -102,9 +104,11 @@ macro_rules! trace_event { }; } -async fn para_watcher(api: OnlineClient, events: EventOf) -where - ::Number: Display, +async fn para_watcher( + api: OnlineClient, + events: EventOf, +) where + ::Number: Display, { let mut blocks_sub = api.blocks().subscribe_finalized().await.unwrap(); @@ -130,9 +134,11 @@ where } } -async fn relay_watcher(api: OnlineClient, events: EventOf) -where - ::Number: Display, +async fn relay_watcher( + api: OnlineClient, + events: EventOf, +) where + ::Number: Display, { let mut blocks_sub = api.blocks().subscribe_finalized().await.unwrap(); @@ -157,7 +163,11 @@ where } } -async fn wait_for_event bool + Copy>( +async fn wait_for_event< + C: zombienet_sdk::subxt::Config + Clone, + E: StaticEvent, + P: Fn(&E) -> bool + Copy, +>( events: EventOf, pallet: &'static str, variant: &'static str, @@ -179,9 +189,11 @@ async fn wait_for_event b } } -async fn ti_watcher(api: OnlineClient, prefix: &'static str) -where - ::Number: Display, +async fn ti_watcher( + api: OnlineClient, + prefix: &'static str, +) where + ::Number: Display, { let mut blocks_sub = api.blocks().subscribe_finalized().await.unwrap(); diff --git a/prdoc/pr_6010.prdoc b/prdoc/pr_6010.prdoc new file mode 100644 index 0000000000000..bbf85fa2469e9 --- /dev/null +++ b/prdoc/pr_6010.prdoc @@ -0,0 +1,31 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Proof Of Possession for public keys + +doc: + - audience: Runtime Dev + description: | + Introduced `ProofOfPossessionGenerator` and `ProofOfPossessionVerifier` traits. + Provided blanket implementations for all crypto `Pair` types implementing + the `NonAggregatable` trait. Currently, this includes all cryptographic types + **except** the experimental BLS, which have dedicated implementations. + Implemented `ProofOfPossessionVerifier` for all application-level crypto wrappers + from the `sp-application-crypto`crate. 
+ Enabled PoP generation for all `RuntimePublic` crypto types, allowing PoP generation + within the runtime context (with the support of an `sp-io` host function). + BLS PoP generation within the runtime context requires a new dedicated host function + in `sp-io` and a new dedicated `Keystore` method. + +crates: +- name: sp-application-crypto + bump: minor +- name: sp-core + bump: minor +- name: sp-keystore + bump: minor +- name: sp-io + bump: minor + note: Introduces a new host function (not exposed by default, feature gated) +- name: sc-keystore + bump: minor diff --git a/prdoc/pr_7556.prdoc b/prdoc/pr_7556.prdoc new file mode 100644 index 0000000000000..fafcfd091a12b --- /dev/null +++ b/prdoc/pr_7556.prdoc @@ -0,0 +1,11 @@ +title: 'Add trie cache warmup' +doc: +- audience: Node Dev + description: "Warm up the Trie cache based on a CLI flag to enhance the performance of smart contracts on AssetHub by reducing storage access time." +crates: +- name: sc-cli + bump: major +- name: sc-service + bump: major +- name: sc-client-db + bump: minor diff --git a/prdoc/pr_7597.prdoc b/prdoc/pr_7597.prdoc new file mode 100644 index 0000000000000..50bc9c3787d02 --- /dev/null +++ b/prdoc/pr_7597.prdoc @@ -0,0 +1,40 @@ +title: 'Introduce CreateBare, deprecate CreateInherent' +doc: +- audience: Runtime Dev + description: | + Rename `CreateInherent` to `CreateBare`, add the method `create_bare`, and deprecate `create_inherent`. + + Both unsigned transactions and inherents use the extrinsic type `Bare`. + Before this PR, the `CreateInherent` trait was used to generate unsigned transactions; now unsigned transactions can be generated with the more aptly named `CreateBare` trait. + + How to upgrade: + * Change usages of `CreateInherent` to `CreateBare` and of `create_inherent` to `create_bare`. + * Implement `CreateBare` for the runtime; the method `create_bare` is usually implemented using `Extrinsic::new_bare`. + +crates: +- name: frame-system + bump: major +- name: polkadot-runtime-common + bump: major +- name: polkadot-runtime-parachains + bump: major +- name: rococo-runtime + bump: major +- name: westend-runtime + bump: major +- name: pallet-babe + bump: major +- name: pallet-beefy + bump: major +- name: pallet-election-provider-multi-block + bump: major +- name: pallet-election-provider-multi-phase + bump: major +- name: pallet-grandpa + bump: major +- name: pallet-im-online + bump: major +- name: pallet-mixnet + bump: major +- name: pallet-offences-benchmarking + bump: major diff --git a/prdoc/pr_7691.prdoc b/prdoc/pr_7691.prdoc new file mode 100644 index 0000000000000..4052a66f50e76 --- /dev/null +++ b/prdoc/pr_7691.prdoc @@ -0,0 +1,26 @@ +title: Ensure Consistent Topic IDs for Traceable Cross-Chain XCM +doc: +- audience: Runtime Dev + description: |- + This PR ensures every XCM is processed under the same topic ID across all the chains it traverses, improving traceability.
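+
+    The mechanism, in brief: when the `XcmExecutor` sends a message that does not
+    already end in a `SetTopic` instruction, it appends one, using the context's
+    topic if set and falling back to the message ID otherwise. A minimal sketch,
+    simplified from the executor change in this PR (the free-standing helper and
+    its name are illustrative, not the upstream code):
+
+    ```rust
+    use xcm::latest::{prelude::*, XcmContext, XcmHash};
+
+    fn ensure_topic(msg: &mut Xcm<()>, context: &XcmContext) {
+        // Only the last `SetTopic` is considered relevant; append one if missing,
+        // so the same ID follows the message across hops (consistent with
+        // `WithUniqueTopic`).
+        if !matches!(msg.last(), Some(SetTopic(_))) {
+            let topic_id: XcmHash = context.topic_or_message_id();
+            msg.0.push(SetTopic(topic_id));
+        }
+    }
+    ```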
+crates: +- name: emulated-integration-tests-common + bump: patch +- name: pallet-xcm + bump: patch +- name: staging-xcm + bump: patch +- name: staging-xcm-builder + bump: patch +- name: staging-xcm-executor + bump: patch +- name: xcm-runtime-apis + bump: patch +- name: xcm-simulator + bump: patch +- name: xcm-simulator-example + bump: patch +- name: asset-hub-westend-integration-tests + bump: none +- name: bridge-hub-westend-integration-tests + bump: none \ No newline at end of file diff --git a/prdoc/pr_7785.prdoc b/prdoc/pr_7785.prdoc new file mode 100644 index 0000000000000..f75c8181fa09b --- /dev/null +++ b/prdoc/pr_7785.prdoc @@ -0,0 +1,22 @@ +title: 'pallet scheduler: fix weight and add safety checks' +doc: +- audience: Runtime Dev + description: |- + Changes: + - Add runtime integrity test for scheduler pallet to ensure that lookups use sensible weights + - Check that all storage names passed to the omni bencher are known to FRAME metadata + - Trim storage names in omni bencher to fix V1 bench syntax bug + - Fix V1 bench syntax storage name sanitization for specific Rust versions + + I re-ran the benchmarks with the omni-bencher modifications and it did not change the [proof size](https://weights.tasty.limo/compare?repo=polkadot-sdk&threshold=1&path_pattern=substrate%2Fframe%2F**%2Fsrc%2Fweights.rs%2Cpolkadot%2Fruntime%2F*%2Fsrc%2Fweights%2F**%2F*.rs%2Cpolkadot%2Fbridges%2Fmodules%2F*%2Fsrc%2Fweights.rs%2Ccumulus%2F**%2Fweights%2F*.rs%2Ccumulus%2F**%2Fweights%2Fxcm%2F*.rs%2Ccumulus%2F**%2Fsrc%2Fweights.rs&method=asymptotic&ignore_errors=true&unit=proof&old=cc0142510b81dcf1c1a22f7dc164c453c25287e6&new=bb19d78821eaeaf2262f6a23ee45f83dd4f94d29). I reverted [the commit](https://github.com/paritytech/polkadot-sdk/pull/7785/commits/bb19d78821eaeaf2262f6a23ee45f83dd4f94d29) afterwards to reduce the noise for reviewers. +crates: +- name: frame-benchmarking-cli + bump: minor +- name: frame-benchmarking + bump: minor +- name: pallet-scheduler + bump: minor +- name: asset-hub-westend-runtime + bump: minor +- name: westend-runtime + bump: minor diff --git a/prdoc/pr_8069.prdoc b/prdoc/pr_8069.prdoc new file mode 100644 index 0000000000000..45f9c6d3d4348 --- /dev/null +++ b/prdoc/pr_8069.prdoc @@ -0,0 +1,17 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Benchmark storage access on block validation + +doc: + - audience: [Runtime Dev, Node Dev] + description: | + Adds checks of storage weights during block validation for both read and write benchmarks. + +crates: + - name: cumulus-pallet-parachain-system + bump: minor + - name: frame-benchmarking-cli + bump: minor + - name: frame-storage-access-test-runtime + bump: major diff --git a/prdoc/pr_8230.prdoc b/prdoc/pr_8230.prdoc new file mode 100644 index 0000000000000..feee621cefc2a --- /dev/null +++ b/prdoc/pr_8230.prdoc @@ -0,0 +1,19 @@ +title: add parachain block validation latency metrics and logs +doc: +- audience: Node Dev + description: | + This change introduces a few metrics (and corresponding logs) to track the state of collations: + - time till collation fetched + - backing latency (counting from the relay parent) + - backing latency (counting from collation fetch) + - inclusion latency + - expired collations (not backed, not advertised, not fetched) + + These metrics are useful to determine the reliability of parachain block production and validation.
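For context on how node-side latency metrics like these are typically wired up, a minimal sketch using `substrate-prometheus-endpoint` (the metric name and help text here are hypothetical, not the exact ones added by this PR):

```rust
use prometheus_endpoint::{register, Histogram, HistogramOpts, PrometheusError, Registry};

/// Illustrative only: register a histogram for one of the latencies above.
fn register_backing_latency(registry: &Registry) -> Result<Histogram, PrometheusError> {
	register(
		Histogram::with_opts(HistogramOpts::new(
			"parachain_collation_backing_latency",
			"Time from a collation's relay parent until the candidate is backed",
		))?,
		registry,
	)
}
```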
+crates: +- name: polkadot-collator-protocol + bump: patch +- name: polkadot-network-bridge + bump: patch +- name: polkadot-node-subsystem-util + bump: minor diff --git a/prdoc/pr_8234.prdoc b/prdoc/pr_8234.prdoc new file mode 100644 index 0000000000000..6b9ad38b4f158 --- /dev/null +++ b/prdoc/pr_8234.prdoc @@ -0,0 +1,29 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Set a memory limit when decoding an `UncheckedExtrinsic` + +doc: + - audience: Runtime Dev + description: | + This PR sets a 16 MiB heap memory limit when decoding an `UncheckedExtrinsic`. + The `ExtrinsicCall` trait has been moved from `frame-support` to `sp-runtime`. + The `EnsureInherentsAreFirst` trait has been removed and the checking logic has been moved to `frame-executive`. + +crates: + - name: frame-support + bump: major + - name: frame-support-procedural + bump: patch + - name: sp-runtime + bump: minor + - name: cumulus-pallet-parachain-system + bump: patch + - name: frame-executive + bump: minor + - name: pallet-revive + bump: minor + - name: asset-hub-westend-runtime + bump: patch + - name: penpal-runtime + bump: patch diff --git a/prdoc/pr_8265.prdoc b/prdoc/pr_8265.prdoc new file mode 100644 index 0000000000000..8ae598166964e --- /dev/null +++ b/prdoc/pr_8265.prdoc @@ -0,0 +1,44 @@ +title: '[FRAME] Omni bencher fixes' +doc: +- audience: Runtime Dev + description: |- + Changes: + - Add `--pallets` option to selectively run multiple pallets. In the past we only had `--pallet` to run a single one. + - Add `--exclude-extrinsics [pallet::extrinsic]` to add (Pallet,Extrinsic) tuples that should be excluded. + - Fix storage overlay reversion *before* the benchmark runs. + - Test root hash for V2 benchmarks to be deterministic + - Change DB reps to 1 for a speedup, since running multiple times should not be needed. (TODO test) + + Checked that it fixes the Kusama benchmark issue when [patching](https://github.com/ggwpez/substrate-scripts/blob/master/diener.py) to a fixed stable2412 fork: + + (before) + ```pre + The following 5 benchmarks failed: + - pallet_nomination_pools::apply_slash_fail + - pallet_nomination_pools::migrate_delegation + - pallet_nomination_pools::pool_migrate + - pallet_offences::report_offence_babe + - pallet_offences::report_offence_grandpa + Error: Input("5 benchmarks failed") + ``` + + (after) + ```pre + The following 1 benchmarks failed: + - pallet_nomination_pools::apply_slash_fail + Error: Input("1 benchmarks failed") + ``` + This one needs fixing but is not breaking the other ones anymore. +crates: +- name: frame-benchmarking-cli + bump: major +- name: frame-support-procedural + bump: patch +- name: frame-benchmarking-pallet-pov + bump: patch +- name: frame-benchmarking + bump: patch +- name: pallet-staking + bump: patch +- name: frame-omni-bencher + bump: minor diff --git a/prdoc/pr_8344.prdoc b/prdoc/pr_8344.prdoc new file mode 100644 index 0000000000000..2a3a48066d726 --- /dev/null +++ b/prdoc/pr_8344.prdoc @@ -0,0 +1,32 @@ +title: 'XCMP weight metering: account for the MQ page position' +doc: +- audience: Runtime Dev + description: |- + This PR improves XCMP weight metering, accounting for the position of the message within the message-queue (MQ) page.
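What position-dependent metering means in practice, as a purely illustrative sketch (this is not the pallet's actual formula):

```rust
use frame_support::weights::Weight;

/// Hypothetical model: a message at position `n` within a message-queue page
/// pays a base cost plus a per-message cost for the `n` messages in front of
/// it, since they must be traversed before the message can be read.
fn process_message_weight(position_in_page: u64, base: Weight, per_preceding: Weight) -> Weight {
	base.saturating_add(per_preceding.saturating_mul(position_in_page))
}
```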
+crates: +- name: cumulus-pallet-xcmp-queue + bump: major +- name: pallet-message-queue + bump: patch +- name: frame-support + bump: major +- name: asset-hub-rococo-runtime + bump: patch +- name: asset-hub-westend-runtime + bump: patch +- name: bridge-hub-rococo-runtime + bump: patch +- name: bridge-hub-westend-runtime + bump: patch +- name: collectives-westend-runtime + bump: patch +- name: coretime-rococo-runtime + bump: patch +- name: coretime-westend-runtime + bump: patch +- name: people-rococo-runtime + bump: patch +- name: people-westend-runtime + bump: patch +- name: pallet-staking-async-parachain-runtime + bump: patch diff --git a/prdoc/pr_8345.prdoc b/prdoc/pr_8345.prdoc new file mode 100644 index 0000000000000..886e8b17cd123 --- /dev/null +++ b/prdoc/pr_8345.prdoc @@ -0,0 +1,26 @@ +title: 'tx/metrics: Add metrics for the RPC v2 `transactionWatch_v1_submitAndWatch`' +doc: +- audience: Node Operator + description: |- + This PR adds metrics for the following RPC subscription: [transactionWatch_v1_submitAndWatch](https://paritytech.github.io/json-rpc-interface-spec/api/transactionWatch_v1_submitAndWatch.html) + + Metrics are exposed in two ways: + - simple counters of how many events we've seen globally + - a histogram vector of execution times, which is labeled by `initial event` -> `final event` + - This helps us identify how long it takes the transaction pool to advance the state of the events, and further debug issues + + Part of: https://github.com/paritytech/polkadot-sdk/issues/8336 + + ### (outdated) PoC Dashboards + + ![Screenshot 2025-04-28 at 17 50 48](https://github.com/user-attachments/assets/9fd0bf30-a321-4362-a10b-dfc3de1eb474) + + + ### Next steps + - [x] initial dashboards with a live node + - [x] adjust testing +crates: +- name: sc-service + bump: major +- name: sc-rpc-spec-v2 + bump: major diff --git a/prdoc/pr_8382.prdoc b/prdoc/pr_8382.prdoc new file mode 100644 index 0000000000000..a0941c6c6d2b0 --- /dev/null +++ b/prdoc/pr_8382.prdoc @@ -0,0 +1,9 @@ +title: add poke_deposit extrinsic to pallet-bounties +doc: +- audience: Runtime Dev + description: This PR adds a new extrinsic `poke_deposit` to `pallet-bounties`. This extrinsic will be used to re-adjust the deposits made in the pallet to create a new bounty. +crates: +- name: rococo-runtime + bump: major +- name: pallet-bounties + bump: major diff --git a/prdoc/pr_8473.prdoc b/prdoc/pr_8473.prdoc new file mode 100644 index 0000000000000..f4a2294249454 --- /dev/null +++ b/prdoc/pr_8473.prdoc @@ -0,0 +1,35 @@ +title: 'Snowbridge: Remove asset location check' +doc: +- audience: Runtime Dev + description: |- + Since the TokenIdOf conversion is XCM version-agnostic and we store the TokenId as the key in storage, + checking whether the key exists is sufficient to verify if the token is registered. + There is no need to verify the asset location. 
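Reduced to its essence, the registration check collapses to a key-existence lookup. A minimal sketch with assumed types (`H256` standing in for the `TokenId`, a generic map standing in for the pallet's registration storage):

```rust
use codec::FullCodec;
use frame_support::storage::StorageMap;
use sp_core::H256;

/// Illustrative only: with the token ID as the storage key, registration is
/// proven by the key's existence; the stored location never needs decoding.
fn is_registered<Map, Location>(token_id: &H256) -> bool
where
	Location: FullCodec,
	Map: StorageMap<H256, Location>,
{
	Map::contains_key(token_id)
}
```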
+crates: +- name: snowbridge-outbound-queue-primitives + bump: patch + validate: false +- name: snowbridge-inbound-queue-primitives + bump: patch + validate: false +- name: snowbridge-test-utils + bump: patch + validate: false +- name: snowbridge-pallet-inbound-queue + bump: patch + validate: false +- name: snowbridge-pallet-inbound-queue-v2 + bump: patch + validate: false +- name: snowbridge-pallet-system + bump: patch + validate: false +- name: snowbridge-pallet-system-v2 + bump: patch + validate: false +- name: bridge-hub-westend-runtime + bump: patch + validate: false +- name: bridge-hub-westend-integration-tests + bump: patch + validate: false diff --git a/prdoc/pr_8500.prdoc b/prdoc/pr_8500.prdoc new file mode 100644 index 0000000000000..b08333ef2e08a --- /dev/null +++ b/prdoc/pr_8500.prdoc @@ -0,0 +1,9 @@ +title: 'txpool: fix tx removal from unlocks set' +doc: +- audience: Node Dev + description: |- + Now removing a tx subtree will correctly remove it from the transactions that would unlock it. + +crates: +- name: sc-transaction-pool + bump: major diff --git a/prdoc/pr_8504.prdoc b/prdoc/pr_8504.prdoc new file mode 100644 index 0000000000000..762d407e8582f --- /dev/null +++ b/prdoc/pr_8504.prdoc @@ -0,0 +1,74 @@ +title: Fix generated address returned by Substrate RPC runtime call +doc: +- audience: Runtime Dev + description: |- + ## Description + + When dry-running a contract deployment through the runtime API, the returned address does not match the actual address that will be used when the transaction is submitted. This inconsistency occurs because the address derivation logic doesn't properly account for the difference between transaction execution and dry-run execution contexts. + + The issue stems from the `create1` address derivation logic in `exec.rs`: + + ```rust + address::create1( + &deployer, + // the Nonce from the origin has been incremented pre-dispatch, so we + // need to subtract 1 to get the nonce at the time of the call. + if origin_is_caller { + account_nonce.saturating_sub(1u32.into()).saturated_into() + } else { + account_nonce.saturated_into() + }, + ) + ``` + + The code correctly subtracts 1 from the account nonce during a transaction execution (because the nonce is incremented pre-dispatch), but doesn't account for execution context - whether it's a real transaction or a dry run through the RPC. + + ## Review Notes + + This PR adds a new condition to check for the `ExecContext` when calculating the nonce for address derivation: + + ```rust + address::create1( + &deployer, + // the Nonce from the origin has been incremented pre-dispatch, so we + // need to subtract 1 to get the nonce at the time of the call. + if origin_is_caller && matches!(exec_context, ExecContext::Transaction) { + account_nonce.saturating_sub(1u32.into()).saturated_into() + } else { + account_nonce.saturated_into() + }, + ) + ``` + + A new test `nonce_not_incremented_in_dry_run()` has been added to verify the behavior. 
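The corrected rule distills into a small, self-contained function (types simplified to `u64`; the real code works with the runtime's nonce type and the `ExecContext` from `exec.rs`):

```rust
/// Simplified mirror of the execution-context distinction described above.
enum ExecContext {
	Transaction,
	DryRun,
}

/// During a real transaction the origin's nonce was already bumped
/// pre-dispatch, so derivation steps one back; in a dry run (or when the
/// deployer is not the origin) the stored nonce is already the right one.
fn derivation_nonce(account_nonce: u64, origin_is_caller: bool, ctx: ExecContext) -> u64 {
	if origin_is_caller && matches!(ctx, ExecContext::Transaction) {
		account_nonce.saturating_sub(1)
	} else {
		account_nonce
	}
}

fn main() {
	// Dry run: the nonce was not incremented pre-dispatch, use it as-is.
	assert_eq!(derivation_nonce(0, true, ExecContext::DryRun), 0);
	// Transaction: the nonce was bumped pre-dispatch, derive with the previous value.
	assert_eq!(derivation_nonce(1, true, ExecContext::Transaction), 0);
}
```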
+ + ## Before Fix + + - Dry-run contract deployment returns address derived with nonce N + - Actual transaction deployment creates contract at address derived with nonce N-1 + - Result: Inconsistent addresses between simulation and actual execution + + ## After Fix + + - Dry-run and actual transaction deployments both create contracts at the same address + - Result: Consistent contract addresses regardless of execution context + - Added test case to verify nonce handling in different execution contexts + + This fix ensures that users can rely on the address returned by a dry run to match the actual address that will be used when the transaction is submitted. + + Fixes https://github.com/paritytech/contract-issues/issues/37 + + # Checklist + + * [x] My PR includes a detailed description as outlined in the "Description" and its two subsections above. + * [x] My PR follows the [labeling requirements]( + https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CONTRIBUTING.md#Process + ) of this project (at minimum one label for `T` required) + * External contributors: ask maintainers to put the right label on your PR. + * [x] I have made corresponding changes to the documentation (if applicable) + * [x] I have added tests that prove my fix is effective or that my feature works (if applicable) +crates: +- name: asset-hub-westend-runtime + bump: patch +- name: pallet-revive + bump: major diff --git a/prdoc/pr_8533.prdoc b/prdoc/pr_8533.prdoc new file mode 100644 index 0000000000000..1c08e9db5349b --- /dev/null +++ b/prdoc/pr_8533.prdoc @@ -0,0 +1,8 @@ +title: '`fatxpool`: add fallback for ready at light' +doc: +- audience: Node Dev + description: | + Add a fallback for `ready_at_light` for the case where no best view can be found to return a set of ready transactions. Also optimised how the best view is searched. +crates: +- name: sc-transaction-pool + bump: major diff --git a/prdoc/pr_8559.prdoc b/prdoc/pr_8559.prdoc new file mode 100644 index 0000000000000..64aea15b7e27c --- /dev/null +++ b/prdoc/pr_8559.prdoc @@ -0,0 +1,11 @@ +title: '[pallet-revive] rename DepositLimit::Unchecked & minor code cleanup' +doc: +- audience: Runtime Dev + description: |- + Minor cleanups for pallet-revive eth-rpc. + Rename DepositLimit::Unchecked to DepositLimit::UnsafeOnlyForDryRun. +crates: +- name: pallet-revive-eth-rpc + bump: patch +- name: pallet-revive + bump: major diff --git a/prdoc/pr_8585.prdoc b/prdoc/pr_8585.prdoc new file mode 100644 index 0000000000000..65f984ce4f4f1 --- /dev/null +++ b/prdoc/pr_8585.prdoc @@ -0,0 +1,16 @@ +title: fix epmb solution duplicate issue + add remote mining apparatus to epm +doc: +- audience: Runtime Dev + description: | + Prevents the NPoS election process from accepting duplicate voters and targets. +crates: +- name: pallet-election-provider-multi-block + bump: patch +- name: pallet-election-provider-multi-phase + bump: minor +- name: frame-election-provider-solution-type + bump: patch +- name: frame-election-provider-support + bump: patch +- name: sp-npos-elections + bump: major diff --git a/prdoc/pr_8587.prdoc b/prdoc/pr_8587.prdoc new file mode 100644 index 0000000000000..a9d9fc8662e95 --- /dev/null +++ b/prdoc/pr_8587.prdoc @@ -0,0 +1,12 @@ +title: '[pallet-revive] make subscription task panic on error' +doc: +- audience: Runtime Dev + description: |- + - Subscription tasks are "essential tasks", so the service should go down when they fail.
+ - Upgrade subxt to 0.41 + - Update the zombienet-sdk tests to use the subxt re-exported from zombienet-sdk so it does not conflict with the workspace version +crates: +- name: pallet-revive-eth-rpc + bump: patch +- name: frame-benchmarking-cli + bump: patch diff --git a/prdoc/pr_8606.prdoc b/prdoc/pr_8606.prdoc new file mode 100644 index 0000000000000..493cad93dcc7b --- /dev/null +++ b/prdoc/pr_8606.prdoc @@ -0,0 +1,10 @@ +title: Use hashbrown hashmap/hashset in validation context +doc: +- audience: Node Dev + description: |- + While profiling https://github.com/paritytech/polkadot-sdk/issues/6131#issuecomment-2891523233 with the benchmark https://github.com/paritytech/polkadot-sdk/pull/8069, we discovered that when running in validation a big chunk of the time is spent inserting and retrieving data from the BTreeMap/BTreeSet. + + By switching to hashbrown HashMap/HashSet in the validation TrieCache and TrieRecorder and in the memory-db https://github.com/paritytech/trie/pull/221, read costs improve by around 40% and write costs by about 20% +crates: +- name: cumulus-pallet-parachain-system + bump: minor diff --git a/prdoc/pr_8615.prdoc b/prdoc/pr_8615.prdoc new file mode 100644 index 0000000000000..1f8845b01ea66 --- /dev/null +++ b/prdoc/pr_8615.prdoc @@ -0,0 +1,8 @@ +title: 'Bridges: Fix - Improve try-state for pallet-xcm-bridge-hub' +doc: +- audience: Runtime Dev + description: |- + Improves try-state for pallet-xcm-bridge-hub: it removes `try_as` and uses the `try_into` implementation instead. +crates: +- name: pallet-xcm-bridge-hub + bump: patch diff --git a/prdoc/pr_8648.prdoc b/prdoc/pr_8648.prdoc new file mode 100644 index 0000000000000..e566158170083 --- /dev/null +++ b/prdoc/pr_8648.prdoc @@ -0,0 +1,10 @@ +title: Charge deposit based on key length +doc: +- audience: Runtime Dev + description: We were only charging storage deposit based on value length but not + based on key length. Since we allow variable-length keys, this has to be done. + Needs to be backported, since changing this in an already deployed system will + be nasty. +crates: +- name: pallet-revive + bump: patch diff --git a/prdoc/pr_8650.prdoc b/prdoc/pr_8650.prdoc new file mode 100644 index 0000000000000..a2a1a8d06f074 --- /dev/null +++ b/prdoc/pr_8650.prdoc @@ -0,0 +1,25 @@ +title: 'litep2p/peerset: Reject non-reserved peers in the reserved-only mode' +doc: +- audience: Node Operator + description: |- + This PR rejects non-reserved peers in the reserved-only mode of the litep2p notification peerset. + + Previously, litep2p completely ignored the reserved-only state while accepting inbound connections. However, it handled it properly during the slot allocation phase.
+ - the main changes are in the `report_inbound_substream` function, which now propagates a `Rejected` response to litep2p in the reserved-only state + - in response, litep2p should never open an inbound substream after receiving the rejected response + - the state of peers is not advanced while in `Disconnected` or `Backoff` states + - the opening state is moved to `Cancelled` + - for consistency (and fuzzing) purposes, `report_substream_opened` now handles the `Disconnected` state more robustly + - while at it, replaced a panic with a `debug_assert` and an instant reject + + ## Testing Done + - started 2 nodes in Kusama and Polkadot with litep2p + - added the `reserved_only_rejects_non_reserved_peers` test to ensure litep2p handles peers properly from different states + + + This PR has been extracted from https://github.com/paritytech/polkadot-sdk/pull/8461 to ease the review process + + cc @paritytech/networking +crates: +- name: sc-network + bump: patch diff --git a/prdoc/pr_8652.prdoc b/prdoc/pr_8652.prdoc new file mode 100644 index 0000000000000..fdc8256140dcd --- /dev/null +++ b/prdoc/pr_8652.prdoc @@ -0,0 +1,10 @@ +title: '[pallet-revive] impl_revive_api macro' +doc: +- audience: Runtime Dev + description: Move the pallet-revive runtime API implementation into a macro, so that we + don't repeat the code for every runtime. +crates: +- name: asset-hub-westend-runtime + bump: patch +- name: pallet-revive + bump: minor diff --git a/prdoc/pr_8664.prdoc b/prdoc/pr_8664.prdoc new file mode 100644 index 0000000000000..f195543c1ae48 --- /dev/null +++ b/prdoc/pr_8664.prdoc @@ -0,0 +1,10 @@ +title: '[pallet-revive] Fix rpc-types' +doc: +- audience: Runtime Dev + description: |- + Update rpc-types: + - Remove unnecessary derive traits + - Fix JSON decoding for `BlockNumberOrTagOrHash` +crates: +- name: pallet-revive + bump: patch diff --git a/prdoc/pr_8667.prdoc b/prdoc/pr_8667.prdoc new file mode 100644 index 0000000000000..82d47c21b54c4 --- /dev/null +++ b/prdoc/pr_8667.prdoc @@ -0,0 +1,17 @@ +title: 'revive: Simplify the storage meter' +doc: +- audience: Runtime Dev + description: |- + Historically, the collection of storage deposits ran in an infallible context, meaning we needed to make sure that the caller was able to pay the deposits when the last contract execution returned. To achieve that, we capped the storage deposit limit to the maximum balance of the origin. This made the code more complex: it conflated the deposit **limit** with the amount of balance the origin has. + + In the meantime, we changed the code to make the deposit collection fallible, but never changed this aspect. + + This PR rectifies that by doing the following: + - The root storage meter's and all its nested meters' limits are completely independent of the origin's balance. This makes it way easier to reason about the limit that a nested meter has at any point. + - Consistently use `StorageDepositNotEnoughFunds` (limit not reached) and `StorageDepositLimitExhausted` (limit reached). + - The origin not being able to pay the existential deposit (ed) for a new account is now `StorageDepositNotEnoughFunds` and traps the caller rather than being a `TransferFailed` return code. This is important since we are hiding the ed from contracts, so it should also not be an error code that must be handled.
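Since the two failure modes are easy to conflate, here is a hedged, standalone sketch of the distinction (the real variants live in pallet-revive's error enum; amounts simplified to `u128`):

```rust
/// Simplified mirror of the two failure modes named above.
#[derive(Debug, PartialEq)]
enum MeterError {
	/// The configured deposit limit was reached.
	StorageDepositLimitExhausted,
	/// The limit was not reached, but the origin cannot pay what was consumed.
	StorageDepositNotEnoughFunds,
}

/// Illustrative check order: the limit is independent of the origin's balance.
fn charge(limit: u128, consumed: u128, payable: u128) -> Result<(), MeterError> {
	if consumed > limit {
		return Err(MeterError::StorageDepositLimitExhausted);
	}
	if consumed > payable {
		return Err(MeterError::StorageDepositNotEnoughFunds);
	}
	Ok(())
}
```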
+ + In preparation for: https://github.com/paritytech/contract-issues/issues/38 +crates: +- name: pallet-revive + bump: patch diff --git a/prdoc/pr_8669.prdoc b/prdoc/pr_8669.prdoc new file mode 100644 index 0000000000000..9ff187e8ff152 --- /dev/null +++ b/prdoc/pr_8669.prdoc @@ -0,0 +1,12 @@ +title: 'cumulus-aura: Improve equivocation checks' +doc: +- audience: Node Dev + description: |- + Instead of just checking the slot, we also take the block number and the relay parent into account (as we actually allow building multiple blocks per slot). This PR also ensures that we are still able to import blocks from availability recovery, which ensures that a network doesn't get stuck on a storm of equivocations. The next step after this pull request would be to implement on-chain slashing for equivocations and probably disabling of the offending author. +crates: +- name: cumulus-client-consensus-aura + bump: patch +- name: cumulus-client-pov-recovery + bump: none +- name: cumulus-pallet-parachain-system + bump: none diff --git a/substrate/bin/node/cli/benches/block_production.rs b/substrate/bin/node/cli/benches/block_production.rs index da82729dbec0f..c5a8663337157 100644 --- a/substrate/bin/node/cli/benches/block_production.rs +++ b/substrate/bin/node/cli/benches/block_production.rs @@ -72,6 +72,7 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { keystore: KeystoreConfig::InMemory, database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 }, trie_cache_maximum_size: Some(64 * 1024 * 1024), + warm_up_trie_cache: None, state_pruning: Some(PruningMode::ArchiveAll), blocks_pruning: BlocksPruning::KeepAll, chain_spec: spec, diff --git a/substrate/bin/node/cli/benches/transaction_pool.rs b/substrate/bin/node/cli/benches/transaction_pool.rs index c07cb3ec0d13c..12d1340c0ba0a 100644 --- a/substrate/bin/node/cli/benches/transaction_pool.rs +++ b/substrate/bin/node/cli/benches/transaction_pool.rs @@ -63,6 +63,7 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { keystore: KeystoreConfig::InMemory, database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 }, trie_cache_maximum_size: Some(64 * 1024 * 1024), + warm_up_trie_cache: None, state_pruning: Some(PruningMode::ArchiveAll), blocks_pruning: BlocksPruning::KeepAll, chain_spec: spec, diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 63463c4479ebd..c7b59bf4e1144 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -40,7 +40,7 @@ use frame_election_provider_support::{ }; use frame_support::{ derive_impl, - dispatch::{DispatchClass, DispatchInfo}, + dispatch::DispatchClass, dynamic_params::{dynamic_pallet_params, dynamic_params}, genesis_builder_helper::{build_state, get_preset}, instances::{Instance1, Instance2}, @@ -85,10 +85,8 @@ use pallet_im_online::sr25519::AuthorityId as ImOnlineId; use pallet_nfts::PalletFeatures; use pallet_nis::WithMaximumOf; use pallet_nomination_pools::PoolId; -use pallet_revive::{evm::runtime::EthExtra, AddressMapper}; +use pallet_revive::evm::runtime::EthExtra; use pallet_session::historical as pallet_session_historical; -use sp_core::U256; -use sp_runtime::traits::TransactionExtension; // Can't use `FungibleAdapter` here until Treasury pallet migrates to fungibles // use pallet_broker::TaskId; @@ -103,7 +101,7 @@ use sp_consensus_beefy::{ mmr::MmrLeafVersion, }; use sp_consensus_grandpa::AuthorityId as GrandpaId; -use sp_core::{crypto::KeyTypeId,
OpaqueMetadata, H160}; +use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_inherents::{CheckInherentsResult, InherentData}; use sp_runtime::{ curve::PiecewiseLinear, @@ -1571,11 +1569,11 @@ where } } -impl frame_system::offchain::CreateInherent for Runtime +impl frame_system::offchain::CreateBare for Runtime where RuntimeCall: From, { - fn create_inherent(call: RuntimeCall) -> UncheckedExtrinsic { + fn create_bare(call: RuntimeCall) -> UncheckedExtrinsic { generic::UncheckedExtrinsic::new_bare(call).into() } } @@ -2796,16 +2794,6 @@ mod runtime { pub type MetaTx = pallet_meta_tx::Pallet; } -impl TryFrom for pallet_revive::Call { - type Error = (); - - fn try_from(value: RuntimeCall) -> Result { - match value { - RuntimeCall::Revive(call) => Ok(call), - _ => Err(()), - } - } -} /// The address format for describing accounts. pub type Address = sp_runtime::MultiAddress; /// Block header type as expected by this runtime. @@ -3063,7 +3051,11 @@ mod benches { ); } -impl_runtime_apis! { +pallet_revive::impl_runtime_apis_plus_revive!( + Runtime, + Executive, + EthExtraImpl, + impl sp_api::Core for Runtime { fn version() -> RuntimeVersion { VERSION @@ -3380,181 +3372,6 @@ impl_runtime_apis! { } } - impl pallet_revive::ReviveApi for Runtime - { - fn balance(address: H160) -> U256 { - Revive::evm_balance(&address) - } - - fn block_gas_limit() -> U256 { - Revive::evm_block_gas_limit() - } - - fn gas_price() -> U256 { - Revive::evm_gas_price() - } - - fn nonce(address: H160) -> Nonce { - let account = ::AddressMapper::to_account_id(&address); - System::account_nonce(account) - } - - fn eth_transact(tx: pallet_revive::evm::GenericTransaction) -> Result, pallet_revive::EthTransactError> - { - let blockweights: BlockWeights = ::BlockWeights::get(); - let tx_fee = |pallet_call, mut dispatch_info: DispatchInfo| { - let call = RuntimeCall::Revive(pallet_call); - dispatch_info.extension_weight = EthExtraImpl::get_eth_extension(0, 0u32.into()).weight(&call); - let uxt: UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic::new_bare(call).into(); - - pallet_transaction_payment::Pallet::::compute_fee( - uxt.encoded_size() as u32, - &dispatch_info, - 0u32.into(), - ) - }; - - Revive::bare_eth_transact(tx, blockweights.max_block, tx_fee) - } - - fn call( - origin: AccountId, - dest: H160, - value: Balance, - gas_limit: Option, - storage_deposit_limit: Option, - input_data: Vec, - ) -> pallet_revive::ContractResult { - Revive::bare_call( - RuntimeOrigin::signed(origin), - dest, - value, - gas_limit.unwrap_or(RuntimeBlockWeights::get().max_block), - pallet_revive::DepositLimit::Balance(storage_deposit_limit.unwrap_or(u128::MAX)), - input_data, - ) - } - - fn instantiate( - origin: AccountId, - value: Balance, - gas_limit: Option, - storage_deposit_limit: Option, - code: pallet_revive::Code, - data: Vec, - salt: Option<[u8; 32]>, - ) -> pallet_revive::ContractResult - { - Revive::bare_instantiate( - RuntimeOrigin::signed(origin), - value, - gas_limit.unwrap_or(RuntimeBlockWeights::get().max_block), - pallet_revive::DepositLimit::Balance(storage_deposit_limit.unwrap_or(u128::MAX)), - code, - data, - salt, - ) - } - - fn upload_code( - origin: AccountId, - code: Vec, - storage_deposit_limit: Option, - ) -> pallet_revive::CodeUploadResult - { - Revive::bare_upload_code( - RuntimeOrigin::signed(origin), - code, - storage_deposit_limit.unwrap_or(u128::MAX), - ) - } - - fn get_storage_var_key( - address: H160, - key: Vec, - ) -> pallet_revive::GetStorageResult { - Revive::get_storage_var_key( - address, - key 
- ) - } - - fn get_storage( - address: H160, - key: [u8; 32], - ) -> pallet_revive::GetStorageResult { - Revive::get_storage( - address, - key - ) - } - - fn trace_block( - block: Block, - tracer_type: pallet_revive::evm::TracerType, - ) -> Vec<(u32, pallet_revive::evm::Trace)> { - use pallet_revive::tracing::trace; - let mut tracer = Revive::evm_tracer(tracer_type); - let mut traces = vec![]; - let (header, extrinsics) = block.deconstruct(); - Executive::initialize_block(&header); - for (index, ext) in extrinsics.into_iter().enumerate() { - trace(tracer.as_tracing(), || { - let _ = Executive::apply_extrinsic(ext); - }); - - if let Some(tx_trace) = tracer.collect_trace() { - traces.push((index as u32, tx_trace)); - } - } - - traces - } - - fn trace_tx( - block: Block, - tx_index: u32, - tracer_type: pallet_revive::evm::TracerType, - ) -> Option { - use pallet_revive::tracing::trace; - let mut tracer = Revive::evm_tracer(tracer_type); - let (header, extrinsics) = block.deconstruct(); - - Executive::initialize_block(&header); - for (index, ext) in extrinsics.into_iter().enumerate() { - if index as u32 == tx_index { - trace(tracer.as_tracing(), || { - let _ = Executive::apply_extrinsic(ext); - }); - break; - } else { - let _ = Executive::apply_extrinsic(ext); - } - } - - tracer.collect_trace() - } - - fn trace_call( - tx: pallet_revive::evm::GenericTransaction, - tracer_type: pallet_revive::evm::TracerType, - ) - -> Result - { - use pallet_revive::tracing::trace; - let mut tracer = Revive::evm_tracer(tracer_type); - let result = trace(tracer.as_tracing(), || Self::eth_transact(tx)); - - if let Some(trace) = tracer.collect_trace() { - Ok(trace) - } else if let Err(err) = result { - Err(err) - } else { - Ok(tracer.empty_trace()) - } - } - } - impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi< Block, Balance, @@ -3915,7 +3732,8 @@ impl_runtime_apis! 
{ genesis_config_presets::preset_names() } } -} + +); #[cfg(test)] mod tests { diff --git a/substrate/bin/node/testing/src/bench.rs b/substrate/bin/node/testing/src/bench.rs index 0d58a96cff4fa..45af839bb6938 100644 --- a/substrate/bin/node/testing/src/bench.rs +++ b/substrate/bin/node/testing/src/bench.rs @@ -53,7 +53,7 @@ use sp_core::{ use sp_crypto_hashing::blake2_256; use sp_inherents::InherentData; use sp_runtime::{ - generic::{self, ExtrinsicFormat, Preamble, EXTRINSIC_FORMAT_VERSION}, + generic::{self, ExtrinsicFormat, Preamble}, traits::{Block as BlockT, IdentifyAccount, Verify}, OpaqueExtrinsic, }; @@ -587,26 +587,21 @@ impl BenchKeyring { key.sign(b) } }); - generic::UncheckedExtrinsic { - preamble: Preamble::Signed( - sp_runtime::MultiAddress::Id(signed), - signature, - tx_ext, - ), - function: payload.0, - } + generic::UncheckedExtrinsic::new_signed( + payload.0, + sp_runtime::MultiAddress::Id(signed), + signature, + tx_ext, + ) .into() }, - ExtrinsicFormat::Bare => generic::UncheckedExtrinsic { - preamble: Preamble::Bare(EXTRINSIC_FORMAT_VERSION), - function: xt.function, - } - .into(), - ExtrinsicFormat::General(ext_version, tx_ext) => generic::UncheckedExtrinsic { - preamble: sp_runtime::generic::Preamble::General(ext_version, tx_ext), - function: xt.function, - } - .into(), + ExtrinsicFormat::Bare => generic::UncheckedExtrinsic::new_bare(xt.function).into(), + ExtrinsicFormat::General(ext_version, tx_ext) => + generic::UncheckedExtrinsic::from_parts( + xt.function, + Preamble::General(ext_version, tx_ext), + ) + .into(), } } diff --git a/substrate/bin/node/testing/src/keyring.rs b/substrate/bin/node/testing/src/keyring.rs index 8eb8d95c9896c..6b100a6477970 100644 --- a/substrate/bin/node/testing/src/keyring.rs +++ b/substrate/bin/node/testing/src/keyring.rs @@ -24,7 +24,7 @@ use node_primitives::{AccountId, Balance, Nonce}; use sp_core::{crypto::get_public_from_string_or_panic, ecdsa, ed25519, sr25519}; use sp_crypto_hashing::blake2_256; use sp_keyring::Sr25519Keyring; -use sp_runtime::generic::{self, Era, ExtrinsicFormat, EXTRINSIC_FORMAT_VERSION}; +use sp_runtime::generic::{self, Era, ExtrinsicFormat}; /// Alice's account id. 
pub fn alice() -> AccountId { @@ -121,25 +121,19 @@ pub fn sign( } }) .into(); - generic::UncheckedExtrinsic { - preamble: sp_runtime::generic::Preamble::Signed( - sp_runtime::MultiAddress::Id(signed), - signature, - tx_ext, - ), - function: payload.0, - } + generic::UncheckedExtrinsic::new_signed( + payload.0, + sp_runtime::MultiAddress::Id(signed), + signature, + tx_ext, + ) .into() }, - ExtrinsicFormat::Bare => generic::UncheckedExtrinsic { - preamble: sp_runtime::generic::Preamble::Bare(EXTRINSIC_FORMAT_VERSION), - function: xt.function, - } - .into(), - ExtrinsicFormat::General(ext_version, tx_ext) => generic::UncheckedExtrinsic { - preamble: sp_runtime::generic::Preamble::General(ext_version, tx_ext), - function: xt.function, - } + ExtrinsicFormat::Bare => generic::UncheckedExtrinsic::new_bare(xt.function).into(), + ExtrinsicFormat::General(ext_version, tx_ext) => generic::UncheckedExtrinsic::from_parts( + xt.function, + generic::Preamble::General(ext_version, tx_ext), + ) .into(), } } diff --git a/substrate/client/cli/src/config.rs b/substrate/client/cli/src/config.rs index 59238b3307cf2..d456a4072d0e0 100644 --- a/substrate/client/cli/src/config.rs +++ b/substrate/client/cli/src/config.rs @@ -252,6 +252,16 @@ pub trait CliConfiguration: Sized { Ok(self.import_params().map(|x| x.trie_cache_maximum_size()).unwrap_or_default()) } + /// Get whether we should warm up the trie cache. + /// + /// By default this is retrieved from `ImportParams` if it is available. Otherwise it's `None`. + fn warm_up_trie_cache(&self) -> Result<Option<sc_service::config::TrieCacheWarmUpStrategy>> { + Ok(self + .import_params() + .map(|x| x.warm_up_trie_cache().map(|x| x.into())) + .unwrap_or_default()) + } + /// Get the state pruning mode. /// /// By default this is retrieved from `PruningMode` if it is available. Otherwise its @@ -528,6 +538,7 @@ pub trait CliConfiguration: Sized { database: self.database_config(&config_dir, database_cache_size, database)?, data_path: config_dir, trie_cache_maximum_size: self.trie_cache_maximum_size()?, + warm_up_trie_cache: self.warm_up_trie_cache()?, state_pruning: self.state_pruning()?, blocks_pruning: self.blocks_pruning()?, executor: ExecutorConfiguration { diff --git a/substrate/client/cli/src/params/import_params.rs b/substrate/client/cli/src/params/import_params.rs index e4b8b9644febc..236907957df67 100644 --- a/substrate/client/cli/src/params/import_params.rs +++ b/substrate/client/cli/src/params/import_params.rs @@ -23,7 +23,7 @@ use crate::{ }, params::{DatabaseParams, PruningParams}, }; -use clap::Args; +use clap::{Args, ValueEnum}; use std::path::PathBuf; /// Parameters for block import. @@ -80,6 +80,38 @@ pub struct ImportParams { /// Providing `0` will disable the cache. #[arg(long, value_name = "Bytes", default_value_t = 1024 * 1024 * 1024)] pub trie_cache_size: usize, + + /// Warm up the trie cache. + /// + /// No warmup happens if the flag is not present. Using the flag without a value chooses the non-blocking warmup. + #[arg(long, value_name = "STRATEGY", value_enum, num_args = 0..=1, default_missing_value = "non-blocking")] + pub warm_up_trie_cache: Option<TrieCacheWarmUpStrategy>, +} + +/// Warmup strategy for the trie cache. +#[derive(Debug, Clone, Copy, ValueEnum)] +pub enum TrieCacheWarmUpStrategy { + /// Warm up the cache in a non-blocking way. + #[clap(name = "non-blocking")] + NonBlocking, + /// Warm up the cache in a blocking way (not recommended for production use). + /// + /// When enabled, the trie cache warm-up will block the node startup until complete.
+ /// This is not recommended for production use as it can significantly delay node startup. + /// Only enable this option for testing or debugging purposes. + #[clap(name = "blocking")] + Blocking, +} + +impl From<TrieCacheWarmUpStrategy> for sc_service::config::TrieCacheWarmUpStrategy { + fn from(strategy: TrieCacheWarmUpStrategy) -> Self { + match strategy { + TrieCacheWarmUpStrategy::NonBlocking => + sc_service::config::TrieCacheWarmUpStrategy::NonBlocking, + TrieCacheWarmUpStrategy::Blocking => + sc_service::config::TrieCacheWarmUpStrategy::Blocking, + } + } } impl ImportParams { @@ -92,6 +124,11 @@ impl ImportParams { } } + /// Specify if we should warm up the trie cache. + pub fn warm_up_trie_cache(&self) -> Option<TrieCacheWarmUpStrategy> { + self.warm_up_trie_cache + } + /// Get the WASM execution method from the parameters pub fn wasm_method(&self) -> sc_service::config::WasmExecutionMethod { self.execution_strategies.check_usage_and_print_deprecation_warning(); diff --git a/substrate/client/cli/src/runner.rs b/substrate/client/cli/src/runner.rs index 9c5834d8d80ae..2cc55f2fccd01 100644 --- a/substrate/client/cli/src/runner.rs +++ b/substrate/client/cli/src/runner.rs @@ -252,6 +252,7 @@ mod tests { keystore: sc_service::config::KeystoreConfig::InMemory, database: sc_client_db::DatabaseSource::ParityDb { path: root.clone() }, trie_cache_maximum_size: None, + warm_up_trie_cache: None, state_pruning: None, blocks_pruning: sc_client_db::BlocksPruning::KeepAll, chain_spec: Box::new( diff --git a/substrate/client/consensus/babe/src/tests.rs b/substrate/client/consensus/babe/src/tests.rs index 5c2e0eae959c1..70fb5a5529d98 100644 --- a/substrate/client/consensus/babe/src/tests.rs +++ b/substrate/client/consensus/babe/src/tests.rs @@ -423,6 +423,7 @@ async fn run_one_test(mutator: impl Fn(&mut TestHeader, Stage) + Send + Sync + ' .await; } +#[cfg(ignore_flaky_test)] // https://github.com/paritytech/polkadot-sdk/issues/48 #[tokio::test] async fn authoring_blocks() { run_one_test(|_, _| ()).await; @@ -441,6 +442,7 @@ async fn rejects_missing_inherent_digest() { .await; } +#[cfg(ignore_flaky_test)] // https://github.com/paritytech/polkadot-sdk/issues/48 #[tokio::test] #[should_panic(expected = "has a bad seal")] async fn rejects_missing_seals() { diff --git a/substrate/client/db/Cargo.toml b/substrate/client/db/Cargo.toml index 6cf2680d5e809..4c7296032f2b9 100644 --- a/substrate/client/db/Cargo.toml +++ b/substrate/client/db/Cargo.toml @@ -43,6 +43,7 @@ sp-database = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } sp-trie = { workspace = true, default-features = true } +sysinfo = { workspace = true } [dev-dependencies] array-bytes = { workspace = true, default-features = true } diff --git a/substrate/client/db/src/lib.rs b/substrate/client/db/src/lib.rs index 7282a3ad7a651..5e8fa18fe86df 100644 --- a/substrate/client/db/src/lib.rs +++ b/substrate/client/db/src/lib.rs @@ -1239,6 +1239,22 @@ impl<Block: BlockT> Backend<Block> { let offchain_storage = offchain::LocalStorage::new(db.clone()); + let shared_trie_cache = config.trie_cache_maximum_size.map(|maximum_size| { + let system_memory = sysinfo::System::new_all(); + let used_memory = system_memory.used_memory(); + let total_memory = system_memory.total_memory(); + + debug!("Initializing shared trie cache with size {} bytes, {}% of total memory", maximum_size, (maximum_size as f64 / total_memory as f64 * 100.0)); + if maximum_size as u64 > total_memory - used_memory { + warn!( + "Not enough
memory to initialize shared trie cache. Cache size: {} bytes. System memory: used {} bytes, total {} bytes", + maximum_size, used_memory, total_memory, + ); + } + + SharedTrieCache::new(sp_trie::cache::CacheSize::new(maximum_size), config.metrics_registry.as_ref()) + }); + let backend = Backend { storage: Arc::new(storage_db), offchain_storage, @@ -1250,12 +1266,7 @@ impl Backend { state_usage: Arc::new(StateUsageStats::new()), blocks_pruning: config.blocks_pruning, genesis_state: RwLock::new(None), - shared_trie_cache: config.trie_cache_maximum_size.map(|maximum_size| { - SharedTrieCache::new( - sp_trie::cache::CacheSize::new(maximum_size), - config.metrics_registry.as_ref(), - ) - }), + shared_trie_cache, }; // Older DB versions have no last state key. Check if the state is available and set it. diff --git a/substrate/client/keystore/src/local.rs b/substrate/client/keystore/src/local.rs index 180b167202439..83d784b799d09 100644 --- a/substrate/client/keystore/src/local.rs +++ b/substrate/client/keystore/src/local.rs @@ -38,7 +38,7 @@ use sp_core::bandersnatch; } sp_keystore::bls_experimental_enabled! { -use sp_core::{bls381, ecdsa_bls381, KeccakHasher}; +use sp_core::{bls381, ecdsa_bls381, KeccakHasher, proof_of_possession::ProofOfPossessionGenerator}; } use crate::{Error, Result}; @@ -141,6 +141,21 @@ impl LocalKeystore { .map(|pair| pair.vrf_pre_output(input)); Ok(pre_output) } + + sp_keystore::bls_experimental_enabled! { + fn generate_proof_of_possession( + &self, + key_type: KeyTypeId, + public: &T::Public, + ) -> std::result::Result, TraitError> { + let proof_of_possession = self + .0 + .read() + .key_pair_by_type::(public, key_type)? + .map(|mut pair| pair.generate_proof_of_possession()); + Ok(proof_of_possession) + } + } } impl Keystore for LocalKeystore { @@ -358,6 +373,14 @@ impl Keystore for LocalKeystore { self.sign::(key_type, public, msg) } + fn bls381_generate_proof_of_possession( + &self, + key_type: KeyTypeId, + public: &bls381::Public + ) -> std::result::Result, TraitError> { + self.generate_proof_of_possession::(key_type, public) + } + fn ecdsa_bls381_public_keys(&self, key_type: KeyTypeId) -> Vec { self.public_keys::(key_type) } @@ -370,7 +393,23 @@ impl Keystore for LocalKeystore { key_type: KeyTypeId, seed: Option<&str>, ) -> std::result::Result { - self.generate_new::(key_type, seed) + let pubkey = self.generate_new::(key_type, seed)?; + + let s = self + .0 + .read() + .additional + .get(&(key_type, pubkey.to_vec())) + .map(|s| s.to_string()) + .expect("Can retrieve seed"); + + // This is done to give the keystore access to individual keys, this is necessary to avoid + // unnecessary host functions for paired keys and re-use host functions implemented for each + // element of the pair. 
+ self.generate_new::(key_type, Some(&*s)).expect("seed slice is valid"); + self.generate_new::(key_type, Some(&*s)).expect("seed slice is valid"); + + Ok(pubkey) } fn ecdsa_bls381_sign( @@ -818,4 +857,61 @@ mod tests { assert_eq!(0o100600, permissions.mode()); } + + #[test] + #[cfg(feature = "bls-experimental")] + fn ecdsa_bls381_generate_with_none_works() { + use sp_core::testing::ECDSA_BLS381; + + let store = LocalKeystore::in_memory(); + let ecdsa_bls381_key = + store.ecdsa_bls381_generate_new(ECDSA_BLS381, None).expect("Cant generate key"); + + let ecdsa_keys = store.ecdsa_public_keys(ECDSA_BLS381); + let bls381_keys = store.bls381_public_keys(ECDSA_BLS381); + let ecdsa_bls381_keys = store.ecdsa_bls381_public_keys(ECDSA_BLS381); + + assert_eq!(ecdsa_keys.len(), 1); + assert_eq!(bls381_keys.len(), 1); + assert_eq!(ecdsa_bls381_keys.len(), 1); + + let ecdsa_key = ecdsa_keys[0]; + let bls381_key = bls381_keys[0]; + + let mut combined_key_raw = [0u8; ecdsa_bls381::PUBLIC_KEY_LEN]; + combined_key_raw[..ecdsa::PUBLIC_KEY_SERIALIZED_SIZE].copy_from_slice(ecdsa_key.as_ref()); + combined_key_raw[ecdsa::PUBLIC_KEY_SERIALIZED_SIZE..].copy_from_slice(bls381_key.as_ref()); + let combined_key = ecdsa_bls381::Public::from_raw(combined_key_raw); + + assert_eq!(combined_key, ecdsa_bls381_key); + } + + #[test] + #[cfg(feature = "bls-experimental")] + fn ecdsa_bls381_generate_with_seed_works() { + use sp_core::testing::ECDSA_BLS381; + + let store = LocalKeystore::in_memory(); + let ecdsa_bls381_key = store + .ecdsa_bls381_generate_new(ECDSA_BLS381, Some("//Alice")) + .expect("Cant generate key"); + + let ecdsa_keys = store.ecdsa_public_keys(ECDSA_BLS381); + let bls381_keys = store.bls381_public_keys(ECDSA_BLS381); + let ecdsa_bls381_keys = store.ecdsa_bls381_public_keys(ECDSA_BLS381); + + assert_eq!(ecdsa_keys.len(), 1); + assert_eq!(bls381_keys.len(), 1); + assert_eq!(ecdsa_bls381_keys.len(), 1); + + let ecdsa_key = ecdsa_keys[0]; + let bls381_key = bls381_keys[0]; + + let mut combined_key_raw = [0u8; ecdsa_bls381::PUBLIC_KEY_LEN]; + combined_key_raw[..ecdsa::PUBLIC_KEY_SERIALIZED_SIZE].copy_from_slice(ecdsa_key.as_ref()); + combined_key_raw[ecdsa::PUBLIC_KEY_SERIALIZED_SIZE..].copy_from_slice(bls381_key.as_ref()); + let combined_key = ecdsa_bls381::Public::from_raw(combined_key_raw); + + assert_eq!(combined_key, ecdsa_bls381_key); + } } diff --git a/substrate/client/network/src/discovery.rs b/substrate/client/network/src/discovery.rs index 493c0f51d87e3..26982a5c2ad1a 100644 --- a/substrate/client/network/src/discovery.rs +++ b/substrate/client/network/src/discovery.rs @@ -1303,26 +1303,26 @@ fn kademlia_protocol_name>( #[cfg(test)] mod tests { - use super::{ - kademlia_protocol_name, legacy_kademlia_protocol_name, DiscoveryConfig, DiscoveryOut, - }; + use super::{kademlia_protocol_name, legacy_kademlia_protocol_name, DiscoveryConfig}; use crate::config::ProtocolId; - use futures::prelude::*; - use libp2p::{ - core::{ - transport::{MemoryTransport, Transport}, - upgrade, - }, - identity::Keypair, - noise, - swarm::{Swarm, SwarmEvent}, - yamux, Multiaddr, - }; + use libp2p::{identity::Keypair, Multiaddr}; use sp_core::hash::H256; - use std::{collections::HashSet, task::Poll, time::Duration}; + #[cfg(ignore_flaky_test)] // https://github.com/paritytech/polkadot-sdk/issues/48 #[tokio::test] async fn discovery_working() { + use super::DiscoveryOut; + use futures::prelude::*; + use libp2p::{ + core::{ + transport::{MemoryTransport, Transport}, + upgrade, + }, + noise, + swarm::{Swarm, SwarmEvent}, + yamux, 
+ }; + use std::{collections::HashSet, task::Poll, time::Duration}; let mut first_swarm_peer_id_and_addr = None; let genesis_hash = H256::from_low_u64_be(1); diff --git a/substrate/client/network/src/litep2p/shim/notification/peerset.rs b/substrate/client/network/src/litep2p/shim/notification/peerset.rs index fb822794ccf0a..153987e5956d3 100644 --- a/substrate/client/network/src/litep2p/shim/notification/peerset.rs +++ b/substrate/client/network/src/litep2p/shim/notification/peerset.rs @@ -139,7 +139,7 @@ impl From for traits::Direction { } /// Open result for a fully-opened connection. -#[derive(PartialEq, Eq)] +#[derive(PartialEq, Eq, Debug)] pub enum OpenResult { /// Accept the connection. Accept { @@ -416,6 +416,15 @@ impl Peerset { // if some connected peer gets banned. peerstore_handle.register_protocol(Arc::new(PeersetHandle { tx: cmd_tx.clone() })); + log::debug!( + target: LOG_TARGET, + "{}: creating new peerset with max_outbound {} and max_inbound {} and reserved_only {}", + protocol, + max_out, + max_in, + reserved_only, + ); + ( Self { protocol, @@ -485,8 +494,25 @@ impl Peerset { return OpenResult::Reject }, + // The peer was already rejected by the `report_inbound_substream` call and this + // should never happen. However, this code path is exercised by our fuzzer. + PeerState::Disconnected => { + log::debug!( + target: LOG_TARGET, + "{}: substream opened for a peer that was previously rejected {peer:?}", + self.protocol, + ); + return OpenResult::Reject + }, state => { - panic!("{}: invalid state for open substream {peer:?} {state:?}", self.protocol); + log::error!( + target: LOG_TARGET, + "{}: substream opened for a peer in invalid state {peer:?}: {state:?}", + self.protocol, + ); + + debug_assert!(false); + return OpenResult::Reject; }, } } @@ -545,14 +571,27 @@ impl Peerset { PeerState::Closing { .. } | PeerState::Connected { .. } => { log::debug!(target: LOG_TARGET, "{}: reserved peer {peer:?} disconnected", self.protocol); }, + // The peer was already rejected by the `report_inbound_substream` call and this + // should never happen. However, this code path is exercised by our fuzzer. + PeerState::Disconnected => { + log::debug!( + target: LOG_TARGET, + "{}: substream closed for a peer that was previously rejected {peer:?}", + self.protocol, + ); + }, state => { log::warn!(target: LOG_TARGET, "{}: invalid state for disconnected peer {peer:?}: {state:?}", self.protocol); debug_assert!(false); }, } - *state = PeerState::Backoff; - self.connected_peers.fetch_sub(1usize, Ordering::Relaxed); + // Rejected peers do not count towards slot allocation. + if !matches!(state, PeerState::Disconnected) { + self.connected_peers.fetch_sub(1usize, Ordering::Relaxed); + } + + *state = PeerState::Backoff; self.pending_backoffs.push(Box::pin(async move { Delay::new(DEFAULT_BACKOFF).await; (peer, DISCONNECT_ADJUSTMENT) @@ -576,12 +615,25 @@ impl Peerset { let state = self.peers.entry(peer).or_insert(PeerState::Disconnected); let is_reserved_peer = self.reserved_peers.contains(&peer); + // Check if this is a non-reserved peer and if the protocol is in reserved-only mode. 
+ let should_reject = self.reserved_only && !is_reserved_peer; + match state { + // disconnected peers that are reserved-only peers are rejected + PeerState::Disconnected if should_reject => { + log::trace!( + target: LOG_TARGET, + "{}: rejecting non-reserved peer {peer:?} in reserved-only mode (prev state: {state:?})", + self.protocol, + ); + + return ValidationResult::Reject + }, // disconnected peers proceed directly to inbound slot allocation PeerState::Disconnected => {}, // peer is backed off but if it can be accepted (either a reserved peer or inbound slot // available), accept the peer and then just ignore the back-off timer when it expires - PeerState::Backoff => + PeerState::Backoff => { if !is_reserved_peer && self.num_in == self.max_in { log::trace!( target: LOG_TARGET, @@ -590,7 +642,16 @@ impl Peerset { ); return ValidationResult::Reject - }, + } + + // The peer remains in the `PeerState::Backoff` state until the current timer + // expires. Then, the peer will be in the disconnected state, subject to further + // rejection if the peer is not reserved by then. + if should_reject { + return ValidationResult::Reject + } + }, + // `Peerset` had initiated an outbound substream but litep2p had received an inbound // substream before the command to open the substream was received, meaning local and // remote desired to open a connection at the same time. Since outbound substreams @@ -605,6 +666,17 @@ impl Peerset { // inbound substreams, that system has to be kept working for the time being. Once that // issue is fixed, this approach can be re-evaluated if need be. PeerState::Opening { direction: Direction::Outbound(reserved) } => { + if should_reject { + log::trace!( + target: LOG_TARGET, + "{}: rejecting inbound substream from {peer:?} ({reserved:?}) in reserved-only mode that was marked outbound", + self.protocol, + ); + + *state = PeerState::Canceled { direction: Direction::Outbound(*reserved) }; + return ValidationResult::Reject + } + log::trace!( target: LOG_TARGET, "{}: inbound substream received for {peer:?} ({reserved:?}) that was marked outbound", @@ -616,7 +688,7 @@ impl Peerset { PeerState::Canceled { direction } => { log::trace!( target: LOG_TARGET, - "{}: {peer:?} is canceled, rejecting substream", + "{}: {peer:?} is canceled, rejecting substream should_reject={should_reject}", self.protocol, ); @@ -870,6 +942,12 @@ impl Peerset { &self.peers } + /// Get reference to known peers. + #[cfg(test)] + pub fn peers_mut(&mut self) -> &mut HashMap { + &mut self.peers + } + /// Get reference to reserved peers. 
#[cfg(test)] pub fn reserved_peers(&self) -> &HashSet { @@ -893,6 +971,8 @@ impl Stream for Peerset { } if let Poll::Ready(Some(action)) = Pin::new(&mut self.cmd_rx).poll_next(cx) { + log::trace!(target: LOG_TARGET, "{}: received command {action:?}", self.protocol); + match action { PeersetCommand::DisconnectPeer { peer } if !self.reserved_peers.contains(&peer) => match self.peers.remove(&peer) { diff --git a/substrate/client/network/src/litep2p/shim/notification/tests/peerset.rs b/substrate/client/network/src/litep2p/shim/notification/tests/peerset.rs index 295a5b441b3ea..9ec332681336f 100644 --- a/substrate/client/network/src/litep2p/shim/notification/tests/peerset.rs +++ b/substrate/client/network/src/litep2p/shim/notification/tests/peerset.rs @@ -35,7 +35,10 @@ use sc_network_types::PeerId; use std::{ collections::HashSet, - sync::{atomic::Ordering, Arc}, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, task::Poll, }; @@ -1123,3 +1126,174 @@ async fn set_reserved_peers_cannot_move_previously_reserved() { assert_eq!(peerset.num_out(), 0usize); assert_eq!(peerset.reserved_peers().len(), 3usize); } + +#[tokio::test] +async fn reserved_only_rejects_non_reserved_peers() { + sp_tracing::try_init_simple(); + + let peerstore_handle = Arc::new(peerstore_handle_test()); + let reserved_peers = HashSet::from_iter([PeerId::random(), PeerId::random(), PeerId::random()]); + + let connected_peers = Arc::new(AtomicUsize::new(0)); + let (mut peerset, to_peerset) = Peerset::new( + ProtocolName::from("/notif/1"), + 3, + 3, + true, + reserved_peers.clone(), + connected_peers.clone(), + peerstore_handle, + ); + assert_eq!(peerset.num_in(), 0usize); + assert_eq!(peerset.num_out(), 0usize); + + // Step 1. Connect reserved peers. + { + match peerset.next().await { + Some(PeersetNotificationCommand::OpenSubstream { peers: out_peers }) => { + assert_eq!(peerset.num_in(), 0usize); + assert_eq!(peerset.num_out(), 0usize); + + for outbound_peer in &out_peers { + assert!(reserved_peers.contains(outbound_peer)); + assert_eq!( + peerset.peers().get(&outbound_peer), + Some(&PeerState::Opening { direction: Direction::Outbound(Reserved::Yes) }) + ); + } + }, + event => panic!("invalid event: {event:?}"), + } + // Report the reserved peers as connected. + for peer in &reserved_peers { + assert!(std::matches!( + peerset.report_substream_opened(*peer, traits::Direction::Outbound), + OpenResult::Accept { .. } + )); + assert_eq!( + peerset.peers().get(peer), + Some(&PeerState::Connected { direction: Direction::Outbound(Reserved::Yes) }) + ); + } + assert_eq!(connected_peers.load(Ordering::Relaxed), 3usize); + } + + // Step 2. Ensure non-reserved peers are rejected. + let normal_peers: Vec = vec![PeerId::random(), PeerId::random(), PeerId::random()]; + { + // Report the peers as inbound for validation purposes. + for peer in &normal_peers { + // We are running in reserved only mode. + let result = peerset.report_inbound_substream(*peer); + assert_eq!(result, ValidationResult::Reject); + + // The peer must be kept in the disconnected state. + assert_eq!(peerset.peers().get(peer), Some(&PeerState::Disconnected)); + } + // Ensure slots are not used. + assert_eq!(peerset.num_in(), 0usize); + assert_eq!(peerset.num_out(), 0usize); + + // Report that all substreams were opened. + for peer in &normal_peers { + // We must reject them because the peers were rejected prior by + // `report_inbound_substream` and therefore set into the disconnected state. 
+ let result = peerset.report_substream_opened(*peer, traits::Direction::Inbound); + assert_eq!(result, OpenResult::Reject); + + // Peer remains disconnected. + assert_eq!(peerset.peers().get(&peer), Some(&PeerState::Disconnected)); + } + assert_eq!(connected_peers.load(Ordering::Relaxed), 3usize); + + // Because we have returned `Reject` from `report_substream_opened` + // the substreams will later be closed. + for peer in &normal_peers { + peerset.report_substream_closed(*peer); + + // Peer moves into the backoff state. + assert_eq!(peerset.peers().get(peer), Some(&PeerState::Backoff)); + } + // The slots are not used / altered. + assert_eq!(connected_peers.load(Ordering::Relaxed), 3usize); + } + + // Move peers out of the backoff state (ie simulate 5s elapsed time). + for (peer, state) in peerset.peers_mut() { + if normal_peers.contains(peer) { + match state { + PeerState::Backoff => *state = PeerState::Disconnected, + state => panic!("invalid state peer={peer:?} state={state:?}"), + } + } else if reserved_peers.contains(peer) { + match state { + PeerState::Connected { direction: Direction::Outbound(Reserved::Yes) } => {}, + state => panic!("invalid state peer={peer:?} state={state:?}"), + } + } else { + panic!("invalid peer={peer:?} not present"); + } + } + + // Step 3. Allow connections from non-reserved peers. + { + to_peerset + .unbounded_send(PeersetCommand::SetReservedOnly { reserved_only: false }) + .unwrap(); + // This will activate the non-reserved peers and give us the best outgoing + // candidates to connect to. + match peerset.next().await { + Some(PeersetNotificationCommand::OpenSubstream { peers }) => { + // These are the non-reserved peers we informed the peerset above. + assert_eq!(peers.len(), 3); + for peer in &peers { + assert!(!reserved_peers.contains(peer)); + assert_eq!( + peerset.peers().get(peer), + Some(&PeerState::Opening { direction: Direction::Outbound(Reserved::No) }) + ); + assert!(normal_peers.contains(peer)); + } + }, + event => panic!("invalid event : {event:?}"), + } + // Ensure slots are used. + assert_eq!(peerset.num_in(), 0usize); + assert_eq!(peerset.num_out(), 3usize); + + for peer in &normal_peers { + let result = peerset.report_inbound_substream(*peer); + assert_eq!(result, ValidationResult::Accept); + // Direction is kept from the outbound slot allocation. + assert_eq!( + peerset.peers().get(peer), + Some(&PeerState::Opening { direction: Direction::Outbound(Reserved::No) }) + ); + } + // Ensure slots are used. + assert_eq!(peerset.num_in(), 0usize); + assert_eq!(peerset.num_out(), 3usize); + // Peers are only reported as connected once the substream is opened. + // 3 represents the reserved peers that are already connected. + assert_eq!(connected_peers.load(Ordering::Relaxed), 3usize); + + let (success, failure) = normal_peers.split_at(2); + for peer in success { + assert!(std::matches!( + peerset.report_substream_opened(*peer, traits::Direction::Outbound), + OpenResult::Accept { .. } + )); + assert_eq!( + peerset.peers().get(peer), + Some(&PeerState::Connected { direction: Direction::Outbound(Reserved::No) }) + ); + } + // Simulate one failure. 
+ let failure = failure[0]; + peerset.report_substream_open_failure(failure, NotificationError::ChannelClogged); + assert_eq!(peerset.peers().get(&failure), Some(&PeerState::Backoff)); + assert_eq!(peerset.num_in(), 0usize); + assert_eq!(peerset.num_out(), 2usize); + assert_eq!(connected_peers.load(Ordering::Relaxed), 5usize); + } +} diff --git a/substrate/client/network/test/src/sync.rs b/substrate/client/network/test/src/sync.rs index 91307d8692816..d60d1c0a518d8 100644 --- a/substrate/client/network/test/src/sync.rs +++ b/substrate/client/network/test/src/sync.rs @@ -826,6 +826,7 @@ async fn sync_to_tip_requires_that_sync_protocol_is_informed_about_best_block() /// Ensures that if we as a syncing node sync to the tip while we are connected to another peer /// that is currently also doing a major sync. +#[cfg(ignore_flaky_test)] // https://github.com/paritytech/polkadot-sdk/issues/48 #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn sync_to_tip_when_we_sync_together_with_multiple_peers() { sp_tracing::try_init_simple(); diff --git a/substrate/client/rpc-spec-v2/Cargo.toml b/substrate/client/rpc-spec-v2/Cargo.toml index ebe7e7eca7b4f..150443fe80b92 100644 --- a/substrate/client/rpc-spec-v2/Cargo.toml +++ b/substrate/client/rpc-spec-v2/Cargo.toml @@ -28,6 +28,7 @@ hex = { workspace = true, default-features = true } itertools = { workspace = true } log = { workspace = true, default-features = true } parking_lot = { workspace = true, default-features = true } +prometheus-endpoint = { workspace = true, default-features = true } rand = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = true } sc-rpc = { workspace = true, default-features = true } diff --git a/substrate/client/rpc-spec-v2/src/transaction/event.rs b/substrate/client/rpc-spec-v2/src/transaction/event.rs index 882ac8490b07c..8cf998e33136e 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/event.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/event.rs @@ -95,6 +95,19 @@ pub enum TransactionEvent { Dropped(TransactionDropped), } +impl TransactionEvent { + /// Returns true if this is the last event emitted by the RPC subscription. + pub fn is_final(&self) -> bool { + matches!( + &self, + TransactionEvent::Finalized(_) | + TransactionEvent::Error(_) | + TransactionEvent::Invalid(_) | + TransactionEvent::Dropped(_) + ) + } +} + /// Intermediate representation (IR) for the transaction events /// that handles block events only. /// diff --git a/substrate/client/rpc-spec-v2/src/transaction/metrics.rs b/substrate/client/rpc-spec-v2/src/transaction/metrics.rs new file mode 100644 index 0000000000000..25745ba9116df --- /dev/null +++ b/substrate/client/rpc-spec-v2/src/transaction/metrics.rs @@ -0,0 +1,155 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Metrics for recording transaction events. + +use std::{collections::HashSet, time::Instant}; + +use prometheus_endpoint::{ + exponential_buckets, linear_buckets, register, Histogram, HistogramOpts, PrometheusError, + Registry, +}; + +use super::TransactionEvent; + +/// RPC layer metrics for transaction pool. +#[derive(Debug, Clone)] +pub struct Metrics { + validated: Histogram, + in_block: Histogram, + finalized: Histogram, + dropped: Histogram, + invalid: Histogram, + error: Histogram, +} + +impl Metrics { + /// Creates a new [`Metrics`] instance. + pub fn new(registry: &Registry) -> Result { + let validated = register( + Histogram::with_opts( + HistogramOpts::new( + "rpc_transaction_validation_time", + "RPC Transaction validation time in seconds", + ) + .buckets(exponential_buckets(0.01, 2.0, 16).expect("Valid buckets; qed")), + )?, + registry, + )?; + + let in_block = register( + Histogram::with_opts( + HistogramOpts::new( + "rpc_transaction_in_block_time", + "RPC Transaction in block time in seconds", + ) + .buckets(linear_buckets(0.0, 3.0, 20).expect("Valid buckets; qed")), + )?, + registry, + )?; + + let finalized = register( + Histogram::with_opts( + HistogramOpts::new( + "rpc_transaction_finalized_time", + "RPC Transaction finalized time in seconds", + ) + .buckets(linear_buckets(0.01, 40.0, 20).expect("Valid buckets; qed")), + )?, + registry, + )?; + + let dropped = register( + Histogram::with_opts( + HistogramOpts::new( + "rpc_transaction_dropped_time", + "RPC Transaction dropped time in seconds", + ) + .buckets(linear_buckets(0.01, 3.0, 20).expect("Valid buckets; qed")), + )?, + registry, + )?; + + let invalid = register( + Histogram::with_opts( + HistogramOpts::new( + "rpc_transaction_invalid_time", + "RPC Transaction invalid time in seconds", + ) + .buckets(linear_buckets(0.01, 3.0, 20).expect("Valid buckets; qed")), + )?, + registry, + )?; + + let error = register( + Histogram::with_opts( + HistogramOpts::new( + "rpc_transaction_error_time", + "RPC Transaction error time in seconds", + ) + .buckets(linear_buckets(0.01, 3.0, 20).expect("Valid buckets; qed")), + )?, + registry, + )?; + + Ok(Metrics { validated, in_block, finalized, dropped, invalid, error }) + } +} + +/// Transaction metrics for a single transaction instance. +pub struct InstanceMetrics { + /// The metrics instance. + metrics: Option, + /// The time when the transaction was submitted. + submitted_at: Instant, + /// Ensure the states are reported once. + reported_states: HashSet<&'static str>, +} + +impl InstanceMetrics { + /// Creates a new [`InstanceMetrics`] instance. + pub fn new(metrics: Option) -> Self { + Self { metrics, submitted_at: Instant::now(), reported_states: HashSet::new() } + } + + /// Record the execution time of a transaction state. + /// + /// This represents how long it took for the transaction to move to the next state. + /// + /// The method must be called before the transaction event is provided to the user. + pub fn register_event(&mut self, event: &TransactionEvent) { + let Some(ref metrics) = self.metrics else { + return; + }; + + let (histogram, target_state) = match event { + TransactionEvent::Validated => (&metrics.validated, "validated"), + TransactionEvent::BestChainBlockIncluded(Some(_)) => (&metrics.in_block, "in_block"), + TransactionEvent::BestChainBlockIncluded(None) => (&metrics.in_block, "retracted"), + TransactionEvent::Finalized(..) 
=> (&metrics.finalized, "finalized"), + TransactionEvent::Error(..) => (&metrics.error, "error"), + TransactionEvent::Dropped(..) => (&metrics.dropped, "dropped"), + TransactionEvent::Invalid(..) => (&metrics.invalid, "invalid"), + }; + + // Only record the state if it hasn't been reported before. + if self.reported_states.insert(target_state) { + histogram.observe(self.submitted_at.elapsed().as_secs_f64()); + } + } +} diff --git a/substrate/client/rpc-spec-v2/src/transaction/mod.rs b/substrate/client/rpc-spec-v2/src/transaction/mod.rs index 514ccf047dc28..4cb7ceb06ab0d 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/mod.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/mod.rs @@ -28,6 +28,8 @@ #[cfg(test)] mod tests; +mod metrics; + pub mod api; pub mod error; pub mod event; @@ -36,5 +38,6 @@ pub mod transaction_broadcast; pub use api::{TransactionApiServer, TransactionBroadcastApiServer}; pub use event::{TransactionBlock, TransactionDropped, TransactionError, TransactionEvent}; +pub use metrics::Metrics as TransactionMetrics; pub use transaction::Transaction; pub use transaction_broadcast::TransactionBroadcast; diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests/setup.rs b/substrate/client/rpc-spec-v2/src/transaction/tests/setup.rs index 570174a3db643..2618da953bc26 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/tests/setup.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/tests/setup.rs @@ -115,7 +115,8 @@ pub fn setup_api_tx() -> ( let (task_executor, executor_recv) = TaskExecutorBroadcast::new(); let tx_api = - RpcTransaction::new(client_mock.clone(), pool.clone(), Arc::new(task_executor)).into_rpc(); + RpcTransaction::new(client_mock.clone(), pool.clone(), Arc::new(task_executor), None) + .into_rpc(); (api, pool, client_mock, tx_api, executor_recv, pool_state) } diff --git a/substrate/client/rpc-spec-v2/src/transaction/transaction.rs b/substrate/client/rpc-spec-v2/src/transaction/transaction.rs index ac24ce960f61b..a497c1a71f8b0 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/transaction.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/transaction.rs @@ -30,6 +30,9 @@ use crate::{ use codec::Decode; use futures::{StreamExt, TryFutureExt}; use jsonrpsee::{core::async_trait, PendingSubscriptionSink}; + +use super::metrics::{InstanceMetrics, Metrics}; + use sc_rpc::utils::{RingBuffer, Subscription}; use sc_transaction_pool_api::{ error::IntoPoolError, BlockHash, TransactionFor, TransactionPool, TransactionSource, @@ -50,12 +53,19 @@ pub struct Transaction { pool: Arc, /// Executor to spawn subscriptions. executor: SubscriptionTaskExecutor, + /// Metrics for transactions. + metrics: Option, } impl Transaction { /// Creates a new [`Transaction`]. - pub fn new(client: Arc, pool: Arc, executor: SubscriptionTaskExecutor) -> Self { - Transaction { client, pool, executor } + pub fn new( + client: Arc, + pool: Arc, + executor: SubscriptionTaskExecutor, + metrics: Option, + ) -> Self { + Transaction { client, pool, executor, metrics } } } @@ -78,6 +88,9 @@ where let client = self.client.clone(); let pool = self.pool.clone(); + // Get a new transaction metrics instance and increment the counter. 
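+ // `InstanceMetrics` captures the submission `Instant`; every later `register_event` call
+ // observes the elapsed time in the histogram of the corresponding state, at most once per
+ // state.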
+ let mut metrics = InstanceMetrics::new(self.metrics.clone()); + let fut = async move { let decoded_extrinsic = match TransactionFor::::decode(&mut &xt[..]) { Ok(decoded_extrinsic) => decoded_extrinsic, @@ -86,12 +99,14 @@ where let Ok(sink) = pending.accept().await.map(Subscription::from) else { return }; + let event = TransactionEvent::Invalid::>(TransactionError { + error: "Extrinsic bytes cannot be decoded".into(), + }); + + metrics.register_event(&event); + // The transaction is invalid. - let _ = sink - .send(&TransactionEvent::Invalid::>(TransactionError { - error: "Extrinsic bytes cannot be decoded".into(), - })) - .await; + let _ = sink.send(&event).await; return }, }; @@ -112,8 +127,17 @@ where match submit.await { Ok(stream) => { - let stream = - stream.filter_map(move |event| async move { handle_event(event) }).boxed(); + let stream = stream + .filter_map(|event| { + let event = handle_event(event); + + event.as_ref().inspect(|event| { + metrics.register_event(event); + }); + + async move { event } + }) + .boxed(); // If the subscription is too slow older events will be overwritten. sink.pipe_from_stream(stream, RingBuffer::new(3)).await; @@ -122,6 +146,9 @@ where // We have not created an `Watcher` for the tx. Make sure the // error is still propagated as an event. let event: TransactionEvent<::Hash> = err.into(); + + metrics.register_event(&event); + _ = sink.send(&event).await; }, }; @@ -134,7 +161,7 @@ where /// Handle events generated by the transaction-pool and convert them /// to the new API expected state. #[inline] -pub fn handle_event( +fn handle_event( event: TransactionStatus, ) -> Option> { match event { diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs index 36601a40ff202..74d94c30cd69b 100644 --- a/substrate/client/service/src/builder.rs +++ b/substrate/client/service/src/builder.rs @@ -27,12 +27,13 @@ use crate::{ }; use futures::{select, FutureExt, StreamExt}; use jsonrpsee::RpcModule; -use log::info; +use log::{debug, error, info}; use prometheus_endpoint::Registry; use sc_chain_spec::{get_extension, ChainSpec}; use sc_client_api::{ execution_extensions::ExecutionExtensions, proof_provider::ProofProvider, BadBlocks, - BlockBackend, BlockchainEvents, ExecutorProvider, ForkBlocks, StorageProvider, UsageProvider, + BlockBackend, BlockchainEvents, ExecutorProvider, ForkBlocks, KeysIter, StorageProvider, + TrieCacheContext, UsageProvider, }; use sc_client_db::{Backend, BlocksPruning, DatabaseSettings, PruningMode}; use sc_consensus::import_queue::{ImportQueue, ImportQueueService}; @@ -90,6 +91,7 @@ use sp_consensus::block_validation::{ use sp_core::traits::{CodeExecutor, SpawnNamed}; use sp_keystore::KeystorePtr; use sp_runtime::traits::{Block as BlockT, BlockIdTo, NumberFor, Zero}; +use sp_storage::{ChildInfo, ChildType, PrefixedStorageKey}; use std::{ str::FromStr, sync::Arc, @@ -263,12 +265,89 @@ where }, )?; + if let Some(warm_up_strategy) = config.warm_up_trie_cache { + let storage_root = client.usage_info().chain.best_hash; + let backend_clone = backend.clone(); + + if warm_up_strategy.is_blocking() { + // We use the blocking strategy for testing purposes. + // So better to error out if it fails. 
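+ // The non-blocking variant below is instead spawned as a blocking task and only logs an
+ // error on failure, so it never holds up node startup.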
+ warm_up_trie_cache(backend_clone, storage_root)?; + } else { + task_manager.spawn_handle().spawn_blocking( + "warm-up-trie-cache", + None, + async move { + if let Err(e) = warm_up_trie_cache(backend_clone, storage_root) { + error!("Failed to warm up trie cache: {e}"); + } + }, + ); + } + } + client }; Ok((client, backend, keystore_container, task_manager)) } +fn child_info(key: Vec) -> Option { + let prefixed_key = PrefixedStorageKey::new(key); + ChildType::from_prefixed_key(&prefixed_key).and_then(|(child_type, storage_key)| { + (child_type == ChildType::ParentKeyId).then(|| ChildInfo::new_default(storage_key)) + }) +} + +fn warm_up_trie_cache( + backend: Arc>, + storage_root: TBl::Hash, +) -> Result<(), Error> { + use sc_client_api::backend::Backend; + use sp_state_machine::Backend as StateBackend; + + let untrusted_state = || backend.state_at(storage_root, TrieCacheContext::Untrusted); + let trusted_state = || backend.state_at(storage_root, TrieCacheContext::Trusted); + + debug!("Populating trie cache started",); + let start_time = std::time::Instant::now(); + let mut keys_count = 0; + let mut child_keys_count = 0; + for key in KeysIter::<_, TBl>::new(untrusted_state()?, None, None)? { + if keys_count != 0 && keys_count % 100_000 == 0 { + debug!("{} keys and {} child keys have been warmed", keys_count, child_keys_count); + } + match child_info(key.0.clone()) { + Some(info) => { + for child_key in + KeysIter::<_, TBl>::new_child(untrusted_state()?, info.clone(), None, None)? + { + if trusted_state()? + .child_storage(&info, &child_key.0) + .unwrap_or_default() + .is_none() + { + debug!("Child storage value unexpectedly empty: {child_key:?}"); + } + child_keys_count += 1; + } + }, + None => { + if trusted_state()?.storage(&key.0).unwrap_or_default().is_none() { + debug!("Storage value unexpectedly empty: {key:?}"); + } + keys_count += 1; + }, + } + } + debug!( + "Trie cache populated with {keys_count} keys and {child_keys_count} child keys in {} s", + start_time.elapsed().as_secs_f32() + ); + + Ok(()) +} + /// Creates a [`NativeElseWasmExecutor`](sc_executor::NativeElseWasmExecutor) according to /// [`Configuration`]. #[deprecated(note = "Please switch to `new_wasm_executor`. Will be removed at end of 2024.")] @@ -515,6 +594,14 @@ where let rpc_id_provider = config.rpc.id_provider.take(); // jsonrpsee RPC + // RPC-V2 specific metrics need to be registered before the RPC server is started, + // since we might have two instances running (one for the in-memory RPC and one for the network + // RPC). + let rpc_v2_metrics = config + .prometheus_registry() + .map(|registry| sc_rpc_spec_v2::transaction::TransactionMetrics::new(registry)) + .transpose()?; + let gen_rpc_module = || { gen_rpc_module( task_manager.spawn_handle(), @@ -529,6 +616,7 @@ where config.blocks_pruning, backend.clone(), &*rpc_builder, + rpc_v2_metrics.clone(), ) }; @@ -676,6 +764,7 @@ pub fn gen_rpc_module( blocks_pruning: BlocksPruning, backend: Arc, rpc_builder: &(dyn Fn(SubscriptionTaskExecutor) -> Result, Error>), + metrics: Option, ) -> Result, Error> where TBl: BlockT, @@ -731,6 +820,7 @@ where client.clone(), transaction_pool.clone(), task_executor.clone(), + metrics, ) .into_rpc(); diff --git a/substrate/client/service/src/config.rs b/substrate/client/service/src/config.rs index cc1fec8b081d2..74ae044bdc3f8 100644 --- a/substrate/client/service/src/config.rs +++ b/substrate/client/service/src/config.rs @@ -70,6 +70,8 @@ pub struct Configuration { /// /// If `None` is given the cache is disabled. 
pub trie_cache_maximum_size: Option<usize>, + /// Warm up the trie cache at startup, forcing the state into memory. + pub warm_up_trie_cache: Option<TrieCacheWarmUpStrategy>, /// State pruning settings. pub state_pruning: Option<PruningMode>, /// Number of blocks to keep in the db. @@ -115,6 +117,22 @@ pub struct Configuration { pub base_path: BasePath, } +/// Warm-up strategy for the trie cache. +#[derive(Debug, Clone, Copy)] +pub enum TrieCacheWarmUpStrategy { + /// Warm up the cache in a non-blocking way. + NonBlocking, + /// Warm up the cache in a blocking way. + Blocking, +} + +impl TrieCacheWarmUpStrategy { + /// Returns true if the warm-up strategy is blocking. + pub(crate) fn is_blocking(&self) -> bool { + matches!(self, Self::Blocking) + } +} + /// Type for tasks spawned by the executor. #[derive(PartialEq)] pub enum TaskType { diff --git a/substrate/client/service/test/src/lib.rs b/substrate/client/service/test/src/lib.rs index d64581480cdb8..46217d46dd800 100644 --- a/substrate/client/service/test/src/lib.rs +++ b/substrate/client/service/test/src/lib.rs @@ -235,6 +235,7 @@ fn node_config( keystore: KeystoreConfig::Path { path: root.join("key"), password: None }, database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 }, trie_cache_maximum_size: Some(16 * 1024 * 1024), + warm_up_trie_cache: None, state_pruning: Default::default(), blocks_pruning: BlocksPruning::KeepFinalized, chain_spec: Box::new((*spec).clone()), diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index 2faf5f4ebd30d..3553465668e31 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -474,7 +474,10 @@ where /// /// The method attempts to build a temporary view and create an iterator of ready transactions /// for a specific `at` hash. If a valid view is found, it collects and prunes - /// transactions already included in the blocks and returns the valid set. + /// transactions already included in the blocks and returns the valid set. If no view is found, + /// the ready transaction set of the most recent view processed by the fork-aware txpool is + /// returned. If the block number of the provided `at` block hash cannot be resolved, an empty + /// transaction set is returned. /// /// Pruning is just rebuilding the underlying transactions graph, no validations are executed, /// so this process shall be fast.
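To make the three-way cascade of the rewritten `ready_at_light` (next hunk) easier to follow, here is a self-contained sketch under toy types: `u64` hashes and heights, and views as plain lists of tx names. All names are illustrative stand-ins, not the fork-aware txpool's real API:

```rust
use std::collections::HashMap;

type Hash = u64;
type Number = u64;

/// Toy stand-in for a block: parent link plus height.
struct Block {
    parent: Hash,
    number: Number,
}

/// Toy stand-in for the fork-aware `ViewStore`: a view is just the list of
/// ready transaction names it would yield.
struct ViewStore {
    views: HashMap<Hash, Vec<&'static str>>,
    chain: HashMap<Hash, Block>,
    most_recent_view: Option<Hash>,
}

impl ViewStore {
    /// Walk `at`'s fork towards `up_to` (the finalized height); return the
    /// first view found plus the blocks enacted on top of it, whose
    /// extrinsics would have to be pruned from the cloned view.
    fn find_view_descendent_up_to_number(
        &self,
        mut at: Hash,
        mut number: Number,
        up_to: Number,
    ) -> Option<(&Vec<&'static str>, Vec<Hash>)> {
        let mut enacted = Vec::new();
        while number >= up_to {
            if let Some(view) = self.views.get(&at) {
                return Some((view, enacted));
            }
            enacted.push(at);
            at = self.chain.get(&at)?.parent;
            number = number.saturating_sub(1);
        }
        None
    }
}

/// The three-step cascade of the rewritten `ready_at_light`.
fn ready_at_light(store: &ViewStore, at: Hash, finalized: Number) -> Vec<&'static str> {
    // 1. Preferred path: a view on `at`'s own fork, no deeper than finality.
    if let Some(number) = store.chain.get(&at).map(|b| b.number) {
        if let Some((view, _enacted)) =
            store.find_view_descendent_up_to_number(at, number, finalized)
        {
            // The real code clones the view and prunes the txs of `_enacted`.
            return view.clone();
        }
    }
    // 2. Fallback: `at` is not on a known fork; reuse the most recent view,
    //    accepting a possibly stale but non-empty ready set.
    if let Some(view) = store.most_recent_view.and_then(|h| store.views.get(&h)) {
        return view.clone();
    }
    // 3. Nothing known: empty ready set.
    Vec::new()
}

fn main() {
    let mut store = ViewStore {
        views: HashMap::new(),
        chain: HashMap::new(),
        most_recent_view: Some(1),
    };
    store.chain.insert(1, Block { parent: 0, number: 1 });
    store.chain.insert(2, Block { parent: 1, number: 2 });
    store.views.insert(1, vec!["xt3", "xt4"]);
    // A view exists on block 1, so asking at block 2 finds it via the walk.
    assert_eq!(ready_at_light(&store, 2, 0), vec!["xt3", "xt4"]);
}
```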
@@ -487,35 +490,27 @@ where "fatp::ready_at_light" ); - let Ok(block_number) = self.api.resolve_block_number(at) else { - return Box::new(std::iter::empty()) - }; - - let best_result = { - api.tree_route(self.enactment_state.lock().recent_finalized_block(), at).map( - |tree_route| { - if let Some((index, view)) = - tree_route.enacted().iter().enumerate().rev().skip(1).find_map(|(i, b)| { - self.view_store.get_view_at(b.hash, true).map(|(view, _)| (i, view)) - }) { - let e = tree_route.enacted()[index..].to_vec(); - (TreeRoute::new(e, 0).ok(), Some(view)) - } else { - (None, None) - } - }, - ) - }; - - if let Ok((Some(best_tree_route), Some(best_view))) = best_result { - let (tmp_view, _, _): (View<ChainApi>, _, _) = - View::new_from_other(&best_view, &HashAndNumber { hash: at, number: block_number }); - + let at_number = self.api.resolve_block_number(at).ok(); + let finalized_number = self + .api + .resolve_block_number(self.enactment_state.lock().recent_finalized_block()) + .ok(); + + // Prune all txs from the best view found, taking into account the extrinsics of the + // blocks that are more recent than the view itself. + if let Some((view, enacted_blocks, at_hn)) = at_number.and_then(|at_number| { + let at_hn = HashAndNumber { hash: at, number: at_number }; + finalized_number.and_then(|finalized_number| { + self.view_store + .find_view_descendent_up_to_number(&at_hn, finalized_number) + .map(|(view, enacted_blocks)| (view, enacted_blocks, at_hn)) + }) + }) { + let (tmp_view, _, _): (View<ChainApi>, _, _) = View::new_from_other(&view, &at_hn); let mut all_extrinsics = vec![]; - - for h in best_tree_route.enacted() { + for h in enacted_blocks { let extrinsics = api - .block_body(h.hash) + .block_body(h) .await .unwrap_or_else(|error| { warn!( @@ -546,7 +541,7 @@ where debug!( target: LOG_TARGET, ?at, - best_view_hash = ?best_view.at.hash, + best_view_hash = ?view.at.hash, before_count, to_be_removed = all_extrinsics.len(), after_count, @@ -554,6 +549,17 @@ where "fatp::ready_at_light" ); Box::new(tmp_view.pool.validated_pool().ready()) + } else if let Some((most_recent_view, _)) = self + .view_store + .most_recent_view + .read() + .and_then(|at| self.view_store.get_view_at(at, true)) + { + // Fallback for the case when `at` is not on an already known fork. + // Falls back to the most recent view, which may include txs that are + // invalid or already included in blocks, but can still yield a + // partially valid ready set, which is better than returning nothing. + Box::new(most_recent_view.pool.validated_pool().ready()) } else { let empty: ReadyIteratorFor<ChainApi> = Box::new(std::iter::empty()); debug!( diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/mod.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/mod.rs index fce2d4ad6b27e..b778042d33cc3 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/mod.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/mod.rs @@ -209,14 +209,16 @@ //! ### Light maintain //! The [maintain](#maintain) procedure can sometimes be quite heavy, and it may not be accomplished //! within the time window expected by the block builder. On top of that block builder may want to -//! build few blocks in the raw, not giving the pool enough time to accomplish possible ongoing +//! build a few blocks in a row, not giving the pool enough time to accomplish a possible ongoing //! maintain process. //! //! To address this, there is a [light version][`ready_at_light`] of the maintain procedure. It -//!
[finds the best view][find_best_view], clones it and prunes all the transactions that were -//! included in enacted part of [tree route][`TreeRoute`] from the base view to the block at which a -//! ready iterator was requested. No new [transaction validations][runtime_api::validate] are -//! required to accomplish it. +//! [finds the first descendent view][`find_view_descendent_up_to_number`] up to the recent +//! finalized block, clones it and prunes all the transactions that were included in the enacted part of +//! the traversed route, from the base view to the block at which a ready iterator was requested. No +//! new [transaction validations][runtime_api::validate] are required to accomplish it. If no view +//! is found, it will return the ready transactions of the most recent view processed by the +//! transaction pool. //! //! ### Providing ready transactions: `ready_at` //! The asynchronous [`ready_at`] function resolves to the [ready transactions @@ -314,6 +316,7 @@ //! [`ViewStore`]: crate::fork_aware_txpool::view_store::ViewStore //! [`finish_background_revalidations`]: crate::fork_aware_txpool::view_store::ViewStore::finish_background_revalidations //! [find_best_view]: crate::fork_aware_txpool::view_store::ViewStore::find_best_view +//! [`find_view_descendent_up_to_number`]: crate::fork_aware_txpool::view_store::ViewStore::find_view_descendent_up_to_number //! [`active_views`]: crate::fork_aware_txpool::view_store::ViewStore::active_views //! [`inactive_views`]: crate::fork_aware_txpool::view_store::ViewStore::inactive_views //! [`TxMemPool`]: crate::fork_aware_txpool::tx_mem_pool::TxMemPool diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs index 3e7e230f7d31d..bcf1e8d5ceef9 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs @@ -40,7 +40,7 @@ use sc_transaction_pool_api::{ use sp_blockchain::{HashAndNumber, TreeRoute}; use sp_runtime::{ generic::BlockId, - traits::{Block as BlockT, Saturating}, + traits::{Block as BlockT, Header, One, Saturating}, transaction_validity::{InvalidTransaction, TransactionValidityError}, }; use std::{ @@ -338,6 +338,39 @@ where self.active_views.read().is_empty() && self.inactive_views.read().is_empty() } + /// Searches the view store for the first descendant view by iterating through the fork of + /// the `at` block, up to the provided `up_to` block number. + /// + /// Returns a pair of the first view found and the list of blocks enacted on top of it, or + /// `None` if no view exists in the traversed range. + pub(super) fn find_view_descendent_up_to_number( + &self, + at: &HashAndNumber<Block>, + up_to: <<Block as BlockT>::Header as Header>::Number, + ) -> Option<(Arc<View<ChainApi>>, Vec<Block::Hash>)> { + let mut enacted_blocks = Vec::new(); + let mut at_hash = at.hash; + let mut at_number = at.number; + + // Search for a view that can be used to get and return an approximate ready + // transaction set. + while at_number >= up_to { + // Found a view, stop searching. + if let Some((view, _)) = self.get_view_at(at_hash, true) { + return Some((view, enacted_blocks)); + } + + enacted_blocks.push(at_hash); + + // Move up the fork. + let header = self.api.block_header(at_hash).ok().flatten()?; + at_hash = *header.parent_hash(); + at_number = at_number.saturating_sub(One::one()); + } + + None + } + /// Finds the best existing active view to clone from along the path.
/// /// ```text diff --git a/substrate/client/transaction-pool/src/graph/ready.rs b/substrate/client/transaction-pool/src/graph/ready.rs index 3e52d3e4d9c0f..88986baa88ed1 100644 --- a/substrate/client/transaction-pool/src/graph/ready.rs +++ b/substrate/client/transaction-pool/src/graph/ready.rs @@ -298,8 +298,8 @@ impl ReadyTransactions { // remove from unlocks for tag in &tx.transaction.transaction.requires { if let Some(hash) = self.provided_tags.get(tag) { - if let Some(tx) = ready.get_mut(hash) { - remove_item(&mut tx.unlocks, hash); + if let Some(tx_unlocking) = ready.get_mut(hash) { + remove_item(&mut tx_unlocking.unlocks, &tx_hash); } } } @@ -788,4 +788,40 @@ mod tests { assert_eq!(it.next().as_ref().map(data), Some(7)); assert_eq!(it.next().as_ref().map(data), None); } + + #[test] + fn should_remove_tx_from_unlocks_set_of_its_parent() { + // given + let mut ready = ReadyTransactions::default(); + populate_pool(&mut ready); + + // when + let mut it = ready.get(); + let tx1 = it.next().unwrap(); + let tx2 = it.next().unwrap(); + let tx3 = it.next().unwrap(); + let tx4 = it.next().unwrap(); + let lock = ready.ready.read(); + let tx1_unlocks = &lock.get(&tx1.hash).unwrap().unlocks; + + // There are two tags provided by tx1 and required by tx2. + assert_eq!(tx1_unlocks[0], tx2.hash); + assert_eq!(tx1_unlocks[1], tx2.hash); + assert_eq!(tx1_unlocks[2], tx3.hash); + assert_eq!(tx1_unlocks[4], tx4.hash); + drop(lock); + + // then: consider tx2 invalid and remove it. + let removed = ready.remove_subtree(&[tx2.hash]); + assert_eq!(removed.len(), 2); + assert_eq!(removed[0].hash, tx2.hash); + // tx3 is removed too, since it requires tags provided by tx2. + assert_eq!(removed[1].hash, tx3.hash); + + let lock = ready.ready.read(); + let tx1_unlocks = &lock.get(&tx1.hash).unwrap().unlocks; + assert!(!tx1_unlocks.contains(&tx2.hash)); + assert!(!tx1_unlocks.contains(&tx3.hash)); + assert!(tx1_unlocks.contains(&tx4.hash)); + } } diff --git a/substrate/client/transaction-pool/tests/fatp.rs b/substrate/client/transaction-pool/tests/fatp.rs index ba676715bdc13..c1411b29dafb9 100644 --- a/substrate/client/transaction-pool/tests/fatp.rs +++ b/substrate/client/transaction-pool/tests/fatp.rs @@ -2328,12 +2328,80 @@ fn fatp_ready_light_long_fork_retracted_works() { let header02b = api.push_block_with_parent(header01b.hash(), vec![xt1.clone()], true); let header03b = api.push_block_with_parent(header02b.hash(), vec![xt2.clone()], true); + // Returns the ready transaction set of the most recent view (`header01a`).
+ let ready_iterator = pool.ready_at_light(header03b.hash()).now_or_never().unwrap(); + assert_eq!(ready_iterator.count(), 4); + + let event = new_best_block_event(&pool, Some(header01a.hash()), header01b.hash()); + block_on(pool.maintain(event)); + let mut ready_iterator = pool.ready_at_light(header03b.hash()).now_or_never().unwrap(); + let ready01 = ready_iterator.next(); + assert_eq!(ready01.unwrap().hash, api.hash_and_length(&xt3).0); + let ready02 = ready_iterator.next(); + assert_eq!(ready02.unwrap().hash, api.hash_and_length(&xt4).0); assert!(ready_iterator.next().is_none()); +} + +#[test] +fn fatp_ready_light_fallback_gets_triggered() { + sp_tracing::try_init_simple(); + + let (pool, api, _) = pool(); + api.set_nonce(api.genesis_hash(), Bob.into(), 200); + api.set_nonce(api.genesis_hash(), Charlie.into(), 200); + api.set_nonce(api.genesis_hash(), Dave.into(), 200); + api.set_nonce(api.genesis_hash(), Eve.into(), 200); + + let genesis = api.genesis_hash(); + + let xt0 = uxt(Alice, 200); + let xt1 = uxt(Bob, 200); + let xt2 = uxt(Charlie, 200); + let xt3 = uxt(Dave, 200); + let xt4 = uxt(Eve, 200); + + let submissions = vec![pool.submit_at(genesis, SOURCE, vec![xt0.clone(), xt1.clone()])]; + let results = block_on(futures::future::join_all(submissions)); + assert!(results.iter().all(|r| { r.is_ok() })); + + let header01a = api.push_block_with_parent(genesis, vec![xt4.clone()], true); + let event = new_best_block_event(&pool, Some(genesis), header01a.hash()); + block_on(pool.maintain(event)); + + let header01b = api.push_block_with_parent(genesis, vec![xt0.clone()], true); + // Call `ready_at_light` at a direct descendant of genesis, even though it was not notified as + // best or finalized. It should still return ready txs based on the most recent view processed + // by the txpool. + let ready_iterator = pool.ready_at_light(header01b.hash()).now_or_never().unwrap(); + assert_eq!(ready_iterator.count(), 2); + + let header02b = api.push_block_with_parent(header01b.hash(), vec![xt1.clone()], true); + let header03b = api.push_block_with_parent(header02b.hash(), vec![xt2.clone()], true); + + // Submit a few more txs to the pool. + let submissions = vec![pool.submit_at( + // `at` is ignored. + genesis, + SOURCE, + vec![xt2.clone(), xt3.clone()], + )]; + let results = block_on(futures::future::join_all(submissions)); + assert!(results.iter().all(|r| { r.is_ok() })); + + // Calling `ready_at_light` now on the last block of a fork, with no block notified as best, + // should still return the ready txs from the most recent view processed by the txpool, + // now including the few more txs submitted above. + let ready_iterator = pool.ready_at_light(header03b.hash()).now_or_never().unwrap(); + assert_eq!(ready_iterator.count(), 4); let event = new_best_block_event(&pool, Some(header01a.hash()), header01b.hash()); block_on(pool.maintain(event)); + // Calling `ready_at_light` on the new best block (`header03b`) should search its fork, up to + // the finalized block, for the best view; coincidentally, the only view on that route is the + // one created for the new best block `header03b`. The returned ready txs are the ones left in + // that view's pool after pruning the included txs.
let mut ready_iterator = pool.ready_at_light(header03b.hash()).now_or_never().unwrap(); let ready01 = ready_iterator.next(); assert_eq!(ready01.unwrap().hash, api.hash_and_length(&xt3).0); diff --git a/substrate/frame/babe/src/equivocation.rs b/substrate/frame/babe/src/equivocation.rs index 524ad23e58ee1..26279be897fdd 100644 --- a/substrate/frame/babe/src/equivocation.rs +++ b/substrate/frame/babe/src/equivocation.rs @@ -110,7 +110,7 @@ impl OffenceReportSystem, (EquivocationProof>, T::KeyOwnerProof)> for EquivocationReportSystem where - T: Config + pallet_authorship::Config + frame_system::offchain::CreateInherent>, + T: Config + pallet_authorship::Config + frame_system::offchain::CreateBare>, R: ReportOffence< T::AccountId, P::IdentificationTuple, @@ -132,7 +132,7 @@ where equivocation_proof: Box::new(equivocation_proof), key_owner_proof, }; - let xt = T::create_inherent(call.into()); + let xt = T::create_bare(call.into()); let res = SubmitTransaction::>::submit_transaction(xt); match res { Ok(_) => info!(target: LOG_TARGET, "Submitted equivocation report"), diff --git a/substrate/frame/babe/src/mock.rs b/substrate/frame/babe/src/mock.rs index dcfcee25e7b88..c620c3eb62fef 100644 --- a/substrate/frame/babe/src/mock.rs +++ b/substrate/frame/babe/src/mock.rs @@ -76,11 +76,11 @@ where type Extrinsic = TestXt; } -impl frame_system::offchain::CreateInherent for Test +impl frame_system::offchain::CreateBare for Test where RuntimeCall: From, { - fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + fn create_bare(call: Self::RuntimeCall) -> Self::Extrinsic { TestXt::new_bare(call) } } diff --git a/substrate/frame/beefy/src/equivocation.rs b/substrate/frame/beefy/src/equivocation.rs index 4f25cbb015f10..12fc34a6cbf81 100644 --- a/substrate/frame/beefy/src/equivocation.rs +++ b/substrate/frame/beefy/src/equivocation.rs @@ -282,7 +282,7 @@ impl EquivocationEvidenceFor { impl OffenceReportSystem, EquivocationEvidenceFor> for EquivocationReportSystem where - T: Config + pallet_authorship::Config + frame_system::offchain::CreateInherent>, + T: Config + pallet_authorship::Config + frame_system::offchain::CreateBare>, R: ReportOffence< T::AccountId, P::IdentificationTuple, @@ -298,7 +298,7 @@ where use frame_system::offchain::SubmitTransaction; let call: Call = evidence.into(); - let xt = T::create_inherent(call.into()); + let xt = T::create_bare(call.into()); let res = SubmitTransaction::>::submit_transaction(xt); match res { Ok(_) => info!(target: LOG_TARGET, "Submitted equivocation report."), diff --git a/substrate/frame/beefy/src/mock.rs b/substrate/frame/beefy/src/mock.rs index b069a62b1180e..a7d717e0b1b00 100644 --- a/substrate/frame/beefy/src/mock.rs +++ b/substrate/frame/beefy/src/mock.rs @@ -81,11 +81,11 @@ where type Extrinsic = TestXt; } -impl frame_system::offchain::CreateInherent for Test +impl frame_system::offchain::CreateBare for Test where RuntimeCall: From, { - fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + fn create_bare(call: Self::RuntimeCall) -> Self::Extrinsic { TestXt::new_bare(call) } } diff --git a/substrate/frame/benchmarking/pov/src/benchmarking.rs b/substrate/frame/benchmarking/pov/src/benchmarking.rs index d52fcc2689c4d..bf3d406d0b2b0 100644 --- a/substrate/frame/benchmarking/pov/src/benchmarking.rs +++ b/substrate/frame/benchmarking/pov/src/benchmarking.rs @@ -26,11 +26,6 @@ use frame_support::traits::UnfilteredDispatchable; use frame_system::{Pallet as System, RawOrigin}; use sp_runtime::traits::Hash; -#[cfg(feature = "std")] 
-frame_support::parameter_types! { - pub static StorageRootHash: Option> = None; -} - #[benchmarks] mod benchmarks { use super::*; @@ -397,32 +392,6 @@ mod benchmarks { } } - #[benchmark] - fn storage_root_is_the_same_every_time(i: Linear<0, 10>) { - #[cfg(feature = "std")] - let root = sp_io::storage::root(sp_runtime::StateVersion::V1); - - #[cfg(feature = "std")] - match (i, StorageRootHash::get()) { - (0, Some(_)) => panic!("StorageRootHash should be None initially"), - (0, None) => StorageRootHash::set(Some(root)), - (_, Some(r)) if r == root => {}, - (_, Some(r)) => - panic!("StorageRootHash should be the same every time: {:?} vs {:?}", r, root), - (_, None) => panic!("StorageRootHash should be Some after the first iteration"), - } - - // Also test that everything is reset correctly: - sp_io::storage::set(b"key1", b"value"); - - #[block] - { - sp_io::storage::set(b"key2", b"value"); - } - - sp_io::storage::set(b"key3", b"value"); - } - impl_benchmark_test_suite!(Pallet, super::mock::new_test_ext(), super::mock::Test,); } diff --git a/substrate/frame/benchmarking/src/v1.rs b/substrate/frame/benchmarking/src/v1.rs index b7461f7cb4783..8f516f4e17524 100644 --- a/substrate/frame/benchmarking/src/v1.rs +++ b/substrate/frame/benchmarking/src/v1.rs @@ -1008,7 +1008,8 @@ macro_rules! impl_benchmark { $( (stringify!($pov_name).as_bytes().to_vec(), $crate::__private::vec![ - $( ( stringify!($storage).as_bytes().to_vec(), + // Stringify sometimes includes spaces, depending on the Rust version. + $( ( stringify!($storage).replace(" ", "").as_bytes().to_vec(), stringify!($pov_mode).as_bytes().to_vec() ), )* ]), )* @@ -1039,6 +1040,7 @@ macro_rules! impl_benchmark { internal_repeats: u32, ) -> Result<$crate::__private::Vec<$crate::BenchmarkResult>, $crate::BenchmarkError> { // Map the input to the selected benchmark. + $crate::benchmarking::wipe_db(); let extrinsic = $crate::__private::str::from_utf8(extrinsic) .map_err(|_| "`extrinsic` is not a valid utf8 string!")?; let selected_benchmark = match extrinsic { diff --git a/substrate/frame/bounties/src/benchmarking.rs b/substrate/frame/bounties/src/benchmarking.rs index ca678057a9022..cd410e89d51b4 100644 --- a/substrate/frame/bounties/src/benchmarking.rs +++ b/substrate/frame/bounties/src/benchmarking.rs @@ -271,5 +271,29 @@ benchmarks_instance_pallet! 
{ } } + poke_deposit { + // Create a bounty + let (caller, _, _, value, reason) = setup_bounty::(0, 5); // 5 bytes description + Bounties::::propose_bounty(RawOrigin::Signed(caller.clone()).into(), value, reason)?; + let bounty_id = BountyCount::::get() - 1; + let old_deposit = T::Currency::reserved_balance(&caller); + // Modify the description to be maximum length + let max_description: Vec = vec![0; T::MaximumReasonLength::get() as usize]; + let bounded_description: BoundedVec = max_description.try_into().unwrap(); + BountyDescriptions::::insert(bounty_id, &bounded_description); + + // Ensure caller has enough balance for new deposit + let new_deposit = Bounties::::calculate_bounty_deposit(&bounded_description); + let required_balance = new_deposit.saturating_add(minimum_balance::()); + T::Currency::make_free_balance_be(&caller, required_balance); + + }: _(RawOrigin::Signed(caller.clone()), bounty_id) + verify { + let bounty = crate::Bounties::::get(bounty_id).unwrap(); + assert_eq!(bounty.bond, new_deposit); + assert_eq!(T::Currency::reserved_balance(&caller), new_deposit); + assert_last_event::(Event::DepositPoked { bounty_id, proposer: caller, old_deposit: old_deposit, new_deposit: new_deposit }.into()); + } + impl_benchmark_test_suite!(Bounties, crate::tests::ExtBuilder::default().build(), crate::tests::Test) } diff --git a/substrate/frame/bounties/src/lib.rs b/substrate/frame/bounties/src/lib.rs index e95fd6addd551..f170b9960c21b 100644 --- a/substrate/frame/bounties/src/lib.rs +++ b/substrate/frame/bounties/src/lib.rs @@ -299,6 +299,8 @@ pub mod pallet { HasActiveChildBounty, /// Too many approvals are already queued. TooManyQueued, + /// User is not the proposer of the bounty. + NotProposer, } #[pallet::event] @@ -326,6 +328,13 @@ pub mod pallet { CuratorUnassigned { bounty_id: BountyIndex }, /// A bounty curator is accepted. CuratorAccepted { bounty_id: BountyIndex, curator: T::AccountId }, + /// A bounty deposit has been poked. + DepositPoked { + bounty_id: BountyIndex, + proposer: T::AccountId, + old_deposit: BalanceOf, + new_deposit: BalanceOf, + }, } /// Number of bounty proposals that have been made. @@ -885,6 +894,34 @@ pub mod pallet { Ok(()) } + + /// Poke the deposit reserved for creating a bounty proposal. + /// + /// This can be used by accounts to update their reserved amount. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// Parameters: + /// - `bounty_id`: The bounty id for which to adjust the deposit. + /// + /// If the deposit is updated, the difference will be reserved/unreserved from the + /// proposer's account. + /// + /// The transaction is made free if the deposit is updated and paid otherwise. + /// + /// Emits `DepositPoked` if the deposit is updated. 
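+ ///
+ /// Note: any signed origin may poke the deposit of any proposed bounty; the adjusted
+ /// amount is always reserved from, or released to, the original proposer.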
+ #[pallet::call_index(10)] + #[pallet::weight(>::WeightInfo::poke_deposit())] + pub fn poke_deposit( + origin: OriginFor, + #[pallet::compact] bounty_id: BountyIndex, + ) -> DispatchResultWithPostInfo { + ensure_signed(origin)?; + + let deposit_updated = Self::poke_bounty_deposit(bounty_id)?; + + Ok(if deposit_updated { Pays::No } else { Pays::Yes }.into()) + } } #[pallet::hooks] @@ -986,8 +1023,7 @@ impl, I: 'static> Pallet { let index = BountyCount::::get(); // reserve deposit for new bounty - let bond = T::BountyDepositBase::get() + - T::DataDepositPerByte::get() * (bounded_description.len() as u32).into(); + let bond = Self::calculate_bounty_deposit(&bounded_description); T::Currency::reserve(&proposer, bond) .map_err(|_| Error::::InsufficientProposersBalance)?; @@ -1009,6 +1045,56 @@ impl, I: 'static> Pallet { Ok(()) } + + /// Helper function to calculate the bounty storage deposit. + fn calculate_bounty_deposit( + description: &BoundedVec, + ) -> BalanceOf { + T::BountyDepositBase::get().saturating_add( + T::DataDepositPerByte::get().saturating_mul((description.len() as u32).into()), + ) + } + + /// Helper function to poke the deposit reserved for proposing a bounty. + /// + /// Returns true if the deposit was updated and false otherwise. + fn poke_bounty_deposit(bounty_id: BountyIndex) -> Result { + let mut bounty = Bounties::::get(bounty_id).ok_or(Error::::InvalidIndex)?; + let bounty_description = + BountyDescriptions::::get(bounty_id).ok_or(Error::::InvalidIndex)?; + // ensure that the bounty status is proposed. + ensure!(bounty.status == BountyStatus::Proposed, Error::::UnexpectedStatus); + + let new_bond = Self::calculate_bounty_deposit(&bounty_description); + let old_bond = bounty.bond; + if new_bond == old_bond { + return Ok(false); + } + if new_bond > old_bond { + let extra = new_bond.saturating_sub(old_bond); + T::Currency::reserve(&bounty.proposer, extra)?; + } else { + let excess = old_bond.saturating_sub(new_bond); + let remaining_unreserved = T::Currency::unreserve(&bounty.proposer, excess); + if !remaining_unreserved.is_zero() { + defensive!( + "Failed to unreserve full amount. (Requested, Actual)", + (excess, excess.saturating_sub(remaining_unreserved)) + ); + } + } + bounty.bond = new_bond; + Bounties::::insert(bounty_id, &bounty); + + Self::deposit_event(Event::::DepositPoked { + bounty_id, + proposer: bounty.proposer, + old_deposit: old_bond, + new_deposit: new_bond, + }); + + Ok(true) + } } impl, I: 'static> pallet_treasury::SpendFunds for Pallet { diff --git a/substrate/frame/bounties/src/tests.rs b/substrate/frame/bounties/src/tests.rs index 4b74c4162c30b..ed6b60869135c 100644 --- a/substrate/frame/bounties/src/tests.rs +++ b/substrate/frame/bounties/src/tests.rs @@ -138,6 +138,7 @@ parameter_types! 
{ pub const CuratorDepositMax: Balance = 1_000; pub const CuratorDepositMin: Balance = 3; pub static BountyUpdatePeriod: u64 = 20; + pub static DataDepositPerByte: u64 = 1; } impl Config for Test { @@ -149,7 +150,7 @@ impl Config for Test { type CuratorDepositMax = CuratorDepositMax; type CuratorDepositMin = CuratorDepositMin; type BountyValueMinimum = ConstU64<1>; - type DataDepositPerByte = ConstU64<1>; + type DataDepositPerByte = DataDepositPerByte; type MaximumReasonLength = ConstU32<16384>; type WeightInfo = (); type ChildBountyManager = (); @@ -165,7 +166,7 @@ impl Config for Test { type CuratorDepositMax = CuratorDepositMax; type CuratorDepositMin = CuratorDepositMin; type BountyValueMinimum = ConstU64<1>; - type DataDepositPerByte = ConstU64<1>; + type DataDepositPerByte = DataDepositPerByte; type MaximumReasonLength = ConstU32<16384>; type WeightInfo = (); type ChildBountyManager = (); @@ -1481,3 +1482,360 @@ fn accept_curator_sets_update_due_correctly() { ); }); } + +#[test] +fn poke_deposit_fails_for_insufficient_balance() { + ExtBuilder::default().build_and_execute(|| { + // Create a description for the bounty + let description = b"12345".to_vec(); + let bounded_description = description.clone().try_into().unwrap(); + // Create a bounty + assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, description.clone())); + + // BountyDepositBase (80) + DataDepositPerByte (1) * description.len() (5) + let deposit = + pallet_bounties::Pallet::::calculate_bounty_deposit(&bounded_description); + + // Verify initial state + assert_eq!(Balances::reserved_balance(0), deposit); + assert_eq!(Balances::free_balance(0), 100 - deposit); + assert_eq!(last_event(), BountiesEvent::BountyProposed { index: 0 }); + assert_eq!( + pallet_bounties::Bounties::::get(0).unwrap(), + Bounty { + proposer: 0, + value: 50, + fee: 0, + curator_deposit: 0, + bond: deposit, + status: BountyStatus::Proposed, + } + ); + assert_eq!(pallet_bounties::BountyDescriptions::::get(0).unwrap(), description); + assert_eq!(pallet_bounties::BountyCount::::get(), 1); + + // Increase the DataDepositPerByte to be more than the total balance of the proposer + DataDepositPerByte::set(20); + + // Poke deposit should fail due to insufficient balance + assert_noop!( + Bounties::poke_deposit(RuntimeOrigin::signed(0), 0), + pallet_balances::Error::::InsufficientBalance + ); + }); +} + +#[test] +fn poke_deposit_fails_for_unsigned_origin() { + ExtBuilder::default().build_and_execute(|| { + assert_noop!(Bounties::poke_deposit(RuntimeOrigin::none(), 0), DispatchError::BadOrigin); + }); +} + +#[test] +fn poke_deposit_fails_for_non_existent_bounty() { + ExtBuilder::default().build_and_execute(|| { + assert_noop!( + Bounties::poke_deposit(RuntimeOrigin::signed(0), 0), + Error::::InvalidIndex + ); + }); +} + +#[test] +fn poke_deposit_fails_for_any_status_other_than_proposed() { + ExtBuilder::default().build_and_execute(|| { + let bounty_id = 0; + let proposer = 0; + let curator = 4; + let deposit = 85; + let mut bounty = Bounty { + proposer, + value: 50, + fee: 0, + curator_deposit: 0, + bond: deposit, + status: BountyStatus::Proposed, + }; + let description = b"12345".to_vec(); + assert_ok!(Bounties::propose_bounty( + RuntimeOrigin::signed(proposer), + 50, + description.clone() + )); + + // Verify initial state + assert_eq!(Balances::reserved_balance(proposer), deposit); + assert_eq!(Balances::reserved_balance(1), 0); + assert_eq!(Balances::free_balance(proposer), 100 - deposit); + assert_eq!(last_event(), 
BountiesEvent::BountyProposed { index: bounty_id }); + assert_eq!(pallet_bounties::Bounties::::get(0).unwrap(), bounty); + assert_eq!(pallet_bounties::BountyDescriptions::::get(0).unwrap(), description); + assert_eq!(pallet_bounties::BountyCount::::get(), 1); + + // Change status to approved + bounty.status = BountyStatus::Approved; + pallet_bounties::Bounties::::insert(bounty_id, &bounty); + // Poke deposit should fail due to invalid status + assert_noop!( + Bounties::poke_deposit(RuntimeOrigin::signed(proposer), bounty_id), + Error::::UnexpectedStatus + ); + + // Change status to funded + bounty.status = BountyStatus::Funded; + pallet_bounties::Bounties::::insert(bounty_id, &bounty); + // Poke deposit should fail due to invalid status + assert_noop!( + Bounties::poke_deposit(RuntimeOrigin::signed(proposer), bounty_id), + Error::::UnexpectedStatus + ); + + // Change status to curator proposed + bounty.status = BountyStatus::CuratorProposed { curator }; + pallet_bounties::Bounties::::insert(bounty_id, &bounty); + // Poke deposit should fail due to invalid status + assert_noop!( + Bounties::poke_deposit(RuntimeOrigin::signed(proposer), bounty_id), + Error::::UnexpectedStatus + ); + + // Change status to active + bounty.status = BountyStatus::Active { curator, update_due: 24 }; + pallet_bounties::Bounties::::insert(bounty_id, &bounty); + // Poke deposit should fail due to invalid status + assert_noop!( + Bounties::poke_deposit(RuntimeOrigin::signed(proposer), bounty_id), + Error::::UnexpectedStatus + ); + + // Change status to PendingPayout + bounty.status = BountyStatus::PendingPayout { curator, beneficiary: 0, unlock_at: 24 }; + pallet_bounties::Bounties::::insert(bounty_id, &bounty); + // Poke deposit should fail due to invalid status + assert_noop!( + Bounties::poke_deposit(RuntimeOrigin::signed(proposer), bounty_id), + Error::::UnexpectedStatus + ); + + // Change status to ApprovedWithCurator + bounty.status = BountyStatus::ApprovedWithCurator { curator }; + pallet_bounties::Bounties::::insert(bounty_id, &bounty); + // Poke deposit should fail due to invalid status + assert_noop!( + Bounties::poke_deposit(RuntimeOrigin::signed(proposer), bounty_id), + Error::::UnexpectedStatus + ); + }); +} + +#[test] +fn poke_deposit_works_and_charges_fee_for_unchanged_deposit() { + ExtBuilder::default().build_and_execute(|| { + let bounty_id = 0; + let proposer = 0; + let description = b"12345".to_vec(); + let bounded_description = description.clone().try_into().unwrap(); + let deposit = Bounties::calculate_bounty_deposit(&bounded_description); + let bounty = Bounty { + proposer, + value: 50, + fee: 0, + curator_deposit: 0, + bond: deposit, + status: BountyStatus::Proposed, + }; + assert_ok!(Bounties::propose_bounty( + RuntimeOrigin::signed(proposer), + 50, + description.clone() + )); + + // Verify initial state + assert_eq!(Balances::reserved_balance(proposer), deposit); + assert_eq!(Balances::free_balance(proposer), 100 - deposit); + assert_eq!(last_event(), BountiesEvent::BountyProposed { index: bounty_id }); + assert_eq!(pallet_bounties::Bounties::::get(0).unwrap(), bounty); + assert_eq!(pallet_bounties::BountyDescriptions::::get(0).unwrap(), description); + assert_eq!(pallet_bounties::BountyCount::::get(), 1); + + // Poke deposit should charge fee + let result = Bounties::poke_deposit(RuntimeOrigin::signed(proposer), bounty_id); + assert_ok!(result.as_ref()); + assert_eq!(result.unwrap(), Pays::Yes.into()); + + // Verify final state + assert_eq!(Balances::reserved_balance(proposer), deposit); + 
assert_eq!(pallet_bounties::Bounties::<Test>::get(0).unwrap(), bounty); + assert_eq!(pallet_bounties::BountyDescriptions::<Test>::get(0).unwrap(), description); + assert_eq!(pallet_bounties::BountyCount::<Test>::get(), 1); + assert_eq!(last_event(), BountiesEvent::BountyProposed { index: bounty_id }); + }); +} + +#[test] +fn poke_deposit_works_for_deposit_increase() { + ExtBuilder::default().build_and_execute(|| { + let bounty_id = 0; + let proposer = 0; + let description = b"12345".to_vec(); + let bounded_description = description.clone().try_into().unwrap(); + let deposit = Bounties::calculate_bounty_deposit(&bounded_description); + let mut bounty = Bounty { + proposer, + value: 50, + fee: 0, + curator_deposit: 0, + bond: deposit, + status: BountyStatus::Proposed, + }; + assert_ok!(Bounties::propose_bounty( + RuntimeOrigin::signed(proposer), + 50, + description.clone() + )); + + // Verify initial state + assert_eq!(Balances::reserved_balance(proposer), deposit); + assert_eq!(Balances::free_balance(proposer), 100 - deposit); + assert_eq!(last_event(), BountiesEvent::BountyProposed { index: bounty_id }); + assert_eq!(pallet_bounties::Bounties::<Test>::get(0).unwrap(), bounty); + assert_eq!(pallet_bounties::BountyDescriptions::<Test>::get(0).unwrap(), description); + assert_eq!(pallet_bounties::BountyCount::<Test>::get(), 1); + + // Increase the DataDepositPerByte + DataDepositPerByte::set(2); + // BountyDepositBase (80) + DataDepositPerByte (2) * description.len() (5) + let new_deposit = Bounties::calculate_bounty_deposit(&bounded_description); + + // Poke deposit should increase the reserve + let result = Bounties::poke_deposit(RuntimeOrigin::signed(proposer), bounty_id); + assert_ok!(result.as_ref()); + assert_eq!(result.unwrap(), Pays::No.into()); + + // Verify final state + assert_eq!(Balances::reserved_balance(proposer), new_deposit); + assert_eq!(Balances::free_balance(proposer), 100 - new_deposit); + assert_eq!( + last_event(), + BountiesEvent::DepositPoked { bounty_id, proposer, old_deposit: deposit, new_deposit } + ); + bounty.bond = new_deposit; + assert_eq!(pallet_bounties::Bounties::<Test>::get(0).unwrap(), bounty); + assert_eq!(pallet_bounties::BountyDescriptions::<Test>::get(0).unwrap(), description); + assert_eq!(pallet_bounties::BountyCount::<Test>::get(), 1); + }); +} + +#[test] +fn poke_deposit_works_for_deposit_decrease() { + ExtBuilder::default().build_and_execute(|| { + let bounty_id = 0; + let proposer = 0; + let description = b"12345".to_vec(); + let bounded_description = description.clone().try_into().unwrap(); + DataDepositPerByte::set(2); + let deposit = Bounties::calculate_bounty_deposit(&bounded_description); + let mut bounty = Bounty { + proposer, + value: 50, + fee: 0, + curator_deposit: 0, + bond: deposit, + status: BountyStatus::Proposed, + }; + + assert_ok!(Bounties::propose_bounty( + RuntimeOrigin::signed(proposer), + 50, + description.clone() + )); + + // Verify initial state + assert_eq!(Balances::reserved_balance(proposer), deposit); + assert_eq!(Balances::free_balance(proposer), 100 - deposit); + assert_eq!(last_event(), BountiesEvent::BountyProposed { index: bounty_id }); + assert_eq!(pallet_bounties::Bounties::<Test>::get(0).unwrap(), bounty); + assert_eq!(pallet_bounties::BountyDescriptions::<Test>::get(0).unwrap(), description); + assert_eq!(pallet_bounties::BountyCount::<Test>::get(), 1); + + // Decrease the DataDepositPerByte + DataDepositPerByte::set(1); + // BountyDepositBase (80) + DataDepositPerByte (1) * description.len() (5) + let new_deposit = Bounties::calculate_bounty_deposit(&bounded_description); + + // Poke deposit should decrease the reserve + let result = Bounties::poke_deposit(RuntimeOrigin::signed(proposer), bounty_id); + assert_ok!(result.as_ref()); + assert_eq!(result.unwrap(), Pays::No.into()); + + // Verify final state + assert_eq!(Balances::reserved_balance(proposer), new_deposit); + assert_eq!(Balances::free_balance(proposer), 100 - new_deposit); + assert_eq!( + last_event(), + BountiesEvent::DepositPoked { bounty_id, proposer, old_deposit: deposit, new_deposit } + ); + bounty.bond = new_deposit; + assert_eq!(pallet_bounties::Bounties::<Test>::get(0).unwrap(), bounty); + assert_eq!(pallet_bounties::BountyDescriptions::<Test>::get(0).unwrap(), description); + assert_eq!(pallet_bounties::BountyCount::<Test>::get(), 1); + }); +} + +#[test] +fn poke_deposit_works_for_non_proposer() { + ExtBuilder::default().build_and_execute(|| { + let bounty_id = 0; + let proposer = 0; + let non_proposer = 1; + let description = b"12345".to_vec(); + let bounded_description = description.clone().try_into().unwrap(); + + DataDepositPerByte::set(2); + let deposit = Bounties::calculate_bounty_deposit(&bounded_description); + let mut bounty = Bounty { + proposer, + value: 50, + fee: 0, + curator_deposit: 0, + bond: deposit, + status: BountyStatus::Proposed, + }; + assert_ok!(Bounties::propose_bounty( + RuntimeOrigin::signed(proposer), + 50, + description.clone() + )); + + // Verify initial state + assert_eq!(Balances::reserved_balance(proposer), deposit); + assert_eq!(Balances::free_balance(proposer), 100 - deposit); + assert_eq!(last_event(), BountiesEvent::BountyProposed { index: bounty_id }); + assert_eq!(pallet_bounties::Bounties::<Test>::get(0).unwrap(), bounty); + assert_eq!(pallet_bounties::BountyDescriptions::<Test>::get(0).unwrap(), description); + assert_eq!(pallet_bounties::BountyCount::<Test>::get(), 1); + + // Decrease the DataDepositPerByte + DataDepositPerByte::set(1); + // BountyDepositBase (80) + DataDepositPerByte (1) * description.len() (5) + let new_deposit = Bounties::calculate_bounty_deposit(&bounded_description); + + // Poke deposit should decrease the reserve + let result = Bounties::poke_deposit(RuntimeOrigin::signed(non_proposer), bounty_id); + assert_ok!(result.as_ref()); + assert_eq!(result.unwrap(), Pays::No.into()); + + // Verify final state + assert_eq!(Balances::reserved_balance(proposer), new_deposit); + assert_eq!(Balances::free_balance(proposer), 100 - new_deposit); + assert_eq!( + last_event(), + BountiesEvent::DepositPoked { bounty_id, proposer, old_deposit: deposit, new_deposit } + ); + bounty.bond = new_deposit; + assert_eq!(pallet_bounties::Bounties::<Test>::get(0).unwrap(), bounty); + assert_eq!(pallet_bounties::BountyDescriptions::<Test>::get(0).unwrap(), description); + assert_eq!(pallet_bounties::BountyCount::<Test>::get(), 1); + }); +} diff --git a/substrate/frame/bounties/src/weights.rs b/substrate/frame/bounties/src/weights.rs index 4f0c50f519a13..e8777490cf062 100644 --- a/substrate/frame/bounties/src/weights.rs +++ b/substrate/frame/bounties/src/weights.rs @@ -35,9 +35,9 @@ //! Autogenerated weights for `pallet_bounties` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2025-02-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2025-04-30, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `4563561839a5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `b5f9d80cc353`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //!
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: `1024` // Executed Command: @@ -58,8 +58,7 @@ // --no-storage-info // --no-min-squares // --no-median-slopes -// --genesis-builder-policy=none -// --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic,pallet_nomination_pools,pallet_remark,pallet_transaction_storage,pallet_election_provider_multi_block,pallet_election_provider_multi_block::signed,pallet_election_provider_multi_block::unsigned,pallet_election_provider_multi_block::verifier +// --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic,pallet_nomination_pools,pallet_remark,pallet_transaction_storage #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -84,6 +83,7 @@ pub trait WeightInfo { fn close_bounty_active() -> Weight; fn extend_bounty_expiry() -> Weight; fn spend_funds(b: u32, ) -> Weight; + fn poke_deposit() -> Weight; } /// Weights for `pallet_bounties` using the Substrate node and recommended hardware. @@ -100,12 +100,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `d` is `[0, 300]`. fn propose_bounty(d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `51` + // Measured: `342` // Estimated: `3593` - // Minimum execution time: 22_358_000 picoseconds. - Weight::from_parts(23_077_136, 3593) - // Standard Error: 95 - .saturating_add(Weight::from_parts(304, 0).saturating_mul(d.into())) + // Minimum execution time: 26_048_000 picoseconds. + Weight::from_parts(27_397_398, 3593) + // Standard Error: 189 + .saturating_add(Weight::from_parts(1_244, 0).saturating_mul(d.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -115,10 +115,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Bounties::BountyApprovals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) fn approve_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `177` + // Measured: `434` // Estimated: `3642` - // Minimum execution time: 12_172_000 picoseconds. - Weight::from_parts(12_752_000, 3642) + // Minimum execution time: 13_955_000 picoseconds. + Weight::from_parts(14_367_000, 3642) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -126,10 +126,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) fn propose_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `196` + // Measured: `454` // Estimated: `3642` - // Minimum execution time: 12_867_000 picoseconds. - Weight::from_parts(13_388_000, 3642) + // Minimum execution time: 15_789_000 picoseconds. + Weight::from_parts(16_314_000, 3642) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -139,10 +139,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Bounties::BountyApprovals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) fn approve_bounty_with_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `176` + // Measured: `434` // Estimated: `3642` - // Minimum execution time: 14_120_000 picoseconds. - Weight::from_parts(14_539_000, 3642) + // Minimum execution time: 18_509_000 picoseconds. 
+ Weight::from_parts(19_084_000, 3642) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -152,10 +152,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn unassign_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `334` + // Measured: `630` // Estimated: `3642` - // Minimum execution time: 37_561_000 picoseconds. - Weight::from_parts(38_471_000, 3642) + // Minimum execution time: 41_340_000 picoseconds. + Weight::from_parts(42_360_000, 3642) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -165,10 +165,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn accept_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `330` + // Measured: `626` // Estimated: `3642` - // Minimum execution time: 28_085_000 picoseconds. - Weight::from_parts(29_224_000, 3642) + // Minimum execution time: 31_637_000 picoseconds. + Weight::from_parts(32_719_000, 3642) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -178,10 +178,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) fn award_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `232` + // Measured: `638` // Estimated: `3642` - // Minimum execution time: 14_694_000 picoseconds. - Weight::from_parts(15_430_000, 3642) + // Minimum execution time: 20_654_000 picoseconds. + Weight::from_parts(21_297_000, 3642) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -199,10 +199,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) fn claim_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `469` + // Measured: `1036` // Estimated: `8799` - // Minimum execution time: 106_782_000 picoseconds. - Weight::from_parts(109_479_000, 8799) + // Minimum execution time: 113_370_000 picoseconds. + Weight::from_parts(115_652_000, 8799) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(8_u64)) } @@ -216,10 +216,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) fn close_bounty_proposed() -> Weight { // Proof Size summary in bytes: - // Measured: `227` + // Measured: `682` // Estimated: `3642` - // Minimum execution time: 36_755_000 picoseconds. - Weight::from_parts(37_464_000, 3642) + // Minimum execution time: 44_456_000 picoseconds. + Weight::from_parts(45_490_000, 3642) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -235,10 +235,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `ChildBounties::ParentTotalChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) fn close_bounty_active() -> Weight { // Proof Size summary in bytes: - // Measured: `437` + // Measured: `952` // Estimated: `6196` - // Minimum execution time: 74_374_000 picoseconds. 
- Weight::from_parts(75_148_000, 6196) + // Minimum execution time: 80_493_000 picoseconds. + Weight::from_parts(82_826_000, 6196) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -246,10 +246,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) fn extend_bounty_expiry() -> Weight { // Proof Size summary in bytes: - // Measured: `232` + // Measured: `490` // Estimated: `3642` - // Minimum execution time: 13_524_000 picoseconds. - Weight::from_parts(13_905_000, 3642) + // Minimum execution time: 16_142_000 picoseconds. + Weight::from_parts(17_033_000, 3642) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -262,18 +262,33 @@ impl WeightInfo for SubstrateWeight { /// The range of component `b` is `[0, 100]`. fn spend_funds(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0 + b * (293 ±0)` + // Measured: `71 + b * (298 ±0)` // Estimated: `1887 + b * (5206 ±0)` - // Minimum execution time: 1_076_000 picoseconds. - Weight::from_parts(1_160_000, 1887) - // Standard Error: 8_955 - .saturating_add(Weight::from_parts(35_597_841, 0).saturating_mul(b.into())) + // Minimum execution time: 3_407_000 picoseconds. + Weight::from_parts(3_479_000, 1887) + // Standard Error: 7_458 + .saturating_add(Weight::from_parts(34_548_145, 0).saturating_mul(b.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(b.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(b.into()))) .saturating_add(Weight::from_parts(0, 5206).saturating_mul(b.into())) } + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyDescriptions` (r:1 w:0) + /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn poke_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `839` + // Estimated: `3779` + // Minimum execution time: 32_478_000 picoseconds. + Weight::from_parts(34_043_000, 3779) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } } // For backwards compatibility and tests. @@ -289,12 +304,12 @@ impl WeightInfo for () { /// The range of component `d` is `[0, 300]`. fn propose_bounty(d: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `51` + // Measured: `342` // Estimated: `3593` - // Minimum execution time: 22_358_000 picoseconds. - Weight::from_parts(23_077_136, 3593) - // Standard Error: 95 - .saturating_add(Weight::from_parts(304, 0).saturating_mul(d.into())) + // Minimum execution time: 26_048_000 picoseconds. 
+ Weight::from_parts(27_397_398, 3593) + // Standard Error: 189 + .saturating_add(Weight::from_parts(1_244, 0).saturating_mul(d.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -304,10 +319,10 @@ impl WeightInfo for () { /// Proof: `Bounties::BountyApprovals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) fn approve_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `177` + // Measured: `434` // Estimated: `3642` - // Minimum execution time: 12_172_000 picoseconds. - Weight::from_parts(12_752_000, 3642) + // Minimum execution time: 13_955_000 picoseconds. + Weight::from_parts(14_367_000, 3642) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -315,10 +330,10 @@ impl WeightInfo for () { /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) fn propose_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `196` + // Measured: `454` // Estimated: `3642` - // Minimum execution time: 12_867_000 picoseconds. - Weight::from_parts(13_388_000, 3642) + // Minimum execution time: 15_789_000 picoseconds. + Weight::from_parts(16_314_000, 3642) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -328,10 +343,10 @@ impl WeightInfo for () { /// Proof: `Bounties::BountyApprovals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) fn approve_bounty_with_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `176` + // Measured: `434` // Estimated: `3642` - // Minimum execution time: 14_120_000 picoseconds. - Weight::from_parts(14_539_000, 3642) + // Minimum execution time: 18_509_000 picoseconds. + Weight::from_parts(19_084_000, 3642) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -341,10 +356,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn unassign_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `334` + // Measured: `630` // Estimated: `3642` - // Minimum execution time: 37_561_000 picoseconds. - Weight::from_parts(38_471_000, 3642) + // Minimum execution time: 41_340_000 picoseconds. + Weight::from_parts(42_360_000, 3642) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -354,10 +369,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn accept_curator() -> Weight { // Proof Size summary in bytes: - // Measured: `330` + // Measured: `626` // Estimated: `3642` - // Minimum execution time: 28_085_000 picoseconds. - Weight::from_parts(29_224_000, 3642) + // Minimum execution time: 31_637_000 picoseconds. + Weight::from_parts(32_719_000, 3642) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -367,10 +382,10 @@ impl WeightInfo for () { /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) fn award_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `232` + // Measured: `638` // Estimated: `3642` - // Minimum execution time: 14_694_000 picoseconds. 
- Weight::from_parts(15_430_000, 3642) + // Minimum execution time: 20_654_000 picoseconds. + Weight::from_parts(21_297_000, 3642) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -388,10 +403,10 @@ impl WeightInfo for () { /// Proof: `ChildBounties::ParentChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) fn claim_bounty() -> Weight { // Proof Size summary in bytes: - // Measured: `469` + // Measured: `1036` // Estimated: `8799` - // Minimum execution time: 106_782_000 picoseconds. - Weight::from_parts(109_479_000, 8799) + // Minimum execution time: 113_370_000 picoseconds. + Weight::from_parts(115_652_000, 8799) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(8_u64)) } @@ -405,10 +420,10 @@ impl WeightInfo for () { /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) fn close_bounty_proposed() -> Weight { // Proof Size summary in bytes: - // Measured: `227` + // Measured: `682` // Estimated: `3642` - // Minimum execution time: 36_755_000 picoseconds. - Weight::from_parts(37_464_000, 3642) + // Minimum execution time: 44_456_000 picoseconds. + Weight::from_parts(45_490_000, 3642) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -424,10 +439,10 @@ impl WeightInfo for () { /// Proof: `ChildBounties::ParentTotalChildBounties` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) fn close_bounty_active() -> Weight { // Proof Size summary in bytes: - // Measured: `437` + // Measured: `952` // Estimated: `6196` - // Minimum execution time: 74_374_000 picoseconds. - Weight::from_parts(75_148_000, 6196) + // Minimum execution time: 80_493_000 picoseconds. + Weight::from_parts(82_826_000, 6196) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -435,10 +450,10 @@ impl WeightInfo for () { /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) fn extend_bounty_expiry() -> Weight { // Proof Size summary in bytes: - // Measured: `232` + // Measured: `490` // Estimated: `3642` - // Minimum execution time: 13_524_000 picoseconds. - Weight::from_parts(13_905_000, 3642) + // Minimum execution time: 16_142_000 picoseconds. + Weight::from_parts(17_033_000, 3642) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -451,16 +466,31 @@ impl WeightInfo for () { /// The range of component `b` is `[0, 100]`. fn spend_funds(b: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `0 + b * (293 ±0)` + // Measured: `71 + b * (298 ±0)` // Estimated: `1887 + b * (5206 ±0)` - // Minimum execution time: 1_076_000 picoseconds. - Weight::from_parts(1_160_000, 1887) - // Standard Error: 8_955 - .saturating_add(Weight::from_parts(35_597_841, 0).saturating_mul(b.into())) + // Minimum execution time: 3_407_000 picoseconds. 
+ Weight::from_parts(3_479_000, 1887) + // Standard Error: 7_458 + .saturating_add(Weight::from_parts(34_548_145, 0).saturating_mul(b.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(b.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(b.into()))) .saturating_add(Weight::from_parts(0, 5206).saturating_mul(b.into())) } + /// Storage: `Bounties::Bounties` (r:1 w:1) + /// Proof: `Bounties::Bounties` (`max_values`: None, `max_size`: Some(177), added: 2652, mode: `MaxEncodedLen`) + /// Storage: `Bounties::BountyDescriptions` (r:1 w:0) + /// Proof: `Bounties::BountyDescriptions` (`max_values`: None, `max_size`: Some(314), added: 2789, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn poke_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `839` + // Estimated: `3779` + // Minimum execution time: 32_478_000 picoseconds. + Weight::from_parts(34_043_000, 3779) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } } diff --git a/substrate/frame/election-provider-multi-block/src/mock/mod.rs b/substrate/frame/election-provider-multi-block/src/mock/mod.rs index 1a208c8bb46a7..0513e01270141 100644 --- a/substrate/frame/election-provider-multi-block/src/mock/mod.rs +++ b/substrate/frame/election-provider-multi-block/src/mock/mod.rs @@ -309,11 +309,11 @@ where type Extrinsic = Extrinsic; } -impl frame_system::offchain::CreateInherent for Runtime +impl frame_system::offchain::CreateBare for Runtime where RuntimeCall: From, { - fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + fn create_bare(call: Self::RuntimeCall) -> Self::Extrinsic { Extrinsic::new_bare(call) } } diff --git a/substrate/frame/election-provider-multi-block/src/unsigned/miner.rs b/substrate/frame/election-provider-multi-block/src/unsigned/miner.rs index bafc78cd9d6ed..bef8f86cfa1bc 100644 --- a/substrate/frame/election-provider-multi-block/src/unsigned/miner.rs +++ b/substrate/frame/election-provider-multi-block/src/unsigned/miner.rs @@ -785,7 +785,7 @@ impl OffchainWorkerMiner { "unsigned::ocw-miner", "miner submitting a solution as an unsigned transaction" ); - let xt = T::create_inherent(call.into()); + let xt = T::create_bare(call.into()); frame_system::offchain::SubmitTransaction::>::submit_transaction(xt) .map(|_| { sublog!( @@ -1036,7 +1036,7 @@ mod trimming { (40, Support { total: 40, voters: vec![(40, 40)] }) ], vec![ - (30, Support { total: 11, voters: vec![(7, 7), (5, 2), (6, 2)] }), + (30, Support { total: 11, voters: vec![(5, 2), (6, 2), (7, 7)] }), (40, Support { total: 7, voters: vec![(5, 3), (6, 4)] }) ], vec![(40, Support { total: 9, voters: vec![(2, 2), (3, 3), (4, 4)] })] @@ -1080,7 +1080,7 @@ mod trimming { // page only. 
vec![(40, Support { total: 40, voters: vec![(40, 40)] })], vec![ - (30, Support { total: 11, voters: vec![(7, 7), (5, 2), (6, 2)] }), + (30, Support { total: 11, voters: vec![(5, 2), (6, 2), (7, 7)] }), (40, Support { total: 7, voters: vec![(5, 3), (6, 4)] }) ], vec![(40, Support { total: 9, voters: vec![(2, 2), (3, 3), (4, 4)] })] @@ -1122,7 +1122,7 @@ mod trimming { vec![ vec![], vec![ - (30, Support { total: 11, voters: vec![(7, 7), (5, 2), (6, 2)] }), + (30, Support { total: 11, voters: vec![(5, 2), (6, 2), (7, 7)] }), (40, Support { total: 7, voters: vec![(5, 3), (6, 4)] }) ], vec![(40, Support { total: 9, voters: vec![(2, 2), (3, 3), (4, 4)] })] @@ -1160,11 +1160,11 @@ mod trimming { assert!(VerifierPallet::queued_score().is_some()); assert_eq!( - dbg!(supports), + supports, vec![ vec![], vec![ - (30, Support { total: 9, voters: vec![(7, 7), (6, 2)] }), + (30, Support { total: 9, voters: vec![(6, 2), (7, 7)] }), (40, Support { total: 4, voters: vec![(6, 4)] }) ], vec![(40, Support { total: 9, voters: vec![(2, 2), (3, 3), (4, 4)] })] @@ -1203,7 +1203,7 @@ mod trimming { (40, Support { total: 40, voters: vec![(40, 40)] }) ], vec![ - (30, Support { total: 9, voters: vec![(7, 7), (6, 2)] }), + (30, Support { total: 9, voters: vec![(6, 2), (7, 7)] }), (40, Support { total: 9, voters: vec![(5, 5), (6, 4)] }) /* notice how * 5's stake is * re-distributed @@ -1287,7 +1287,7 @@ mod trimming { (40, Support { total: 40, voters: vec![(40, 40)] }) ], vec![ - (30, Support { total: 14, voters: vec![(5, 5), (7, 7), (6, 2)] }), + (30, Support { total: 14, voters: vec![(5, 5), (6, 2), (7, 7)] }), (40, Support { total: 4, voters: vec![(6, 4)] }) ], vec![(40, Support { total: 7, voters: vec![(3, 3), (4, 4)] })] @@ -1412,12 +1412,12 @@ mod base_miner { assert_eq!( supports, vec![vec![ - (10, Support { total: 30, voters: vec![(1, 10), (8, 10), (4, 5), (5, 5)] }), + (10, Support { total: 30, voters: vec![(1, 10), (4, 5), (5, 5), (8, 10)] }), ( 40, Support { total: 40, - voters: vec![(2, 10), (3, 10), (6, 10), (4, 5), (5, 5)] + voters: vec![(2, 10), (3, 10), (4, 5), (5, 5), (6, 10)] } ) ]] @@ -1472,7 +1472,7 @@ mod base_miner { // voter 6 (index 1) is backing 40 (index 3). 
// voter 8 (index 3) is backing 10 (index 0) votes1: vec![(1, 3), (3, 0)], - // voter 5 (index 0) is backing 40 (index 10) and 10 (index 0) + // voter 5 (index 0) is backing 40 (index 3) and 10 (index 0) votes2: vec![(0, [(0, PerU16::from_parts(32768))], 3)], ..Default::default() }, @@ -1500,8 +1500,8 @@ vec![ // page0, supports from voters 5, 6, 7, 8 vec![ - (10, Support { total: 15, voters: vec![(8, 10), (5, 5)] }), - (40, Support { total: 15, voters: vec![(6, 10), (5, 5)] }) + (10, Support { total: 15, voters: vec![(5, 5), (8, 10)] }), + (40, Support { total: 15, voters: vec![(5, 5), (6, 10)] }) ], // page1 supports from voters 1, 2, 3, 4 vec![ @@ -1591,13 +1591,13 @@ ], // page 1: 5, 6, 7, 8 vec![ - (30, Support { total: 20, voters: vec![(7, 10), (5, 5), (6, 5)] }), + (30, Support { total: 20, voters: vec![(5, 5), (6, 5), (7, 10)] }), (40, Support { total: 10, voters: vec![(5, 5), (6, 5)] }) ], // page 2: 1, 2, 3, 4 vec![ (30, Support { total: 5, voters: vec![(2, 5)] }), - (40, Support { total: 25, voters: vec![(3, 10), (4, 10), (2, 5)] }) + (40, Support { total: 25, voters: vec![(2, 5), (3, 10), (4, 10)] }) ] ] .try_from_unbounded_paged() @@ -1761,8 +1761,8 @@ mod base_miner { vec![], // supports from voters 5, 6, 7, 8 vec![ - (10, Support { total: 15, voters: vec![(8, 10), (5, 5)] }), - (40, Support { total: 15, voters: vec![(6, 10), (5, 5)] }) + (10, Support { total: 15, voters: vec![(5, 5), (8, 10)] }), + (40, Support { total: 15, voters: vec![(5, 5), (6, 10)] }) ], // supports from voters 1, 2, 3, 4 vec![ diff --git a/substrate/frame/election-provider-multi-block/src/unsigned/mod.rs b/substrate/frame/election-provider-multi-block/src/unsigned/mod.rs index 18ba2370683ce..5ab23b9e965f8 100644 --- a/substrate/frame/election-provider-multi-block/src/unsigned/mod.rs +++ b/substrate/frame/election-provider-multi-block/src/unsigned/mod.rs @@ -89,7 +89,7 @@ mod pallet { CommonError, }; use frame_support::pallet_prelude::*; - use frame_system::{offchain::CreateInherent, pallet_prelude::*}; + use frame_system::{offchain::CreateBare, pallet_prelude::*}; use sp_runtime::traits::SaturatedConversion; use sp_std::prelude::*; @@ -104,7 +104,7 @@ mod pallet { #[pallet::config] #[pallet::disable_frame_system_supertrait_check] - pub trait Config: crate::Config + CreateInherent> { + pub trait Config: crate::Config + CreateBare> { /// The repeat threshold of the offchain worker. /// /// For example, if it is 5, that means that at least 5 blocks will elapse between attempts diff --git a/substrate/frame/election-provider-multi-block/src/verifier/impls.rs b/substrate/frame/election-provider-multi-block/src/verifier/impls.rs index 3ab08753cfac4..acc57475226a3 100644 --- a/substrate/frame/election-provider-multi-block/src/verifier/impls.rs +++ b/substrate/frame/election-provider-multi-block/src/verifier/impls.rs @@ -898,7 +898,8 @@ pub fn feasibility_check_page_inner_with_snapshot( let voter_index = helpers::voter_index_fn_usize::(&cache); // Then convert solution -> assignment. This will fail if any of the indices are - // gibberish. + // gibberish. It will also ensure each assignment (voter) is unique, and all targets within it + // are unique.
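The uniqueness guarantee this comment describes comes from the `single_page.rs` change further down, which collects assignments into a `BTreeMap` keyed by voter index instead of pushing into a `Vec`. A minimal sketch of the idea, with illustrative names rather than the macro-generated code:

```rust
use std::collections::BTreeMap;

// Reject any solution in which the same voter index appears twice, the way
// the generated `into_assignment` now does via `BTreeMap::insert`.
fn dedup_voters(votes: &[(u32, u16)]) -> Result<BTreeMap<u32, u16>, &'static str> {
    let mut assignments = BTreeMap::new();
    for &(voter, target) in votes {
        if assignments.insert(voter, target).is_some() {
            return Err("DuplicateVoter");
        }
    }
    Ok(assignments)
}

fn main() {
    assert!(dedup_voters(&[(0, 0), (1, 0)]).is_ok());
    assert_eq!(dedup_voters(&[(0, 0), (0, 1)]), Err("DuplicateVoter"));
}
```

Iterating the map yields assignments sorted by voter index, which is consistent with the re-ordered `voters` lists in the test hunks above.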
let assignments = partial_solution .into_assignment(voter_at, target_at) .map_err::(Into::into)?; diff --git a/substrate/frame/election-provider-multi-block/src/verifier/tests.rs b/substrate/frame/election-provider-multi-block/src/verifier/tests.rs index 9dfc056881417..08f67f27d5fb5 100644 --- a/substrate/frame/election-provider-multi-block/src/verifier/tests.rs +++ b/substrate/frame/election-provider-multi-block/src/verifier/tests.rs @@ -28,7 +28,7 @@ use frame_election_provider_support::Support; use frame_support::{assert_noop, assert_ok}; use sp_core::bounded_vec; use sp_npos_elections::ElectionScore; -use sp_runtime::{traits::Bounded, Perbill}; +use sp_runtime::{traits::Bounded, PerU16, Perbill}; mod feasibility_check { use super::*; @@ -189,6 +189,52 @@ mod feasibility_check { }) } + #[test] + fn prevents_duplicate_voter_index() { + ExtBuilder::verifier().pages(1).build_and_execute(|| { + roll_to_snapshot_created(); + + // let's build a manual, bogus solution with duplicate voters, on top of page 0 of + // snapshot (see `mock/staking.rs`). + let faulty_page = TestNposSolution { + // voter index 0 is giving 100% of stake to target index 0 + votes1: vec![(0, 0)], + // and again 50% to target index 0 and target index 1. Both votes are "valid", + // as in they are in the snapshot. + votes2: vec![(0, [(0, PerU16::from_percent(50))], 1)], + ..Default::default() + }; + + assert_noop!( + VerifierPallet::feasibility_check_page_inner(faulty_page, 0), + FeasibilityError::NposElection( + frame_election_provider_support::Error::DuplicateVoter + ), + ); + }); + } + + #[test] + fn prevents_duplicate_target_index() { + ExtBuilder::verifier().pages(1).build_and_execute(|| { + roll_to_snapshot_created(); + + // A bad solution with duplicate targets for a single voter in votes2. + let faulty_page = TestNposSolution { + // 50% to 0, and then the rest to 0 again, not valid. 
+ votes2: vec![(0, [(0, PerU16::from_percent(50))], 0)], + ..Default::default() + }; + + assert_noop!( + VerifierPallet::feasibility_check_page_inner(faulty_page, 0), + FeasibilityError::NposElection( + frame_election_provider_support::Error::DuplicateTarget + ), + ); + }); + } + #[test] fn heuristic_max_backers_per_winner_per_page() { ExtBuilder::verifier().max_backers_per_winner(2).build_and_execute(|| { diff --git a/substrate/frame/election-provider-multi-phase/Cargo.toml b/substrate/frame/election-provider-multi-phase/Cargo.toml index b67756ad20972..ec8adc28bc6f2 100644 --- a/substrate/frame/election-provider-multi-phase/Cargo.toml +++ b/substrate/frame/election-provider-multi-phase/Cargo.toml @@ -34,6 +34,11 @@ frame-benchmarking = { optional = true, workspace = true } rand = { features = ["alloc", "small_rng"], optional = true, workspace = true } strum = { features = ["derive"], optional = true, workspace = true } +# optional for remote testing +hex = { workspace = true, default-features = true, optional = true } +remote-externalities = { workspace = true, default-features = true, optional = true } +tokio = { features = ["macros"], workspace = true, default-features = true, optional = true } + [dev-dependencies] frame-benchmarking = { workspace = true, default-features = true } pallet-balances = { workspace = true, default-features = true } @@ -42,7 +47,9 @@ rand = { workspace = true, default-features = true } sp-npos-elections = { workspace = true } sp-tracing = { workspace = true, default-features = true } + [features] +remote-mining = ["hex", "remote-externalities", "tokio"] default = ["std"] std = [ "codec/std", diff --git a/substrate/frame/election-provider-multi-phase/src/lib.rs b/substrate/frame/election-provider-multi-phase/src/lib.rs index 33e7777665762..f657475780bd9 100644 --- a/substrate/frame/election-provider-multi-phase/src/lib.rs +++ b/substrate/frame/election-provider-multi-phase/src/lib.rs @@ -257,7 +257,7 @@ use frame_support::{ weights::Weight, DefaultNoBound, EqNoBound, PartialEqNoBound, }; -use frame_system::{ensure_none, offchain::CreateInherent, pallet_prelude::BlockNumberFor}; +use frame_system::{ensure_none, offchain::CreateBare, pallet_prelude::BlockNumberFor}; use scale_info::TypeInfo; use sp_arithmetic::{ traits::{CheckedAdd, Zero}, @@ -279,6 +279,8 @@ use sp_runtime::TryRuntimeError; mod benchmarking; #[cfg(test)] mod mock; +#[cfg(all(test, feature = "remote-mining"))] +mod remote_mining; #[macro_use] pub mod helpers; @@ -489,9 +491,9 @@ where /// These are stored together because they are often accessed together. #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, Default, TypeInfo)] #[scale_info(skip_type_params(T))] -pub struct RoundSnapshot { +pub struct RoundSnapshot { /// All of the voters. - pub voters: Vec, + pub voters: Vec, /// All of the targets. 
pub targets: Vec, } @@ -613,7 +615,7 @@ pub mod pallet { use sp_runtime::traits::Convert; #[pallet::config] - pub trait Config: frame_system::Config + CreateInherent> { + pub trait Config: frame_system::Config + CreateBare> { #[allow(deprecated)] type RuntimeEvent: From> + IsType<::RuntimeEvent> diff --git a/substrate/frame/election-provider-multi-phase/src/mock.rs b/substrate/frame/election-provider-multi-phase/src/mock.rs index c23d226f84780..c3ae993a7c5b8 100644 --- a/substrate/frame/election-provider-multi-phase/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/src/mock.rs @@ -459,11 +459,11 @@ where type Extrinsic = Extrinsic; } -impl frame_system::offchain::CreateInherent for Runtime +impl frame_system::offchain::CreateBare for Runtime where RuntimeCall: From, { - fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + fn create_bare(call: Self::RuntimeCall) -> Self::Extrinsic { Extrinsic::new_bare(call) } } diff --git a/substrate/frame/election-provider-multi-phase/src/remote_mining.rs b/substrate/frame/election-provider-multi-phase/src/remote_mining.rs new file mode 100644 index 0000000000000..5b4580809c391 --- /dev/null +++ b/substrate/frame/election-provider-multi-phase/src/remote_mining.rs @@ -0,0 +1,265 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Remote mining tests for Kusama and Polkadot. +//! +//! Run like this: +//! +//! ```ignore +//! RUST_LOG=remote-ext=info,runtime::election-provider=debug cargo test --release --features remote-mining -p pallet-election-provider-multi-phase mine_for_ -- --test-threads 1 +//! ``` +//! +//! See the comments below on how to feed a specific block hash. + +use crate::{ElectionCompute, Miner, MinerConfig, RawSolution, RoundSnapshot}; +use codec::Decode; +use core::marker::PhantomData; +use frame_election_provider_support::generate_solution_type; +use frame_support::{ + traits::Get, + weights::constants::{WEIGHT_PROOF_SIZE_PER_MB, WEIGHT_REF_TIME_PER_SECOND}, +}; +use remote_externalities::{Builder, Mode, OnlineConfig, Transport}; +use sp_core::{ConstU32, H256}; +use sp_npos_elections::BalancingConfig; +use sp_runtime::{Perbill, Weight}; + +pub mod polkadot { + use super::*; + + pub struct MinerConfig; + + pub struct MaxWeight; + impl Get for MaxWeight { + fn get() -> Weight { + Weight::from_parts( + WEIGHT_REF_TIME_PER_SECOND.saturating_mul(2), + WEIGHT_PROOF_SIZE_PER_MB.saturating_mul(5), + ) + } + } + + generate_solution_type!( + #[compact] + pub struct PolkadotSolution::< + VoterIndex = u32, + TargetIndex = u16, + Accuracy = sp_runtime::PerU16, + MaxVoters = ConstU32<22_500>, + >(16) + ); + + /// Some configs are a bit inconsistent, but we don't care about them for now.
+ impl crate::MinerConfig for MinerConfig { + type AccountId = sp_runtime::AccountId32; + type MaxBackersPerWinner = ConstU32<1024>; + type MaxLength = ConstU32<{ 4 * 1024 * 1024 }>; + type MaxVotesPerVoter = ConstU32<16>; + type MaxWeight = MaxWeight; + type MaxWinners = ConstU32<1000>; + type Solution = PolkadotSolution; + + fn solution_weight( + _voters: u32, + _targets: u32, + _active_voters: u32, + _degree: u32, + ) -> Weight { + Default::default() + } + } +} + +pub mod kusama { + use super::*; + pub struct MinerConfig; + + pub struct MaxWeight; + impl Get for MaxWeight { + fn get() -> Weight { + Weight::from_parts( + WEIGHT_REF_TIME_PER_SECOND.saturating_mul(2), + WEIGHT_PROOF_SIZE_PER_MB.saturating_mul(5), + ) + } + } + + generate_solution_type!( + #[compact] + pub struct KusamaSolution::< + VoterIndex = u32, + TargetIndex = u16, + Accuracy = sp_runtime::PerU16, + MaxVoters = ConstU32<12_500>, + >(24) + ); + + /// Some configs are a bit inconsistent, but we don't care about them for now. + impl crate::MinerConfig for MinerConfig { + type AccountId = sp_runtime::AccountId32; + type MaxBackersPerWinner = ConstU32<1024>; + type MaxLength = ConstU32<{ 4 * 1024 * 1024 }>; + type MaxVotesPerVoter = ConstU32<24>; + type MaxWeight = MaxWeight; + type MaxWinners = ConstU32<1000>; + type Solution = KusamaSolution; + + fn solution_weight( + _voters: u32, + _targets: u32, + _active_voters: u32, + _degree: u32, + ) -> Weight { + Default::default() + } + } +} + +pub struct HackyGetSnapshot(PhantomData); + +type UntypedSnapshotOf = RoundSnapshot< + ::AccountId, + frame_election_provider_support::Voter< + ::AccountId, + ::MaxVotesPerVoter, + >, +>; + +impl HackyGetSnapshot { + fn snapshot() -> UntypedSnapshotOf + where + UntypedSnapshotOf: Decode, + { + let key = [ + sp_core::hashing::twox_128(b"ElectionProviderMultiPhase"), + sp_core::hashing::twox_128(b"Snapshot"), + ] + .concat(); + frame_support::storage::unhashed::get::>(&key).unwrap() + } + + fn desired_targets() -> u32 { + let key = [ + sp_core::hashing::twox_128(b"ElectionProviderMultiPhase"), + sp_core::hashing::twox_128(b"DesiredTargets"), + ] + .concat(); + frame_support::storage::unhashed::get::(&key).unwrap() + } +} + +pub type FakeBlock = sp_runtime::testing::Block>; + +pub struct Balancing; +impl Get> for Balancing { + fn get() -> Option { + Some(BalancingConfig { iterations: 10, tolerance: 0 }) + } +} +pub type SolverOf = frame_election_provider_support::SequentialPhragmen< + ::AccountId, + Perbill, + Balancing, +>; + +fn test_for_network() +where + UntypedSnapshotOf: Decode, +{ + let snapshot = HackyGetSnapshot::::snapshot(); + let desired_targets = HackyGetSnapshot::::desired_targets(); + + let (solution, score, _size, _trimming) = + Miner::::mine_solution_with_snapshot::>( + snapshot.voters.clone(), + snapshot.targets.clone(), + desired_targets, + ) + .unwrap(); + + let raw_solution = RawSolution { round: 0, solution, score }; + + let _ready_solution = Miner::::feasibility_check( + raw_solution, + ElectionCompute::Signed, + desired_targets, + snapshot, + 0, + Default::default(), + ) + .unwrap(); +} + +#[tokio::test] +async fn mine_for_polkadot() { + sp_tracing::try_init_simple(); + + // good way to find good block hashes: https://polkadot.subscan.io/event?page=1&time_dimension=date&module=electionprovidermultiphase&event_id=solutionstored + // we are just looking for blocks with snapshot present, that's all.
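For orientation on the raw reads in `HackyGetSnapshot` above: a FRAME storage value lives under the concatenation of `twox_128(pallet_prefix)` and `twox_128(item_name)`. A minimal sketch of just that key derivation, assuming only `sp-core` as a dependency:

```rust
use sp_core::hashing::twox_128;

// The raw storage key for `ElectionProviderMultiPhase::Snapshot`,
// concatenated exactly as `HackyGetSnapshot::snapshot` does above.
fn snapshot_key() -> Vec<u8> {
    [twox_128(b"ElectionProviderMultiPhase"), twox_128(b"Snapshot")].concat()
}

fn main() {
    // Two 16-byte twox128 hashes back to back.
    assert_eq!(snapshot_key().len(), 32);
}
```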
+ let block_hash_str = std::option_env!("BLOCK_HASH") + // known good polkadot hash + .unwrap_or("047f1f5b1081fdaa72c9224d0ea302553738556758dc53269b1bfe6a069986bb") + .to_string(); + let block_hash = H256::from_slice(hex::decode(block_hash_str).unwrap().as_ref()); + let online = OnlineConfig { + at: Some(block_hash), + pallets: vec!["ElectionProviderMultiPhase".to_string()], + transport: Transport::from( + std::option_env!("WS").unwrap_or("wss://rpc.ibp.network/polkadot").to_string(), + ), + ..Default::default() + }; + + let _ = Builder::::default() + .mode(Mode::Online(online)) + .build() + .await + .unwrap() + .execute_with(|| { + test_for_network::(); + }); +} + +#[tokio::test] +async fn mine_for_kusama() { + sp_tracing::try_init_simple(); + + // good way to find good block hashes: https://kusama.subscan.io/event?page=1&time_dimension=date&module=electionprovidermultiphase&event_id=solutionstored + // we are just looking for blocks with snapshot present, that's all. + let block_hash_str = std::option_env!("BLOCK_HASH") + // known good kusama hash + .unwrap_or("d5d9f5e098fcb41915c85e6695eddc18c0bc4aa4976ad0d9bf5f4713039bca26") + .to_string(); + let block_hash = H256::from_slice(hex::decode(block_hash_str).unwrap().as_ref()); + let online = OnlineConfig { + at: Some(block_hash), + pallets: vec!["ElectionProviderMultiPhase".to_string()], + transport: Transport::from( + std::option_env!("WS").unwrap_or("wss://rpc.ibp.network/kusama").to_string(), + ), + ..Default::default() + }; + + let _ = Builder::::default() + .mode(Mode::Online(online)) + .build() + .await + .unwrap() + .execute_with(|| { + test_for_network::(); + }); +} diff --git a/substrate/frame/election-provider-multi-phase/src/unsigned.rs b/substrate/frame/election-provider-multi-phase/src/unsigned.rs index c93e2be5a2fe2..f78589c4dd2ad 100644 --- a/substrate/frame/election-provider-multi-phase/src/unsigned.rs +++ b/substrate/frame/election-provider-multi-phase/src/unsigned.rs @@ -32,7 +32,7 @@ use frame_support::{ BoundedVec, }; use frame_system::{ - offchain::{CreateInherent, SubmitTransaction}, + offchain::{CreateBare, SubmitTransaction}, pallet_prelude::BlockNumberFor, }; use scale_info::TypeInfo; @@ -192,7 +192,7 @@ fn ocw_solution_exists() -> bool { matches!(StorageValueRef::persistent(OFFCHAIN_CACHED_CALL).get::>(), Ok(Some(_))) } -impl>> Pallet { +impl>> Pallet { /// Mine a new npos solution. /// /// The Npos Solver type, `S`, must have the same AccountId and Error type as the @@ -292,7 +292,7 @@ impl>> Pallet { fn submit_call(call: Call) -> Result<(), MinerError> { log!(debug, "miner submitting a solution as an unsigned transaction"); - let xt = T::create_inherent(call.into()); + let xt = T::create_bare(call.into()); SubmitTransaction::>::submit_transaction(xt) .map_err(|_| MinerError::PoolSubmissionFailed) } @@ -618,6 +618,13 @@ impl Miner { let is_trimmed = TrimmingStatus { weight: weight_trimmed, length: length_trimmed, edges: edges_trimmed }; + log_no_system!( + debug, + "feasible solution mined: trimmed? 
{:?}, score: {:?}, encoded size: {:?}", + is_trimmed, + score, + solution.encoded_size() + ); Ok((solution, score, size, is_trimmed)) } @@ -1975,7 +1982,7 @@ mod tests { vec![ ( 10, - BoundedSupport { total: 25, voters: bounded_vec![(1, 11), (5, 5), (4, 9)] } + BoundedSupport { total: 25, voters: bounded_vec![(1, 11), (4, 9), (5, 5)] } ), (20, BoundedSupport { total: 22, voters: bounded_vec![(2, 12), (5, 10)] }), (30, BoundedSupport { total: 18, voters: bounded_vec![(3, 13), (4, 5)] }) diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs index 88224f68edd45..292ade8b3f8c3 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs @@ -350,11 +350,11 @@ where type Extrinsic = Extrinsic; } -impl frame_system::offchain::CreateInherent for Runtime +impl frame_system::offchain::CreateBare for Runtime where RuntimeCall: From, { - fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + fn create_bare(call: Self::RuntimeCall) -> Self::Extrinsic { Extrinsic::new_bare(call) } } diff --git a/substrate/frame/election-provider-support/solution-type/src/single_page.rs b/substrate/frame/election-provider-support/solution-type/src/single_page.rs index c921be34b3430..e64b99b073bae 100644 --- a/substrate/frame/election-provider-support/solution-type/src/single_page.rs +++ b/substrate/frame/election-provider-support/solution-type/src/single_page.rs @@ -153,9 +153,9 @@ pub(crate) fn generate(def: crate::SolutionDef) -> Result { voter_at: impl Fn(Self::VoterIndex) -> Option, target_at: impl Fn(Self::TargetIndex) -> Option, ) -> Result<_fepsp::Vec<_feps::Assignment>, _feps::Error> { - let mut #assignment_name: _fepsp::Vec<_feps::Assignment> = Default::default(); + let mut #assignment_name: _fepsp::BTreeMap> = Default::default(); #into_impl - Ok(#assignment_name) + Ok(#assignment_name.into_values().collect()) } fn voter_count(&self) -> usize { @@ -426,13 +426,18 @@ pub(crate) fn into_impl( let into_impl_single = { let name = vote_field(1); quote!( - for (voter_index, target_index) in self.#name { - #assignments.push(_feps::Assignment { - who: voter_at(voter_index).or_invalid_index()?, - distribution: vec![ - (target_at(target_index).or_invalid_index()?, #per_thing::one()) - ], - }) + for (voter_index, target_index) in self.#name { + if #assignments.contains_key(&voter_index) { + return Err(_feps::Error::DuplicateVoter); + } else { + #assignments.insert( + voter_index, + _feps::Assignment { + who: voter_at(voter_index).or_invalid_index()?, + distribution: vec![(target_at(target_index).or_invalid_index()?, #per_thing::one())], + } + ); + } } ) }; @@ -442,10 +447,21 @@ pub(crate) fn into_impl( let name = vote_field(c); quote!( for (voter_index, inners, t_last_idx) in self.#name { + if #assignments.contains_key(&voter_index) { + return Err(_feps::Error::DuplicateVoter); + } + + let mut targets_seen = _fepsp::BTreeSet::new(); + let mut sum = #per_thing::zero(); let mut inners_parsed = inners .iter() .map(|(ref t_idx, p)| { + if targets_seen.contains(t_idx) { + return Err(_feps::Error::DuplicateTarget); + } else { + targets_seen.insert(t_idx); + } sum = _fepsp::sp_arithmetic::traits::Saturating::saturating_add(sum, *p); let target = target_at(*t_idx).or_invalid_index()?; Ok((target, *p)) }) @@ -456,6 +472,13 @@ return Err(_feps::Error::SolutionWeightOverflow); } + //
check that the last target index is also unique. + if targets_seen.contains(&t_last_idx) { + return Err(_feps::Error::DuplicateTarget); + } else { + // no need to insert, we are done. + } + // defensive only. Since Percent doesn't have `Sub`. let p_last = _fepsp::sp_arithmetic::traits::Saturating::saturating_sub( #per_thing::one(), @@ -464,10 +487,13 @@ pub(crate) fn into_impl( inners_parsed.push((target_at(t_last_idx).or_invalid_index()?, p_last)); - #assignments.push(_feps::Assignment { - who: voter_at(voter_index).or_invalid_index()?, - distribution: inners_parsed, - }); + #assignments.insert( + voter_index, + _feps::Assignment { + who: voter_at(voter_index).or_invalid_index()?, + distribution: inners_parsed, + } + ); } ) }) diff --git a/substrate/frame/election-provider-support/src/lib.rs b/substrate/frame/election-provider-support/src/lib.rs index eb87fe8d3c129..7c50b001b85ce 100644 --- a/substrate/frame/election-provider-support/src/lib.rs +++ b/substrate/frame/election-provider-support/src/lib.rs @@ -232,7 +232,10 @@ use sp_runtime::TryRuntimeError; // re-export for the solution macro, with the dependencies of the macro. #[doc(hidden)] pub mod private { - pub use alloc::{collections::btree_set::BTreeSet, vec::Vec}; + pub use alloc::{ + collections::{btree_map::BTreeMap, btree_set::BTreeSet}, + vec::Vec, + }; pub use codec; pub use scale_info; pub use sp_arithmetic; diff --git a/substrate/frame/election-provider-support/src/tests.rs b/substrate/frame/election-provider-support/src/tests.rs index af6206693d5ad..cfac56b340ea3 100644 --- a/substrate/frame/election-provider-support/src/tests.rs +++ b/substrate/frame/election-provider-support/src/tests.rs @@ -246,6 +246,65 @@ mod solution_type { ); } + #[test] + fn prevents_target_duplicate_into_assignment() { + let voter_at = |a: u32| -> Option { Some(a as AccountId) }; + let target_at = |a: u16| -> Option { Some(a as AccountId) }; + + // case 1: duplicate target in votes2. + let solution = TestSolution { votes2: vec![(0, [(1, p(50))], 1)], ..Default::default() }; + assert_eq!( + solution.into_assignment(&voter_at, &target_at).unwrap_err(), + NposError::DuplicateTarget, + ); + + // case 2: duplicate target in votes3. 
+ let solution = + TestSolution { votes3: vec![(0, [(1, p(25)), (2, p(50))], 1)], ..Default::default() }; + assert_eq!( + solution.into_assignment(&voter_at, &target_at).unwrap_err(), + NposError::DuplicateTarget, + ); + } + + #[test] + fn prevents_voter_duplicate_into_assignment() { + let voter_at = |a: u32| -> Option { Some(a as AccountId) }; + let target_at = |a: u16| -> Option { Some(a as AccountId) }; + + // case 1: there is a duplicate among two different fields + let solution = TestSolution { + // voter index 0 is present here + votes1: vec![(0, 0), (1, 0)], + // voter index 0 is also present here + votes2: vec![(0, [(1, p(50))], 2)], + ..Default::default() + }; + + assert_eq!( + solution.into_assignment(&voter_at, &target_at).unwrap_err(), + NposError::DuplicateVoter, + ); + + // case 2: there is a duplicate in the same field + let solution = TestSolution { votes1: vec![(0, 0), (0, 1)], ..Default::default() }; + assert_eq!( + solution.into_assignment(&voter_at, &target_at).unwrap_err(), + NposError::DuplicateVoter, + ); + + // case 2.1: there is a duplicate in the same field, a bit more complex + let solution = TestSolution { + votes1: vec![(0, 0)], + votes2: vec![(1, [(1, p(50))], 2), (1, [(3, p(50))], 4)], + ..Default::default() + }; + assert_eq!( + solution.into_assignment(&voter_at, &target_at).unwrap_err(), + NposError::DuplicateVoter, + ); + } + #[test] fn from_and_into_assignment_works() { let voters = vec![2 as AccountId, 4, 1, 5, 3]; diff --git a/substrate/frame/examples/offchain-worker/src/lib.rs b/substrate/frame/examples/offchain-worker/src/lib.rs index 92215f49bd8bc..ee0a52eea4451 100644 --- a/substrate/frame/examples/offchain-worker/src/lib.rs +++ b/substrate/frame/examples/offchain-worker/src/lib.rs @@ -59,7 +59,7 @@ use frame_support::traits::Get; use frame_system::{ self as system, offchain::{ - AppCrypto, CreateInherent, CreateSignedTransaction, SendSignedTransaction, + AppCrypto, CreateBare, CreateSignedTransaction, SendSignedTransaction, SendUnsignedTransaction, SignedPayload, Signer, SigningTypes, SubmitTransaction, }, pallet_prelude::BlockNumberFor, @@ -131,7 +131,7 @@ pub mod pallet { /// This pallet's configuration trait #[pallet::config] pub trait Config: - CreateSignedTransaction> + CreateInherent> + frame_system::Config + CreateSignedTransaction> + CreateBare> + frame_system::Config { /// The identifier type for an offchain worker. type AuthorityId: AppCrypto; @@ -508,7 +508,7 @@ impl Pallet { // implement unsigned validation logic, as any mistakes can lead to opening DoS or spam // attack vectors. See validation logic docs for more details.
// - let xt = T::create_inherent(call.into()); + let xt = T::create_bare(call.into()); SubmitTransaction::>::submit_transaction(xt) .map_err(|()| "Unable to submit unsigned transaction.")?; diff --git a/substrate/frame/examples/offchain-worker/src/tests.rs b/substrate/frame/examples/offchain-worker/src/tests.rs index a8af7d2e31147..230873a2dca57 100644 --- a/substrate/frame/examples/offchain-worker/src/tests.rs +++ b/substrate/frame/examples/offchain-worker/src/tests.rs @@ -121,11 +121,11 @@ where } } -impl frame_system::offchain::CreateInherent for Test +impl frame_system::offchain::CreateBare for Test where RuntimeCall: From, { - fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + fn create_bare(call: Self::RuntimeCall) -> Self::Extrinsic { Extrinsic::new_bare(call) } } diff --git a/substrate/frame/examples/tasks/src/lib.rs b/substrate/frame/examples/tasks/src/lib.rs index 7d51617497d65..6b3b4adbacfaa 100644 --- a/substrate/frame/examples/tasks/src/lib.rs +++ b/substrate/frame/examples/tasks/src/lib.rs @@ -19,7 +19,7 @@ #![cfg_attr(not(feature = "std"), no_std)] use frame_support::dispatch::DispatchResult; -use frame_system::offchain::CreateInherent; +use frame_system::offchain::CreateBare; #[cfg(feature = "experimental")] use frame_system::offchain::SubmitTransaction; // Re-export pallet items so that they can be accessed from the crate namespace. @@ -77,7 +77,7 @@ pub mod pallet { let call = frame_system::Call::::do_task { task: runtime_task.into() }; // Submit the task as an unsigned transaction - let xt = >>::create_inherent(call.into()); + let xt = >>::create_bare(call.into()); let res = SubmitTransaction::>::submit_transaction(xt); match res { Ok(_) => log::info!(target: LOG_TARGET, "Submitted the task."), @@ -91,7 +91,7 @@ pub mod pallet { } #[pallet::config] - pub trait Config: CreateInherent> + frame_system::Config { + pub trait Config: CreateBare> + frame_system::Config { type RuntimeTask: frame_support::traits::Task + IsType<::RuntimeTask> + From>; diff --git a/substrate/frame/examples/tasks/src/mock.rs b/substrate/frame/examples/tasks/src/mock.rs index 3dc9153c94a06..e6053dfe56dff 100644 --- a/substrate/frame/examples/tasks/src/mock.rs +++ b/substrate/frame/examples/tasks/src/mock.rs @@ -48,11 +48,11 @@ where type Extrinsic = Extrinsic; } -impl frame_system::offchain::CreateInherent for Runtime +impl frame_system::offchain::CreateBare for Runtime where RuntimeCall: From, { - fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + fn create_bare(call: Self::RuntimeCall) -> Self::Extrinsic { Extrinsic::new_bare(call) } } diff --git a/substrate/frame/executive/src/lib.rs b/substrate/frame/executive/src/lib.rs index fe702e1fc3951..5f9142a1dbb82 100644 --- a/substrate/frame/executive/src/lib.rs +++ b/substrate/frame/executive/src/lib.rs @@ -161,9 +161,8 @@ use frame_support::{ migrations::MultiStepMigrator, pallet_prelude::InvalidTransaction, traits::{ - BeforeAllRuntimeMigrations, EnsureInherentsAreFirst, ExecuteBlock, OffchainWorker, - OnFinalize, OnIdle, OnInitialize, OnPoll, OnRuntimeUpgrade, PostInherents, - PostTransactions, PreInherents, + BeforeAllRuntimeMigrations, ExecuteBlock, IsInherent, OffchainWorker, OnFinalize, OnIdle, + OnInitialize, OnPoll, OnRuntimeUpgrade, PostInherents, PostTransactions, PreInherents, }, weights::{Weight, WeightMeter}, }; @@ -174,7 +173,7 @@ use sp_runtime::{ self, Applyable, CheckEqual, Checkable, Dispatchable, Header, NumberFor, One, ValidateUnsigned, Zero, }, - transaction_validity::{TransactionSource, 
	TransactionValidity},
+	transaction_validity::{TransactionSource, TransactionValidity, TransactionValidityError},
	ApplyExtrinsicResult, ExtrinsicInclusionMode,
};
@@ -196,6 +195,31 @@
pub type CheckedOf<E, C> = <E as Checkable<C>>::Checked;
pub type CallOf<E, C> = <CheckedOf<E, C> as Applyable>::Call;
pub type OriginOf<E, C> = <CallOf<E, C> as Dispatchable>::RuntimeOrigin;

+#[derive(PartialEq)]
+pub enum ExecutiveError {
+	InvalidInherentPosition(usize),
+	OnlyInherentsAllowed,
+	ApplyExtrinsic(TransactionValidityError),
+	Custom(&'static str),
+}
+
+impl core::fmt::Debug for ExecutiveError {
+	fn fmt(&self, fmt: &mut core::fmt::Formatter) -> core::fmt::Result {
+		match self {
+			ExecutiveError::InvalidInherentPosition(i) =>
+				write!(fmt, "Invalid inherent position for extrinsic at index {}", i),
+			ExecutiveError::OnlyInherentsAllowed =>
+				write!(fmt, "Only inherents are allowed in this block"),
+			ExecutiveError::ApplyExtrinsic(e) => write!(
+				fmt,
+				"ExecuteBlockError applying extrinsic: {}",
+				Into::<&'static str>::into(*e)
+			),
+			ExecutiveError::Custom(err) => write!(fmt, "{err}"),
+		}
+	}
+}
+
/// Main entry point for certain runtime actions as e.g. `execute_block`.
///
/// Generic parameters:
@@ -226,7 +250,7 @@ pub struct Executive<
);

impl<
-		System: frame_system::Config + EnsureInherentsAreFirst<Block>,
+		System: frame_system::Config + IsInherent<Block::Extrinsic>,
		Block: traits::Block<
			Header = frame_system::pallet_prelude::HeaderFor<System>,
			Hash = System::Hash,
@@ -265,7 +289,7 @@ where

#[cfg(feature = "try-runtime")]
impl<
-		System: frame_system::Config + EnsureInherentsAreFirst<Block>,
+		System: frame_system::Config + IsInherent<Block::Extrinsic>,
		Block: traits::Block<
			Header = frame_system::pallet_prelude::HeaderFor<System>,
			Hash = System::Hash,
		>,
@@ -304,7 +328,7 @@ where
		state_root_check: bool,
		signature_check: bool,
		select: frame_try_runtime::TryStateSelect,
-	) -> Result<Weight, &'static str> {
+	) -> Result<Weight, ExecutiveError> {
		log::info!(
			target: LOG_TARGET,
			"try-runtime: executing block #{:?} / state root check: {:?} / signature check: {:?} / try-state-select: {:?}",
			block.header().number(),
			state_root_check,
			signature_check,
			select,
		);

		let mode = Self::initialize_block(block.header());
-		let num_inherents = Self::initial_checks(&block) as usize;
+		Self::initial_checks(&block);
		let (header, extrinsics) = block.deconstruct();

-		// Check if there are any forbidden non-inherents in the block.
-		if mode == ExtrinsicInclusionMode::OnlyInherents && extrinsics.len() > num_inherents {
-			return Err("Only inherents allowed".into())
-		}
-
-		let try_apply_extrinsic = |uxt: Block::Extrinsic| -> ApplyExtrinsicResult {
-			sp_io::init_tracing();
-			let encoded = uxt.encode();
-			let encoded_len = encoded.len();
-
-			let is_inherent = System::is_inherent(&uxt);
-			// skip signature verification.
-			let xt = if signature_check {
-				uxt.check(&Default::default())
-			} else {
-				uxt.unchecked_into_checked_i_know_what_i_am_doing(&Default::default())
-			}?;
-
-			let dispatch_info = xt.get_dispatch_info();
-			if !is_inherent && !<frame_system::Pallet<System>>::inherents_applied() {
-				Self::inherents_applied();
-			}
-
-			<frame_system::Pallet<System>>::note_extrinsic(encoded);
-			let r = Applyable::apply::<UnsignedValidator>(xt, &dispatch_info, encoded_len)?;
-
-			if r.is_err() && dispatch_info.class == DispatchClass::Mandatory {
-				return Err(InvalidTransaction::BadMandatory.into())
-			}
-
-			<frame_system::Pallet<System>>::note_applied_extrinsic(&r, dispatch_info);
-
-			Ok(r.map(|_| ()).map_err(|e| e.error))
-		};
-
-		// Apply extrinsics:
-		for e in extrinsics.iter() {
-			if let Err(err) = try_apply_extrinsic(e.clone()) {
-				log::error!(
-					target: LOG_TARGET, "transaction {:?} failed due to {:?}. Aborting the rest of the block execution.",
-					e,
-					err,
-				);
-				break
-			}
-		}
+		let signature_check = if signature_check {
+			Block::Extrinsic::check
+		} else {
+			Block::Extrinsic::unchecked_into_checked_i_know_what_i_am_doing
+		};
+		Self::apply_extrinsics(mode, extrinsics.into_iter(), |uxt, is_inherent| {
+			Self::do_apply_extrinsic(uxt, is_inherent, signature_check)
+		})?;

		// In this case there were no transactions to trigger this state transition:
		if !<frame_system::Pallet<System>>::inherents_applied() {
@@ -382,12 +369,13 @@ where
		<AllPalletsWithSystem as frame_support::traits::TryState<
			frame_system::pallet_prelude::BlockNumberFor<System>,
		>>::try_state(*header.number(), select.clone())
-		.inspect_err(|e| {
+		.map_err(|e| {
			log::error!(target: LOG_TARGET, "failure: {:?}", e);
+			ExecutiveError::Custom(e.into())
		})?;

		if select.any() {
			let res = AllPalletsWithSystem::try_decode_entire_state();
-			Self::log_decode_result(res)?;
+			Self::log_decode_result(res).map_err(|e| ExecutiveError::Custom(e.into()))?;
		}
		drop(_guard);
@@ -500,7 +488,7 @@ where
}

impl<
-		System: frame_system::Config + EnsureInherentsAreFirst<Block>,
+		System: frame_system::Config + IsInherent<Block::Extrinsic>,
		Block: traits::Block<
			Header = frame_system::pallet_prelude::HeaderFor<System>,
			Hash = System::Hash,
		>,
@@ -614,8 +602,7 @@ where
		last.map(|v| v.was_upgraded(&current)).unwrap_or(true)
	}

-	/// Returns the number of inherents in the block.
-	fn initial_checks(block: &Block) -> u32 {
+	fn initial_checks(block: &Block) {
		sp_tracing::enter_span!(sp_tracing::Level::TRACE, "initial_checks");
		let header = block.header();
@@ -627,11 +614,6 @@ where
			*header.parent_hash(),
			"Parent hash should be valid.",
		);
-
-		match System::ensure_inherents_are_first(block) {
-			Ok(num) => num,
-			Err(i) => panic!("Invalid inherent position for extrinsic at index {}", i),
-		}
	}

	/// Actually execute all transitions for `block`.
@@ -641,20 +623,21 @@ where
		sp_tracing::info_span!("execute_block", ?block);
		// Execute `on_runtime_upgrade` and `on_initialize`.
		let mode = Self::initialize_block(block.header());
-		let num_inherents = Self::initial_checks(&block) as usize;
-		let (header, extrinsics) = block.deconstruct();
-		let num_extrinsics = extrinsics.len();
+		Self::initial_checks(&block);

-		if mode == ExtrinsicInclusionMode::OnlyInherents && num_extrinsics > num_inherents {
-			// Invalid block
-			panic!("Only inherents are allowed in this block")
+		let (header, extrinsics) = block.deconstruct();
+		if let Err(e) = Self::apply_extrinsics(
+			mode,
+			extrinsics.into_iter(),
+			|uxt, is_inherent| {
+				Self::do_apply_extrinsic(uxt, is_inherent, Block::Extrinsic::check)
+			}
+		) {
+			panic!("{:?}", e)
		}

-		Self::apply_extrinsics(extrinsics.into_iter());
-
		// In this case there were no transactions to trigger this state transition:
		if !<frame_system::Pallet<System>>::inherents_applied() {
-			defensive_assert!(num_inherents == num_extrinsics);
			Self::inherents_applied();
		}
@@ -687,13 +670,39 @@ where
	}

	/// Execute given extrinsics.
-	fn apply_extrinsics(extrinsics: impl Iterator<Item = Block::Extrinsic>) {
-		extrinsics.into_iter().for_each(|e| {
-			if let Err(e) = Self::apply_extrinsic(e) {
-				let err: &'static str = e.into();
-				panic!("{}", err)
+	fn apply_extrinsics(
+		mode: ExtrinsicInclusionMode,
+		extrinsics: impl Iterator<Item = Block::Extrinsic>,
+		mut apply_extrinsic: impl FnMut(Block::Extrinsic, bool) -> ApplyExtrinsicResult,
+	) -> Result<(), ExecutiveError> {
+		let mut first_non_inherent_idx = 0;
+		for (idx, uxt) in extrinsics.into_iter().enumerate() {
+			let is_inherent = System::is_inherent(&uxt);
+			if is_inherent {
+				// Check if inherents are first
+				if first_non_inherent_idx != idx {
+					return Err(ExecutiveError::InvalidInherentPosition(idx));
+				}
+				first_non_inherent_idx += 1;
+			} else {
+				// Check if there are any forbidden non-inherents in the block.
+				if mode == ExtrinsicInclusionMode::OnlyInherents {
+					return Err(ExecutiveError::OnlyInherentsAllowed)
+				}
			}
-		});
+
+			log::debug!(target: LOG_TARGET, "Executing transaction: {:?}", uxt);
+			if let Err(e) = apply_extrinsic(uxt, is_inherent) {
+				log::error!(
+					target: LOG_TARGET,
+					"Transaction({idx}) failed due to {e:?}. \
+					Aborting the rest of the block execution.",
+				);
+				return Err(ExecutiveError::ApplyExtrinsic(e.into()));
+			}
+		}
+
+		Ok(())
	}

	/// Finalize the block - it is up the caller to ensure that all header fields are valid
	///
	/// This doesn't attempt to validate anything regarding the block, but it builds a list of uxt
	/// hashes.
-	pub fn apply_extrinsic(uxt: Block::Extrinsic) -> ApplyExtrinsicResult {
+	fn do_apply_extrinsic(
+		uxt: Block::Extrinsic,
+		is_inherent: bool,
+		check: impl FnOnce(
+			Block::Extrinsic,
+			&Context,
+		) -> Result<CheckedOf<Block::Extrinsic, Context>, TransactionValidityError>,
+	) -> ApplyExtrinsicResult {
		sp_io::init_tracing();
		let encoded = uxt.encode();
		let encoded_len = encoded.len();
		sp_tracing::enter_span!(sp_tracing::info_span!("apply_extrinsic",
				ext=?sp_core::hexdisplay::HexDisplay::from(&encoded)));

-		// We use the dedicated `is_inherent` check here, since just relying on `Mandatory` dispatch
-		// class does not capture optional inherents.
-		let is_inherent = System::is_inherent(&uxt);
-
		// Verify that the signature is good.
-		let xt = uxt.check(&Default::default())?;
+		let xt = check(uxt, &Context::default())?;
+
		let dispatch_info = xt.get_dispatch_info();

		if !is_inherent && !<frame_system::Pallet<System>>::inherents_applied() {
@@ -812,6 +825,15 @@ where
		Ok(r.map(|_| ()).map_err(|e| e.error))
	}

+	/// Apply extrinsic outside of the block execution function.
+	///
+	/// This doesn't attempt to validate anything regarding the block, but it builds a list of uxt
+	/// hashes.
+	pub fn apply_extrinsic(uxt: Block::Extrinsic) -> ApplyExtrinsicResult {
+		let is_inherent = System::is_inherent(&uxt);
+		Self::do_apply_extrinsic(uxt, is_inherent, Block::Extrinsic::check)
+	}
+
	fn final_checks(header: &frame_system::pallet_prelude::HeaderFor<System>) {
		sp_tracing::enter_span!(sp_tracing::Level::TRACE, "final_checks");
		// remove temporaries
diff --git a/substrate/frame/executive/src/tests.rs b/substrate/frame/executive/src/tests.rs
index 313d9bb9634a1..325451099807c 100644
--- a/substrate/frame/executive/src/tests.rs
+++ b/substrate/frame/executive/src/tests.rs
@@ -1298,7 +1298,7 @@ fn try_execute_block_works() {

/// Same as `extrinsic_while_exts_forbidden_errors` but using the try-runtime function.
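For orientation on the executive hunks above: the inherent-ordering check that used to live behind `System::ensure_inherents_are_first` is now folded into the single pass over extrinsics in `apply_extrinsics`, which reports a structured `ExecutiveError` instead of panicking internally (only `execute_block` still panics, at its call site). Below is a minimal, self-contained sketch of just that ordering rule; the error names mirror the diff, while extrinsics are reduced to a plain `is_inherent` flag and the inclusion mode to a bool, both simplifications of mine:

```rust
// Standalone model of the ordering rule enforced inside the new
// `apply_extrinsics` (no dispatching, no weights; illustration only).
#[derive(Debug, PartialEq)]
enum ExecutiveError {
    InvalidInherentPosition(usize),
    OnlyInherentsAllowed,
}

fn check_order(extrinsics: &[bool], only_inherents: bool) -> Result<(), ExecutiveError> {
    let mut first_non_inherent_idx = 0;
    for (idx, &is_inherent) in extrinsics.iter().enumerate() {
        if is_inherent {
            // Every inherent must belong to the leading run of inherents.
            if first_non_inherent_idx != idx {
                return Err(ExecutiveError::InvalidInherentPosition(idx));
            }
            first_non_inherent_idx += 1;
        } else if only_inherents {
            // Non-inherents are forbidden entirely in `OnlyInherents` mode.
            return Err(ExecutiveError::OnlyInherentsAllowed);
        }
    }
    Ok(())
}

fn main() {
    // Inherent, inherent, transaction: valid ordering.
    assert_eq!(check_order(&[true, true, false], false), Ok(()));
    // A transaction before an inherent is rejected at the inherent's index.
    assert_eq!(
        check_order(&[false, true], false),
        Err(ExecutiveError::InvalidInherentPosition(1))
    );
}
```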
#[test] #[cfg(feature = "try-runtime")] -#[should_panic = "Only inherents allowed"] +#[should_panic = "Only inherents are allowed in this block"] fn try_execute_tx_forbidden_errors() { let xt1 = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})); let xt2 = UncheckedXt::new_signed(call_transfer(33, 0), 1, 1.into(), tx_ext(0, 0)); @@ -1325,64 +1325,78 @@ fn try_execute_tx_forbidden_errors() { }); } -/// Check that `ensure_inherents_are_first` reports the correct indices. +/// Test if `apply_extrinsics` validates if the inherents are first. #[test] -fn ensure_inherents_are_first_works() { +fn apply_extrinsics_checks_inherents_are_first() { let in1 = UncheckedXt::new_bare(RuntimeCall::Custom(custom::Call::inherent {})); let in2 = UncheckedXt::new_bare(RuntimeCall::Custom2(custom2::Call::inherent {})); let xt2 = UncheckedXt::new_signed(call_transfer(33, 0), 1, 1.into(), tx_ext(0, 0)); - // Mocked empty header: - let header = new_test_ext(1).execute_with(|| { - Executive::initialize_block(&Header::new_from_number(1)); - Executive::finalize_block() - }); - new_test_ext(1).execute_with(|| { - assert_ok!(Runtime::ensure_inherents_are_first(&Block::new(header.clone(), vec![]),), 0); assert_ok!( - Runtime::ensure_inherents_are_first(&Block::new(header.clone(), vec![xt2.clone()]),), - 0 + Executive::apply_extrinsics( + ExtrinsicInclusionMode::AllExtrinsics, + [].into_iter(), + |_, _| Ok(Ok(())) + ), + () ); assert_ok!( - Runtime::ensure_inherents_are_first(&Block::new(header.clone(), vec![in1.clone()])), - 1 + Executive::apply_extrinsics( + ExtrinsicInclusionMode::AllExtrinsics, + [xt2.clone()].into_iter(), + |_, _| Ok(Ok(())) + ), + () + ); + assert_ok!( + Executive::apply_extrinsics( + ExtrinsicInclusionMode::AllExtrinsics, + [in1.clone()].into_iter(), + |_, _| Ok(Ok(())) + ), + () ); assert_ok!( - Runtime::ensure_inherents_are_first(&Block::new( - header.clone(), - vec![in1.clone(), xt2.clone()] - ),), - 1 + Executive::apply_extrinsics( + ExtrinsicInclusionMode::AllExtrinsics, + [in1.clone(), xt2.clone()].into_iter(), + |_, _| Ok(Ok(())) + ), + () ); assert_ok!( - Runtime::ensure_inherents_are_first(&Block::new( - header.clone(), - vec![in2.clone(), in1.clone(), xt2.clone()] - ),), - 2 + Executive::apply_extrinsics( + ExtrinsicInclusionMode::AllExtrinsics, + [in2.clone(), in1.clone(), xt2.clone()].into_iter(), + |_, _| Ok(Ok(())) + ), + () ); - assert_eq!( - Runtime::ensure_inherents_are_first(&Block::new( - header.clone(), - vec![xt2.clone(), in1.clone()] - ),), - Err(1) + assert_err!( + Executive::apply_extrinsics( + ExtrinsicInclusionMode::AllExtrinsics, + [xt2.clone(), in1.clone()].into_iter(), + |_, _| Ok(Ok(())) + ), + ExecutiveError::InvalidInherentPosition(1) ); - assert_eq!( - Runtime::ensure_inherents_are_first(&Block::new( - header.clone(), - vec![xt2.clone(), xt2.clone(), in1.clone()] - ),), - Err(2) + assert_err!( + Executive::apply_extrinsics( + ExtrinsicInclusionMode::AllExtrinsics, + [xt2.clone(), xt2.clone(), in1.clone()].into_iter(), + |_, _| Ok(Ok(())) + ), + ExecutiveError::InvalidInherentPosition(2) ); - assert_eq!( - Runtime::ensure_inherents_are_first(&Block::new( - header.clone(), - vec![xt2.clone(), xt2.clone(), xt2.clone(), in2.clone()] - ),), - Err(3) + assert_err!( + Executive::apply_extrinsics( + ExtrinsicInclusionMode::AllExtrinsics, + [xt2.clone(), xt2.clone(), xt2.clone(), in2.clone()].into_iter(), + |_, _| Ok(Ok(())) + ), + ExecutiveError::InvalidInherentPosition(3) ); }); } @@ -1440,7 +1454,7 @@ fn 
callbacks_in_block_execution_works_inner(mbms_active: bool) {
	match header {
		Err(e) => {
-			let err = e.downcast::<&str>().unwrap();
+			let err = e.downcast::<String>().unwrap();
			assert_eq!(*err, "Only inherents are allowed in this block");
			assert!(
				MbmActive::get() && n_tx > 0,
diff --git a/substrate/frame/grandpa/src/equivocation.rs b/substrate/frame/grandpa/src/equivocation.rs
index 4ebdbc1eecd30..029540d2aeacf 100644
--- a/substrate/frame/grandpa/src/equivocation.rs
+++ b/substrate/frame/grandpa/src/equivocation.rs
@@ -122,7 +122,7 @@ impl<T, R, P, L>
	OffenceReportSystem<
		Option<T::AccountId>,
		(EquivocationProof<T::Hash, BlockNumberFor<T>>, T::KeyOwnerProof),
	> for EquivocationReportSystem<T, R, P, L>
where
-	T: Config + pallet_authorship::Config + frame_system::offchain::CreateInherent<Call<T>>,
+	T: Config + pallet_authorship::Config + frame_system::offchain::CreateBare<Call<T>>,
	R: ReportOffence<
		T::AccountId,
		P::IdentificationTuple,
@@ -144,7 +144,7 @@ where
			equivocation_proof: Box::new(equivocation_proof),
			key_owner_proof,
		};
-		let xt = T::create_inherent(call.into());
+		let xt = T::create_bare(call.into());
		let res = SubmitTransaction::<T, Call<T>>::submit_transaction(xt);
		match res {
			Ok(_) => info!(target: LOG_TARGET, "Submitted equivocation report"),
diff --git a/substrate/frame/grandpa/src/mock.rs b/substrate/frame/grandpa/src/mock.rs
index 034858c70e167..bf1b05ec11c36 100644
--- a/substrate/frame/grandpa/src/mock.rs
+++ b/substrate/frame/grandpa/src/mock.rs
@@ -80,11 +80,11 @@ where
	type Extrinsic = TestXt<RuntimeCall, ()>;
}

-impl<LocalCall> frame_system::offchain::CreateInherent<LocalCall> for Test
+impl<LocalCall> frame_system::offchain::CreateBare<LocalCall> for Test
where
	RuntimeCall: From<LocalCall>,
{
-	fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic {
+	fn create_bare(call: Self::RuntimeCall) -> Self::Extrinsic {
		TestXt::new_bare(call)
	}
}
diff --git a/substrate/frame/im-online/src/lib.rs b/substrate/frame/im-online/src/lib.rs
index 3eb9d5cc57b0a..6358fc600a36b 100644
--- a/substrate/frame/im-online/src/lib.rs
+++ b/substrate/frame/im-online/src/lib.rs
@@ -95,7 +95,7 @@ use frame_support::{
	BoundedSlice, WeakBoundedVec,
};
use frame_system::{
-	offchain::{CreateInherent, SubmitTransaction},
+	offchain::{CreateBare, SubmitTransaction},
	pallet_prelude::*,
};
pub use pallet::*;
@@ -261,7 +261,7 @@ pub mod pallet {
	pub struct Pallet<T>(_);

	#[pallet::config]
-	pub trait Config: CreateInherent<Call<Self>> + frame_system::Config {
+	pub trait Config: CreateBare<Call<Self>> + frame_system::Config {
		/// The identifier type for an authority.
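The grandpa and im-online hunks above are part of a mechanical `CreateInherent` -> `CreateBare` rename: the constructed extrinsic is a bare (unsigned, non-transactional) one, which the new name states directly. A hedged sketch of the pattern, using stand-in types rather than the real `frame_system::offchain` trait:

```rust
// Sketch of the `CreateBare` shape the diff migrates to: the runtime decides
// how a call is wrapped into a bare extrinsic, and pallets submit through it.
// `MockXt` and `Runtime` are stand-ins, not polkadot-sdk types.
trait CreateBare<Call> {
    type Extrinsic;
    fn create_bare(call: Call) -> Self::Extrinsic;
}

#[derive(Debug)]
struct MockXt<Call>(Call);

struct Runtime;

impl<Call: std::fmt::Debug> CreateBare<Call> for Runtime {
    type Extrinsic = MockXt<Call>;
    fn create_bare(call: Call) -> Self::Extrinsic {
        // Mirrors the `TestXt::new_bare(call)` bodies in the mock runtimes.
        MockXt(call)
    }
}

fn main() {
    let xt = <Runtime as CreateBare<&str>>::create_bare("heartbeat");
    println!("submitting bare extrinsic: {xt:?}");
}
```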
type AuthorityId: Member + Parameter @@ -643,7 +643,7 @@ impl Pallet { call, ); - let xt = T::create_inherent(call.into()); + let xt = T::create_bare(call.into()); SubmitTransaction::>::submit_transaction(xt) .map_err(|_| OffchainErr::SubmitTransaction)?; diff --git a/substrate/frame/im-online/src/mock.rs b/substrate/frame/im-online/src/mock.rs index 11f5e1548101f..80834c68a216b 100644 --- a/substrate/frame/im-online/src/mock.rs +++ b/substrate/frame/im-online/src/mock.rs @@ -197,11 +197,11 @@ where type Extrinsic = Extrinsic; } -impl frame_system::offchain::CreateInherent for Runtime +impl frame_system::offchain::CreateBare for Runtime where RuntimeCall: From, { - fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + fn create_bare(call: Self::RuntimeCall) -> Self::Extrinsic { Extrinsic::new_bare(call) } } diff --git a/substrate/frame/message-queue/src/lib.rs b/substrate/frame/message-queue/src/lib.rs index d8e6f673a4afb..ede4d825e937c 100644 --- a/substrate/frame/message-queue/src/lib.rs +++ b/substrate/frame/message-queue/src/lib.rs @@ -212,7 +212,7 @@ use frame_support::{ defensive, pallet_prelude::*, traits::{ - BatchFootprint, Defensive, DefensiveSaturating, DefensiveTruncateFrom, EnqueueMessage, + BatchesFootprints, Defensive, DefensiveSaturating, DefensiveTruncateFrom, EnqueueMessage, ExecuteOverweightError, Footprint, ProcessMessage, ProcessMessageError, QueueFootprint, QueueFootprintQuery, QueuePausedQuery, ServiceQueues, }, @@ -1830,10 +1830,10 @@ impl QueueFootprintQuery> for Pallet { origin: MessageOriginOf, msgs: impl Iterator>, total_pages_limit: u32, - ) -> Vec { - let mut batches_footprints = vec![]; + ) -> BatchesFootprints { + let mut batches_footprints = BatchesFootprints::default(); - let mut new_pages_count = 0; + let mut new_page = false; let mut total_pages_count = 0; let mut current_page_pos: usize = T::HeapSize::get().into() as usize; @@ -1842,31 +1842,27 @@ impl QueueFootprintQuery> for Pallet { total_pages_count = book.end - book.begin; if let Some(page) = Pages::::get(origin, book.end - 1) { current_page_pos = page.heap_pos(); + batches_footprints.first_page_pos = current_page_pos; } } - let mut msgs = msgs.enumerate().peekable(); - let mut total_msgs_size = 0; - while let Some((idx, msg)) = msgs.peek() { + let mut msgs = msgs.peekable(); + while let Some(msg) = msgs.peek() { if total_pages_count > total_pages_limit { return batches_footprints; } match Page::::can_append_message_at(current_page_pos, msg.len()) { Ok(new_pos) => { - total_msgs_size += msg.len(); current_page_pos = new_pos; - batches_footprints.push(BatchFootprint { - msgs_count: idx + 1, - size_in_bytes: total_msgs_size, - new_pages_count, - }); + batches_footprints.push(msg, new_page); + new_page = false; msgs.next(); }, Err(_) => { // Would not fit into the current page. // We start a new one and try again in the next iteration. 
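The message-queue hunk in progress here replaces the hand-rolled `Vec<BatchFootprint>` accumulation (manual `idx + 1`, running byte total, page counter) with a `BatchesFootprints` collector and a single `push(msg, new_page)` call. The real type lives in `frame_support::traits`; the toy below only models the cumulative bookkeeping implied by how the diff uses it, so the implementation details are a guess:

```rust
// Illustrative model of `BatchesFootprints`: each push extends the previous
// footprint's running totals instead of recomputing them at the call site.
#[derive(Clone, Debug, Default, PartialEq)]
struct BatchFootprint {
    msgs_count: usize,
    size_in_bytes: usize,
    new_pages_count: u32,
}

#[derive(Debug, Default)]
struct BatchesFootprints {
    // Heap position inside the first (possibly pre-existing) page.
    first_page_pos: usize,
    footprints: Vec<BatchFootprint>,
}

impl BatchesFootprints {
    fn push(&mut self, msg: &[u8], new_page: bool) {
        let mut fp = self.footprints.last().cloned().unwrap_or_default();
        fp.msgs_count += 1;
        fp.size_in_bytes += msg.len();
        if new_page {
            fp.new_pages_count += 1;
        }
        self.footprints.push(fp);
    }
}

fn main() {
    let mut b = BatchesFootprints::default();
    b.push(b"hello", true);  // first message opens a new page
    b.push(b"world", false); // second message fits on the same page
    assert_eq!(
        b.footprints[1],
        BatchFootprint { msgs_count: 2, size_in_bytes: 10, new_pages_count: 1 }
    );
}
```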
- new_pages_count += 1; + new_page = true; total_pages_count += 1; current_page_pos = 0; }, diff --git a/substrate/frame/message-queue/src/tests.rs b/substrate/frame/message-queue/src/tests.rs index f2247187e232b..4f7d9997f7ae4 100644 --- a/substrate/frame/message-queue/src/tests.rs +++ b/substrate/frame/message-queue/src/tests.rs @@ -21,7 +21,9 @@ use crate::{mock::*, *}; -use frame_support::{assert_noop, assert_ok, assert_storage_noop, StorageNoopGuard}; +use frame_support::{ + assert_noop, assert_ok, assert_storage_noop, traits::BatchFootprint, StorageNoopGuard, +}; use rand::{rngs::StdRng, Rng, SeedableRng}; use sp_crypto_hashing::blake2_256; @@ -2144,6 +2146,7 @@ fn check_get_batches_footprints( origin: MessageOrigin, sizes: &[u32], total_pages_limit: u32, + expected_first_page_pos: usize, expected_new_pages_counts: Vec, ) { let mut msgs = vec![]; @@ -2158,7 +2161,8 @@ fn check_get_batches_footprints( msgs.iter().map(|msg| msg.as_bounded_slice()), total_pages_limit, ); - assert_eq!(batches_footprints.len(), expected_new_pages_counts.len()); + assert_eq!(batches_footprints.first_page_pos, expected_first_page_pos); + assert_eq!(batches_footprints.footprints.len(), expected_new_pages_counts.len()); let mut total_size = 0; let mut expected_batches_footprint = vec![]; @@ -2170,7 +2174,7 @@ fn check_get_batches_footprints( new_pages_count: *expected_new_pages_count, }); } - assert_eq!(batches_footprints, expected_batches_footprint); + assert_eq!(batches_footprints.footprints, expected_batches_footprint); } #[test] @@ -2182,15 +2186,16 @@ fn get_batches_footprints_works() { build_and_execute::(|| { // Perform some checks with an empty queue - check_get_batches_footprints(Here, &[max_message_len], 0, vec![]); - check_get_batches_footprints(Here, &[max_message_len], 1, vec![1]); + check_get_batches_footprints(Here, &[max_message_len], 0, 0, vec![]); + check_get_batches_footprints(Here, &[max_message_len], 1, 0, vec![1]); - check_get_batches_footprints(Here, &[max_message_len, 1], 1, vec![1]); - check_get_batches_footprints(Here, &[max_message_len, 1], 2, vec![1, 2]); + check_get_batches_footprints(Here, &[max_message_len, 1], 1, 0, vec![1]); + check_get_batches_footprints(Here, &[max_message_len, 1], 2, 0, vec![1, 2]); check_get_batches_footprints( Here, &[max_message_len - 2 * header_size, 1, 1], 1, + 0, vec![1, 1], ); @@ -2198,19 +2203,20 @@ fn get_batches_footprints_works() { MessageQueue::enqueue_message(msg("A".repeat(max_message_len as usize).as_str()), Here); MessageQueue::enqueue_message(msg(""), Here); // Now, let's perform some more checks - check_get_batches_footprints(Here, &[max_message_len - header_size], 1, vec![]); - check_get_batches_footprints(Here, &[max_message_len - header_size], 2, vec![0]); + check_get_batches_footprints(Here, &[max_message_len - header_size], 1, 5, vec![]); + check_get_batches_footprints(Here, &[max_message_len - header_size], 2, 5, vec![0]); - check_get_batches_footprints(Here, &[max_message_len - header_size, 1], 2, vec![0]); - check_get_batches_footprints(Here, &[max_message_len - header_size, 1], 3, vec![0, 1]); + check_get_batches_footprints(Here, &[max_message_len - header_size, 1], 2, 5, vec![0]); + check_get_batches_footprints(Here, &[max_message_len - header_size, 1], 3, 5, vec![0, 1]); check_get_batches_footprints( Here, &[max_message_len - header_size, max_message_len - 2 * header_size, 1, 1], 3, + 5, vec![0, 1, 1], ); // Check that we can append messages to a different origin - check_get_batches_footprints(There, &[max_message_len], 1, 
vec![1]); + check_get_batches_footprints(There, &[max_message_len], 1, 0, vec![1]); }); } diff --git a/substrate/frame/mixnet/src/lib.rs b/substrate/frame/mixnet/src/lib.rs index 7e728b8fba3fd..401f42fc8b08f 100644 --- a/substrate/frame/mixnet/src/lib.rs +++ b/substrate/frame/mixnet/src/lib.rs @@ -179,7 +179,7 @@ pub mod pallet { pub struct Pallet(_); #[pallet::config] - pub trait Config: frame_system::Config + CreateInherent> { + pub trait Config: frame_system::Config + CreateBare> { /// The maximum number of authorities per session. #[pallet::constant] type MaxAuthorities: Get; @@ -532,7 +532,7 @@ impl Pallet { return false; }; let call = Call::register { registration, signature }; - let xt = T::create_inherent(call.into()); + let xt = T::create_bare(call.into()); match SubmitTransaction::>::submit_transaction(xt) { Ok(()) => true, Err(()) => { diff --git a/substrate/frame/offences/benchmarking/src/mock.rs b/substrate/frame/offences/benchmarking/src/mock.rs index 031cf81cef987..61a8cf53533a9 100644 --- a/substrate/frame/offences/benchmarking/src/mock.rs +++ b/substrate/frame/offences/benchmarking/src/mock.rs @@ -169,11 +169,11 @@ where type RuntimeCall = RuntimeCall; } -impl frame_system::offchain::CreateInherent for Test +impl frame_system::offchain::CreateBare for Test where RuntimeCall: From, { - fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + fn create_bare(call: Self::RuntimeCall) -> Self::Extrinsic { UncheckedExtrinsic::new_bare(call) } } diff --git a/substrate/frame/revive/rpc/src/apis/health_api.rs b/substrate/frame/revive/rpc/src/apis/health_api.rs index 509c88c14e022..7a9ab4ec02209 100644 --- a/substrate/frame/revive/rpc/src/apis/health_api.rs +++ b/substrate/frame/revive/rpc/src/apis/health_api.rs @@ -48,7 +48,10 @@ impl SystemHealthRpcServer for SystemHealthRpcServerImpl { tokio::try_join!(self.client.sync_state(), self.client.system_health())?; let latest = self.client.latest_block().await.number(); - if sync_state.current_block > latest { + + // Compare against `latest + 1` to avoid a false positive if the health check runs + // immediately after a new block is produced but before the cache updates. + if sync_state.current_block > latest + 1 { log::warn!( target: LOG_TARGET, "Client is out of sync. Current block: {}, latest cache block: {latest}", diff --git a/substrate/frame/revive/rpc/src/block_info_provider.rs b/substrate/frame/revive/rpc/src/block_info_provider.rs index 9c574add30df0..808cd8c8a16d6 100644 --- a/substrate/frame/revive/rpc/src/block_info_provider.rs +++ b/substrate/frame/revive/rpc/src/block_info_provider.rs @@ -106,24 +106,36 @@ impl BlockInfoProvider for SubxtBlockInfoProvider { &self, block_number: SubstrateBlockNumber, ) -> Result>, ClientError> { - if block_number == self.latest_block().await.number() { - return Ok(Some(self.latest_block().await)); - } else if block_number == self.latest_finalized_block().await.number() { - return Ok(Some(self.latest_finalized_block().await)); + let latest = self.latest_block().await; + if block_number == latest.number() { + return Ok(Some(latest)); + } + + let latest_finalized = self.latest_finalized_block().await; + if block_number == latest_finalized.number() { + return Ok(Some(latest_finalized)); } let Some(hash) = self.rpc.chain_get_block_hash(Some(block_number.into())).await? 
else { return Ok(None); }; - self.block_by_hash(&hash).await + match self.api.blocks().at(hash).await { + Ok(block) => Ok(Some(Arc::new(block))), + Err(subxt::Error::Block(subxt::error::BlockError::NotFound(_))) => Ok(None), + Err(err) => Err(err.into()), + } } async fn block_by_hash(&self, hash: &H256) -> Result>, ClientError> { - if hash == &self.latest_block().await.hash() { - return Ok(Some(self.latest_block().await)); - } else if hash == &self.latest_finalized_block().await.hash() { - return Ok(Some(self.latest_finalized_block().await)); + let latest = self.latest_block().await; + if hash == &latest.hash() { + return Ok(Some(latest)); + } + + let latest_finalized = self.latest_finalized_block().await; + if hash == &latest_finalized.hash() { + return Ok(Some(latest_finalized)); } match self.api.blocks().at(*hash).await { diff --git a/substrate/frame/revive/rpc/src/cli.rs b/substrate/frame/revive/rpc/src/cli.rs index 4f3113795c6e4..495bec1763e50 100644 --- a/substrate/frame/revive/rpc/src/cli.rs +++ b/substrate/frame/revive/rpc/src/cli.rs @@ -222,11 +222,15 @@ pub fn run(cmd: CliCommand) -> anyhow::Result<()> { let fut1 = client.subscribe_and_cache_new_blocks(SubscriptionType::BestBlocks); let fut2 = client.subscribe_and_cache_new_blocks(SubscriptionType::FinalizedBlocks); - if let Some(index_last_n_blocks) = index_last_n_blocks { + let res = if let Some(index_last_n_blocks) = index_last_n_blocks { let fut3 = client.subscribe_and_cache_blocks(index_last_n_blocks); - tokio::join!(fut1, fut2, fut3); + tokio::try_join!(fut1, fut2, fut3).map(|_| ()) } else { - tokio::join!(fut1, fut2); + tokio::try_join!(fut1, fut2).map(|_| ()) + }; + + if let Err(err) = res { + panic!("Block subscription task failed: {err:?}",) } }); diff --git a/substrate/frame/revive/rpc/src/client.rs b/substrate/frame/revive/rpc/src/client.rs index 2eb1fb913a327..a73310534fb46 100644 --- a/substrate/frame/revive/rpc/src/client.rs +++ b/substrate/frame/revive/rpc/src/client.rs @@ -28,7 +28,11 @@ use crate::{ BlockInfoProvider, BlockTag, FeeHistoryProvider, ReceiptProvider, SubxtBlockInfoProvider, TracerType, TransactionInfo, LOG_TARGET, }; -use jsonrpsee::types::{error::CALL_EXECUTION_FAILED_CODE, ErrorObjectOwned}; +use jsonrpsee::{ + core::traits::ToRpcParams, + rpc_params, + types::{error::CALL_EXECUTION_FAILED_CODE, ErrorObjectOwned}, +}; use pallet_revive::{ evm::{ decode_revert_reason, Block, BlockNumberOrTag, BlockNumberOrTagOrHash, FeeHistoryResult, @@ -49,11 +53,9 @@ use subxt::{ }, }, config::Header, - error::RpcError, Config, OnlineClient, }; use thiserror::Error; -use tokio::sync::RwLock; /// The substrate block type. pub type SubstrateBlock = subxt::blocks::Block>; @@ -67,9 +69,6 @@ pub type SubstrateBlockNumber = ::Number; /// The substrate block hash type. pub type SubstrateBlockHash = ::Hash; -/// Type alias for shared data. -pub type Shared = Arc>; - /// The runtime balance type. pub type Balance = u128; @@ -82,22 +81,6 @@ pub enum SubscriptionType { FinalizedBlocks, } -/// Unwrap the original `jsonrpsee::core::client::Error::Call` error. -fn unwrap_call_err(err: &subxt::error::RpcError) -> Option { - use subxt::backend::rpc::reconnecting_rpc_client; - match err { - subxt::error::RpcError::ClientError(err) => { - match err.downcast_ref::() { - Some(reconnecting_rpc_client::Error::RpcError( - jsonrpsee::core::client::Error::Call(err), - )) => Some(err.clone().into_owned()), - _ => None, - } - }, - _ => None, - } -} - /// The error type for the client. 
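On the health-check change a few hunks back (`sync_state.current_block > latest + 1`): the block cache legitimately lags the node by one block right after import, so the out-of-sync warning should only fire from a two-block gap onward. A toy version of that predicate, with made-up numbers:

```rust
// Relaxed sync check: tolerate the cache being one block behind the node so a
// healthy node is not flagged right after a new block is produced.
fn is_out_of_sync(current_block: u64, latest_cached: u64) -> bool {
    // Strictly greater than `latest + 1`: a one-block lag counts as healthy.
    current_block > latest_cached + 1
}

fn main() {
    assert!(!is_out_of_sync(101, 100)); // cache one block behind: healthy
    assert!(is_out_of_sync(102, 100)); // two or more behind: out of sync
}
```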
#[derive(Error, Debug)]
pub enum ClientError {
@@ -107,9 +90,8 @@ pub enum ClientError {
	/// A [`subxt::Error`] wrapper error.
	#[error(transparent)]
	SubxtError(#[from] subxt::Error),
-	/// A [`RpcError`] wrapper error.
	#[error(transparent)]
-	RpcError(#[from] RpcError),
+	RpcError(#[from] subxt::ext::subxt_rpcs::Error),
	/// A [`sqlx::Error`] wrapper error.
	#[error(transparent)]
	SqlxError(#[from] sqlx::Error),
@@ -148,16 +130,11 @@ const REVERT_CODE: i32 = 3;
impl From<ClientError> for ErrorObjectOwned {
	fn from(err: ClientError) -> Self {
		match err {
-			ClientError::SubxtError(subxt::Error::Rpc(err)) | ClientError::RpcError(err) => {
-				if let Some(err) = unwrap_call_err(&err) {
-					return err;
-				}
-				ErrorObjectOwned::owned::<Vec<u8>>(
-					CALL_EXECUTION_FAILED_CODE,
-					err.to_string(),
-					None,
-				)
-			},
+			ClientError::SubxtError(subxt::Error::Rpc(subxt::error::RpcError::ClientError(
+				subxt::ext::subxt_rpcs::Error::User(err),
+			))) |
+			ClientError::RpcError(subxt::ext::subxt_rpcs::Error::User(err)) =>
+				ErrorObjectOwned::owned::<Vec<u8>>(err.code, err.message, None),
			ClientError::TransactError(EthTransactError::Data(data)) => {
				let msg = match decode_revert_reason(&data) {
					Some(reason) => format!("execution reverted: {reason}"),
@@ -318,12 +295,12 @@ impl Client {
				if err.is_disconnected_will_reconnect() {
					log::warn!(
						target: LOG_TARGET,
-						"The RPC connection was lost and we may have missed a few blocks"
+						"The RPC connection was lost and we may have missed a few blocks ({subscription_type:?}): {err:?}"
					);
					continue;
				}

-				log::error!(target: LOG_TARGET, "Failed to fetch block: {err:?}");
+				log::error!(target: LOG_TARGET, "Failed to fetch block ({subscription_type:?}): {err:?}");
				return Err(err.into());
			},
		};
@@ -342,44 +319,41 @@ impl Client {
	}

	/// Start the block subscription, and populate the block cache.
-	pub async fn subscribe_and_cache_new_blocks(&self, subscription_type: SubscriptionType) {
+	pub async fn subscribe_and_cache_new_blocks(
+		&self,
+		subscription_type: SubscriptionType,
+	) -> Result<(), ClientError> {
		log::info!(target: LOG_TARGET, "🔌 Subscribing to new blocks ({subscription_type:?})");
-		let res = self
-			.subscribe_new_blocks(subscription_type, |block| async {
-				let (signed_txs, receipts): (Vec<_>, Vec<_>) =
-					self.receipt_provider.insert_block_receipts(&block).await?.into_iter().unzip();
-
-				let evm_block =
-					self.evm_block_from_receipts(&block, &receipts, signed_txs, false).await;
-				self.block_provider.update_latest(block, subscription_type).await;
-
-				self.fee_history_provider.update_fee_history(&evm_block, &receipts).await;
-				Ok(())
-			})
-			.await;
-
-		if let Err(err) = res {
-			log::error!(target: LOG_TARGET, "Block subscription error: {err:?}");
-		}
+		self.subscribe_new_blocks(subscription_type, |block| async {
+			let (signed_txs, receipts): (Vec<_>, Vec<_>) =
+				self.receipt_provider.insert_block_receipts(&block).await?.into_iter().unzip();
+
+			let evm_block =
+				self.evm_block_from_receipts(&block, &receipts, signed_txs, false).await;
+			self.block_provider.update_latest(block, subscription_type).await;
+
+			self.fee_history_provider.update_fee_history(&evm_block, &receipts).await;
+			Ok(())
+		})
+		.await
	}

	/// Cache old blocks up to the given block number.
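With `unwrap_call_err` gone, the client maps server-reported call errors by matching the nested `subxt_rpcs::Error::User` shape directly and forwarding its code and message; everything else keeps falling back to `CALL_EXECUTION_FAILED_CODE`. A simplified model of that mapping, with stand-in types and the JSON-RPC error object reduced to a `(code, message)` pair:

```rust
// Toy version of the new error mapping: structural matching instead of
// downcasting through client internals. `UserError` stands in for the
// user-error payload carried by `subxt_rpcs`.
struct UserError {
    code: i32,
    message: String,
}

enum ClientError {
    Rpc(UserError),
    Other(String),
}

const CALL_EXECUTION_FAILED_CODE: i32 = -32000;

fn to_rpc_error(err: ClientError) -> (i32, String) {
    match err {
        // Forward the code the node already chose rather than re-wrapping
        // everything under CALL_EXECUTION_FAILED_CODE.
        ClientError::Rpc(UserError { code, message }) => (code, message),
        ClientError::Other(msg) => (CALL_EXECUTION_FAILED_CODE, msg),
    }
}

fn main() {
    let (code, msg) =
        to_rpc_error(ClientError::Rpc(UserError { code: 3, message: "reverted".into() }));
    assert_eq!((code, msg.as_str()), (3, "reverted"));
}
```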
- pub async fn subscribe_and_cache_blocks(&self, index_last_n_blocks: SubstrateBlockNumber) { + pub async fn subscribe_and_cache_blocks( + &self, + index_last_n_blocks: SubstrateBlockNumber, + ) -> Result<(), ClientError> { let last = self.latest_block().await.number().saturating_sub(1); let range = last.saturating_sub(index_last_n_blocks)..last; log::info!(target: LOG_TARGET, "🗄️ Indexing past blocks in range {range:?}"); - let res = self - .subscribe_past_blocks(range, |block| async move { - self.receipt_provider.insert_block_receipts(&block).await?; - Ok(()) - }) - .await; - - if let Err(err) = res { - log::error!(target: LOG_TARGET, "Past Block subscription error: {err:?}"); - } else { - log::info!(target: LOG_TARGET, "🗄️ Finished indexing past blocks"); - } + self.subscribe_past_blocks(range, |block| async move { + self.receipt_provider.insert_block_receipts(&block).await?; + Ok(()) + }) + .await?; + + log::info!(target: LOG_TARGET, "🗄️ Finished indexing past blocks"); + Ok(()) } /// Get the block hash for the given block number or tag. @@ -388,8 +362,8 @@ impl Client { at: BlockNumberOrTagOrHash, ) -> Result { match at { - BlockNumberOrTagOrHash::H256(hash) => Ok(hash), - BlockNumberOrTagOrHash::U256(block_number) => { + BlockNumberOrTagOrHash::BlockHash(hash) => Ok(hash), + BlockNumberOrTagOrHash::BlockNumber(block_number) => { let n: SubstrateBlockNumber = (block_number).try_into().map_err(|_| ClientError::ConversionFailed)?; let hash = self.get_block_hash(n).await?.ok_or(ClientError::BlockNotFound)?; @@ -557,11 +531,8 @@ impl Client { >, ClientError, > { - let res = self - .rpc_client - .request("chain_getBlock".to_string(), subxt::rpc_params![block_hash].build()) - .await - .unwrap(); + let params = rpc_params![block_hash].to_rpc_params().unwrap_or_default(); + let res = self.rpc_client.request("chain_getBlock".to_string(), params).await.unwrap(); let signed_block: sp_runtime::generic::SignedBlock< sp_runtime::generic::Block< diff --git a/substrate/frame/revive/rpc/src/lib.rs b/substrate/frame/revive/rpc/src/lib.rs index cffdccb668f44..3f72de15a132a 100644 --- a/substrate/frame/revive/rpc/src/lib.rs +++ b/substrate/frame/revive/rpc/src/lib.rs @@ -322,7 +322,10 @@ impl EthRpcServer for EthRpcServerImpl { ) -> RpcResult> { let Some(receipt) = self .client - .receipt_by_hash_and_index(&block_hash, transaction_index.as_usize()) + .receipt_by_hash_and_index( + &block_hash, + transaction_index.try_into().map_err(|_| EthRpcError::ConversionError)?, + ) .await else { return Ok(None); diff --git a/substrate/frame/revive/rpc/src/subxt_client.rs b/substrate/frame/revive/rpc/src/subxt_client.rs index cb1f1e53e0bb5..08d9343797e31 100644 --- a/substrate/frame/revive/rpc/src/subxt_client.rs +++ b/substrate/frame/revive/rpc/src/subxt_client.rs @@ -17,7 +17,7 @@ //! The generated subxt client. //! Generated against a substrate chain configured with [`pallet_revive`] using: //! subxt metadata --url ws://localhost:9944 -o rpc/revive_chain.scale -use subxt::config::{signed_extensions, Config, PolkadotConfig}; +pub use subxt::config::PolkadotConfig as SrcChainConfig; #[subxt::subxt( runtime_metadata_path = "revive_chain.metadata", @@ -66,40 +66,3 @@ use subxt::config::{signed_extensions, Config, PolkadotConfig}; )] mod src_chain {} pub use src_chain::*; - -/// The configuration for the source chain. 
-pub enum SrcChainConfig {} -impl Config for SrcChainConfig { - type Hash = sp_core::H256; - type AccountId = ::AccountId; - type Address = ::Address; - type Signature = ::Signature; - type Hasher = BlakeTwo256; - type Header = subxt::config::substrate::SubstrateHeader; - type AssetId = ::AssetId; - type ExtrinsicParams = signed_extensions::AnyOf< - Self, - ( - signed_extensions::CheckSpecVersion, - signed_extensions::CheckTxVersion, - signed_extensions::CheckNonce, - signed_extensions::CheckGenesis, - signed_extensions::CheckMortality, - signed_extensions::ChargeAssetTxPayment, - signed_extensions::ChargeTransactionPayment, - signed_extensions::CheckMetadataHash, - ), - >; -} - -/// A type that can hash values using the blaks2_256 algorithm. -/// TODO remove once subxt is updated -#[derive(Debug, Clone, Copy, PartialEq, Eq, codec::Encode)] -pub struct BlakeTwo256; - -impl subxt::config::Hasher for BlakeTwo256 { - type Output = sp_core::H256; - fn hash(s: &[u8]) -> Self::Output { - sp_crypto_hashing::blake2_256(s).into() - } -} diff --git a/substrate/frame/revive/src/call_builder.rs b/substrate/frame/revive/src/call_builder.rs index eb882378e3d2b..8fe0d1a6e77fe 100644 --- a/substrate/frame/revive/src/call_builder.rs +++ b/substrate/frame/revive/src/call_builder.rs @@ -33,7 +33,8 @@ use crate::{ transient_storage::MeterEntry, wasm::{PreparedCall, Runtime}, BalanceOf, Code, CodeInfoOf, Config, ContractInfo, ContractInfoOf, DepositLimit, Error, - GasMeter, MomentOf, Origin, Pallet as Contracts, PristineCode, WasmBlob, Weight, + GasMeter, MomentOf, NonceAlreadyIncremented, Origin, Pallet as Contracts, PristineCode, + WasmBlob, Weight, }; use alloc::{vec, vec::Vec}; use frame_support::{storage::child, traits::fungible::Mutate}; @@ -82,7 +83,7 @@ where let dest = contract.account_id.clone(); let origin = Origin::from_account_id(contract.caller.clone()); - let storage_meter = Meter::new(&origin, default_deposit_limit::(), 0u32.into()).unwrap(); + let storage_meter = Meter::new(default_deposit_limit::()); #[cfg(feature = "runtime-benchmarks")] { @@ -115,7 +116,7 @@ where /// Set the meter's storage deposit limit. pub fn set_storage_deposit_limit(&mut self, balance: BalanceOf) { - self.storage_meter = Meter::new(&self.origin, balance, 0u32.into()).unwrap(); + self.storage_meter = Meter::new(balance); } /// Set the call's origin. 
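The `call_builder.rs` hunks above show the storage-meter API change rippling through revive: `Meter::new` no longer takes an origin and a transferred value and can no longer fail; it just records the deposit limit. A toy surface of that change (the field names and the `try_consume` helper are mine, purely for illustration):

```rust
// Sketch of the narrowed storage-meter constructor. The old signature was
// roughly `new(origin, limit, value) -> Result<Self, _>`; the new one is
// infallible and limit-only.
struct Meter {
    limit: u128,
    consumed: u128,
}

impl Meter {
    fn new(limit: u128) -> Self {
        Self { limit, consumed: 0 }
    }

    fn try_consume(&mut self, amount: u128) -> Result<(), &'static str> {
        let next = self.consumed.saturating_add(amount);
        if next > self.limit {
            return Err("storage deposit limit exhausted");
        }
        self.consumed = next;
        Ok(())
    }
}

fn main() {
    let mut meter = Meter::new(100);
    assert!(meter.try_consume(60).is_ok());
    assert!(meter.try_consume(60).is_err());
}
```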
@@ -270,6 +271,7 @@ where Code::Upload(module.code), data, salt, + NonceAlreadyIncremented::No, ); let address = outcome.result?.addr; diff --git a/substrate/frame/revive/src/evm/api/rpc_types.rs b/substrate/frame/revive/src/evm/api/rpc_types.rs index 5f6d81f701468..195c1c5fd4623 100644 --- a/substrate/frame/revive/src/evm/api/rpc_types.rs +++ b/substrate/frame/revive/src/evm/api/rpc_types.rs @@ -22,7 +22,7 @@ use sp_core::{H160, U256}; impl From for BlockNumberOrTagOrHash { fn from(b: BlockNumberOrTag) -> Self { match b { - BlockNumberOrTag::U256(n) => BlockNumberOrTagOrHash::U256(n), + BlockNumberOrTag::U256(n) => BlockNumberOrTagOrHash::BlockNumber(n), BlockNumberOrTag::BlockTag(t) => BlockNumberOrTagOrHash::BlockTag(t), } } @@ -142,6 +142,25 @@ fn can_deserialize_input_or_data_field_from_generic_transaction() { ); } +#[test] +fn test_block_number_or_tag_or_hash_deserialization() { + let val: BlockNumberOrTagOrHash = serde_json::from_str("\"latest\"").unwrap(); + assert_eq!(val, BlockTag::Latest.into()); + + for s in ["\"0x1a\"", r#"{ "blockNumber": "0x1a" }"#] { + let val: BlockNumberOrTagOrHash = serde_json::from_str(s).unwrap(); + assert!(matches!(val, BlockNumberOrTagOrHash::BlockNumber(n) if n == 26u64.into())); + } + + for s in [ + "\"0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\"", + r#"{ "blockHash": "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" }"#, + ] { + let val: BlockNumberOrTagOrHash = serde_json::from_str(s).unwrap(); + assert_eq!(val, BlockNumberOrTagOrHash::BlockHash(H256([0xaau8; 32]))); + } +} + #[test] fn logs_bloom_works() { let receipt: ReceiptInfo = serde_json::from_str( diff --git a/substrate/frame/revive/src/evm/api/rpc_types_gen.rs b/substrate/frame/revive/src/evm/api/rpc_types_gen.rs index e4bae8778f13b..549dde9dea954 100644 --- a/substrate/frame/revive/src/evm/api/rpc_types_gen.rs +++ b/substrate/frame/revive/src/evm/api/rpc_types_gen.rs @@ -23,7 +23,7 @@ use codec::{Decode, Encode}; use derive_more::{From, TryInto}; pub use ethereum_types::*; use scale_info::TypeInfo; -use serde::{Deserialize, Deserializer, Serialize}; +use serde::{de::Error, Deserialize, Deserializer, Serialize}; /// Input of a `GenericTransaction` #[derive( @@ -74,9 +74,7 @@ fn deserialize_input_or_data<'d, D: Deserializer<'d>>(d: D) -> Result Self { @@ -193,13 +189,13 @@ impl<'a> serde::Deserialize<'a> for BlockNumberOrTagOrHash { #[serde(untagged)] pub enum BlockNumberOrTagOrHashWithAlias { BlockTag(BlockTag), - U256(U256), - BlockNumber { + BlockNumber(U64), + NestedBlockNumber { #[serde(rename = "blockNumber")] block_number: U256, }, - H256(H256), - BlockHash { + BlockHash(H256), + NestedBlockHash { #[serde(rename = "blockHash")] block_hash: H256, }, @@ -208,20 +204,23 @@ impl<'a> serde::Deserialize<'a> for BlockNumberOrTagOrHash { let r = BlockNumberOrTagOrHashWithAlias::deserialize(de)?; Ok(match r { BlockNumberOrTagOrHashWithAlias::BlockTag(val) => BlockNumberOrTagOrHash::BlockTag(val), - BlockNumberOrTagOrHashWithAlias::U256(val) | - BlockNumberOrTagOrHashWithAlias::BlockNumber { block_number: val } => - BlockNumberOrTagOrHash::U256(val), - BlockNumberOrTagOrHashWithAlias::H256(val) | - BlockNumberOrTagOrHashWithAlias::BlockHash { block_hash: val } => - BlockNumberOrTagOrHash::H256(val), + BlockNumberOrTagOrHashWithAlias::BlockNumber(val) => { + let val: u64 = + val.try_into().map_err(|_| D::Error::custom("u64 conversion failed"))?; + BlockNumberOrTagOrHash::BlockNumber(val.into()) + }, + + 
BlockNumberOrTagOrHashWithAlias::NestedBlockNumber { block_number: val } => + BlockNumberOrTagOrHash::BlockNumber(val), + BlockNumberOrTagOrHashWithAlias::BlockHash(val) | + BlockNumberOrTagOrHashWithAlias::NestedBlockHash { block_hash: val } => + BlockNumberOrTagOrHash::BlockHash(val), }) } } /// filter -#[derive( - Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, -)] +#[derive(Debug, Default, Clone, Serialize, Deserialize, Eq, PartialEq)] pub struct Filter { /// Address(es) pub address: Option, @@ -240,9 +239,7 @@ pub struct Filter { } /// Filter results -#[derive( - Debug, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, From, TryInto, Eq, PartialEq, -)] +#[derive(Debug, Clone, Serialize, Deserialize, From, TryInto, Eq, PartialEq)] #[serde(untagged)] pub enum FilterResults { /// new block or transaction hashes @@ -317,9 +314,7 @@ pub struct GenericTransaction { } /// Receipt information -#[derive( - Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, -)] +#[derive(Debug, Default, Clone, Serialize, Deserialize, Eq, PartialEq)] pub struct ReceiptInfo { /// blob gas price /// The actual value per gas deducted from the sender's account for blob gas. Only specified @@ -387,9 +382,7 @@ pub struct ReceiptInfo { } /// Syncing status -#[derive( - Debug, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, From, TryInto, Eq, PartialEq, -)] +#[derive(Debug, Clone, Serialize, Deserialize, From, TryInto, Eq, PartialEq)] #[serde(untagged)] pub enum SyncingStatus { /// Syncing progress @@ -405,9 +398,7 @@ impl Default for SyncingStatus { } /// Transaction information -#[derive( - Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, -)] +#[derive(Debug, Default, Clone, Serialize, Deserialize, Eq, PartialEq)] pub struct TransactionInfo { /// block hash #[serde(rename = "blockHash")] @@ -426,9 +417,7 @@ pub struct TransactionInfo { pub transaction_signed: TransactionSigned, } -#[derive( - Debug, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, From, TryInto, Eq, PartialEq, -)] +#[derive(Debug, Clone, Serialize, Deserialize, From, TryInto, Eq, PartialEq)] #[serde(untagged)] pub enum TransactionUnsigned { Transaction4844Unsigned(Transaction4844Unsigned), @@ -446,9 +435,7 @@ impl Default for TransactionUnsigned { pub type AccessList = Vec; /// Address(es) -#[derive( - Debug, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, From, TryInto, Eq, PartialEq, -)] +#[derive(Debug, Clone, Serialize, Deserialize, From, TryInto, Eq, PartialEq)] #[serde(untagged)] pub enum AddressOrAddresses { /// Address @@ -475,9 +462,7 @@ pub type Addresses = Vec
; /// and containing the set of transactions usually taken from local mempool. Before the merge /// transition is finalized, any call querying for `finalized` or `safe` block MUST be responded to /// with `-39001: Unknown block` error -#[derive( - Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, -)] +#[derive(Debug, Default, Clone, Serialize, Deserialize, Eq, PartialEq)] pub enum BlockTag { #[serde(rename = "earliest")] Earliest, @@ -495,9 +480,7 @@ pub enum BlockTag { /// Filter Topics pub type FilterTopics = Vec; -#[derive( - Debug, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, From, TryInto, Eq, PartialEq, -)] +#[derive(Debug, Clone, Serialize, Deserialize, From, TryInto, Eq, PartialEq)] #[serde(untagged)] pub enum HashesOrTransactionInfos { /// Transaction hashes @@ -512,9 +495,7 @@ impl Default for HashesOrTransactionInfos { } /// log -#[derive( - Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, -)] +#[derive(Debug, Default, Clone, Serialize, Deserialize, Eq, PartialEq)] pub struct Log { /// address pub address: Address, @@ -545,9 +526,7 @@ pub struct Log { } /// Syncing progress -#[derive( - Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, -)] +#[derive(Debug, Default, Clone, Serialize, Deserialize, Eq, PartialEq)] pub struct SyncingProgress { /// Current block #[serde(rename = "currentBlock", skip_serializing_if = "Option::is_none")] @@ -561,9 +540,7 @@ pub struct SyncingProgress { } /// EIP-1559 transaction. -#[derive( - Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, -)] +#[derive(Debug, Default, Clone, Serialize, Deserialize, Eq, PartialEq)] pub struct Transaction1559Unsigned { /// accessList /// EIP-2930 access list @@ -603,9 +580,7 @@ pub struct Transaction1559Unsigned { } /// EIP-2930 transaction. -#[derive( - Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, -)] +#[derive(Debug, Default, Clone, Serialize, Deserialize, Eq, PartialEq)] pub struct Transaction2930Unsigned { /// accessList /// EIP-2930 access list @@ -634,9 +609,7 @@ pub struct Transaction2930Unsigned { } /// EIP-4844 transaction. -#[derive( - Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, -)] +#[derive(Debug, Default, Clone, Serialize, Deserialize, Eq, PartialEq)] pub struct Transaction4844Unsigned { /// accessList /// EIP-2930 access list @@ -678,9 +651,7 @@ pub struct Transaction4844Unsigned { } /// Legacy transaction. -#[derive( - Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, -)] +#[derive(Debug, Default, Clone, Serialize, Deserialize, Eq, PartialEq)] pub struct TransactionLegacyUnsigned { /// chainId /// Chain ID that this transaction is valid on. 
@@ -704,9 +675,7 @@ pub struct TransactionLegacyUnsigned { pub value: U256, } -#[derive( - Debug, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, From, TryInto, Eq, PartialEq, -)] +#[derive(Debug, Clone, Serialize, Deserialize, From, TryInto, Eq, PartialEq)] #[serde(untagged)] pub enum TransactionSigned { Transaction4844Signed(Transaction4844Signed), @@ -721,9 +690,7 @@ impl Default for TransactionSigned { } /// Validator withdrawal -#[derive( - Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, -)] +#[derive(Debug, Default, Clone, Serialize, Deserialize, Eq, PartialEq)] pub struct Withdrawal { /// recipient address for withdrawal value pub address: Address, @@ -747,9 +714,7 @@ pub struct AccessListEntry { } /// Filter Topic List Entry -#[derive( - Debug, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, From, TryInto, Eq, PartialEq, -)] +#[derive(Debug, Clone, Serialize, Deserialize, From, TryInto, Eq, PartialEq)] #[serde(untagged)] pub enum FilterTopic { /// Single Topic Match @@ -764,9 +729,7 @@ impl Default for FilterTopic { } /// Signed 1559 Transaction -#[derive( - Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, -)] +#[derive(Debug, Default, Clone, Serialize, Deserialize, Eq, PartialEq)] pub struct Transaction1559Signed { #[serde(flatten)] pub transaction_1559_unsigned: Transaction1559Unsigned, @@ -786,9 +749,7 @@ pub struct Transaction1559Signed { } /// Signed 2930 Transaction -#[derive( - Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, -)] +#[derive(Debug, Default, Clone, Serialize, Deserialize, Eq, PartialEq)] pub struct Transaction2930Signed { #[serde(flatten)] pub transaction_2930_unsigned: Transaction2930Unsigned, @@ -808,9 +769,7 @@ pub struct Transaction2930Signed { } /// Signed 4844 Transaction -#[derive( - Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, -)] +#[derive(Debug, Default, Clone, Serialize, Deserialize, Eq, PartialEq)] pub struct Transaction4844Signed { #[serde(flatten)] pub transaction_4844_unsigned: Transaction4844Unsigned, @@ -825,9 +784,7 @@ pub struct Transaction4844Signed { } /// Signed Legacy Transaction -#[derive( - Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, -)] +#[derive(Debug, Default, Clone, Serialize, Deserialize, Eq, PartialEq)] pub struct TransactionLegacySigned { #[serde(flatten)] pub transaction_legacy_unsigned: TransactionLegacyUnsigned, diff --git a/substrate/frame/revive/src/evm/runtime.rs b/substrate/frame/revive/src/evm/runtime.rs index 6a8a169c102d0..1620316578389 100644 --- a/substrate/frame/revive/src/evm/runtime.rs +++ b/substrate/frame/revive/src/evm/runtime.rs @@ -27,7 +27,7 @@ use alloc::vec::Vec; use codec::{Decode, DecodeWithMemTracking, Encode}; use frame_support::{ dispatch::{DispatchInfo, GetDispatchInfo}, - traits::{ExtrinsicCall, InherentBuilder, SignedTransactionBuilder}, + traits::{InherentBuilder, IsSubType, SignedTransactionBuilder}, }; use pallet_transaction_payment::OnChargeTransaction; use scale_info::{StaticTypeInfo, TypeInfo}; @@ -35,7 +35,7 @@ use sp_core::{Get, H256, U256}; use sp_runtime::{ generic::{self, CheckedExtrinsic, ExtrinsicFormat}, traits::{ - self, Checkable, Dispatchable, ExtrinsicLike, ExtrinsicMetadata, IdentifyAccount, Member, + Checkable, Dispatchable, ExtrinsicCall, ExtrinsicLike, ExtrinsicMetadata, TransactionExtension, }, transaction_validity::{InvalidTransaction, 
TransactionValidityError}, @@ -118,7 +118,6 @@ impl ExtrinsicCall } } -use sp_runtime::traits::MaybeDisplay; type OnChargeTransactionBalanceOf = <::OnChargeTransaction as OnChargeTransaction>::Balance; impl Checkable @@ -131,27 +130,24 @@ where OnChargeTransactionBalanceOf: Into>, BalanceOf: Into + TryFrom, MomentOf: Into, - CallOf: From> + TryInto>, + CallOf: From> + IsSubType>, ::Hash: frame_support::traits::IsType, // required by Checkable for `generic::UncheckedExtrinsic` - LookupSource: Member + MaybeDisplay, - CallOf: Encode + Member + Dispatchable, - Signature: Member + traits::Verify, - ::Signer: IdentifyAccount>, - E::Extension: Encode + TransactionExtension>, - Lookup: traits::Lookup>, + generic::UncheckedExtrinsic, Signature, E::Extension>: + Checkable< + Lookup, + Checked = CheckedExtrinsic, CallOf, E::Extension>, + >, { type Checked = CheckedExtrinsic, CallOf, E::Extension>; fn check(self, lookup: &Lookup) -> Result { if !self.0.is_signed() { - if let Ok(call) = self.0.function.clone().try_into() { - if let crate::Call::eth_transact { payload } = call { - let checked = E::try_into_checked_extrinsic(payload, self.encoded_size())?; - return Ok(checked) - }; - } + if let Some(crate::Call::eth_transact { payload }) = self.0.function.is_sub_type() { + let checked = E::try_into_checked_extrinsic(payload.to_vec(), self.encoded_size())?; + return Ok(checked) + }; } self.0.check(lookup) } @@ -417,7 +413,7 @@ mod test { use frame_support::{error::LookupError, traits::fungible::Mutate}; use pallet_revive_fixtures::compile_module; use sp_runtime::{ - traits::{Checkable, DispatchTransaction}, + traits::{self, Checkable, DispatchTransaction}, MultiAddress, MultiSignature, }; type AccountIdOf = ::AccountId; diff --git a/substrate/frame/revive/src/exec.rs b/substrate/frame/revive/src/exec.rs index 25a1d1ebc2146..9161d7f39013d 100644 --- a/substrate/frame/revive/src/exec.rs +++ b/substrate/frame/revive/src/exec.rs @@ -26,7 +26,7 @@ use crate::{ tracing::if_tracing, transient_storage::TransientStorage, BalanceOf, CodeInfo, CodeInfoOf, Config, ContractInfo, ContractInfoOf, ConversionPrecision, - Error, Event, ImmutableData, ImmutableDataOf, Pallet as Contracts, + Error, Event, ImmutableData, ImmutableDataOf, NonceAlreadyIncremented, Pallet as Contracts, }; use alloc::vec::Vec; use core::{fmt::Debug, marker::PhantomData, mem}; @@ -614,6 +614,7 @@ enum FrameArgs<'a, T: Config, E> { salt: Option<&'a [u8; 32]>, /// The input data is used in the contract address derivation of the new contract. input_data: &'a [u8], + nonce_already_incremented: NonceAlreadyIncremented, }, } @@ -807,6 +808,7 @@ where input_data: Vec, salt: Option<&[u8; 32]>, skip_transfer: bool, + nonce_already_incremented: NonceAlreadyIncremented, ) -> Result<(H160, ExecReturnValue), ExecError> { let (mut stack, executable) = Self::new( FrameArgs::Instantiate { @@ -814,6 +816,7 @@ where executable, salt, input_data: input_data.as_ref(), + nonce_already_incremented, }, Origin::from_account_id(origin), gas_meter, @@ -874,7 +877,6 @@ where storage_meter, BalanceOf::::max_value(), false, - true, )? 
else { return Ok(None); @@ -908,7 +910,6 @@ where storage_meter: &mut storage::meter::GenericMeter, deposit_limit: BalanceOf, read_only: bool, - origin_is_caller: bool, ) -> Result, ExecutableOrPrecompile)>, ExecError> { let (account_id, contract_info, executable, delegate, entry_point) = match frame_args { FrameArgs::Call { dest, cached_info, delegated_call } => { @@ -972,7 +973,13 @@ where (dest, contract, executable, delegated_call, ExportedFunction::Call) }, - FrameArgs::Instantiate { sender, executable, salt, input_data } => { + FrameArgs::Instantiate { + sender, + executable, + salt, + input_data, + nonce_already_incremented, + } => { let deployer = T::AddressMapper::to_address(&sender); let account_nonce = >::account_nonce(&sender); let address = if let Some(salt) = salt { @@ -983,7 +990,7 @@ where &deployer, // the Nonce from the origin has been incremented pre-dispatch, so we // need to subtract 1 to get the nonce at the time of the call. - if origin_is_caller { + if matches!(nonce_already_incremented, NonceAlreadyIncremented::Yes) { account_nonce.saturating_sub(1u32.into()).saturated_into() } else { account_nonce.saturated_into() @@ -1059,7 +1066,6 @@ where nested_storage, deposit_limit, read_only, - false, )? { self.frames.try_push(frame).map_err(|_| Error::::MaxCallDepthReached)?; Ok(Some(executable)) @@ -1122,7 +1128,8 @@ where if self.skip_transfer { T::Currency::set_balance(account_id, ed); } else { - T::Currency::transfer(origin, account_id, ed, Preservation::Preserve)?; + T::Currency::transfer(origin, account_id, ed, Preservation::Preserve) + .map_err(|_| >::StorageDepositNotEnoughFunds)?; } // A consumer is added at account creation and removed it on termination, otherwise @@ -1374,10 +1381,13 @@ where let ed = ::Currency::minimum_balance(); with_transaction(|| -> TransactionOutcome { match T::Currency::transfer(origin, to, ed, Preservation::Preserve) - .and_then(|_| T::Currency::transfer(from, to, value, Preservation::Preserve)) - { + .map_err(|_| Error::::StorageDepositNotEnoughFunds.into()) + .and_then(|_| { + T::Currency::transfer(from, to, value, Preservation::Preserve) + .map_err(|_| Error::::TransferFailed.into()) + }) { Ok(_) => TransactionOutcome::Commit(Ok(Default::default())), - Err(_) => TransactionOutcome::Rollback(Err(Error::::TransferFailed.into())), + Err(err) => TransactionOutcome::Rollback(Err(err)), } }) } @@ -1665,6 +1675,7 @@ where executable, salt, input_data: input_data.as_ref(), + nonce_already_incremented: NonceAlreadyIncremented::No, }, value.try_into().map_err(|_| Error::::BalanceConversionFailed)?, gas_limit, diff --git a/substrate/frame/revive/src/exec/tests.rs b/substrate/frame/revive/src/exec/tests.rs index b04a9360aee78..ef2b165fd337b 100644 --- a/substrate/frame/revive/src/exec/tests.rs +++ b/substrate/frame/revive/src/exec/tests.rs @@ -199,7 +199,7 @@ fn it_works() { static TestData: Vec = vec![0]; } - let value = Default::default(); + let value = 0; let mut gas_meter = GasMeter::::new(GAS_LIMIT); let exec_ch = MockLoader::insert(Call, |_ctx, _executable| { TestData::mutate(|data| data.push(1)); @@ -208,8 +208,7 @@ fn it_works() { ExtBuilder::default().build().execute_with(|| { place_contract(&BOB, exec_ch); - let mut storage_meter = - storage::meter::Meter::new(&Origin::from_account_id(ALICE), 0, value).unwrap(); + let mut storage_meter = storage::meter::Meter::new(0); assert_matches!( MockStack::run_call( @@ -273,7 +272,7 @@ fn transfer_to_nonexistent_account_works() { set_balance(&BOB, ed + value); assert_err!( 
MockStack::transfer(&Origin::from_account_id(ALICE), &BOB, &DJANGO, value.into()), - >::TransferFailed + >::StorageDepositNotEnoughFunds, ); // Do not reap the sender account @@ -302,7 +301,7 @@ fn correct_transfer_on_call() { set_balance(&ALICE, 100); let balance = get_balance(&BOB_FALLBACK); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = storage::meter::Meter::new(&origin, 0, value).unwrap(); + let mut storage_meter = storage::meter::Meter::new(0); let _ = MockStack::run_call( origin.clone(), @@ -341,7 +340,7 @@ fn correct_transfer_on_delegate_call() { set_balance(&ALICE, 100); let balance = get_balance(&BOB_FALLBACK); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = storage::meter::Meter::new(&origin, 0, 55).unwrap(); + let mut storage_meter = storage::meter::Meter::new(0); assert_ok!(MockStack::run_call( origin, @@ -374,7 +373,7 @@ fn delegate_call_missing_contract() { set_balance(&ALICE, 100); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = storage::meter::Meter::new(&origin, 0, 55).unwrap(); + let mut storage_meter = storage::meter::Meter::new(0); // contract code missing should still succeed to mimic EVM behavior. assert_ok!(MockStack::run_call( @@ -415,7 +414,7 @@ fn changes_are_reverted_on_failing_call() { set_balance(&ALICE, 100); let balance = get_balance(&BOB); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = storage::meter::Meter::new(&origin, 0, 55).unwrap(); + let mut storage_meter = storage::meter::Meter::new(0); let output = MockStack::run_call( origin, @@ -438,17 +437,20 @@ fn changes_are_reverted_on_failing_call() { fn balance_too_low() { // This test verifies that a contract can't send value if it's // balance is too low. - let from = ALICE; - let origin = Origin::from_account_id(ALICE); - let dest = BOB; + let from = BOB; + let dest = CHARLIE; ExtBuilder::default().build().execute_with(|| { - set_balance(&from, 0); + let ed = ::Currency::minimum_balance(); + set_balance(&ALICE, ed * 2); + set_balance(&from, ed + 99); - let result = MockStack::transfer(&origin, &from, &dest, 100u64.into()); + let result = + MockStack::transfer(&Origin::from_account_id(ALICE), &from, &dest, 100u64.into()); assert_eq!(result, Err(Error::::TransferFailed.into())); - assert_eq!(get_balance(&from), 0); + assert_eq!(get_balance(&ALICE), ed * 2); + assert_eq!(get_balance(&from), ed + 99); assert_eq!(get_balance(&dest), 0); }); } @@ -463,7 +465,7 @@ fn output_is_returned_on_success() { ExtBuilder::default().build().execute_with(|| { let origin = Origin::from_account_id(ALICE); - let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(0); place_contract(&BOB, return_ch); let result = MockStack::run_call( @@ -493,7 +495,7 @@ fn output_is_returned_on_failure() { ExtBuilder::default().build().execute_with(|| { place_contract(&BOB, return_ch); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(0); let result = MockStack::run_call( origin, @@ -522,7 +524,7 @@ fn input_data_to_call() { ExtBuilder::default().build().execute_with(|| { place_contract(&BOB, input_data_ch); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(0); let result = MockStack::run_call( origin, @@ -553,9 +555,7 @@ fn 
input_data_to_instantiate() { let mut gas_meter = GasMeter::::new(GAS_LIMIT); let executable = MockExecutable::from_storage(input_data_ch, &mut gas_meter).unwrap(); set_balance(&ALICE, min_balance * 10_000); - let origin = Origin::from_account_id(ALICE); - let mut storage_meter = - storage::meter::Meter::new(&origin, deposit_limit::(), min_balance).unwrap(); + let mut storage_meter = storage::meter::Meter::new(deposit_limit::()); let result = MockStack::run_instantiate( ALICE, @@ -566,6 +566,7 @@ fn input_data_to_instantiate() { vec![1, 2, 3, 4], Some(&[0; 32]), false, + NonceAlreadyIncremented::Yes, ); assert_matches!(result, Ok(_)); }); @@ -578,7 +579,7 @@ fn max_depth() { parameter_types! { static ReachedBottom: bool = false; } - let value = Default::default(); + let value = 0; let recurse_ch = MockLoader::insert(Call, |ctx, _| { // Try to call into yourself. let r = ctx.ext.call( @@ -610,7 +611,7 @@ fn max_depth() { set_balance(&BOB, 1); place_contract(&BOB, recurse_ch); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = storage::meter::Meter::new(&origin, 0, value).unwrap(); + let mut storage_meter = storage::meter::Meter::new(0); let result = MockStack::run_call( origin, @@ -672,7 +673,7 @@ fn caller_returns_proper_values() { place_contract(&BOB, bob_ch); place_contract(&CHARLIE, charlie_ch); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(0); let result = MockStack::run_call( origin, @@ -735,7 +736,7 @@ fn origin_returns_proper_values() { place_contract(&BOB, bob_ch); place_contract(&CHARLIE, charlie_ch); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(0); let result = MockStack::run_call( origin, @@ -768,7 +769,7 @@ fn is_contract_returns_proper_values() { place_contract(&BOB, bob_ch); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(0); let result = MockStack::run_call( origin, BOB_ADDR, @@ -802,7 +803,7 @@ fn to_account_id_returns_proper_values() { ExtBuilder::default().build().execute_with(|| { place_contract(&BOB, bob_code_hash); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(0); let result = MockStack::run_call( origin, BOB_ADDR, @@ -838,7 +839,7 @@ fn code_hash_returns_proper_values() { ); place_contract(&BOB, bob_code_hash); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(0); // ALICE (not contract) -> BOB (contract) let result = MockStack::run_call( origin, @@ -864,7 +865,7 @@ fn own_code_hash_returns_proper_values() { ExtBuilder::default().build().execute_with(|| { place_contract(&BOB, bob_ch); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(0); // ALICE (not contract) -> BOB (contract) let result = MockStack::run_call( origin, @@ -900,7 +901,7 @@ fn caller_is_origin_returns_proper_values() { place_contract(&BOB, code_bob); place_contract(&CHARLIE, code_charlie); let origin = 
Origin::from_account_id(ALICE); - let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(0); // ALICE -> BOB (caller is origin) -> CHARLIE (caller is not origin) let result = MockStack::run_call( origin, @@ -926,7 +927,7 @@ fn root_caller_succeeds() { ExtBuilder::default().build().execute_with(|| { place_contract(&BOB, code_bob); let origin = Origin::Root; - let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(0); // root -> BOB (caller is root) let result = MockStack::run_call( origin, @@ -952,7 +953,7 @@ fn root_caller_does_not_succeed_when_value_not_zero() { ExtBuilder::default().build().execute_with(|| { place_contract(&BOB, code_bob); let origin = Origin::Root; - let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(0); // root -> BOB (caller is root) let result = MockStack::run_call( origin, @@ -988,7 +989,7 @@ fn root_caller_succeeds_with_consecutive_calls() { place_contract(&BOB, code_bob); place_contract(&CHARLIE, code_charlie); let origin = Origin::Root; - let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(0); // root -> BOB (caller is root) -> CHARLIE (caller is not root) let result = MockStack::run_call( origin, @@ -1033,7 +1034,7 @@ fn address_returns_proper_values() { place_contract(&BOB, bob_ch); place_contract(&CHARLIE, charlie_ch); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(0); let result = MockStack::run_call( origin, @@ -1056,8 +1057,7 @@ fn refuse_instantiate_with_value_below_existential_deposit() { ExtBuilder::default().existential_deposit(15).build().execute_with(|| { let mut gas_meter = GasMeter::::new(GAS_LIMIT); let executable = MockExecutable::from_storage(dummy_ch, &mut gas_meter).unwrap(); - let origin = Origin::from_account_id(ALICE); - let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(0); assert_matches!( MockStack::run_instantiate( @@ -1069,6 +1069,7 @@ fn refuse_instantiate_with_value_below_existential_deposit() { vec![], Some(&[0; 32]), false, + NonceAlreadyIncremented::Yes, ), Err(_) ); @@ -1090,9 +1091,7 @@ fn instantiation_work_with_success_output() { let mut gas_meter = GasMeter::::new(GAS_LIMIT); let executable = MockExecutable::from_storage(dummy_ch, &mut gas_meter).unwrap(); set_balance(&ALICE, min_balance * 1000); - let origin = Origin::from_account_id(ALICE); - let mut storage_meter = - storage::meter::Meter::new(&origin, min_balance * 100, min_balance).unwrap(); + let mut storage_meter = storage::meter::Meter::new(min_balance * 100); let instantiated_contract_address = assert_matches!( MockStack::run_instantiate( @@ -1104,6 +1103,7 @@ fn instantiation_work_with_success_output() { vec![], Some(&[0 ;32]), false, + NonceAlreadyIncremented::Yes, ), Ok((address, ref output)) if output.data == vec![80, 65, 83, 83] => address ); @@ -1136,9 +1136,7 @@ fn instantiation_fails_with_failing_output() { let mut gas_meter = GasMeter::::new(GAS_LIMIT); let executable = MockExecutable::from_storage(dummy_ch, &mut gas_meter).unwrap(); set_balance(&ALICE, min_balance * 1000); - let origin = Origin::from_account_id(ALICE); - let mut storage_meter = - 
storage::meter::Meter::new(&origin, min_balance * 100, min_balance).unwrap(); + let mut storage_meter = storage::meter::Meter::new(min_balance * 100); let instantiated_contract_address = assert_matches!( MockStack::run_instantiate( @@ -1150,6 +1148,7 @@ fn instantiation_fails_with_failing_output() { vec![], Some(&[0; 32]), false, + NonceAlreadyIncremented::Yes, ), Ok((address, ref output)) if output.data == vec![70, 65, 73, 76] => address ); @@ -1200,8 +1199,7 @@ fn instantiation_from_contract() { set_balance(&ALICE, min_balance * 100); place_contract(&BOB, instantiator_ch); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = - storage::meter::Meter::new(&origin, min_balance * 10, min_balance * 10).unwrap(); + let mut storage_meter = storage::meter::Meter::new(min_balance * 10); assert_matches!( MockStack::run_call( @@ -1267,7 +1265,7 @@ fn instantiation_traps() { set_balance(&BOB_FALLBACK, 100); place_contract(&BOB, instantiator_ch); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = storage::meter::Meter::new(&origin, 200, 0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(200); assert_matches!( MockStack::run_call( @@ -1299,9 +1297,7 @@ fn termination_from_instantiate_fails() { let mut gas_meter = GasMeter::::new(GAS_LIMIT); let executable = MockExecutable::from_storage(terminate_ch, &mut gas_meter).unwrap(); set_balance(&ALICE, 10_000); - let origin = Origin::from_account_id(ALICE); - let mut storage_meter = - storage::meter::Meter::new(&origin, deposit_limit::(), 100).unwrap(); + let mut storage_meter = storage::meter::Meter::new(deposit_limit::()); assert_eq!( MockStack::run_instantiate( @@ -1313,6 +1309,7 @@ fn termination_from_instantiate_fails() { vec![], Some(&[0; 32]), false, + NonceAlreadyIncremented::Yes, ), Err(ExecError { error: Error::::TerminatedInConstructor.into(), @@ -1370,7 +1367,7 @@ fn in_memory_changes_not_discarded() { place_contract(&BOB, code_bob); place_contract(&CHARLIE, code_charlie); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(0); let result = MockStack::run_call( origin, @@ -1428,9 +1425,7 @@ fn recursive_call_during_constructor_is_balance_transfer() { let mut gas_meter = GasMeter::::new(GAS_LIMIT); let executable = MockExecutable::from_storage(code, &mut gas_meter).unwrap(); set_balance(&ALICE, min_balance * 10_000); - let origin = Origin::from_account_id(ALICE); - let mut storage_meter = - storage::meter::Meter::new(&origin, deposit_limit::(), min_balance).unwrap(); + let mut storage_meter = storage::meter::Meter::new(deposit_limit::()); let result = MockStack::run_instantiate( ALICE, @@ -1441,6 +1436,7 @@ fn recursive_call_during_constructor_is_balance_transfer() { vec![], Some(&[0; 32]), false, + NonceAlreadyIncremented::Yes, ); assert_matches!(result, Ok(_)); }); @@ -1478,7 +1474,7 @@ fn cannot_send_more_balance_than_available_to_self() { set_balance(&ALICE, min_balance * 10); place_contract(&BOB, code_hash); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(0); MockStack::run_call( origin, BOB_ADDR, @@ -1508,7 +1504,7 @@ fn call_reentry_direct_recursion() { place_contract(&BOB, code_bob); place_contract(&CHARLIE, code_charlie); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = storage::meter::Meter::new(&origin, 0, 
0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(0); // Calling another contract should succeed assert_ok!(MockStack::run_call( @@ -1569,7 +1565,7 @@ fn call_deny_reentry() { place_contract(&BOB, code_bob); place_contract(&CHARLIE, code_charlie); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(0); // BOB -> CHARLIE -> BOB fails as BOB denies reentry. assert_err!( @@ -1605,7 +1601,7 @@ fn call_runtime_works() { set_balance(&ALICE, min_balance * 10); place_contract(&BOB, code_hash); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(0); System::reset_events(); MockStack::run_call( origin, @@ -1677,7 +1673,7 @@ fn call_runtime_filter() { set_balance(&ALICE, min_balance * 10); place_contract(&BOB, code_hash); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(0); System::reset_events(); MockStack::run_call( origin, @@ -1787,10 +1783,7 @@ fn nonce() { MockExecutable::from_storage(succ_succ_code, &mut gas_meter).unwrap(); set_balance(&ALICE, min_balance * 10_000); set_balance(&BOB, min_balance * 10_000); - let origin = Origin::from_account_id(BOB); - let mut storage_meter = - storage::meter::Meter::new(&origin, deposit_limit::(), min_balance * 100) - .unwrap(); + let mut storage_meter = storage::meter::Meter::new(deposit_limit::()); // fail should not increment MockStack::run_instantiate( @@ -1802,6 +1795,7 @@ fn nonce() { vec![], Some(&[0; 32]), false, + NonceAlreadyIncremented::Yes, ) .ok(); assert_eq!(System::account_nonce(&ALICE), 0); @@ -1815,6 +1809,7 @@ fn nonce() { vec![], Some(&[0; 32]), false, + NonceAlreadyIncremented::Yes, )); assert_eq!(System::account_nonce(&ALICE), 1); @@ -1827,6 +1822,7 @@ fn nonce() { vec![], Some(&[0; 32]), false, + NonceAlreadyIncremented::Yes, )); assert_eq!(System::account_nonce(&ALICE), 2); @@ -1839,6 +1835,7 @@ fn nonce() { vec![], Some(&[0; 32]), false, + NonceAlreadyIncremented::Yes, )); assert_eq!(System::account_nonce(&ALICE), 3); }); @@ -1897,8 +1894,7 @@ fn set_storage_works() { set_balance(&ALICE, min_balance * 1000); place_contract(&BOB, code_hash); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = - storage::meter::Meter::new(&origin, deposit_limit::(), 0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(deposit_limit::()); assert_ok!(MockStack::run_call( origin, BOB_ADDR, @@ -1996,8 +1992,7 @@ fn set_storage_varsized_key_works() { set_balance(&ALICE, min_balance * 1000); place_contract(&BOB, code_hash); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = - storage::meter::Meter::new(&origin, deposit_limit::(), 0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(deposit_limit::()); assert_ok!(MockStack::run_call( origin, BOB_ADDR, @@ -2035,8 +2030,7 @@ fn get_storage_works() { set_balance(&ALICE, min_balance * 1000); place_contract(&BOB, code_hash); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = - storage::meter::Meter::new(&origin, deposit_limit::(), 0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(deposit_limit::()); assert_ok!(MockStack::run_call( origin, BOB_ADDR, @@ -2074,8 +2068,7 @@ fn get_storage_size_works() { set_balance(&ALICE, min_balance * 
1000); place_contract(&BOB, code_hash); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = - storage::meter::Meter::new(&origin, deposit_limit::(), 0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(deposit_limit::()); assert_ok!(MockStack::run_call( origin, BOB_ADDR, @@ -2124,8 +2117,7 @@ fn get_storage_varsized_key_works() { set_balance(&ALICE, min_balance * 1000); place_contract(&BOB, code_hash); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = - storage::meter::Meter::new(&origin, deposit_limit::(), 0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(deposit_limit::()); assert_ok!(MockStack::run_call( origin, BOB_ADDR, @@ -2174,8 +2166,7 @@ fn get_storage_size_varsized_key_works() { set_balance(&ALICE, min_balance * 1000); place_contract(&BOB, code_hash); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = - storage::meter::Meter::new(&origin, deposit_limit::(), 0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(deposit_limit::()); assert_ok!(MockStack::run_call( origin, BOB_ADDR, @@ -2249,8 +2240,7 @@ fn set_transient_storage_works() { ExtBuilder::default().build().execute_with(|| { place_contract(&BOB, code_hash); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = - storage::meter::Meter::new(&origin, deposit_limit::(), 0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(deposit_limit::()); assert_ok!(MockStack::run_call( origin, BOB_ADDR, @@ -2319,7 +2309,7 @@ fn get_transient_storage_works() { place_contract(&BOB, code_bob); place_contract(&CHARLIE, code_charlie); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(0); let result = MockStack::run_call( origin, @@ -2358,7 +2348,7 @@ fn get_transient_storage_size_works() { ExtBuilder::default().build().execute_with(|| { place_contract(&BOB, code_hash); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(0); assert_ok!(MockStack::run_call( origin, BOB_ADDR, @@ -2419,7 +2409,7 @@ fn rollback_transient_storage_works() { place_contract(&BOB, code_bob); place_contract(&CHARLIE, code_charlie); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(0); let result = MockStack::run_call( origin, @@ -2451,7 +2441,7 @@ fn ecdsa_to_eth_address_returns_proper_value() { place_contract(&BOB, bob_ch); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(0); let result = MockStack::run_call( origin, BOB_ADDR, @@ -2523,7 +2513,7 @@ fn last_frame_output_works_on_instantiate() { set_balance(&BOB, 100); place_contract(&BOB, instantiator_ch); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = storage::meter::Meter::new(&origin, 200, 0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(200); MockStack::run_call( origin, @@ -2591,7 +2581,7 @@ fn last_frame_output_works_on_nested_call() { place_contract(&BOB, code_bob); place_contract(&CHARLIE, code_charlie); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap(); + let mut storage_meter = 
storage::meter::Meter::new(0); let result = MockStack::run_call( origin, @@ -2659,7 +2649,7 @@ fn last_frame_output_is_always_reset() { ExtBuilder::default().build().execute_with(|| { place_contract(&BOB, code_bob); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(0); let result = MockStack::run_call( origin, @@ -2707,7 +2697,7 @@ fn immutable_data_access_checks_work() { set_balance(&BOB, 100); place_contract(&BOB, instantiator_ch); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = storage::meter::Meter::new(&origin, 200, 0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(200); MockStack::run_call( origin, @@ -2766,7 +2756,7 @@ fn correct_immutable_data_in_delegate_call() { place_contract(&CHARLIE, charlie_ch); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = storage::meter::Meter::new(&origin, 200, 0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(200); // Place unique immutable data for each contract >::insert::<_, ImmutableData>( @@ -2813,7 +2803,7 @@ fn immutable_data_set_overrides() { .execute_with(|| { set_balance(&ALICE, 1000); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = storage::meter::Meter::new(&origin, 200, 0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(200); let mut gas_meter = GasMeter::::new(GAS_LIMIT); let addr = MockStack::run_instantiate( @@ -2825,6 +2815,7 @@ fn immutable_data_set_overrides() { vec![], None, false, + NonceAlreadyIncremented::Yes, ) .unwrap() .0; @@ -2871,7 +2862,7 @@ fn immutable_data_set_errors_with_empty_data() { set_balance(&BOB, 100); place_contract(&BOB, instantiator_ch); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = storage::meter::Meter::new(&origin, 200, 0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(200); MockStack::run_call( origin, @@ -2926,7 +2917,7 @@ fn block_hash_returns_proper_values() { place_contract(&BOB, bob_code_hash); let origin = Origin::from_account_id(ALICE); - let mut storage_meter = storage::meter::Meter::new(&origin, 0, 0).unwrap(); + let mut storage_meter = storage::meter::Meter::new(0); assert_matches!( MockStack::run_call( origin, diff --git a/substrate/frame/revive/src/lib.rs b/substrate/frame/revive/src/lib.rs index c248dda126c6a..04d09d6eb8341 100644 --- a/substrate/frame/revive/src/lib.rs +++ b/substrate/frame/revive/src/lib.rs @@ -57,7 +57,7 @@ use codec::{Codec, Decode, Encode}; use environmental::*; use frame_support::{ dispatch::{ - DispatchErrorWithPostInfo, DispatchInfo, DispatchResultWithPostInfo, GetDispatchInfo, Pays, + DispatchErrorWithPostInfo, DispatchResultWithPostInfo, GetDispatchInfo, Pays, PostDispatchInfo, RawOrigin, }, ensure, @@ -67,7 +67,7 @@ use frame_support::{ tokens::{Fortitude::Polite, Preservation::Preserve}, ConstU32, ConstU64, Contains, EnsureOrigin, Get, IsType, OriginTrait, Time, }, - weights::{Weight, WeightMeter}, + weights::WeightMeter, BoundedVec, RuntimeDebugNoBound, }; use frame_system::{ @@ -76,7 +76,6 @@ use frame_system::{ Pallet as System, }; use scale_info::TypeInfo; -use sp_core::{H160, H256, U256}; use sp_runtime::{ traits::{BadOrigin, Bounded, Convert, Dispatchable, Saturating, Zero}, AccountId32, DispatchError, @@ -87,7 +86,13 @@ pub use crate::{ exec::{MomentOf, Origin}, pallet::*, }; +pub use codec; +pub use frame_support::{self, dispatch::DispatchInfo, weights::Weight}; +pub use 
frame_system::{self, limits::BlockWeights}; +pub use pallet_transaction_payment; pub use primitives::*; +pub use sp_core::{H160, H256, U256}; +pub use sp_runtime; pub use weights::WeightInfo; #[cfg(doc)] @@ -513,6 +518,18 @@ pub mod pallet { AddressMapping, } + #[derive(Clone, Copy, Debug, PartialEq, Eq)] + pub enum NonceAlreadyIncremented { + /// Indicates that the nonce has not been incremented yet. + /// + /// This happens when the instantiation is triggered by a dry-run or another contract. + No, + /// Indicates that the nonce has already been incremented. + /// + /// This happens when the instantiation is triggered by a transaction. + Yes, + } + /// A mapping from a contract's code hash to its code. #[pallet::storage] pub(crate) type PristineCode = StorageMap<_, Identity, H256, CodeVec>; @@ -797,6 +814,7 @@ pub mod pallet { Code::Existing(code_hash), data, salt, + NonceAlreadyIncremented::Yes, ); if let Ok(retval) = &output.result { if retval.result.did_revert() { @@ -861,6 +879,7 @@ pub mod pallet { Code::Upload(code), data, salt, + NonceAlreadyIncremented::Yes, ); if let Ok(retval) = &output.result { if retval.result.did_revert() { @@ -1033,8 +1052,9 @@ where let try_call = || { let origin = Origin::from_runtime_origin(origin)?; let mut storage_meter = match storage_deposit_limit { - DepositLimit::Balance(limit) => StorageMeter::new(&origin, limit, value)?, - DepositLimit::Unchecked => StorageMeter::new_unchecked(BalanceOf::::max_value()), + DepositLimit::Balance(limit) => StorageMeter::new(limit), + DepositLimit::UnsafeOnlyForDryRun => + StorageMeter::new_unchecked(BalanceOf::::max_value()), }; let result = ExecStack::>::run_call( origin.clone(), @@ -1074,13 +1094,14 @@ where code: Code, data: Vec, salt: Option<[u8; 32]>, + nonce_already_incremented: NonceAlreadyIncremented, ) -> ContractResult> { let mut gas_meter = GasMeter::new(gas_limit); let mut storage_deposit = Default::default(); let unchecked_deposit_limit = storage_deposit_limit.is_unchecked(); let mut storage_deposit_limit = match storage_deposit_limit { DepositLimit::Balance(limit) => limit, - DepositLimit::Unchecked => BalanceOf::::max_value(), + DepositLimit::UnsafeOnlyForDryRun => BalanceOf::::max_value(), }; let try_instantiate = || { @@ -1104,7 +1125,7 @@ where let mut storage_meter = if unchecked_deposit_limit { StorageMeter::new_unchecked(storage_deposit_limit) } else { - StorageMeter::new(&instantiate_origin, storage_deposit_limit, value)? + StorageMeter::new(storage_deposit_limit) }; let result = ExecStack::>::run_instantiate( @@ -1116,6 +1137,7 @@ where data, salt.as_ref(), unchecked_deposit_limit, + nonce_already_incremented, ); storage_deposit = storage_meter .try_into_deposit(&instantiate_origin, unchecked_deposit_limit)? @@ -1161,7 +1183,7 @@ where let storage_deposit_limit = if tx.gas.is_some() { DepositLimit::Balance(BalanceOf::::max_value()) } else { - DepositLimit::Unchecked + DepositLimit::UnsafeOnlyForDryRun }; if tx.nonce.is_none() { @@ -1285,6 +1307,7 @@ where Code::Upload(code.to_vec()), data.to_vec(), None, + NonceAlreadyIncremented::No, ); let returned_data = match result.result { @@ -1618,3 +1641,214 @@ sp_api::decl_runtime_apis! { } } + +/// This macro wraps substrate's `impl_runtime_apis!` and implements `pallet_revive` runtime APIs. +/// +/// # Parameters +/// - `$Runtime`: The runtime type to implement the APIs for. +/// - `$Executive`: The Executive type of the runtime. +/// - `$EthExtra`: Type for additional Ethereum runtime extension. 
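An aside on the `NonceAlreadyIncremented` flag defined above: by the time a signed extrinsic reaches `bare_instantiate`, the transaction layer (frame_system's `CheckNonce` extension) has already bumped the caller's nonce, while a dry-run has bumped nothing. A minimal sketch of the two call sites, with the argument names assumed for illustration rather than taken from this diff:

    // Dispatched transaction: the nonce was bumped pre-dispatch, so the
    // `create1` contract address is derived from `account_nonce - 1`.
    let outcome = Pallet::<T>::bare_instantiate(
        origin, value, gas_limit, storage_deposit_limit, code, data, salt,
        NonceAlreadyIncremented::Yes,
    );

    // Dry-run (e.g. the `instantiate` runtime API or an `eth_transact`
    // estimation): nothing touched the nonce, so the current value is used.
    let outcome = Pallet::<T>::bare_instantiate(
        origin, value, gas_limit, storage_deposit_limit, code, data, salt,
        NonceAlreadyIncremented::No,
    );

The `nonce_incremented_dry_run_vs_execute` test towards the end of this diff pins down exactly this pair of behaviours.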
+/// - `$($rest:tt)*`: Remaining input to be forwarded to the underlying `impl_runtime_apis!`. +#[macro_export] +macro_rules! impl_runtime_apis_plus_revive { + ($Runtime: ty, $Executive: ty, $EthExtra: ty, $($rest:tt)*) => { + + impl_runtime_apis! { + $($rest)* + + impl pallet_revive::ReviveApi for $Runtime { + fn balance(address: $crate::H160) -> $crate::U256 { + $crate::Pallet::::evm_balance(&address) + } + + fn block_gas_limit() -> $crate::U256 { + $crate::Pallet::::evm_block_gas_limit() + } + + fn gas_price() -> $crate::U256 { + $crate::Pallet::::evm_gas_price() + } + + fn nonce(address: $crate::H160) -> Nonce { + use $crate::AddressMapper; + let account = ::AddressMapper::to_account_id(&address); + $crate::frame_system::Pallet::::account_nonce(account) + } + + fn eth_transact( + tx: $crate::evm::GenericTransaction, + ) -> Result<$crate::EthTransactInfo, $crate::EthTransactError> { + use $crate::{ + codec::Encode, evm::runtime::EthExtra, frame_support::traits::Get, + sp_runtime::traits::TransactionExtension, + sp_runtime::traits::Block as BlockT + }; + + let tx_fee = |pallet_call, mut dispatch_info: $crate::DispatchInfo| { + let call = + ::RuntimeCall::from(pallet_call); + dispatch_info.extension_weight = + <$EthExtra>::get_eth_extension(0, 0u32.into()).weight(&call); + + let uxt: ::Extrinsic = + $crate::sp_runtime::generic::UncheckedExtrinsic::new_bare(call).into(); + + $crate::pallet_transaction_payment::Pallet::::compute_fee( + uxt.encoded_size() as u32, + &dispatch_info, + 0u32.into(), + ) + }; + + let blockweights: $crate::BlockWeights = + ::BlockWeights::get(); + $crate::Pallet::::bare_eth_transact(tx, blockweights.max_block, tx_fee) + } + + fn call( + origin: AccountId, + dest: $crate::H160, + value: Balance, + gas_limit: Option<$crate::Weight>, + storage_deposit_limit: Option, + input_data: Vec, + ) -> $crate::ContractResult<$crate::ExecReturnValue, Balance> { + use $crate::frame_support::traits::Get; + let blockweights: $crate::BlockWeights = + ::BlockWeights::get(); + + let origin = + ::RuntimeOrigin::signed(origin); + $crate::Pallet::::bare_call( + origin, + dest, + value, + gas_limit.unwrap_or(blockweights.max_block), + $crate::DepositLimit::Balance(storage_deposit_limit.unwrap_or(u128::MAX)), + input_data, + ) + } + + fn instantiate( + origin: AccountId, + value: Balance, + gas_limit: Option<$crate::Weight>, + storage_deposit_limit: Option, + code: $crate::Code, + data: Vec, + salt: Option<[u8; 32]>, + ) -> $crate::ContractResult<$crate::InstantiateReturnValue, Balance> { + use $crate::frame_support::traits::Get; + let blockweights: $crate::BlockWeights = + ::BlockWeights::get(); + + let origin = + ::RuntimeOrigin::signed(origin); + $crate::Pallet::::bare_instantiate( + origin, + value, + gas_limit.unwrap_or(blockweights.max_block), + $crate::DepositLimit::Balance(storage_deposit_limit.unwrap_or(u128::MAX)), + code, + data, + salt, + $crate::NonceAlreadyIncremented::No, + ) + } + + fn upload_code( + origin: AccountId, + code: Vec, + storage_deposit_limit: Option, + ) -> $crate::CodeUploadResult { + let origin = + ::RuntimeOrigin::signed(origin); + $crate::Pallet::::bare_upload_code( + origin, + code, + storage_deposit_limit.unwrap_or(u128::MAX), + ) + } + + fn get_storage_var_key( + address: $crate::H160, + key: Vec, + ) -> $crate::GetStorageResult { + $crate::Pallet::::get_storage_var_key(address, key) + } + + fn get_storage(address: $crate::H160, key: [u8; 32]) -> $crate::GetStorageResult { + $crate::Pallet::::get_storage(address, key) + } + + fn trace_block( + block: 
Block, + tracer_type: $crate::evm::TracerType, + ) -> Vec<(u32, $crate::evm::Trace)> { + use $crate::{sp_runtime::traits::Block, tracing::trace}; + let mut tracer = $crate::Pallet::::evm_tracer(tracer_type); + let mut traces = vec![]; + let (header, extrinsics) = block.deconstruct(); + <$Executive>::initialize_block(&header); + for (index, ext) in extrinsics.into_iter().enumerate() { + let t = tracer.as_tracing(); + trace(t, || { + let _ = <$Executive>::apply_extrinsic(ext); + }); + + if let Some(tx_trace) = tracer.collect_trace() { + traces.push((index as u32, tx_trace)); + } + } + + traces + } + + fn trace_tx( + block: Block, + tx_index: u32, + tracer_type: $crate::evm::TracerType, + ) -> Option<$crate::evm::Trace> { + use $crate::{sp_runtime::traits::Block, tracing::trace}; + + let mut tracer = $crate::Pallet::::evm_tracer(tracer_type); + let (header, extrinsics) = block.deconstruct(); + + <$Executive>::initialize_block(&header); + for (index, ext) in extrinsics.into_iter().enumerate() { + if index as u32 == tx_index { + let t = tracer.as_tracing(); + trace(t, || { + let _ = <$Executive>::apply_extrinsic(ext); + }); + break; + } else { + let _ = <$Executive>::apply_extrinsic(ext); + } + } + + tracer.collect_trace() + } + + fn trace_call( + tx: $crate::evm::GenericTransaction, + tracer_type: $crate::evm::TracerType, + ) -> Result<$crate::evm::Trace, $crate::EthTransactError> { + use $crate::tracing::trace; + let mut tracer = $crate::Pallet::::evm_tracer(tracer_type); + let t = tracer.as_tracing(); + + let result = trace(t, || Self::eth_transact(tx)); + + if let Some(trace) = tracer.collect_trace() { + Ok(trace) + } else if let Err(err) = result { + Err(err) + } else { + Ok(tracer.empty_trace()) + } + } + } + } + }; +} diff --git a/substrate/frame/revive/src/primitives.rs b/substrate/frame/revive/src/primitives.rs index e2900bd027b69..c23febb46dd0d 100644 --- a/substrate/frame/revive/src/primitives.rs +++ b/substrate/frame/revive/src/primitives.rs @@ -31,7 +31,7 @@ use sp_runtime::{ #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] pub enum DepositLimit { /// Allows bypassing all balance transfer checks. - Unchecked, + UnsafeOnlyForDryRun, /// Specifies a maximum allowable balance for a deposit. 
Balance(Balance), @@ -40,7 +40,7 @@ pub enum DepositLimit { impl DepositLimit { pub fn is_unchecked(&self) -> bool { match self { - Self::Unchecked => true, + Self::UnsafeOnlyForDryRun => true, _ => false, } } diff --git a/substrate/frame/revive/src/storage.rs b/substrate/frame/revive/src/storage.rs index a761223aadfdd..2a25230c2e6ce 100644 --- a/substrate/frame/revive/src/storage.rs +++ b/substrate/frame/revive/src/storage.rs @@ -188,6 +188,7 @@ impl ContractInfo { if let Some(storage_meter) = storage_meter { let mut diff = meter::Diff::default(); + let key_len = key.len() as u32; match (old_len, new_value.as_ref().map(|v| v.len() as u32)) { (Some(old_len), Some(new_len)) => if new_len > old_len { @@ -196,11 +197,11 @@ impl ContractInfo { diff.bytes_removed = old_len - new_len; }, (None, Some(new_len)) => { - diff.bytes_added = new_len; + diff.bytes_added = new_len.saturating_add(key_len); diff.items_added = 1; }, (Some(old_len), None) => { - diff.bytes_removed = old_len; + diff.bytes_removed = old_len.saturating_add(key_len); diff.items_removed = 1; }, (None, None) => (), diff --git a/substrate/frame/revive/src/storage/meter.rs b/substrate/frame/revive/src/storage/meter.rs index ddd4a3bae87f0..edb463ee3a131 100644 --- a/substrate/frame/revive/src/storage/meter.rs +++ b/substrate/frame/revive/src/storage/meter.rs @@ -54,29 +54,12 @@ pub type GenericMeter = RawMeter; /// /// This mostly exists for testing so that the charging can be mocked. pub trait Ext { - /// This checks whether `origin` is able to afford the storage deposit limit. - /// - /// It is necessary to do this check beforehand so that the charge won't fail later on. - /// - /// `origin`: The origin of the call stack from which is responsible for putting down a deposit. - /// `limit`: The limit with which the meter was constructed. - /// `min_leftover`: How much `free_balance` in addition to the existential deposit (ed) should - /// be left inside the `origin` account. - /// - /// Returns the limit that should be used by the meter. If origin can't afford the `limit` - /// it returns `Err`. - fn check_limit( - origin: &T::AccountId, - limit: BalanceOf, - min_leftover: BalanceOf, - ) -> Result, DispatchError>; /// This is called to inform the implementer that some balance should be charged due to /// some interaction of the `origin` with a `contract`. /// /// The balance transfer can either flow from `origin` to `contract` or the other way /// around depending on whether `amount` constitutes a `Charge` or a `Refund`. - /// It should be used in combination with `check_limit` to check that no more balance than this - /// limit is ever charged. + /// It will fail in case the `origin` does not have enough balance to cover all storage deposits. fn charge( origin: &T::AccountId, contract: &T::AccountId, @@ -342,22 +325,12 @@ where T: Config, E: Ext, { - /// Create new storage meter for the specified `origin` and `limit`. + /// Create a new storage meter limiting storage deposits to the passed `limit`. /// - /// This tries to [`Ext::check_limit`] on `origin` and fails if this is not possible. - pub fn new( - origin: &Origin, - limit: BalanceOf, - min_leftover: BalanceOf, - ) -> Result { - // Check the limit only if the origin is not root.
- return match origin { - Origin::Root => Ok(Self { limit, ..Default::default() }), - Origin::Signed(o) => { - let limit = E::check_limit(o, limit, min_leftover)?; - Ok(Self { limit, ..Default::default() }) - }, - } + /// If the limit is larger than what the origin can afford, we will simply fail + /// when collecting the deposits in `try_into_deposit`. + pub fn new(limit: BalanceOf) -> Self { + Self { limit, ..Default::default() } } /// Create new storage meter without checking the limit. @@ -382,12 +355,18 @@ where Origin::Root => return Ok(Deposit::Charge(Zero::zero())), Origin::Signed(o) => o, }; - for charge in self.charges.iter().filter(|c| matches!(c.amount, Deposit::Refund(_))) { - E::charge(origin, &charge.contract, &charge.amount, &charge.state)?; - } - for charge in self.charges.iter().filter(|c| matches!(c.amount, Deposit::Charge(_))) { - E::charge(origin, &charge.contract, &charge.amount, &charge.state)?; - } + let try_charge = || { + for charge in self.charges.iter().filter(|c| matches!(c.amount, Deposit::Refund(_))) + { + E::charge(origin, &charge.contract, &charge.amount, &charge.state)?; + } + for charge in self.charges.iter().filter(|c| matches!(c.amount, Deposit::Charge(_))) + { + E::charge(origin, &charge.contract, &charge.amount, &charge.state)?; + } + Ok(()) + }; + try_charge().map_err(|_: DispatchError| >::StorageDepositNotEnoughFunds)?; } Ok(self.total_deposit) @@ -462,17 +441,6 @@ impl> RawMeter { } impl Ext for ReservingExt { - fn check_limit( - origin: &T::AccountId, - limit: BalanceOf, - min_leftover: BalanceOf, - ) -> Result, DispatchError> { - let limit = T::Currency::reducible_balance(origin, Preservation::Preserve, Polite) - .saturating_sub(min_leftover) - .min(limit); - Ok(limit) - } - fn charge( origin: &T::AccountId, contract: &T::AccountId, @@ -482,8 +450,6 @@ impl Ext for ReservingExt { match amount { Deposit::Charge(amount) | Deposit::Refund(amount) if amount.is_zero() => return Ok(()), Deposit::Charge(amount) => { - // This could fail if the `origin` does not have enough liquidity. Ideally, though, - // this should have been checked before with `check_limit`.
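To make the new control flow concrete: constructing a meter can no longer fail, and affordability only surfaces once the accumulated charges are settled. A minimal sketch of that lifecycle using the `TestMeter` helpers from the test module below (the aliases and signatures are assumed to resolve as in those tests):

    // Construction is infallible; the limit is taken at face value.
    let mut meter = TestMeter::new(1_000);
    assert_eq!(meter.available(), 1_000);

    // Sub-call limits are still clamped eagerly to what the parent has left.
    let nested = meter.nested(2_000);
    assert_eq!(nested.available(), 1_000);
    meter.absorb(nested, &BOB, None);

    // Settlement is the first point that can fail, surfacing as
    // Error::<Test>::StorageDepositNotEnoughFunds via `try_into_deposit`.
    let origin = Origin::<Test>::from_account_id(ALICE);
    let _deposit = meter
        .try_into_deposit(&origin, false)
        .expect("ALICE can cover every accumulated charge");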
T::Currency::transfer_and_hold( &HoldReason::StorageDepositReserve.into(), origin, @@ -550,13 +516,6 @@ mod tests { static TestExtTestValue: TestExt = Default::default(); } - #[derive(Debug, PartialEq, Eq, Clone)] - struct LimitCheck { - origin: AccountIdOf, - limit: BalanceOf, - min_leftover: BalanceOf, - } - #[derive(Debug, PartialEq, Eq, Clone)] struct Charge { origin: AccountIdOf, @@ -567,30 +526,16 @@ mod tests { #[derive(Default, Debug, PartialEq, Eq, Clone)] pub struct TestExt { - limit_checks: Vec, charges: Vec, } impl TestExt { fn clear(&mut self) { - self.limit_checks.clear(); self.charges.clear(); } } impl Ext for TestExt { - fn check_limit( - origin: &AccountIdOf, - limit: BalanceOf, - min_leftover: BalanceOf, - ) -> Result, DispatchError> { - TestExtTestValue::mutate(|ext| { - ext.limit_checks - .push(LimitCheck { origin: origin.clone(), limit, min_leftover }) - }); - Ok(limit) - } - fn charge( origin: &AccountIdOf, contract: &AccountIdOf, @@ -645,15 +590,9 @@ mod tests { fn new_reserves_balance_works() { clear_ext(); - TestMeter::new(&Origin::from_account_id(ALICE), 1_000, 0).unwrap(); + TestMeter::new(1_000); - assert_eq!( - TestExtTestValue::get(), - TestExt { - limit_checks: vec![LimitCheck { origin: ALICE, limit: 1_000, min_leftover: 0 }], - ..Default::default() - } - ) + assert_eq!(TestExtTestValue::get(), TestExt { ..Default::default() }) } /// Previously, passing a limit of 0 meant unlimited storage for a nested call. @@ -663,7 +602,7 @@ mod tests { fn nested_zero_limit_requested() { clear_ext(); - let meter = TestMeter::new(&Origin::from_account_id(ALICE), 1_000, 0).unwrap(); + let meter = TestMeter::new(1_000); assert_eq!(meter.available(), 1_000); let nested0 = meter.nested(BalanceOf::::zero()); assert_eq!(nested0.available(), 0); @@ -673,7 +612,7 @@ mod tests { fn nested_some_limit_requested() { clear_ext(); - let meter = TestMeter::new(&Origin::from_account_id(ALICE), 1_000, 0).unwrap(); + let meter = TestMeter::new(1_000); assert_eq!(meter.available(), 1_000); let nested0 = meter.nested(500); assert_eq!(nested0.available(), 500); @@ -683,7 +622,7 @@ mod tests { fn nested_all_limit_requested() { clear_ext(); - let meter = TestMeter::new(&Origin::from_account_id(ALICE), 1_000, 0).unwrap(); + let meter = TestMeter::new(1_000); assert_eq!(meter.available(), 1_000); let nested0 = meter.nested(1_000); assert_eq!(nested0.available(), 1_000); @@ -693,7 +632,7 @@ mod tests { fn nested_over_limit_requested() { clear_ext(); - let meter = TestMeter::new(&Origin::from_account_id(ALICE), 1_000, 0).unwrap(); + let meter = TestMeter::new(1_000); assert_eq!(meter.available(), 1_000); let nested0 = meter.nested(2_000); assert_eq!(nested0.available(), 1_000); @@ -703,7 +642,7 @@ mod tests { fn empty_charge_works() { clear_ext(); - let mut meter = TestMeter::new(&Origin::from_account_id(ALICE), 1_000, 0).unwrap(); + let mut meter = TestMeter::new(1_000); assert_eq!(meter.available(), 1_000); // an empty charge does not create a `Charge` entry @@ -711,13 +650,7 @@ mod tests { nested0.charge(&Default::default()); meter.absorb(nested0, &BOB, None); - assert_eq!( - TestExtTestValue::get(), - TestExt { - limit_checks: vec![LimitCheck { origin: ALICE, limit: 1_000, min_leftover: 0 }], - ..Default::default() - } - ) + assert_eq!(TestExtTestValue::get(), TestExt { ..Default::default() }) } #[test] @@ -727,7 +660,6 @@ mod tests { origin: Origin::::from_account_id(ALICE), deposit: Deposit::Refund(28), expected: TestExt { - limit_checks: vec![LimitCheck { origin: ALICE, limit: 100, 
min_leftover: 0 }], charges: vec![ Charge { origin: ALICE, @@ -753,14 +685,14 @@ mod tests { ChargingTestCase { origin: Origin::::Root, deposit: Deposit::Charge(0), - expected: TestExt { limit_checks: vec![], charges: vec![] }, + expected: TestExt { charges: vec![] }, }, ]; for test_case in test_cases { clear_ext(); - let mut meter = TestMeter::new(&test_case.origin, 100, 0).unwrap(); + let mut meter = TestMeter::new(100); assert_eq!(meter.available(), 100); let mut nested0_info = new_info(StorageInfo { @@ -824,7 +756,6 @@ mod tests { origin: Origin::::from_account_id(ALICE), deposit: Deposit::Refund(108), expected: TestExt { - limit_checks: vec![LimitCheck { origin: ALICE, limit: 1_000, min_leftover: 0 }], charges: vec![ Charge { origin: ALICE, @@ -844,14 +775,14 @@ mod tests { ChargingTestCase { origin: Origin::::Root, deposit: Deposit::Charge(0), - expected: TestExt { limit_checks: vec![], charges: vec![] }, + expected: TestExt { charges: vec![] }, }, ]; for test_case in test_cases { clear_ext(); - let mut meter = TestMeter::new(&test_case.origin, 1_000, 0).unwrap(); + let mut meter = TestMeter::new(1_000); assert_eq!(meter.available(), 1_000); let mut nested0 = meter.nested(BalanceOf::::max_value()); diff --git a/substrate/frame/revive/src/test_utils/builder.rs b/substrate/frame/revive/src/test_utils/builder.rs index 799c633deef0b..120fde86e9bb7 100644 --- a/substrate/frame/revive/src/test_utils/builder.rs +++ b/substrate/frame/revive/src/test_utils/builder.rs @@ -18,7 +18,7 @@ use super::{deposit_limit, GAS_LIMIT}; use crate::{ address::AddressMapper, AccountIdOf, BalanceOf, Code, Config, ContractResult, DepositLimit, - ExecReturnValue, InstantiateReturnValue, OriginFor, Pallet, Weight, + ExecReturnValue, InstantiateReturnValue, NonceAlreadyIncremented, OriginFor, Pallet, Weight, }; use alloc::{vec, vec::Vec}; use frame_support::pallet_prelude::DispatchResultWithPostInfo; @@ -138,6 +138,7 @@ builder!( code: Code, data: Vec, salt: Option<[u8; 32]>, + nonce_already_incremented: NonceAlreadyIncremented, ) -> ContractResult>; /// Build the instantiate call and unwrap the result. @@ -165,6 +166,7 @@ builder!( code, data: vec![], salt: Some([0; 32]), + nonce_already_incremented: NonceAlreadyIncremented::Yes, } } ); diff --git a/substrate/frame/revive/src/tests.rs b/substrate/frame/revive/src/tests.rs index 69f5ed60d51dc..fd2d4a376b357 100644 --- a/substrate/frame/revive/src/tests.rs +++ b/substrate/frame/revive/src/tests.rs @@ -1024,16 +1024,17 @@ fn delegate_call_with_deposit_limit() { .build_and_unwrap_contract(); // Delegate call will write 1 storage and deposit of 2 (1 item) + 32 (bytes) is required. 
+ // + 32 + 16 for the blake2_128concat key // Fails, not enough deposit let ret = builder::bare_call(caller_addr) .value(1337) - .data((callee_addr, 33u64).encode()) + .data((callee_addr, 81u64).encode()) .build_and_unwrap_result(); assert_return_code!(ret, RuntimeReturnCode::OutOfResources); assert_ok!(builder::call(caller_addr) .value(1337) - .data((callee_addr, 34u64).encode()) + .data((callee_addr, 82u64).encode()) .build()); }); } @@ -1123,14 +1124,14 @@ fn cannot_self_destruct_through_storage_refund_after_price_change() { info_deposit + min_balance ); - // Create 100 bytes of storage with a price of per byte and a single storage item of - // price 2 + // Create 100 bytes of storage (plus 16 + 32 bytes for the blake2_128concat key) with a + // price of 1 per byte and a single storage item of price 2 assert_ok!(builder::call(contract.addr).data(100u32.to_le_bytes().to_vec()).build()); - assert_eq!(get_contract(&contract.addr).total_deposit(), info_deposit + 102); + assert_eq!(get_contract(&contract.addr).total_deposit(), info_deposit + 100 + 16 + 32 + 2); // Increase the byte price and trigger a refund. This should not have any influence // because the removal is pro rata and exactly those 100 bytes should have been - // removed. + // removed as we didn't delete the key. DEPOSIT_PER_BYTE.with(|c| *c.borrow_mut() = 500); assert_ok!(builder::call(contract.addr).data(0u32.to_le_bytes().to_vec()).build()); @@ -1139,7 +1140,9 @@ fn cannot_self_destruct_through_storage_refund_after_price_change() { assert_eq!( ::Currency::total_balance(&contract.account_id), get_contract(&contract.addr).total_deposit() + min_balance, ); - assert_eq!(get_contract(&contract.addr).extra_deposit(), 2); + // + 1 because, due to fixed point arithmetic, we can sometimes refund + // one unit too little + assert_eq!(get_contract(&contract.addr).extra_deposit(), 16 + 32 + 2 + 1); }); } @@ -1357,6 +1360,16 @@ fn call_return_code() { .value(min_balance * 100) .build_and_unwrap_contract(); + // BOB cannot pay the ED which is needed to pull DJANGO into existence. + // This traps the caller instead of returning an error code. The + // reasoning is that this error state does not exist on Ethereum, where + // no ED exists. We hide this fact from the contract. + let result = builder::bare_call(bob.addr) + .data((DJANGO_ADDR, u256_bytes(1)).encode()) + .origin(RuntimeOrigin::signed(BOB)) + .build(); + assert_err!(result.result, >::StorageDepositNotEnoughFunds); + // Contract calls into Django which is no valid contract // This will be a balance transfer into a new account // with more than the contract has which will make the transfer fail @@ -1373,18 +1386,20 @@ fn call_return_code() { // Sending below the minimum balance should result in success. // The ED is charged from the call origin.
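The recurring `16 + 32` (or `48`) in these assertions has a single source: the fixed 32-byte storage keys are stored under a `Blake2_128Concat` hasher, so with the `key_len` change in storage.rs above every created item is also charged for its 16-byte hash prefix plus the 32-byte key. A worked check of the arithmetic, assuming the mock prices the comments state (1 per byte, 2 per item):

    // Sketch: deposit charged for creating one storage item of `value_len`
    // bytes under a fixed 32-byte key (mock prices, not pallet code).
    fn expected_creation_deposit(value_len: u64) -> u64 {
        let key_len = 16 + 32; // Blake2_128Concat: hash prefix + raw key
        let per_byte = 1;
        let per_item = 2;
        per_item + per_byte * (value_len + key_len)
    }

    // Matches the `info_deposit + 100 + 16 + 32 + 2` assertion above:
    assert_eq!(expected_creation_deposit(100), 150);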
+ let alice_before = test_utils::get_balance(&ALICE_FALLBACK); assert_eq!(test_utils::get_balance(&DJANGO_FALLBACK), 0); let result = builder::bare_call(bob.addr) .data( AsRef::<[u8]>::as_ref(&DJANGO_ADDR) .iter() - .chain(&u256_bytes(55)) + .chain(&u256_bytes(1)) .cloned() .collect(), ) .build_and_unwrap_result(); assert_return_code!(result, RuntimeReturnCode::Success); - assert_eq!(test_utils::get_balance(&DJANGO_FALLBACK), 55 + min_balance); + assert_eq!(test_utils::get_balance(&DJANGO_FALLBACK), min_balance + 1); + assert_eq!(test_utils::get_balance(&ALICE_FALLBACK), alice_before - min_balance); let django = builder::bare_instantiate(Code::Upload(callee_code)) .origin(RuntimeOrigin::signed(CHARLIE)) @@ -1449,6 +1464,14 @@ fn instantiate_return_code() { .value(min_balance * 100) .build_and_unwrap_contract(); + // bob cannot pay the ED to create the contract as he has no money + // this traps the caller rather than returning an error + let result = builder::bare_call(contract.addr) + .data(callee_hash.iter().chain(&0u32.to_le_bytes()).cloned().collect()) + .origin(RuntimeOrigin::signed(BOB)) + .build(); + assert_err!(result.result, >::StorageDepositNotEnoughFunds); + // Contract has only the minimal balance so any transfer will fail. ::Currency::set_balance(&contract.account_id, min_balance); let result = builder::bare_call(contract.addr) @@ -2118,7 +2141,7 @@ fn failed_deposit_charge_should_roll_back_call() { let transfer_call = Box::new(RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death { dest: CHARLIE, - value: pallet_balances::Pallet::::free_balance(&ALICE) - 2 * ED, + value: pallet_balances::Pallet::::free_balance(&ALICE) - (2 * ED + 48), })); // Wrap the transfer call in a proxy call. @@ -2129,7 +2152,7 @@ fn failed_deposit_charge_should_roll_back_call() { }); let data = ( - (ED - DepositPerItem::get()) as u32, // storage length + 1u32, // storage length addr_callee, transfer_proxy_call, ); @@ -2141,9 +2164,13 @@ fn failed_deposit_charge_should_roll_back_call() { // With a low enough deposit per byte, the call should succeed. let result = execute().unwrap(); - // Bump the deposit per byte to a high value to trigger a FundsUnavailable error. 
- DEPOSIT_PER_BYTE.with(|c| *c.borrow_mut() = 20); - assert_err_with_weight!(execute(), TokenError::FundsUnavailable, result.actual_weight); + // Bump the deposit per byte to a high value to trigger an error + DEPOSIT_PER_BYTE.with(|c| *c.borrow_mut() = 200); + assert_err_with_weight!( + execute(), + >::StorageDepositNotEnoughFunds, + result.actual_weight + ); } #[test] @@ -2422,7 +2449,8 @@ fn storage_deposit_works() { // Create storage assert_ok!(builder::call(addr).value(42).data((50u32, 20u32).encode()).build()); // 4 is for creating 2 storage items - let charged0 = 4 + 50 + 20; + // 48 is for each of the keys + let charged0 = 4 + 50 + 20 + 48 + 48; deposit += charged0; assert_eq!(get_contract(&addr).total_deposit(), deposit); @@ -2471,7 +2499,7 @@ fn storage_deposit_callee_works() { assert_ok!(builder::call(addr_caller).data((100u32, &addr_callee).encode()).build()); let callee = get_contract(&addr_callee); - let deposit = DepositPerByte::get() * 100 + DepositPerItem::get() * 1; + let deposit = DepositPerByte::get() * 100 + DepositPerItem::get() * 1 + 48; assert_eq!(test_utils::get_balance(&account_id), min_balance); assert_eq!( @@ -2709,20 +2737,18 @@ fn storage_deposit_limit_is_enforced() { // Create 1 byte of storage with a price of per byte, // setting insufficient deposit limit, as it requires 3 Balance: - // 2 for the item added + 1 for the new storage item. + // 2 for the item added + 1 (value) + 48 (key) assert_err_ignore_postinfo!( builder::call(addr) - .storage_deposit_limit(2) + .storage_deposit_limit(50) .data(1u32.to_le_bytes().to_vec()) .build(), >::StorageDepositLimitExhausted, ); - // Create 1 byte of storage, should cost 3 Balance: - // 2 for the item added + 1 for the new storage item. - // Should pass as it fallbacks to DefaultDepositLimit. + // now with enough limit assert_ok!(builder::call(addr) - .storage_deposit_limit(3) + .storage_deposit_limit(51) .data(1u32.to_le_bytes().to_vec()) .build()); @@ -2753,19 +2779,20 @@ fn deposit_limit_in_nested_calls() { // Create 100 bytes of storage with a price of per byte // This is 100 Balance + 2 Balance for the item + // 48 for the key assert_ok!(builder::call(addr_callee) - .storage_deposit_limit(102) + .storage_deposit_limit(102 + 48) .data(100u32.to_le_bytes().to_vec()) .build()); // We do not remove any storage but add a storage item of 12 bytes in the caller - // contract. This would cost 12 + 2 = 14 Balance. + // contract. This would cost 12 + 2 + 72 = 86 Balance. // The nested call doesn't get a special limit, which is set by passing `u64::MAX` to it. // This should fail as the specified parent's limit is less than the cost: 13 < // 14. assert_err_ignore_postinfo!( builder::call(addr_caller) - .storage_deposit_limit(13) + .storage_deposit_limit(85) .data((100u32, &addr_callee, U256::MAX).encode()) .build(), >::StorageDepositLimitExhausted, @@ -2773,30 +2800,29 @@ fn deposit_limit_in_nested_calls() { // Now we specify the parent's limit high enough to cover the caller's storage // additions. However, we use a single byte more in the callee, hence the storage - // deposit should be 15 Balance. + // deposit should be 87 Balance. // The nested call doesn't get a special limit, which is set by passing `u64::MAX` to it. - // This should fail as the specified parent's limit is less than the cost: 14 - // < 15. 
+ // This should fail as the specified parent's limit is less than the cost: 86 < 87 assert_err_ignore_postinfo!( builder::call(addr_caller) - .storage_deposit_limit(14) + .storage_deposit_limit(86) .data((101u32, &addr_callee, &U256::MAX).encode()) .build(), >::StorageDepositLimitExhausted, ); - // Now we specify the parent's limit high enough to cover both the caller's and callee's - // storage additions. However, we set a special deposit limit of 1 Balance for the + // The parent's storage deposit limit doesn't matter as the sub call's limit + // is enforced eagerly. However, we set a special deposit limit of 1 Balance for the // nested call. This should fail as callee adds up 2 bytes to the storage, meaning // that the nested call should have a deposit limit of at least 2 Balance. The // sub-call should be rolled back, which is covered by the next test case. let ret = builder::bare_call(addr_caller) - .storage_deposit_limit(DepositLimit::Balance(16)) + .storage_deposit_limit(DepositLimit::Balance(u64::MAX)) .data((102u32, &addr_callee, U256::from(1u64)).encode()) .build_and_unwrap_result(); assert_return_code!(ret, RuntimeReturnCode::OutOfResources); - // Refund in the callee contract but not enough to cover the 14 Balance required by the + // Refund in the callee contract but not enough to cover the Balance required by the // caller. Note that if previous sub-call wouldn't roll back, this call would pass // making the test case fail. We don't set a special limit for the nested call here. assert_err_ignore_postinfo!( @@ -2816,12 +2842,12 @@ fn deposit_limit_in_nested_calls() { .build_and_unwrap_result(); assert_return_code!(ret, RuntimeReturnCode::OutOfResources); - // Same as above but allow for the additional deposit of 1 Balance in parent. + // Free up enough storage in the callee so that the caller can create a new item // We set the special deposit limit of 1 Balance for the nested call, which isn't // enforced as callee frees up storage. This should pass. assert_ok!(builder::call(addr_caller) .storage_deposit_limit(1) - .data((87u32, &addr_callee, U256::from(1u64)).encode()) + .data((0u32, &addr_callee, U256::from(1u64)).encode()) .build()); }); } @@ -2851,15 +2877,16 @@ fn deposit_limit_in_nested_instantiate() { // - the deposit for depending on a code hash // - ED for deployed contract account // - 2 for the storage item of 0 bytes being created in the callee constructor + // - 48 for the key let callee_min_deposit = { let callee_info_len = ContractInfoOf::::get(&addr).unwrap().encoded_size() as u64; let code_deposit = test_utils::lockup_deposit(&code_hash_callee); - callee_info_len + code_deposit + 2 + ED + 2 + callee_info_len + code_deposit + 2 + ED + 2 + 48 }; // The parent just stores an item of the passed size so at least // we need to pay for the item itself. - let caller_min_deposit = callee_min_deposit + 2; + let caller_min_deposit = callee_min_deposit + 2 + 48; // Fail in callee.
// @@ -2973,7 +3000,7 @@ fn deposit_limit_honors_liquidity_restrictions() { .storage_deposit_limit(10_000) .data(100u32.to_le_bytes().to_vec()) .build(), - >::StorageDepositLimitExhausted, + >::StorageDepositNotEnoughFunds, ); assert_eq!(::Currency::free_balance(&BOB), min_balance); }); @@ -3007,51 +3034,12 @@ fn deposit_limit_honors_existential_deposit() { .storage_deposit_limit(10_000) .data(100u32.to_le_bytes().to_vec()) .build(), - >::StorageDepositLimitExhausted, + >::StorageDepositNotEnoughFunds, ); assert_eq!(::Currency::free_balance(&BOB), 300); }); } -#[test] -fn deposit_limit_honors_min_leftover() { - let (wasm, _code_hash) = compile_module("store_call").unwrap(); - ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); - let _ = ::Currency::set_balance(&BOB, 1_000); - let min_balance = Contracts::min_balance(); - - // Instantiate the BOB contract. - let Contract { addr, account_id } = - builder::bare_instantiate(Code::Upload(wasm)).build_and_unwrap_contract(); - - let info_deposit = test_utils::contract_base_deposit(&addr); - - // Check that the contract has been instantiated and has the minimum balance and the - // storage deposit - assert_eq!(get_contract(&addr).total_deposit(), info_deposit); - assert_eq!( - ::Currency::total_balance(&account_id), - info_deposit + min_balance - ); - - // check that the minimum leftover (value send) is considered - // given the minimum deposit of 200 sending 750 will only leave - // 50 for the storage deposit. Which is not enough to store the 50 bytes - // as we also need 2 bytes for the item - assert_err_ignore_postinfo!( - builder::call(addr) - .origin(RuntimeOrigin::signed(BOB)) - .value(750) - .storage_deposit_limit(10_000) - .data(50u32.to_le_bytes().to_vec()) - .build(), - >::StorageDepositLimitExhausted, - ); - assert_eq!(::Currency::free_balance(&BOB), 1_000); - }); -} - #[test] fn native_dependency_deposit_works() { let (wasm, code_hash) = compile_module("set_code_hash").unwrap(); @@ -4011,13 +3999,13 @@ fn recovery_works() { #[test] fn skip_transfer_works() { let (code_caller, _) = compile_module("call").unwrap(); - let (code, _) = compile_module("set_empty_storage").unwrap(); + let (code, _) = compile_module("store_call").unwrap(); ExtBuilder::default().existential_deposit(100).build().execute_with(|| { ::Currency::set_balance(&ALICE, 1_000_000); ::Currency::set_balance(&BOB, 0); - // fails to instantiate when gas is specified. + // when gas is some (transfers enabled): bob has no money: fail assert_err!( Pallet::::bare_eth_transact( GenericTransaction { @@ -4034,10 +4022,10 @@ fn skip_transfer_works() { )) ); - // works when no gas is specified. + // no gas specified (all transfers are skipped): even without money bob can deploy assert_ok!(Pallet::::bare_eth_transact( GenericTransaction { - from: Some(ALICE_ADDR), + from: Some(BOB_ADDR), input: code.clone().into(), ..Default::default() }, @@ -4051,12 +4039,13 @@ fn skip_transfer_works() { let Contract { addr: caller_addr, .. } = builder::bare_instantiate(Code::Upload(code_caller)).build_and_unwrap_contract(); - // fails to call when gas is specified. + // call directly: fails with enabled transfers assert_err!( Pallet::::bare_eth_transact( GenericTransaction { from: Some(BOB_ADDR), to: Some(addr), + input: 0u32.encode().into(), gas: Some(1u32.into()), ..Default::default() }, @@ -4068,12 +4057,15 @@ fn skip_transfer_works() { )) ); - // fails when calling from a contract when gas is specified. 
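The gas-present/gas-absent split that `skip_transfer_works` exercises is decided inside `bare_eth_transact`, in the lib.rs hunk earlier in this diff; restated as a sketch:

    // Paraphrased from bare_eth_transact: a gas-carrying transaction enforces
    // storage deposits against a real (if maximal) limit, while a gas-less
    // call is treated as a dry-run and skips all balance transfer checks.
    let storage_deposit_limit = if tx.gas.is_some() {
        DepositLimit::Balance(BalanceOf::<T>::max_value())
    } else {
        DepositLimit::UnsafeOnlyForDryRun
    };

This is why every `gas: Some(..)` case in the test can fail on BOB's empty account while the gas-less variants succeed.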
+ // fails to call through another contract + // we didn't roll back the storage changes done by the previous + // call. So the item already exists. We simply increase the size of + // the storage item to incur some deposits (which bob can't pay). assert!(Pallet::::bare_eth_transact( GenericTransaction { from: Some(BOB_ADDR), to: Some(caller_addr), - input: (0u32, &addr).encode().into(), + input: (1u32, &addr).encode().into(), gas: Some(1u32.into()), ..Default::default() }, Weight::MAX, ) .is_err(),); - // works when no gas is specified. + // works when no gas is specified (skip transfer) assert_ok!(Pallet::::bare_eth_transact( - GenericTransaction { from: Some(BOB_ADDR), to: Some(addr), ..Default::default() }, + GenericTransaction { + from: Some(BOB_ADDR), + to: Some(addr), + input: 2u32.encode().into(), + ..Default::default() + }, + Weight::MAX, + |_, _| 0u64, + )); + + // call through contract works when transfers are skipped + assert_ok!(Pallet::::bare_eth_transact( + GenericTransaction { + from: Some(BOB_ADDR), + to: Some(caller_addr), + input: (3u32, &addr).encode().into(), + ..Default::default() + }, Weight::MAX, |_, _| 0u64, )); - // works when calling from a contract when no gas is specified. + // works with transfers enabled if we don't incur a storage cost + // we shrink the item so it's actually a refund assert_ok!(Pallet::::bare_eth_transact( GenericTransaction { from: Some(BOB_ADDR), to: Some(caller_addr), - input: (0u32, &addr).encode().into(), + input: (2u32, &addr).encode().into(), + gas: Some(1u32.into()), ..Default::default() }, Weight::MAX, |_, _| 0u64, )); + + // fails when trying to increase the storage item size + assert!(Pallet::::bare_eth_transact( + GenericTransaction { + from: Some(BOB_ADDR), + to: Some(caller_addr), + input: (3u32, &addr).encode().into(), + gas: Some(1u32.into()), + ..Default::default() + }, + Weight::MAX, + |_, _| 0u64, + ) + .is_err()); }); } @@ -4565,3 +4590,46 @@ fn precompiles_with_info_creates_contract() { }); } } + +#[test] +fn nonce_incremented_dry_run_vs_execute() { + let (wasm, _code_hash) = compile_module("dummy").unwrap(); + + ExtBuilder::default().build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + + // Set a known nonce + let initial_nonce = 5; + frame_system::Account::::mutate(&ALICE, |account| { + account.nonce = initial_nonce; + }); + + // simulate a dry run + let dry_run_result = builder::bare_instantiate(Code::Upload(wasm.clone())) + .nonce_already_incremented(crate::NonceAlreadyIncremented::No) + .salt(None) + .build(); + + let dry_run_addr = dry_run_result.result.unwrap().addr; + + let deployer = ::AddressMapper::to_address(&ALICE); + let expected_addr = create1(&deployer, initial_nonce.into()); + + assert_eq!(dry_run_addr, expected_addr); + + // reset nonce to initial value + frame_system::Account::::mutate(&ALICE, |account| { + account.nonce = initial_nonce; + }); + + // simulate an actual execution + let exec_result = builder::bare_instantiate(Code::Upload(wasm.clone())).salt(None).build(); + + let exec_addr = exec_result.result.unwrap().addr; + + let deployer = ::AddressMapper::to_address(&ALICE); + let expected_addr = create1(&deployer, (initial_nonce - 1).into()); + + assert_eq!(exec_addr, expected_addr); + }); +} diff --git a/substrate/frame/sassafras/src/lib.rs b/substrate/frame/sassafras/src/lib.rs index 35539fd35f22c..94a8125112ea3 100644 --- a/substrate/frame/sassafras/src/lib.rs +++ b/substrate/frame/sassafras/src/lib.rs @@ -61,7 +61,7 @@ use
frame_support::{ BoundedVec, WeakBoundedVec, }; use frame_system::{ - offchain::{CreateInherent, SubmitTransaction}, + offchain::{CreateBare, SubmitTransaction}, pallet_prelude::BlockNumberFor, }; use sp_consensus_sassafras::{ @@ -128,7 +128,7 @@ pub mod pallet { /// Configuration parameters. #[pallet::config] - pub trait Config: frame_system::Config + CreateInherent> { + pub trait Config: frame_system::Config + CreateBare> { /// Amount of slots that each epoch should last. #[pallet::constant] type EpochLength: Get; @@ -1001,7 +1001,7 @@ impl Pallet { pub fn submit_tickets_unsigned_extrinsic(tickets: Vec) -> bool { let tickets = BoundedVec::truncate_from(tickets); let call = Call::submit_tickets { tickets }; - let xt = T::create_inherent(call.into()); + let xt = T::create_bare(call.into()); match SubmitTransaction::>::submit_transaction(xt) { Ok(_) => true, Err(e) => { diff --git a/substrate/frame/sassafras/src/mock.rs b/substrate/frame/sassafras/src/mock.rs index aa190a8ce5039..db20f3a5bd243 100644 --- a/substrate/frame/sassafras/src/mock.rs +++ b/substrate/frame/sassafras/src/mock.rs @@ -56,11 +56,11 @@ where type Extrinsic = frame_system::mocking::MockUncheckedExtrinsic; } -impl frame_system::offchain::CreateInherent for Test +impl frame_system::offchain::CreateBare for Test where RuntimeCall: From, { - fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + fn create_bare(call: Self::RuntimeCall) -> Self::Extrinsic { frame_system::mocking::MockUncheckedExtrinsic::::new_bare(call) } } diff --git a/substrate/frame/scheduler/src/lib.rs b/substrate/frame/scheduler/src/lib.rs index 50c3ba642b85e..80c1ae4c489fa 100644 --- a/substrate/frame/scheduler/src/lib.rs +++ b/substrate/frame/scheduler/src/lib.rs @@ -415,6 +415,27 @@ pub mod pallet { Self::service_agendas(&mut weight_counter, now, u32::MAX); weight_counter.consumed() } + + #[cfg(feature = "std")] + fn integrity_test() { + /// Calculate the maximum weight that a lookup of a given size can take. 
+ fn lookup_weight(s: usize) -> Weight { + T::WeightInfo::service_agendas_base() + + T::WeightInfo::service_agenda_base(T::MaxScheduledPerBlock::get()) + + T::WeightInfo::service_task(Some(s), true, true) + } + + let limit = sp_runtime::Perbill::from_percent(90) * T::MaximumWeight::get(); + + let small_lookup = lookup_weight::(128); + assert!(small_lookup.all_lte(limit), "Must be possible to submit a small lookup"); + + let medium_lookup = lookup_weight::(1024); + assert!(medium_lookup.all_lte(limit), "Must be possible to submit a medium lookup"); + + let large_lookup = lookup_weight::(1024 * 1024); + assert!(large_lookup.all_lte(limit), "Must be possible to submit a large lookup"); + } } #[pallet::call] diff --git a/substrate/frame/staking-async/ahm-test/src/ah/mock.rs b/substrate/frame/staking-async/ahm-test/src/ah/mock.rs index ab3c5d0392887..9f0054f67a1c3 100644 --- a/substrate/frame/staking-async/ahm-test/src/ah/mock.rs +++ b/substrate/frame/staking-async/ahm-test/src/ah/mock.rs @@ -195,11 +195,11 @@ where type Extrinsic = Extrinsic; } -impl frame_system::offchain::CreateInherent for Runtime +impl frame_system::offchain::CreateBare for Runtime where RuntimeCall: From, { - fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + fn create_bare(call: Self::RuntimeCall) -> Self::Extrinsic { Extrinsic::new_bare(call) } } diff --git a/substrate/frame/staking-async/runtimes/parachain/src/staking.rs b/substrate/frame/staking-async/runtimes/parachain/src/staking.rs index d1f2a71015e15..d57a6ee31b282 100644 --- a/substrate/frame/staking-async/runtimes/parachain/src/staking.rs +++ b/substrate/frame/staking-async/runtimes/parachain/src/staking.rs @@ -470,7 +470,7 @@ impl frame_system::offchain::CreateInherent for Runtime where RuntimeCall: From, { - fn create_inherent(call: RuntimeCall) -> UncheckedExtrinsic { + fn create_bare(call: RuntimeCall) -> UncheckedExtrinsic { UncheckedExtrinsic::new_bare(call) } } diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/cumulus_pallet_xcmp_queue.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/cumulus_pallet_xcmp_queue.rs index 85980091ea428..60543220d68c1 100644 --- a/substrate/frame/staking-async/runtimes/parachain/src/weights/cumulus_pallet_xcmp_queue.rs +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/cumulus_pallet_xcmp_queue.rs @@ -106,6 +106,26 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 105457]`. + fn enqueue_empty_xcmp_message_at(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `334 + n * (1 ±0)` + // Estimated: `108986` + // Minimum execution time: 12_000_000 picoseconds. 
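The scheduler's new `integrity_test` above asserts that small (128 B), medium (1 KiB) and large (1 MiB) lookups all fit within 90% of `T::MaximumWeight`. A self-contained sketch of the same invariant (not part of the diff), with invented weight numbers standing in for the benchmarked `WeightInfo` values:

// Invented stand-ins for service_agendas_base + service_agenda_base + service_task.
fn lookup_weight(size_bytes: u64) -> u64 {
    let base = 50_000;
    let per_byte = 10;
    base + per_byte * size_bytes
}

fn main() {
    let maximum_weight: u64 = 2_000_000_000; // stand-in for T::MaximumWeight
    let limit = maximum_weight / 10 * 9; // Perbill::from_percent(90)
    for size in [128u64, 1024, 1024 * 1024] {
        assert!(lookup_weight(size) <= limit, "a {size}-byte lookup must be submittable");
    }
}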
+ Weight::from_parts(11_015_940, 108986) + // Standard Error: 32 + .saturating_add(Weight::from_parts(911, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) @@ -137,12 +157,12 @@ impl cumulus_pallet_xcmp_queue::WeightInfo for WeightIn /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn enqueue_1000_small_xcmp_messages() -> Weight { // Proof Size summary in bytes: - // Measured: `151` - // Estimated: `5487` - // Minimum execution time: 118_730_000 picoseconds. - Weight::from_parts(123_480_000, 5487) + // Measured: `53067` + // Estimated: `108986` + // Minimum execution time: 139_000_000 picoseconds. + Weight::from_parts(148_000_000, 108986) .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) diff --git a/substrate/frame/staking-async/runtimes/rc/src/lib.rs b/substrate/frame/staking-async/runtimes/rc/src/lib.rs index d7e8002b3b253..5bf9cf1e0f954 100644 --- a/substrate/frame/staking-async/runtimes/rc/src/lib.rs +++ b/substrate/frame/staking-async/runtimes/rc/src/lib.rs @@ -1049,7 +1049,7 @@ impl frame_system::offchain::CreateInherent for Runtime where RuntimeCall: From, { - fn create_inherent(call: RuntimeCall) -> UncheckedExtrinsic { + fn create_bare(call: RuntimeCall) -> UncheckedExtrinsic { UncheckedExtrinsic::new_bare(call) } } diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs index 430a7833e5984..47f4262da0d60 100644 --- a/substrate/frame/staking/src/pallet/impls.rs +++ b/substrate/frame/staking/src/pallet/impls.rs @@ -729,7 +729,7 @@ impl Pallet { if new_planned_era > 0 { log!( - info, + debug, "new validator set of size {:?} has been processed for era {:?}", elected_stashes.len(), new_planned_era, @@ -987,7 +987,7 @@ impl Pallet { MinimumActiveStake::::put(min_active_stake); log!( - info, + debug, "generated {} npos voters, {} from validators and {} nominators", all_voters.len(), validators_taken, @@ -1037,7 +1037,7 @@ impl Pallet { } Self::register_weight(T::WeightInfo::get_npos_targets(all_targets.len() as u32)); - log!(info, "generated {} npos targets", all_targets.len()); + log!(debug, "generated {} npos targets", all_targets.len()); all_targets } diff --git a/substrate/frame/support/procedural/src/benchmark.rs b/substrate/frame/support/procedural/src/benchmark.rs index c2f615ef54d93..52f282e1d43fe 100644 --- a/substrate/frame/support/procedural/src/benchmark.rs +++ b/substrate/frame/support/procedural/src/benchmark.rs @@ -736,6 +736,7 @@ pub fn benchmarks( verify: bool, internal_repeats: u32, ) -> 
Result<#krate::__private::Vec<#krate::BenchmarkResult>, #krate::BenchmarkError> { + #krate::benchmarking::wipe_db(); let extrinsic = #krate::__private::str::from_utf8(extrinsic).map_err(|_| "`extrinsic` is not a valid utf-8 string!")?; let selected_benchmark = match extrinsic { #(#selected_benchmark_mappings), diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs index e25492802c329..bc27cd6ec89b7 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs @@ -27,23 +27,28 @@ pub fn expand_outer_inherent( pallet_decls: &[Pallet], scrate: &TokenStream, ) -> TokenStream { + let mut pallet_positions = Vec::new(); let mut pallet_names = Vec::new(); let mut pallet_attrs = Vec::new(); let mut query_inherent_part_macros = Vec::new(); - for pallet_decl in pallet_decls { - if pallet_decl.exists_part("Inherent") { - let name = &pallet_decl.name; - let path = &pallet_decl.path; - let attr = pallet_decl.get_attributes(); - - pallet_names.push(name); - pallet_attrs.push(attr); - query_inherent_part_macros.push(quote! { - #path::__substrate_inherent_check::is_inherent_part_defined!(#name); - }); - } + for (pallet_pos, pallet_decl) in pallet_decls + .iter() + .filter(|pallet_decl| pallet_decl.exists_part("Inherent")) + .enumerate() + { + let name = &pallet_decl.name; + let path = &pallet_decl.path; + let attr = pallet_decl.get_attributes(); + + pallet_positions.push(pallet_pos); + pallet_names.push(name); + pallet_attrs.push(attr); + query_inherent_part_macros.push(quote! { + #path::__substrate_inherent_check::is_inherent_part_defined!(#name); + }); } + let pallet_count = pallet_positions.len(); quote! { #( #query_inherent_part_macros )* @@ -78,8 +83,8 @@ pub fn expand_outer_inherent( fn check_extrinsics(&self, block: &#block) -> #scrate::inherent::CheckInherentsResult { use #scrate::inherent::{ProvideInherent, IsFatalError}; - use #scrate::traits::{IsSubType, ExtrinsicCall}; - use #scrate::sp_runtime::traits::Block as _; + use #scrate::traits::IsSubType; + use #scrate::sp_runtime::traits::{Block as _, ExtrinsicCall}; use #scrate::__private::{sp_inherents::Error, log}; let mut result = #scrate::inherent::CheckInherentsResult::new(); @@ -110,6 +115,7 @@ pub fn expand_outer_inherent( } } + let mut pallet_has_inherent = [false; #pallet_count]; for xt in block.extrinsics() { // Inherents are before any other extrinsics. // And signed extrinsics are not inherents. @@ -118,14 +124,14 @@ pub fn expand_outer_inherent( } let mut is_inherent = false; - + let call = ExtrinsicCall::call(xt); #( #pallet_attrs { - let call = <#unchecked_extrinsic as ExtrinsicCall>::call(xt); if let Some(call) = IsSubType::<_>::is_sub_type(call) { if #pallet_names::is_inherent(call) { is_inherent = true; + pallet_has_inherent[#pallet_positions] = true; if let Err(e) = #pallet_names::check_inherent(call, self) { handle_put_error_result(result.put_error( #pallet_names::INHERENT_IDENTIFIER, &e @@ -140,7 +146,7 @@ pub fn expand_outer_inherent( )* // Inherents are before any other extrinsics. - // No module marked it as inherent thus it is not. + // No module marked it as inherent, thus it is not. 
if !is_inherent { break } @@ -150,25 +156,7 @@ pub fn expand_outer_inherent( #pallet_attrs match #pallet_names::is_inherent_required(self) { Ok(Some(e)) => { - let found = block.extrinsics().iter().any(|xt| { - let is_bare = #scrate::sp_runtime::traits::ExtrinsicLike::is_bare(xt); - - if is_bare { - let call = < - #unchecked_extrinsic as ExtrinsicCall - >::call(xt); - if let Some(call) = IsSubType::<_>::is_sub_type(call) { - #pallet_names::is_inherent(&call) - } else { - false - } - } else { - // Signed extrinsics are not inherents. - false - } - }); - - if !found { + if !pallet_has_inherent[#pallet_positions] { handle_put_error_result(result.put_error( #pallet_names::INHERENT_IDENTIFIER, &e )); @@ -196,7 +184,8 @@ pub fn expand_outer_inherent( impl #scrate::traits::IsInherent<<#block as #scrate::sp_runtime::traits::Block>::Extrinsic> for #runtime { fn is_inherent(ext: &<#block as #scrate::sp_runtime::traits::Block>::Extrinsic) -> bool { use #scrate::inherent::ProvideInherent; - use #scrate::traits::{IsSubType, ExtrinsicCall}; + use #scrate::traits::IsSubType; + use #scrate::sp_runtime::traits::ExtrinsicCall; let is_bare = #scrate::sp_runtime::traits::ExtrinsicLike::is_bare(ext); if !is_bare { @@ -204,10 +193,10 @@ pub fn expand_outer_inherent( return false } + let call = ExtrinsicCall::call(ext); #( #pallet_attrs { - let call = <#unchecked_extrinsic as ExtrinsicCall>::call(ext); if let Some(call) = IsSubType::<_>::is_sub_type(call) { if <#pallet_names as ProvideInherent>::is_inherent(&call) { return true; @@ -218,27 +207,5 @@ pub fn expand_outer_inherent( false } } - - impl #scrate::traits::EnsureInherentsAreFirst<#block> for #runtime { - fn ensure_inherents_are_first(block: &#block) -> Result { - use #scrate::inherent::ProvideInherent; - use #scrate::traits::{IsSubType, ExtrinsicCall}; - use #scrate::sp_runtime::traits::Block as _; - - let mut num_inherents = 0u32; - - for (i, xt) in block.extrinsics().iter().enumerate() { - if >::is_inherent(xt) { - if num_inherents != i as u32 { - return Err(i as u32); - } - - num_inherents += 1; // Safe since we are in an `enumerate` loop. 
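This hunk swaps the removed per-pallet rescan of `block.extrinsics()` (the `found = ... any(...)` loop, and the `EnsureInherentsAreFirst` impl deleted below) for a single pass that records in `pallet_has_inherent` which pallets supplied an inherent; `is_inherent_required` then only consults that array. A self-contained sketch of the idea (not part of the diff), with pallets reduced to plain indices and all names invented:

// One pass over the block's inherents instead of one scan per pallet.
fn missing_required_inherent(
    provided: &[usize], // pallet index of each inherent found in the block
    pallet_count: usize,
    required: &[usize], // pallets whose inherent is mandatory
) -> Option<usize> {
    let mut pallet_has_inherent = vec![false; pallet_count];
    for &idx in provided {
        pallet_has_inherent[idx] = true;
    }
    required.iter().copied().find(|&idx| !pallet_has_inherent[idx])
}

fn main() {
    assert_eq!(missing_required_inherent(&[0, 2], 3, &[0, 1]), Some(1));
    assert_eq!(missing_required_inherent(&[0, 1], 3, &[0, 1]), None);
}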
- } - } - - Ok(num_inherents) - } - } } } diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs index 3a41e6b9b59e4..054b9d9da3306 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs @@ -99,7 +99,7 @@ pub fn expand_runtime_metadata( <#extrinsic as #scrate::traits::SignedTransactionBuilder>::Address >(); let call_ty = #scrate::__private::scale_info::meta_type::< - <#extrinsic as #scrate::traits::ExtrinsicCall>::Call + <#extrinsic as #scrate::sp_runtime::traits::ExtrinsicCall>::Call >(); let signature_ty = #scrate::__private::scale_info::meta_type::< <#extrinsic as #scrate::traits::SignedTransactionBuilder>::Signature diff --git a/substrate/frame/support/src/traits.rs b/substrate/frame/support/src/traits.rs index 126715083237d..18b8a391a14fc 100644 --- a/substrate/frame/support/src/traits.rs +++ b/substrate/frame/support/src/traits.rs @@ -59,12 +59,11 @@ pub use misc::{ defensive_prelude::{self, *}, AccountTouch, Backing, ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstInt, ConstU128, ConstU16, ConstU32, ConstU64, ConstU8, ConstUint, DefensiveMax, DefensiveMin, - DefensiveSaturating, DefensiveTruncateFrom, EnsureInherentsAreFirst, EqualPrivilegeOnly, - EstimateCallFee, ExecuteBlock, ExtrinsicCall, Get, GetBacking, GetDefault, HandleLifetime, - InherentBuilder, IsInherent, IsSubType, IsType, Len, OffchainWorker, OnKilledAccount, - OnNewAccount, PrivilegeCmp, RewardsReporter, SameOrOther, SignedTransactionBuilder, Time, - TryCollect, TryDrop, TypedGet, UnixTime, VariantCount, VariantCountOf, WrapperKeepOpaque, - WrapperOpaque, + DefensiveSaturating, DefensiveTruncateFrom, EqualPrivilegeOnly, EstimateCallFee, ExecuteBlock, + Get, GetBacking, GetDefault, HandleLifetime, InherentBuilder, IsInherent, IsSubType, IsType, + Len, OffchainWorker, OnKilledAccount, OnNewAccount, PrivilegeCmp, RewardsReporter, SameOrOther, + SignedTransactionBuilder, Time, TryCollect, TryDrop, TypedGet, UnixTime, VariantCount, + VariantCountOf, WrapperKeepOpaque, WrapperOpaque, }; #[allow(deprecated)] pub use misc::{PreimageProvider, PreimageRecipient}; @@ -119,9 +118,9 @@ pub use preimages::{Bounded, BoundedInline, FetchResult, QueryPreimage, StorePre mod messages; pub use messages::{ - BatchFootprint, EnqueueMessage, EnqueueWithOrigin, ExecuteOverweightError, HandleMessage, - NoopServiceQueues, ProcessMessage, ProcessMessageError, QueueFootprint, QueueFootprintQuery, - QueuePausedQuery, ServiceQueues, TransformOrigin, + BatchFootprint, BatchesFootprints, EnqueueMessage, EnqueueWithOrigin, ExecuteOverweightError, + HandleMessage, NoopServiceQueues, ProcessMessage, ProcessMessageError, QueueFootprint, + QueueFootprintQuery, QueuePausedQuery, ServiceQueues, TransformOrigin, }; mod safe_mode; diff --git a/substrate/frame/support/src/traits/messages.rs b/substrate/frame/support/src/traits/messages.rs index 0370877510b46..0a5c70f8f0fa5 100644 --- a/substrate/frame/support/src/traits/messages.rs +++ b/substrate/frame/support/src/traits/messages.rs @@ -18,9 +18,11 @@ //! Traits for managing message queuing and handling. 
use super::storage::Footprint; -use alloc::{vec, vec::Vec}; +use crate::defensive; + +use alloc::vec::Vec; use codec::{Decode, DecodeWithMemTracking, Encode, FullCodec, MaxEncodedLen}; -use core::{fmt::Debug, marker::PhantomData}; +use core::{cmp::Ordering, fmt::Debug, marker::PhantomData}; use scale_info::TypeInfo; use sp_core::{ConstU32, Get, TypedGet}; use sp_runtime::{traits::Convert, BoundedSlice, RuntimeDebug}; @@ -164,7 +166,7 @@ pub struct QueueFootprint { } /// The resource footprint of a batch of messages. -#[derive(Default, Copy, Clone, Eq, PartialEq, RuntimeDebug)] +#[derive(Default, Copy, Clone, PartialEq, RuntimeDebug)] pub struct BatchFootprint { /// The number of messages in the batch. pub msgs_count: usize, @@ -174,6 +176,69 @@ pub struct BatchFootprint { pub new_pages_count: u32, } +/// The resource footprints of continuous subsets of messages. +/// +/// For a set of messages `xcms[0..n]`, each `footprints[i]` contains the footprint +/// of the batch `xcms[0..i]`, so as `i` increases `footprints[i]` contains the footprint +/// of a bigger batch. +#[derive(Default, RuntimeDebug)] +pub struct BatchesFootprints { + /// The position in the first available MQ page where the batch will start being appended. + /// + /// The messages in the batch will be enqueued to the message queue. Since the message queue is + /// organized in pages, the messages may be enqueued across multiple contiguous pages. + /// The position where we start appending messages to the first available MQ page is of + /// particular importance since it impacts the performance of the enqueuing operation. + /// That's because the first page has to be decoded first. This is not needed for the following + /// pages. + pub first_page_pos: usize, + pub footprints: Vec, +} + +impl BatchesFootprints { + /// Appends a batch footprint to the back of the collection. + /// + /// The new footprint represents a batch that includes all the messages contained by the + /// previous batches plus the provided `msg`. If `new_page` is true, we will consider that + /// the provided `msg` is appended to a new message queue page. Otherwise, we consider + /// that it is appended to the current page. + pub fn push(&mut self, msg: &[u8], new_page: bool) { + let previous_footprint = + self.footprints.last().map(|footprint| *footprint).unwrap_or_default(); + + let mut new_pages_count = previous_footprint.new_pages_count; + if new_page { + new_pages_count = new_pages_count.saturating_add(1); + } + self.footprints.push(BatchFootprint { + msgs_count: previous_footprint.msgs_count.saturating_add(1), + size_in_bytes: previous_footprint.size_in_bytes.saturating_add(msg.len()), + new_pages_count, + }); + } + + /// Gets the biggest batch for which the comparator function returns `Ordering::Less`. + pub fn search_best_by(&self, f: F) -> &BatchFootprint + where + F: FnMut(&BatchFootprint) -> Ordering, + { + // Since the batches are sorted by size, we can use binary search. + let maybe_best_idx = match self.footprints.binary_search_by(f) { + Ok(last_ok_idx) => Some(last_ok_idx), + Err(first_err_idx) => first_err_idx.checked_sub(1), + }; + if let Some(best_idx) = maybe_best_idx { + match self.footprints.get(best_idx) { + Some(best_footprint) => return best_footprint, + None => { + defensive!("Invalid best_batch_idx: {}", best_idx); + }, + } + } + &BatchFootprint { msgs_count: 0, size_in_bytes: 0, new_pages_count: 0 } + } +} + /// Provides information on queue footprint. 
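Since each entry of `footprints` covers a strictly larger prefix of the same message sequence, the recorded sizes never decrease, which is what makes the binary search inside `search_best_by` sound. A self-contained sketch of that prefix-footprint plus binary-search pattern (not part of the diff), tracking only byte sizes where the real type also tracks message and page counts:

fn main() {
    // Prefix footprints: sizes[i] = total bytes of the first i + 1 messages.
    let msgs: &[&[u8]] = &[b"aa", b"bbb", b"c", b"dddd"];
    let mut sizes: Vec<usize> = Vec::new();
    let mut total = 0usize;
    for msg in msgs {
        total += msg.len();
        sizes.push(total); // monotonically non-decreasing, so binary search is valid
    }

    // Largest prefix whose total size stays within the limit,
    // mirroring the Ok / Err(first_err_idx - 1) handling in `search_best_by`.
    let limit: usize = 6;
    let best = match sizes.binary_search(&limit) {
        Ok(i) => Some(i),
        Err(first_over) => first_over.checked_sub(1),
    };
    // 2 + 3 = 5 and 2 + 3 + 1 = 6 both fit, so the best prefix is the first three messages.
    assert_eq!(best, Some(2));
}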
pub trait QueueFootprintQuery { /// The maximal length any enqueued message may have. @@ -214,7 +279,7 @@ pub trait QueueFootprintQuery { origin: Origin, msgs: impl Iterator>, total_pages_limit: u32, - ) -> Vec; + ) -> BatchesFootprints; } impl QueueFootprintQuery for () { @@ -228,8 +293,8 @@ impl QueueFootprintQuery for () { _origin: Origin, _msgs: impl Iterator>, _total_pages_limit: u32, - ) -> Vec { - vec![] + ) -> BatchesFootprints { + BatchesFootprints::default() } } @@ -269,7 +334,7 @@ impl, O: MaxEncodedLen, N: MaxEncodedLen, C: Convert>, total_pages_limit: u32, - ) -> Vec { + ) -> BatchesFootprints { E::get_batches_footprints(C::convert(origin), msgs, total_pages_limit) } } diff --git a/substrate/frame/support/src/traits/misc.rs b/substrate/frame/support/src/traits/misc.rs index 4defe851f882e..784ef348e5d16 100644 --- a/substrate/frame/support/src/traits/misc.rs +++ b/substrate/frame/support/src/traits/misc.rs @@ -31,7 +31,10 @@ pub use sp_runtime::traits::{ ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstInt, ConstU128, ConstU16, ConstU32, ConstU64, ConstU8, ConstUint, Get, GetDefault, TryCollect, TypedGet, }; -use sp_runtime::{traits::Block as BlockT, DispatchError}; +use sp_runtime::{ + traits::{Block as BlockT, ExtrinsicCall}, + DispatchError, +}; #[doc(hidden)] pub const DEFENSIVE_OP_PUBLIC_ERROR: &str = "a defensive failure has been triggered; please report the block number at https://github.com/paritytech/polkadot-sdk/issues"; @@ -898,49 +901,12 @@ pub trait GetBacking { fn get_backing(&self) -> Option; } -/// A trait to ensure the inherent are before non-inherent in a block. -/// -/// This is typically implemented on runtime, through `construct_runtime!`. -pub trait EnsureInherentsAreFirst: - IsInherent<::Extrinsic> -{ - /// Ensure the position of inherent is correct, i.e. they are before non-inherents. - /// - /// On error return the index of the inherent with invalid position (counting from 0). On - /// success it returns the index of the last inherent. `0` therefore means that there are no - /// inherents. - fn ensure_inherents_are_first(block: &Block) -> Result; -} - /// A trait to check if an extrinsic is an inherent. pub trait IsInherent { /// Whether this extrinsic is an inherent. fn is_inherent(ext: &Extrinsic) -> bool; } -/// An extrinsic on which we can get access to call. -pub trait ExtrinsicCall: sp_runtime::traits::ExtrinsicLike { - type Call; - - /// Get the call of the extrinsic. - fn call(&self) -> &Self::Call; -} - -impl ExtrinsicCall - for sp_runtime::generic::UncheckedExtrinsic -where - Address: TypeInfo, - Call: TypeInfo, - Signature: TypeInfo, - Extra: TypeInfo, -{ - type Call = Call; - - fn call(&self) -> &Call { - &self.function - } -} - /// Interface for types capable of constructing an inherent extrinsic. pub trait InherentBuilder: ExtrinsicCall { /// Create a new inherent from a given call. 
diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr index 96452560a076e..b6a4f6ed6758a 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr @@ -789,7 +789,7 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied = help: the trait `Serialize` is implemented for `GenesisConfig` = note: required for `GenesisConfig` to implement `Serialize` note: required by a bound in `frame_support::sp_runtime::serde::ser::SerializeStruct::serialize_field` - --> $CARGO/serde-1.0.217/src/ser/mod.rs + --> $CARGO/serde-1.0.219/src/ser/mod.rs | | fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<(), Self::Error> | --------------- required by a bound in this associated function diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr index 9608fa58e3c98..362c02f1ab3e0 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr @@ -33,7 +33,7 @@ error[E0599]: no function or associated item named `create_inherent` found for s | = help: items from traits can only be used if the trait is implemented and in scope = note: the following traits define an item `create_inherent`, perhaps you need to implement one of them: - candidate #1: `CreateInherent` + candidate #1: `CreateBare` candidate #2: `ProvideInherent` = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/substrate/frame/support/test/tests/pallet.rs b/substrate/frame/support/test/tests/pallet.rs index a3074de841b3e..7de37f1c98150 100644 --- a/substrate/frame/support/test/tests/pallet.rs +++ b/substrate/frame/support/test/tests/pallet.rs @@ -984,7 +984,7 @@ fn instance_expand() { #[test] fn inherent_expand() { - use frame_support::{inherent::InherentData, traits::EnsureInherentsAreFirst}; + use frame_support::inherent::InherentData; use sp_core::Hasher; use sp_runtime::{ traits::{BlakeTwo256, Block as _, Header}, @@ -1071,74 +1071,6 @@ fn inherent_expand() { let mut inherent = InherentData::new(); inherent.put_data(*b"required", &true).unwrap(); assert!(inherent.check_extrinsics(&block).fatal_error()); - - let block = Block::new( - Header::new( - 1, - BlakeTwo256::hash(b"test"), - BlakeTwo256::hash(b"test"), - BlakeTwo256::hash(b"test"), - Digest::default(), - ), - vec![ - UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo { - foo: 1, - bar: 1, - })), - UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo_storage_layer { - foo: 0, - })), - ], - ); - - assert!(Runtime::ensure_inherents_are_first(&block).is_ok()); - - let block = Block::new( - Header::new( - 1, - BlakeTwo256::hash(b"test"), - BlakeTwo256::hash(b"test"), - BlakeTwo256::hash(b"test"), - Digest::default(), - ), - vec![ - UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo { - foo: 1, - bar: 1, - })), - UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo_storage_layer { - foo: 0, - })), - UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo_no_post_info {})), - ], - ); - - 
assert_eq!(Runtime::ensure_inherents_are_first(&block).err().unwrap(), 2); - - let block = Block::new( - Header::new( - 1, - BlakeTwo256::hash(b"test"), - BlakeTwo256::hash(b"test"), - BlakeTwo256::hash(b"test"), - Digest::default(), - ), - vec![ - UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo { - foo: 1, - bar: 1, - })), - UncheckedExtrinsic::new_signed( - RuntimeCall::Example(pallet::Call::foo { foo: 1, bar: 0 }), - 1, - 1.into(), - Default::default(), - ), - UncheckedExtrinsic::new_bare(RuntimeCall::Example(pallet::Call::foo_no_post_info {})), - ], - ); - - assert_eq!(Runtime::ensure_inherents_are_first(&block).err().unwrap(), 2); } #[test] diff --git a/substrate/frame/system/src/offchain.rs b/substrate/frame/system/src/offchain.rs index 36504a81125af..64ba3df24ac0f 100644 --- a/substrate/frame/system/src/offchain.rs +++ b/substrate/frame/system/src/offchain.rs @@ -273,11 +273,8 @@ impl< } } -impl< - T: SigningTypes + CreateInherent, - C: AppCrypto, - LocalCall, - > SendUnsignedTransaction for Signer +impl, C: AppCrypto, LocalCall> + SendUnsignedTransaction for Signer { type Result = Option<(Account, Result<(), ()>)>; @@ -299,11 +296,8 @@ impl< } } -impl< - T: SigningTypes + CreateInherent, - C: AppCrypto, - LocalCall, - > SendUnsignedTransaction for Signer +impl, C: AppCrypto, LocalCall> + SendUnsignedTransaction for Signer { type Result = Vec<(Account, Result<(), ()>)>; @@ -488,10 +482,29 @@ pub trait CreateSignedTransaction: ) -> Option; } -/// Interface for creating an inherent. -pub trait CreateInherent: CreateTransactionBase { +/// Interface for creating an inherent; ⚠️ **Deprecated, use [`CreateBare`] instead**. +/// +/// This is a deprecated type alias for [`CreateBare`]. +/// +/// See [`CreateBare`] for documentation: +#[deprecated(note = "Use `CreateBare` instead")] +#[doc(inline)] +pub use CreateBare as CreateInherent; + +/// Interface for creating a bare extrinsic. +/// +/// Bare extrinsics are used for inherent extrinsics and unsigned transactions. +pub trait CreateBare: CreateTransactionBase { + /// Create a bare extrinsic. + /// + /// Bare extrinsics are used for inherent extrinsics and unsigned transactions. + fn create_bare(call: Self::RuntimeCall) -> Self::Extrinsic; + /// Create an inherent. - fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic; + #[deprecated(note = "Use `create_bare` instead")] + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + Self::create_bare(call) + } } /// A message signer. @@ -594,7 +607,7 @@ pub trait SendSignedTransaction< } /// Submit an unsigned transaction onchain with a signed payload -pub trait SendUnsignedTransaction, LocalCall> { +pub trait SendUnsignedTransaction, LocalCall> { /// A submission result. /// /// Should contain the submission result and the account(s) that signed the payload. @@ -617,7 +630,7 @@ pub trait SendUnsignedTransaction, L /// Submits an unsigned call to the transaction pool.
fn submit_unsigned_transaction(&self, call: LocalCall) -> Option> { - let xt = T::create_inherent(call.into()); + let xt = T::create_bare(call.into()); Some(SubmitTransaction::::submit_transaction(xt)) } } @@ -663,8 +676,8 @@ mod tests { type RuntimeCall = RuntimeCall; } - impl CreateInherent for TestRuntime { - fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + impl CreateBare for TestRuntime { + fn create_bare(call: Self::RuntimeCall) -> Self::Extrinsic { Extrinsic::new_bare(call) } } diff --git a/substrate/primitives/application-crypto/src/bandersnatch.rs b/substrate/primitives/application-crypto/src/bandersnatch.rs index 0e21e5d3bce31..0cb862efc520a 100644 --- a/substrate/primitives/application-crypto/src/bandersnatch.rs +++ b/substrate/primitives/application-crypto/src/bandersnatch.rs @@ -21,6 +21,12 @@ use crate::{KeyTypeId, RuntimePublic}; use alloc::vec::Vec; pub use sp_core::bandersnatch::*; +use sp_core::{ + crypto::CryptoType, + proof_of_possession::{NonAggregatable, ProofOfPossessionVerifier}, + Pair as TraitPair, +}; + mod app { crate::app_crypto!(super, sp_core::testing::BANDERSNATCH); } @@ -41,14 +47,25 @@ impl RuntimePublic for Public { sp_io::crypto::bandersnatch_generate(key_type, seed) } - /// Dummy implementation. Returns `None`. - fn sign>(&self, _key_type: KeyTypeId, _msg: &M) -> Option { - None + fn sign>(&self, key_type: KeyTypeId, msg: &M) -> Option { + sp_io::crypto::bandersnatch_sign(key_type, self, msg.as_ref()) + } + + fn verify>(&self, msg: &M, signature: &Self::Signature) -> bool { + let sig = AppSignature::from(*signature); + let pub_key = AppPublic::from(*self); + ::Pair::verify(&sig, msg.as_ref(), &pub_key) + } + + fn generate_proof_of_possession(&mut self, key_type: KeyTypeId) -> Option { + let proof_of_possession_statement = Pair::proof_of_possession_statement(self); + sp_io::crypto::bandersnatch_sign(key_type, self, &proof_of_possession_statement) } - /// Dummy implementation. Returns `false`. 
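The `CreateInherent` implementations migrated throughout this diff all follow the shape of the `TestRuntime` impl above: provide `create_bare`, and let the deprecated `create_inherent` default forward to it so existing callers keep compiling. A self-contained model of that shim pattern (not part of the diff; toy `Call`/`Extrinsic` types stand in for the real generics):

// Toy stand-ins; the real traits are generic over the runtime's call/extrinsic types.
struct Call;
struct Extrinsic;

trait CreateBare {
    fn create_bare(call: Call) -> Extrinsic;

    // Deprecated name kept as a forwarding default, so old callers keep working.
    fn create_inherent(call: Call) -> Extrinsic {
        Self::create_bare(call)
    }
}

struct TestRuntime;

impl CreateBare for TestRuntime {
    fn create_bare(_call: Call) -> Extrinsic {
        Extrinsic // e.g. UncheckedExtrinsic::new_bare(call) in a real runtime
    }
}

fn main() {
    let _ = TestRuntime::create_inherent(Call); // still works, now routed through create_bare
}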
- fn verify>(&self, _msg: &M, _signature: &Self::Signature) -> bool { - false + fn verify_proof_of_possession(&self, proof_of_possession: &Self::Signature) -> bool { + let proof_of_possession = AppSignature::from(*proof_of_possession); + let pub_key = AppPublic::from(*self); + ::Pair::verify_proof_of_possession(&proof_of_possession, &pub_key) } fn to_raw_vec(&self) -> Vec { diff --git a/substrate/primitives/application-crypto/src/bls381.rs b/substrate/primitives/application-crypto/src/bls381.rs index d4006720ce2e4..a2767e837745d 100644 --- a/substrate/primitives/application-crypto/src/bls381.rs +++ b/substrate/primitives/application-crypto/src/bls381.rs @@ -21,6 +21,7 @@ use crate::{KeyTypeId, RuntimePublic}; use alloc::vec::Vec; pub use sp_core::bls::bls381::*; +use sp_core::{crypto::CryptoType, proof_of_possession::ProofOfPossessionVerifier}; mod app { crate::app_crypto!(super, sp_core::testing::BLS381); @@ -52,6 +53,16 @@ impl RuntimePublic for Public { false } + fn generate_proof_of_possession(&mut self, key_type: KeyTypeId) -> Option { + sp_io::crypto::bls381_generate_proof_of_possession(key_type, self) + } + + fn verify_proof_of_possession(&self, proof_of_possession: &Self::Signature) -> bool { + let proof_of_possession = AppSignature::from(*proof_of_possession); + let pub_key = AppPublic::from(*self); + ::Pair::verify_proof_of_possession(&proof_of_possession, &pub_key) + } + fn to_raw_vec(&self) -> Vec { sp_core::crypto::ByteArray::to_raw_vec(self) } diff --git a/substrate/primitives/application-crypto/src/ecdsa.rs b/substrate/primitives/application-crypto/src/ecdsa.rs index 94d5288584ccb..4f10855d1d17c 100644 --- a/substrate/primitives/application-crypto/src/ecdsa.rs +++ b/substrate/primitives/application-crypto/src/ecdsa.rs @@ -22,6 +22,7 @@ use crate::{KeyTypeId, RuntimePublic}; use alloc::vec::Vec; pub use sp_core::ecdsa::*; +use sp_core::proof_of_possession::NonAggregatable; mod app { crate::app_crypto!(super, sp_core::testing::ECDSA); @@ -48,6 +49,16 @@ impl RuntimePublic for Public { sp_io::crypto::ecdsa_verify(signature, msg.as_ref(), self) } + fn generate_proof_of_possession(&mut self, key_type: KeyTypeId) -> Option { + let proof_of_possession_statement = Pair::proof_of_possession_statement(self); + sp_io::crypto::ecdsa_sign(key_type, self, &proof_of_possession_statement) + } + + fn verify_proof_of_possession(&self, proof_of_possession: &Self::Signature) -> bool { + let proof_of_possession_statement = Pair::proof_of_possession_statement(self); + sp_io::crypto::ecdsa_verify(&proof_of_possession, &proof_of_possession_statement, &self) + } + fn to_raw_vec(&self) -> Vec { sp_core::crypto::ByteArray::to_raw_vec(self) } diff --git a/substrate/primitives/application-crypto/src/ecdsa_bls381.rs b/substrate/primitives/application-crypto/src/ecdsa_bls381.rs index f6c6ddd3ea257..747dd38b6222a 100644 --- a/substrate/primitives/application-crypto/src/ecdsa_bls381.rs +++ b/substrate/primitives/application-crypto/src/ecdsa_bls381.rs @@ -21,6 +21,12 @@ use crate::{KeyTypeId, RuntimePublic}; use alloc::vec::Vec; pub use sp_core::paired_crypto::ecdsa_bls381::*; +use sp_core::{ + bls381, + crypto::CryptoType, + ecdsa, ecdsa_bls381, + proof_of_possession::{NonAggregatable, ProofOfPossessionVerifier}, +}; mod app { crate::app_crypto!(super, sp_core::testing::ECDSA_BLS381); @@ -52,7 +58,98 @@ impl RuntimePublic for Public { false } + fn generate_proof_of_possession(&mut self, key_type: KeyTypeId) -> Option { + let pub_key_as_bytes = self.to_raw_vec(); + let (ecdsa_pub_as_bytes, 
bls381_pub_as_bytes) = split_pub_key_bytes(&pub_key_as_bytes)?; + let ecdsa_proof_of_possession = + generate_ecdsa_proof_of_possession(key_type, ecdsa_pub_as_bytes)?; + let bls381_proof_of_possession = + generate_bls381_proof_of_possession(key_type, bls381_pub_as_bytes)?; + let combined_proof_of_possession_raw = + combine_proof_of_possession(&ecdsa_proof_of_possession, &bls381_proof_of_possession)?; + Some(Self::Signature::from_raw(combined_proof_of_possession_raw)) + } + + fn verify_proof_of_possession(&self, proof_of_possession: &Self::Signature) -> bool { + let proof_of_possession = AppSignature::from(*proof_of_possession); + let pub_key = AppPublic::from(*self); + ::Pair::verify_proof_of_possession(&proof_of_possession, &pub_key) + } + fn to_raw_vec(&self) -> Vec { sp_core::crypto::ByteArray::to_raw_vec(self) } } + +/// Helper: Split public key bytes into ECDSA and BLS381 parts +fn split_pub_key_bytes( + pub_key_as_bytes: &[u8], +) -> Option<([u8; ecdsa::PUBLIC_KEY_SERIALIZED_SIZE], [u8; bls381::PUBLIC_KEY_SERIALIZED_SIZE])> { + let ecdsa_pub_as_bytes = + pub_key_as_bytes[..ecdsa::PUBLIC_KEY_SERIALIZED_SIZE].try_into().ok()?; + let bls381_pub_as_bytes = + pub_key_as_bytes[ecdsa::PUBLIC_KEY_SERIALIZED_SIZE..].try_into().ok()?; + Some((ecdsa_pub_as_bytes, bls381_pub_as_bytes)) +} + +/// Helper: Generate ECDSA proof of possession +fn generate_ecdsa_proof_of_possession( + key_type: KeyTypeId, + ecdsa_pub_as_bytes: [u8; ecdsa::PUBLIC_KEY_SERIALIZED_SIZE], +) -> Option { + let ecdsa_pub = ecdsa::Public::from_raw(ecdsa_pub_as_bytes); + let proof_of_possession_statement = ecdsa::Pair::proof_of_possession_statement(&ecdsa_pub); + sp_io::crypto::ecdsa_sign(key_type, &ecdsa_pub, &proof_of_possession_statement) +} + +/// Helper: Generate BLS381 proof of possession +fn generate_bls381_proof_of_possession( + key_type: KeyTypeId, + bls381_pub_as_bytes: [u8; bls381::PUBLIC_KEY_SERIALIZED_SIZE], +) -> Option { + let bls381_pub = bls381::Public::from_raw(bls381_pub_as_bytes); + sp_io::crypto::bls381_generate_proof_of_possession(key_type, &bls381_pub) +} + +/// Helper: Combine ECDSA and BLS381 proof_of_possessions into a single raw proof_of_possession +fn combine_proof_of_possession( + ecdsa_proof_of_possession: &ecdsa::Signature, + bls381_proof_of_possession: &bls381::Signature, +) -> Option<[u8; ecdsa_bls381::SIGNATURE_LEN]> { + let mut combined_proof_of_possession_raw = [0u8; ecdsa_bls381::SIGNATURE_LEN]; + combined_proof_of_possession_raw[..ecdsa::SIGNATURE_SERIALIZED_SIZE] + .copy_from_slice(ecdsa_proof_of_possession.as_ref()); + combined_proof_of_possession_raw[ecdsa::SIGNATURE_SERIALIZED_SIZE..] 
+ .copy_from_slice(bls381_proof_of_possession.as_ref()); + Some(combined_proof_of_possession_raw) +} + +#[cfg(test)] +mod tests { + use super::*; + use sp_core::{bls381, crypto::Pair, ecdsa}; + + /// Helper function to generate test public keys for ECDSA and BLS381 + fn generate_test_keys( + ) -> ([u8; ecdsa::PUBLIC_KEY_SERIALIZED_SIZE], [u8; bls381::PUBLIC_KEY_SERIALIZED_SIZE]) { + let ecdsa_pair = ecdsa::Pair::generate().0; + let bls381_pair = bls381::Pair::generate().0; + + let ecdsa_pub = ecdsa_pair.public(); + let bls381_pub = bls381_pair.public(); + + (ecdsa_pub.to_raw_vec().try_into().unwrap(), bls381_pub.to_raw_vec().try_into().unwrap()) + } + + #[test] + fn test_split_pub_key_bytes() { + let (ecdsa_pub, bls381_pub) = generate_test_keys(); + let mut combined_pub_key = Vec::new(); + combined_pub_key.extend_from_slice(&ecdsa_pub); + combined_pub_key.extend_from_slice(&bls381_pub); + + let result = split_pub_key_bytes(&combined_pub_key).unwrap(); + assert_eq!(result.0, ecdsa_pub, "ECDSA public key does not match"); + assert_eq!(result.1, bls381_pub, "BLS381 public key does not match"); + } +} diff --git a/substrate/primitives/application-crypto/src/ed25519.rs b/substrate/primitives/application-crypto/src/ed25519.rs index 6769de4e47c34..d94203a1cd282 100644 --- a/substrate/primitives/application-crypto/src/ed25519.rs +++ b/substrate/primitives/application-crypto/src/ed25519.rs @@ -22,6 +22,7 @@ use crate::{KeyTypeId, RuntimePublic}; use alloc::vec::Vec; pub use sp_core::ed25519::*; +use sp_core::proof_of_possession::NonAggregatable; mod app { crate::app_crypto!(super, sp_core::testing::ED25519); @@ -48,6 +49,16 @@ impl RuntimePublic for Public { sp_io::crypto::ed25519_verify(signature, msg.as_ref(), self) } + fn generate_proof_of_possession(&mut self, key_type: KeyTypeId) -> Option { + let proof_of_possession_statement = Pair::proof_of_possession_statement(self); + sp_io::crypto::ed25519_sign(key_type, self, &proof_of_possession_statement) + } + + fn verify_proof_of_possession(&self, proof_of_possession: &Self::Signature) -> bool { + let proof_of_possession_statement = Pair::proof_of_possession_statement(self); + sp_io::crypto::ed25519_verify(&proof_of_possession, &proof_of_possession_statement, &self) + } + fn to_raw_vec(&self) -> Vec { sp_core::crypto::ByteArray::to_raw_vec(self) } diff --git a/substrate/primitives/application-crypto/src/lib.rs b/substrate/primitives/application-crypto/src/lib.rs index 818062717cfce..628b8cac10942 100644 --- a/substrate/primitives/application-crypto/src/lib.rs +++ b/substrate/primitives/application-crypto/src/lib.rs @@ -29,6 +29,7 @@ pub use sp_core::crypto::{DeriveError, Pair, SecretStringError}; pub use sp_core::{ self, crypto::{ByteArray, CryptoType, Derive, IsWrappedBy, Public, Signature, UncheckedFrom, Wraps}, + proof_of_possession::{ProofOfPossessionGenerator, ProofOfPossessionVerifier}, RuntimeDebug, }; @@ -177,6 +178,18 @@ macro_rules! 
app_crypto_pair_common { } } + impl $crate::ProofOfPossessionVerifier for Pair { + fn verify_proof_of_possession( + proof_of_possession: &Self::Signature, + allegedly_possessed_pubkey: &Self::Public, + ) -> bool { + <$pair>::verify_proof_of_possession( + &proof_of_possession.0, + allegedly_possessed_pubkey.as_ref(), + ) + } + } + impl $crate::AppCrypto for Pair { type Public = Public; type Pair = Pair; diff --git a/substrate/primitives/application-crypto/src/sr25519.rs b/substrate/primitives/application-crypto/src/sr25519.rs index ba6f0e3ae6b37..d0b04c86421dd 100644 --- a/substrate/primitives/application-crypto/src/sr25519.rs +++ b/substrate/primitives/application-crypto/src/sr25519.rs @@ -21,6 +21,7 @@ use crate::{KeyTypeId, RuntimePublic}; use alloc::vec::Vec; +use sp_core::proof_of_possession::NonAggregatable; pub use sp_core::sr25519::*; mod app { @@ -48,6 +49,16 @@ impl RuntimePublic for Public { sp_io::crypto::sr25519_verify(signature, msg.as_ref(), self) } + fn generate_proof_of_possession(&mut self, key_type: KeyTypeId) -> Option { + let proof_of_possession_statement = Pair::proof_of_possession_statement(self); + sp_io::crypto::sr25519_sign(key_type, self, &proof_of_possession_statement) + } + + fn verify_proof_of_possession(&self, proof_of_possession: &Self::Signature) -> bool { + let proof_of_possession_statement = Pair::proof_of_possession_statement(self); + sp_io::crypto::sr25519_verify(&proof_of_possession, &proof_of_possession_statement, &self) + } + fn to_raw_vec(&self) -> Vec { sp_core::crypto::ByteArray::to_raw_vec(self) } diff --git a/substrate/primitives/application-crypto/src/traits.rs b/substrate/primitives/application-crypto/src/traits.rs index 1789d9b96fd82..816cfe3e37fd6 100644 --- a/substrate/primitives/application-crypto/src/traits.rs +++ b/substrate/primitives/application-crypto/src/traits.rs @@ -100,6 +100,17 @@ pub trait RuntimePublic: Sized { /// Verify that the given signature matches the given message using this public key. fn verify>(&self, msg: &M, signature: &Self::Signature) -> bool; + /// Generate a proof of possession for the corresponding public key. + /// + /// The private key will be requested from the keystore using the given key type. + /// + /// Returns the proof of possession as a signature, or `None` if signing failed or is not + /// possible. + fn generate_proof_of_possession(&mut self, key_type: KeyTypeId) -> Option; + + /// Verify that the given proof of possession is valid for the corresponding public key. + fn verify_proof_of_possession(&self, pop: &Self::Signature) -> bool; + /// Returns `Self` as raw vec. fn to_raw_vec(&self) -> Vec; } @@ -133,13 +144,24 @@ pub trait RuntimeAppPublic: Sized { /// Verify that the given signature matches the given message using this public key. fn verify>(&self, msg: &M, signature: &Self::Signature) -> bool; + /// Generate a proof of possession for the corresponding public key. + /// + /// The private key will be requested from the keystore using this application's key type. + /// + /// Returns the proof of possession as a signature, or `None` if signing failed or is not + /// possible. + fn generate_proof_of_possession(&mut self) -> Option; + + /// Verify that the given proof of possession is valid for the corresponding public key. + fn verify_proof_of_possession(&self, pop: &Self::Signature) -> bool; + /// Returns `Self` as raw vec.
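Every non-aggregatable scheme in this diff (sr25519, ed25519, ecdsa, bandersnatch) implements these two hooks the same way: sign a statement derived from the public key, then verify against the same re-derived statement, so only the holder of the matching secret key can produce a valid proof. A self-contained sketch of that shared recipe as a hypothetical trait (not part of the diff; `PopScheme` and the statement prefix are invented names loosely mirroring the `NonAggregatable` helpers):

// Hypothetical trait; concrete schemes plug in their own sign/verify.
trait PopScheme {
    type Public: AsRef<[u8]>;
    type Signature;

    fn sign(&self, msg: &[u8]) -> Self::Signature;
    fn verify(public: &Self::Public, msg: &[u8], sig: &Self::Signature) -> bool;

    // Same statement on both sides, derived from the public key being proven.
    fn pop_statement(public: &Self::Public) -> Vec<u8> {
        let mut s = b"POP_OF_".to_vec();
        s.extend_from_slice(public.as_ref());
        s
    }

    fn generate_pop(&self, public: &Self::Public) -> Self::Signature {
        self.sign(&Self::pop_statement(public))
    }

    fn verify_pop(public: &Self::Public, pop: &Self::Signature) -> bool {
        Self::verify(public, &Self::pop_statement(public), pop)
    }
}

fn main() {} // trait-only sketch; no concrete scheme is instantiated here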
fn to_raw_vec(&self) -> Vec; } impl RuntimeAppPublic for T where - T: AppPublic + AsRef<::Generic>, + T: AppPublic + AsRef<::Generic> + AsMut<::Generic>, ::Generic: RuntimePublic, ::Signature: TypeInfo + Codec @@ -170,6 +192,21 @@ where <::Generic as RuntimePublic>::verify(self.as_ref(), msg, signature.as_ref()) } + fn generate_proof_of_possession(&mut self) -> Option { + <::Generic as RuntimePublic>::generate_proof_of_possession( + self.as_mut(), + Self::ID, + ) + .map(|s| s.into()) + } + + fn verify_proof_of_possession(&self, pop: &Self::Signature) -> bool { + <::Generic as RuntimePublic>::verify_proof_of_possession( + self.as_ref(), + pop.as_ref(), + ) + } + fn to_raw_vec(&self) -> Vec { <::Generic as RuntimePublic>::to_raw_vec(self.as_ref()) } diff --git a/substrate/primitives/application-crypto/test/Cargo.toml b/substrate/primitives/application-crypto/test/Cargo.toml index 5c19161bc870b..22f9655a4917b 100644 --- a/substrate/primitives/application-crypto/test/Cargo.toml +++ b/substrate/primitives/application-crypto/test/Cargo.toml @@ -20,4 +20,8 @@ sp-api = { workspace = true, default-features = true } sp-application-crypto = { workspace = true, default-features = true } sp-core = { workspace = true } sp-keystore = { workspace = true } +sp-tracing = { workspace = true, default-features = true } substrate-test-runtime-client = { workspace = true } + +[features] +bls-experimental = ["substrate-test-runtime-client/bls-experimental"] diff --git a/substrate/primitives/application-crypto/test/src/bls381.rs b/substrate/primitives/application-crypto/test/src/bls381.rs new file mode 100644 index 0000000000000..e38adffd13286 --- /dev/null +++ b/substrate/primitives/application-crypto/test/src/bls381.rs @@ -0,0 +1,63 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Integration tests for bls12-381 + +use sp_api::{ApiExt, ProvideRuntimeApi}; +use sp_application_crypto::{bls381::AppPair, RuntimePublic}; +use sp_core::{ + bls381::Pair as Bls381Pair, + crypto::ByteArray, + proof_of_possession::{ProofOfPossessionGenerator, ProofOfPossessionVerifier}, + testing::BLS381, + Pair, +}; +use sp_keystore::{testing::MemoryKeystore, Keystore, KeystoreExt}; +use std::sync::Arc; +use substrate_test_runtime_client::{ + runtime::TestAPI, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, +}; + +#[test] +fn bls381_works_in_runtime() { + sp_tracing::try_init_simple(); + let keystore = Arc::new(MemoryKeystore::new()); + let test_client = TestClientBuilder::new().build(); + + let mut runtime_api = test_client.runtime_api(); + runtime_api.register_extension(KeystoreExt::new(keystore.clone())); + + let (proof_of_possession, public) = runtime_api + .test_bls381_crypto(test_client.chain_info().genesis_hash) + .expect("Tests `bls381` crypto."); + + let supported_keys = keystore.keys(BLS381).unwrap(); + assert!(supported_keys.contains(&public.to_raw_vec())); + + assert!(AppPair::verify_proof_of_possession(&proof_of_possession.into(), &public.into())); +} + +#[test] +fn bls381_client_proof_of_possession_verified_by_runtime_public() { + let (mut test_pair, _) = Bls381Pair::generate(); + + let client_generated_proof_of_possession = test_pair.generate_proof_of_possession(); + assert!(RuntimePublic::verify_proof_of_possession( + &test_pair.public(), + &client_generated_proof_of_possession + )); +} diff --git a/substrate/primitives/application-crypto/test/src/ecdsa.rs b/substrate/primitives/application-crypto/test/src/ecdsa.rs index 396683a91ac02..a96f45d192b0f 100644 --- a/substrate/primitives/application-crypto/test/src/ecdsa.rs +++ b/substrate/primitives/application-crypto/test/src/ecdsa.rs @@ -17,9 +17,11 @@ //! 
Integration tests for ecdsa use sp_api::{ApiExt, ProvideRuntimeApi}; -use sp_application_crypto::ecdsa::AppPair; +use sp_application_crypto::{ecdsa::AppPair, RuntimePublic}; use sp_core::{ crypto::{ByteArray, Pair}, + ecdsa::Pair as ECDSAPair, + proof_of_possession::{ProofOfPossessionGenerator, ProofOfPossessionVerifier}, testing::ECDSA, }; use sp_keystore::{testing::MemoryKeystore, Keystore, KeystoreExt}; @@ -30,17 +32,30 @@ use substrate_test_runtime_client::{ #[test] fn ecdsa_works_in_runtime() { + sp_tracing::try_init_simple(); let keystore = Arc::new(MemoryKeystore::new()); let test_client = TestClientBuilder::new().build(); let mut runtime_api = test_client.runtime_api(); runtime_api.register_extension(KeystoreExt::new(keystore.clone())); - let (signature, public) = runtime_api + let (signature, public, proof_of_possession) = runtime_api .test_ecdsa_crypto(test_client.chain_info().genesis_hash) .expect("Tests `ecdsa` crypto."); let supported_keys = keystore.keys(ECDSA).unwrap(); assert!(supported_keys.contains(&public.to_raw_vec())); assert!(AppPair::verify(&signature, "ecdsa", &public)); + assert!(AppPair::verify_proof_of_possession(&proof_of_possession.into(), &public.into())); +} + +#[test] +fn ecdsa_client_generated_proof_of_possession_verified_by_runtime_public() { + let (mut test_pair, _) = ECDSAPair::generate(); + + let client_generated_proof_of_possession = test_pair.generate_proof_of_possession(); + assert!(RuntimePublic::verify_proof_of_possession( + &test_pair.public(), + &client_generated_proof_of_possession + )); } diff --git a/substrate/primitives/application-crypto/test/src/ecdsa_bls381.rs b/substrate/primitives/application-crypto/test/src/ecdsa_bls381.rs new file mode 100644 index 0000000000000..df39a427c1ba0 --- /dev/null +++ b/substrate/primitives/application-crypto/test/src/ecdsa_bls381.rs @@ -0,0 +1,64 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Integration tests for ecdsa-bls12-381 + +use sp_api::{ApiExt, ProvideRuntimeApi}; +use sp_application_crypto::{ecdsa_bls381::AppPair, RuntimePublic}; +use sp_core::{ + crypto::ByteArray, + ecdsa_bls381::Pair as EcdsaBls381Pair, + proof_of_possession::{ProofOfPossessionGenerator, ProofOfPossessionVerifier}, + testing::ECDSA_BLS381, + Pair, +}; +use sp_keystore::{testing::MemoryKeystore, Keystore, KeystoreExt}; +use std::sync::Arc; +use substrate_test_runtime_client::{ + runtime::TestAPI, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, +}; + +#[test] +fn ecdsa_bls381_works_in_runtime() { + sp_tracing::try_init_simple(); + let keystore = Arc::new(MemoryKeystore::new()); + let test_client = TestClientBuilder::new().build(); + + let mut runtime_api = test_client.runtime_api(); + runtime_api.register_extension(KeystoreExt::new(keystore.clone())); + + let (proof_of_possession, public) = runtime_api + .test_ecdsa_bls381_crypto(test_client.chain_info().genesis_hash) + .expect("Tests `ecdsa_bls381` crypto."); + + let supported_keys = keystore.keys(ECDSA_BLS381).unwrap(); + assert!(supported_keys.contains(&public.to_raw_vec())); + assert!(supported_keys.len() == 3); + + assert!(AppPair::verify_proof_of_possession(&proof_of_possession, &public)); +} + +#[test] +fn ecdsa_bls381_client_proof_of_possession_verified_by_runtime_public() { + let (mut test_pair, _) = EcdsaBls381Pair::generate(); + + let client_generated_proof_of_possession = test_pair.generate_proof_of_possession(); + assert!(RuntimePublic::verify_proof_of_possession( + &test_pair.public(), + &client_generated_proof_of_possession + )); +} diff --git a/substrate/primitives/application-crypto/test/src/ed25519.rs b/substrate/primitives/application-crypto/test/src/ed25519.rs index f0ceccdcebfcd..74d11bda19e7e 100644 --- a/substrate/primitives/application-crypto/test/src/ed25519.rs +++ b/substrate/primitives/application-crypto/test/src/ed25519.rs @@ -18,9 +18,11 @@ //! 
Integration tests for ed25519 use sp_api::{ApiExt, ProvideRuntimeApi}; -use sp_application_crypto::ed25519::AppPair; +use sp_application_crypto::{ed25519::AppPair, RuntimePublic}; use sp_core::{ crypto::{ByteArray, Pair}, + ed25519::Pair as Ed25519Pair, + proof_of_possession::{ProofOfPossessionGenerator, ProofOfPossessionVerifier}, testing::ED25519, }; use sp_keystore::{testing::MemoryKeystore, Keystore, KeystoreExt}; @@ -31,17 +33,30 @@ use substrate_test_runtime_client::{ #[test] fn ed25519_works_in_runtime() { + sp_tracing::try_init_simple(); let keystore = Arc::new(MemoryKeystore::new()); let test_client = TestClientBuilder::new().build(); let mut runtime_api = test_client.runtime_api(); runtime_api.register_extension(KeystoreExt::new(keystore.clone())); - let (signature, public) = runtime_api + let (signature, public, proof_of_possession) = runtime_api .test_ed25519_crypto(test_client.chain_info().genesis_hash) .expect("Tests `ed25519` crypto."); let supported_keys = keystore.keys(ED25519).unwrap(); assert!(supported_keys.contains(&public.to_raw_vec())); assert!(AppPair::verify(&signature, "ed25519", &public)); + assert!(AppPair::verify_proof_of_possession(&proof_of_possession.into(), &public.into())); +} + +#[test] +fn ed25519_client_proof_of_possession_verified_by_runtime_public() { + let (mut test_pair, _) = Ed25519Pair::generate(); + + let client_generated_proof_of_possession = test_pair.generate_proof_of_possession(); + assert!(RuntimePublic::verify_proof_of_possession( + &test_pair.public(), + &client_generated_proof_of_possession + )); } diff --git a/substrate/primitives/application-crypto/test/src/lib.rs b/substrate/primitives/application-crypto/test/src/lib.rs index 90856ee1e596f..008092393971c 100644 --- a/substrate/primitives/application-crypto/test/src/lib.rs +++ b/substrate/primitives/application-crypto/test/src/lib.rs @@ -17,9 +17,14 @@ //! Integration tests for application crypto +#[cfg(all(test, feature = "bls-experimental"))] +mod bls381; #[cfg(test)] mod ecdsa; #[cfg(test)] mod ed25519; #[cfg(test)] mod sr25519; + +#[cfg(all(test, feature = "bls-experimental"))] +mod ecdsa_bls381; diff --git a/substrate/primitives/application-crypto/test/src/sr25519.rs b/substrate/primitives/application-crypto/test/src/sr25519.rs index 3c62270395f04..d4a1caf79aa9c 100644 --- a/substrate/primitives/application-crypto/test/src/sr25519.rs +++ b/substrate/primitives/application-crypto/test/src/sr25519.rs @@ -18,9 +18,11 @@ //! 
Integration tests for sr25519 use sp_api::{ApiExt, ProvideRuntimeApi}; -use sp_application_crypto::sr25519::AppPair; +use sp_application_crypto::{sr25519::AppPair, RuntimePublic}; use sp_core::{ crypto::{ByteArray, Pair}, + proof_of_possession::{ProofOfPossessionGenerator, ProofOfPossessionVerifier}, + sr25519::Pair as Sr25519Pair, testing::SR25519, }; use sp_keystore::{testing::MemoryKeystore, Keystore, KeystoreExt}; @@ -31,17 +33,30 @@ use substrate_test_runtime_client::{ #[test] fn sr25519_works_in_runtime() { + sp_tracing::try_init_simple(); let keystore = Arc::new(MemoryKeystore::new()); let test_client = TestClientBuilder::new().build(); let mut runtime_api = test_client.runtime_api(); runtime_api.register_extension(KeystoreExt::new(keystore.clone())); - let (signature, public) = runtime_api + let (signature, public, proof_of_possession) = runtime_api .test_sr25519_crypto(test_client.chain_info().genesis_hash) .expect("Tests `sr25519` crypto."); let supported_keys = keystore.keys(SR25519).unwrap(); assert!(supported_keys.contains(&public.to_raw_vec())); assert!(AppPair::verify(&signature, "sr25519", &public)); + assert!(AppPair::verify_proof_of_possession(&proof_of_possession.into(), &public.into())); +} + +#[test] +fn sr25519_client_proof_of_possession_verified_by_runtime_public() { + let (mut test_pair, _) = Sr25519Pair::generate(); + + let client_generated_proof_of_possession = test_pair.generate_proof_of_possession(); + assert!(RuntimePublic::verify_proof_of_possession( + &test_pair.public(), + &client_generated_proof_of_possession + )); } diff --git a/substrate/primitives/core/Cargo.toml b/substrate/primitives/core/Cargo.toml index b68e45ed574a3..4eff3043f8659 100644 --- a/substrate/primitives/core/Cargo.toml +++ b/substrate/primitives/core/Cargo.toml @@ -67,7 +67,9 @@ k256 = { features = ["alloc", "ecdsa"], workspace = true } secp256k1 = { features = ["alloc", "recovery"], optional = true, workspace = true } # bls crypto +sha2 = { optional = true, workspace = true } w3f-bls = { optional = true, workspace = true } + # bandersnatch crypto ark-vrf = { optional = true, workspace = true, features = ["bandersnatch", "ring"] } @@ -114,6 +116,7 @@ std = [ "secp256k1/global-context", "secp256k1/std", "serde/std", + "sha2?/std", "sp-crypto-hashing/std", "sp-debug-derive/std", "sp-externalities/std", @@ -153,7 +156,7 @@ full_crypto = [ # This feature adds BLS crypto primitives. # It should not be used in production since the implementation and interface may still # be subject to significant changes. -bls-experimental = ["w3f-bls"] +bls-experimental = ["sha2", "w3f-bls"] # This feature adds Bandersnatch crypto primitives. 
# It should not be used in production since the implementation and interface may still diff --git a/substrate/primitives/core/src/bandersnatch.rs b/substrate/primitives/core/src/bandersnatch.rs index 891af7f72f59b..d56627f738ceb 100644 --- a/substrate/primitives/core/src/bandersnatch.rs +++ b/substrate/primitives/core/src/bandersnatch.rs @@ -22,9 +22,12 @@ #[cfg(feature = "full_crypto")] use crate::crypto::VrfSecret; -use crate::crypto::{ - ByteArray, CryptoType, CryptoTypeId, DeriveError, DeriveJunction, Pair as TraitPair, - PublicBytes, SecretStringError, SignatureBytes, UncheckedFrom, VrfPublic, +use crate::{ + crypto::{ + ByteArray, CryptoType, CryptoTypeId, DeriveError, DeriveJunction, Pair as TraitPair, + PublicBytes, SecretStringError, SignatureBytes, UncheckedFrom, VrfPublic, + }, + proof_of_possession::NonAggregatable, }; use alloc::{vec, vec::Vec}; use ark_vrf::{ @@ -186,6 +189,8 @@ impl CryptoType for Pair { type Pair = Pair; } +impl NonAggregatable for Pair {} + /// Bandersnatch VRF types and operations. pub mod vrf { use super::*; @@ -592,7 +597,10 @@ pub mod ring_vrf { #[cfg(test)] mod tests { use super::{ring_vrf::*, vrf::*, *}; - use crate::crypto::{VrfPublic, VrfSecret, DEV_PHRASE}; + use crate::{ + crypto::{VrfPublic, VrfSecret, DEV_PHRASE}, + proof_of_possession::{ProofOfPossessionGenerator, ProofOfPossessionVerifier}, + }; const TEST_SEED: &[u8; SEED_SERIALIZED_SIZE] = &[0xcb; SEED_SERIALIZED_SIZE]; const TEST_RING_SIZE: usize = 16; @@ -858,4 +866,13 @@ mod tests { let enc2 = vd2.encode(); assert_eq!(enc1, enc2); } + + #[test] + fn good_proof_of_possession_should_work_bad_proof_of_possession_should_fail() { + let mut pair = Pair::from_seed(b"12345678901234567890123456789012"); + let other_pair = Pair::from_seed(b"23456789012345678901234567890123"); + let proof_of_possession = pair.generate_proof_of_possession(); + assert!(Pair::verify_proof_of_possession(&proof_of_possession, &pair.public())); + assert!(!Pair::verify_proof_of_possession(&proof_of_possession, &other_pair.public())); + } } diff --git a/substrate/primitives/core/src/bls.rs b/substrate/primitives/core/src/bls.rs index f721a6ae08d18..5f719f98c7e7f 100644 --- a/substrate/primitives/core/src/bls.rs +++ b/substrate/primitives/core/src/bls.rs @@ -23,18 +23,28 @@ //! Chaum-Pedersen proof uses the same hash-to-field specified in RFC 9380 for the field of the BLS //! curve. 
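// [Editorial sketch] The bls.rs hunk below wires the new proof-of-possession traits into the BLS scheme via w3f-bls' `NuggetBLSnCPPoP`. A minimal usage sketch, mirroring the tests added further down in this file and assuming the `bls-experimental` and `full_crypto` features are enabled:
//
//     use sp_core::{
//         bls381::Pair,
//         crypto::Pair as _,
//         proof_of_possession::{ProofOfPossessionGenerator, ProofOfPossessionVerifier},
//     };
//
//     let mut pair = Pair::from_seed(b"12345678901234567890123456789012");
//     // Generation takes `&mut self`; for BLS the result is a dedicated
//     // proof-of-possession object, not a plain signature over a statement.
//     let proof_of_possession = pair.generate_proof_of_possession();
//     assert!(Pair::verify_proof_of_possession(&proof_of_possession, &pair.public()));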
-use crate::crypto::{ - CryptoType, DeriveError, DeriveJunction, Pair as TraitPair, PublicBytes, SecretStringError, - SignatureBytes, UncheckedFrom, +use crate::{ + crypto::{ + CryptoType, DeriveError, DeriveJunction, Pair as TraitPair, PublicBytes, SecretStringError, + SignatureBytes, UncheckedFrom, + }, + proof_of_possession::{ProofOfPossessionGenerator, ProofOfPossessionVerifier}, }; use alloc::vec::Vec; use w3f_bls::{ DoublePublicKey, DoublePublicKeyScheme, DoubleSignature, EngineBLS, Keypair, Message, - SecretKey, SerializableToBytes, TinyBLS381, + NuggetBLSnCPPoP, ProofOfPossession as BlsProofOfPossession, SecretKey, SerializableToBytes, + TinyBLS381, }; +#[cfg(feature = "full_crypto")] +use w3f_bls::ProofOfPossessionGenerator as BlsProofOfPossessionGenerator; + +/// Required to generate a proof of possession. +use sha2::Sha256; + /// BLS-377 specialized types pub mod bls377 { pub use super::{PUBLIC_KEY_SERIALIZED_SIZE, SIGNATURE_SERIALIZED_SIZE}; @@ -99,6 +109,10 @@ pub const PUBLIC_KEY_SERIALIZED_SIZE: usize = pub const SIGNATURE_SERIALIZED_SIZE: usize = as SerializableToBytes>::SERIALIZED_BYTES_SIZE; +/// Proof of possession serialized size +pub const PROOF_OF_POSSESSION_SERIALIZED_SIZE: usize = + as SerializableToBytes>::SERIALIZED_BYTES_SIZE; + /// A secret seed. /// /// It's not called a "secret key" because ring doesn't expose the secret keys @@ -227,6 +241,49 @@ impl TraitPair for Pair { } } +impl ProofOfPossessionGenerator for Pair { + #[cfg(feature = "full_crypto")] + fn generate_proof_of_possession(&mut self) -> Self::Signature { + let r: [u8; SIGNATURE_SERIALIZED_SIZE] = as BlsProofOfPossessionGenerator< + T, + Sha256, + DoublePublicKey, + NuggetBLSnCPPoP, + >>::generate_pok(&mut self.0) + .to_bytes() + .try_into() + .expect("NuggetBLSnCPPoP serializer returns vectors of SIGNATURE_SERIALIZED_SIZE size"); + Self::Signature::unchecked_from(r) + } +} + +impl ProofOfPossessionVerifier for Pair +where + Pair: TraitPair, +{ + fn verify_proof_of_possession( + proof_of_possession: &Self::Signature, + allegedly_possessed_pubkey: &Self::Public, + ) -> bool { + let Ok(proof_of_possession) = + NuggetBLSnCPPoP::::from_bytes(proof_of_possession.as_ref()) + else { + return false + }; + + let Ok(allegedly_possessed_pubkey_as_bls_pubkey) = + DoublePublicKey::::from_bytes(allegedly_possessed_pubkey.as_ref()) + else { + return false + }; + + BlsProofOfPossession::::verify( + &proof_of_possession, + &allegedly_possessed_pubkey_as_bls_pubkey, + ) + } +} + impl CryptoType for Pair { type Pair = Pair; } @@ -301,8 +358,6 @@ mod tests { hex_expected_signature: &str, ) { let public = pair.public(); - let public_bytes: &[u8] = public.as_ref(); - println!("pub key is: {:?}", array_bytes::bytes2hex("", public_bytes)); assert_eq!( public, Public::unchecked_from(array_bytes::hex2array_unchecked(hex_expected_pub_key)) @@ -312,6 +367,7 @@ mod tests { let expected_signature = Signature::unchecked_from(expected_signature_bytes); let signature = pair.sign(&message[..]); + assert!(signature == expected_signature); assert!(Pair::verify(&signature, &message[..], &public)); } @@ -323,7 +379,7 @@ mod tests { )); test_vector_should_work(pair, "7a84ca8ce4c37c93c95ecee6a3c0c9a7b9c225093cf2f12dc4f69cbfb847ef9424a18f5755d5a742247d386ff2aabb806bcf160eff31293ea9616976628f77266c8a8cc1d8753be04197bd6cdd8c5c87a148f782c4c1568d599b48833fd539001e580cff64bbc71850605433fcd051f3afc3b74819786f815ffb5272030a8d03e5df61e6183f8fd8ea85f26defa83400", -
"d1e3013161991e142d8751017d4996209c2ff8a9ee160f373733eda3b4b785ba6edce9f45f87104bbe07aa6aa6eb2780aa705efb2c13d3b317d6409d159d23bdc7cdd5c2a832d1551cf49d811d49c901495e527dbd532e3a462335ce2686009104aba7bc11c5b22be78f3198d2727a0b" + "124571b4bf23083b5d07e720fde0a984d4d592868156ece77487e97a1ba4b29397dbdc454f13e3aed1ad4b6a99af2501c68ab88ec0495f962a4f55c7c460275a8d356cfa344c27778ca4c641bd9a3604ce5c28f9ed566e1d29bf3b5d3591e46ae28be3ece035e8e4db53a40fc5826002" ) } @@ -334,7 +390,7 @@ mod tests { )); test_vector_should_work(pair, "88ff6c3a32542bc85f2adf1c490a929b7fcee50faeb95af9a036349390e9b3ea7326247c4fc4ebf88050688fd6265de0806284eec09ba0949f5df05dc93a787a14509749f36e4a0981bb748d953435483740907bb5c2fe8ffd97e8509e1a038b05fb08488db628ea0638b8d48c3ddf62ed437edd8b23d5989d6c65820fc70f80fb39b486a3766813e021124aec29a566", - "8c29473f44ac4f0a8ac4dc8c8da09adf9d2faa2dbe0cfdce3ce7c920714196a1b7bf48dc05048e453c161ebc2db9f44fae060b3be77e14e66d1a5262f14d3da0c3a18e650018761a7402b31abc7dd803d466bdcb71bc28c77eb73c610cbff53c00130b79116831e520a04a8ef6630e6f" + "8f4fe16cbb1b7f26ddbfbcde864a3c2f68802fbca5bd59920a135ed7e0f74cd9ba160e61c85e9acee3b4fe277862f226e60ac1958b57ed4487daf4673af420e8bf036ee8169190a927ede2e8eb3d6600633c69b2a84eb017473988fdfde082e150cbef05b77018c1f8ccc06da9e80421" ) } @@ -347,7 +403,7 @@ mod tests { .unwrap(); test_vector_should_work(pair, "7a84ca8ce4c37c93c95ecee6a3c0c9a7b9c225093cf2f12dc4f69cbfb847ef9424a18f5755d5a742247d386ff2aabb806bcf160eff31293ea9616976628f77266c8a8cc1d8753be04197bd6cdd8c5c87a148f782c4c1568d599b48833fd539001e580cff64bbc71850605433fcd051f3afc3b74819786f815ffb5272030a8d03e5df61e6183f8fd8ea85f26defa83400", - "d1e3013161991e142d8751017d4996209c2ff8a9ee160f373733eda3b4b785ba6edce9f45f87104bbe07aa6aa6eb2780aa705efb2c13d3b317d6409d159d23bdc7cdd5c2a832d1551cf49d811d49c901495e527dbd532e3a462335ce2686009104aba7bc11c5b22be78f3198d2727a0b" + "124571b4bf23083b5d07e720fde0a984d4d592868156ece77487e97a1ba4b29397dbdc454f13e3aed1ad4b6a99af2501c68ab88ec0495f962a4f55c7c460275a8d356cfa344c27778ca4c641bd9a3604ce5c28f9ed566e1d29bf3b5d3591e46ae28be3ece035e8e4db53a40fc5826002" ) } @@ -360,7 +416,7 @@ mod tests { .unwrap(); test_vector_should_work(pair, "88ff6c3a32542bc85f2adf1c490a929b7fcee50faeb95af9a036349390e9b3ea7326247c4fc4ebf88050688fd6265de0806284eec09ba0949f5df05dc93a787a14509749f36e4a0981bb748d953435483740907bb5c2fe8ffd97e8509e1a038b05fb08488db628ea0638b8d48c3ddf62ed437edd8b23d5989d6c65820fc70f80fb39b486a3766813e021124aec29a566", - "8c29473f44ac4f0a8ac4dc8c8da09adf9d2faa2dbe0cfdce3ce7c920714196a1b7bf48dc05048e453c161ebc2db9f44fae060b3be77e14e66d1a5262f14d3da0c3a18e650018761a7402b31abc7dd803d466bdcb71bc28c77eb73c610cbff53c00130b79116831e520a04a8ef6630e6f" + "8f4fe16cbb1b7f26ddbfbcde864a3c2f68802fbca5bd59920a135ed7e0f74cd9ba160e61c85e9acee3b4fe277862f226e60ac1958b57ed4487daf4673af420e8bf036ee8169190a927ede2e8eb3d6600633c69b2a84eb017473988fdfde082e150cbef05b77018c1f8ccc06da9e80421" ) } @@ -524,6 +580,7 @@ mod tests { fn signature_serialization_works_for_bls381() { signature_serialization_works::(); } + fn signature_serialization_doesnt_panic() { fn deserialize_signature( text: &str, @@ -544,4 +601,55 @@ mod tests { fn signature_serialization_doesnt_panic_for_bls381() { signature_serialization_doesnt_panic::(); } + + fn must_generate_proof_of_possession() { + let mut pair = Pair::::from_seed(b"12345678901234567890123456789012"); + pair.generate_proof_of_possession(); + } + + #[test] + fn must_generate_proof_of_possession_for_bls377() { + must_generate_proof_of_possession::(); + } + + #[test] + 
fn must_generate_proof_of_possession_for_bls381() { + must_generate_proof_of_possession::(); + } + + fn good_proof_of_possession_must_verify() { + let mut pair = Pair::::from_seed(b"12345678901234567890123456789012"); + let proof_of_possession = pair.generate_proof_of_possession(); + assert!(Pair::::verify_proof_of_possession(&proof_of_possession, &pair.public())); + } + + #[test] + fn good_proof_of_possession_must_verify_for_bls377() { + good_proof_of_possession_must_verify::(); + } + + #[test] + fn good_proof_of_possession_must_verify_for_bls381() { + good_proof_of_possession_must_verify::(); + } + + fn proof_of_possession_must_fail_if_prover_does_not_possess_secret_key() { + let mut pair = Pair::::from_seed(b"12345678901234567890123456789012"); + let other_pair = Pair::::from_seed(b"23456789012345678901234567890123"); + let proof_of_possession = pair.generate_proof_of_possession(); + assert_eq!( + Pair::::verify_proof_of_possession(&proof_of_possession, &other_pair.public()), + false + ); + } + + #[test] + fn proof_of_possession_must_fail_if_prover_does_not_possess_secret_key_for_bls377() { + proof_of_possession_must_fail_if_prover_does_not_possess_secret_key::(); + } + + #[test] + fn proof_of_possession_must_fail_if_prover_does_not_possess_secret_key_for_bls381() { + proof_of_possession_must_fail_if_prover_does_not_possess_secret_key::(); + } } diff --git a/substrate/primitives/core/src/ecdsa.rs b/substrate/primitives/core/src/ecdsa.rs index d11811ff2af65..5a7590e05a795 100644 --- a/substrate/primitives/core/src/ecdsa.rs +++ b/substrate/primitives/core/src/ecdsa.rs @@ -17,9 +17,12 @@ //! Simple ECDSA secp256k1 API. -use crate::crypto::{ - CryptoType, CryptoTypeId, DeriveError, DeriveJunction, Pair as TraitPair, PublicBytes, - SecretStringError, SignatureBytes, +use crate::{ + crypto::{ + CryptoType, CryptoTypeId, DeriveError, DeriveJunction, Pair as TraitPair, PublicBytes, + SecretStringError, SignatureBytes, + }, + proof_of_possession::NonAggregatable, }; #[cfg(not(feature = "std"))] @@ -326,12 +329,17 @@ impl CryptoType for Pair { type Pair = Pair; } +impl NonAggregatable for Pair {} + #[cfg(test)] mod test { use super::*; - use crate::crypto::{ - set_default_ss58_version, PublicError, Ss58AddressFormat, Ss58AddressFormatRegistry, - Ss58Codec, DEV_PHRASE, + use crate::{ + crypto::{ + set_default_ss58_version, PublicError, Ss58AddressFormat, Ss58AddressFormatRegistry, + Ss58Codec, DEV_PHRASE, + }, + proof_of_possession::{ProofOfPossessionGenerator, ProofOfPossessionVerifier}, }; use serde_json; @@ -629,4 +637,16 @@ mod test { let key = sig.recover_prehashed(&msg).unwrap(); assert_ne!(pair.public(), key); } + + #[test] + fn good_proof_of_possession_should_work_bad_proof_of_possession_should_fail() { + let mut pair = Pair::from_seed(b"12345678901234567890123456789012"); + let other_pair = Pair::from_seed(b"23456789012345678901234567890123"); + let proof_of_possession = pair.generate_proof_of_possession(); + assert!(Pair::verify_proof_of_possession(&proof_of_possession, &pair.public())); + assert_eq!( + Pair::verify_proof_of_possession(&proof_of_possession, &other_pair.public()), + false + ); + } } diff --git a/substrate/primitives/core/src/ed25519.rs b/substrate/primitives/core/src/ed25519.rs index 401f9a39d5673..4e46e8a0664af 100644 --- a/substrate/primitives/core/src/ed25519.rs +++ b/substrate/primitives/core/src/ed25519.rs @@ -17,9 +17,12 @@ //! Simple Ed25519 API. 
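// [Editorial sketch] ecdsa and ed25519 are non-aggregatable, so they obtain proofs of
// possession through the blanket `NonAggregatable` impls introduced in
// proof_of_possession.rs later in this diff: the "proof" is an ordinary signature over
// the domain-separated statement `b"POP_" ++ public_key`. Roughly:
//
//     use sp_core::{
//         crypto::Pair as _,
//         ed25519::Pair,
//         proof_of_possession::{ProofOfPossessionGenerator, ProofOfPossessionVerifier},
//     };
//
//     let mut pair = Pair::from_seed(b"12345678901234567890123456789012");
//     let proof_of_possession = pair.generate_proof_of_possession();
//     // Internally equivalent to signing `[b"POP_", public.to_raw_vec()].concat()`.
//     assert!(Pair::verify_proof_of_possession(&proof_of_possession, &pair.public()));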
-use crate::crypto::{ - ByteArray, CryptoType, CryptoTypeId, DeriveError, DeriveJunction, Pair as TraitPair, - PublicBytes, SecretStringError, SignatureBytes, +use crate::{ + crypto::{ + ByteArray, CryptoType, CryptoTypeId, DeriveError, DeriveJunction, Pair as TraitPair, + PublicBytes, SecretStringError, SignatureBytes, + }, + proof_of_possession::NonAggregatable, }; use ed25519_zebra::{SigningKey, VerificationKey}; @@ -153,12 +156,17 @@ impl CryptoType for Pair { type Pair = Pair; } +impl NonAggregatable for Pair {} + #[cfg(test)] mod tests { use super::*; #[cfg(feature = "serde")] use crate::crypto::Ss58Codec; - use crate::crypto::DEV_PHRASE; + use crate::{ + crypto::DEV_PHRASE, + proof_of_possession::{ProofOfPossessionGenerator, ProofOfPossessionVerifier}, + }; use serde_json; #[test] @@ -328,4 +336,16 @@ mod tests { // Poorly-sized assert!(deserialize_signature("\"abc123\"").is_err()); } + + #[test] + fn good_proof_of_possession_should_work_bad_proof_of_possession_should_fail() { + let mut pair = Pair::from_seed(b"12345678901234567890123456789012"); + let other_pair = Pair::from_seed(b"23456789012345678901234567890123"); + let proof_of_possession = pair.generate_proof_of_possession(); + assert!(Pair::verify_proof_of_possession(&proof_of_possession, &pair.public())); + assert_eq!( + Pair::verify_proof_of_possession(&proof_of_possession, &other_pair.public()), + false + ); + } } diff --git a/substrate/primitives/core/src/lib.rs b/substrate/primitives/core/src/lib.rs index 0357bab854f05..fc5faa6a6f233 100644 --- a/substrate/primitives/core/src/lib.rs +++ b/substrate/primitives/core/src/lib.rs @@ -64,6 +64,7 @@ pub mod hash; #[cfg(not(substrate_runtime))] mod hasher; pub mod offchain; +pub mod proof_of_possession; pub mod testing; #[cfg(not(substrate_runtime))] pub mod traits; diff --git a/substrate/primitives/core/src/paired_crypto.rs b/substrate/primitives/core/src/paired_crypto.rs index bf5b26366571a..321726f213e10 100644 --- a/substrate/primitives/core/src/paired_crypto.rs +++ b/substrate/primitives/core/src/paired_crypto.rs @@ -24,6 +24,8 @@ use crate::crypto::{ PublicBytes, SecretStringError, Signature as SignatureT, SignatureBytes, UncheckedFrom, }; +use crate::proof_of_possession::{ProofOfPossessionGenerator, ProofOfPossessionVerifier}; + use alloc::vec::Vec; /// ECDSA and BLS12-377 paired crypto scheme @@ -139,9 +141,12 @@ pub mod ecdsa_bls381 { /// An identifier used to match public keys against BLS12-381 keys pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"ecb8"); - const PUBLIC_KEY_LEN: usize = + /// Aggregate public key size. + pub const PUBLIC_KEY_LEN: usize = ecdsa::PUBLIC_KEY_SERIALIZED_SIZE + bls381::PUBLIC_KEY_SERIALIZED_SIZE; - const SIGNATURE_LEN: usize = + + /// Aggregate signature serialized size. 
+ pub const SIGNATURE_LEN: usize = ecdsa::SIGNATURE_SERIALIZED_SIZE + bls381::SIGNATURE_SERIALIZED_SIZE; #[doc(hidden)] @@ -387,6 +392,82 @@ where } } +impl< + LeftPair: PairT + ProofOfPossessionGenerator, + RightPair: PairT + ProofOfPossessionGenerator, + const PUBLIC_KEY_LEN: usize, + const SIGNATURE_LEN: usize, + SubTag: PairedCryptoSubTagBound, + > ProofOfPossessionGenerator for Pair +where + Pair: CryptoType, + Public: PublicT, + Signature: SignatureT, + LeftPair::Seed: From + Into, + RightPair::Seed: From + Into, +{ + #[cfg(feature = "full_crypto")] + fn generate_proof_of_possession(&mut self) -> Self::Signature { + let mut raw: [u8; SIGNATURE_LEN] = [0u8; SIGNATURE_LEN]; + + raw.copy_from_slice( + [ + self.left.generate_proof_of_possession().to_raw_vec(), + self.right.generate_proof_of_possession().to_raw_vec(), + ] + .concat() + .as_slice(), + ); + Self::Signature::unchecked_from(raw) + } +} + +/// This requires that the proof_of_possession of the LEFT component is of type LeftPair::Signature. +/// This holds for the currently implemented cases but does not +/// hold in general. +impl< + LeftPair: PairT + ProofOfPossessionVerifier, + RightPair: PairT + ProofOfPossessionVerifier, + const PUBLIC_KEY_LEN: usize, + const SIGNATURE_LEN: usize, + SubTag: PairedCryptoSubTagBound, + > ProofOfPossessionVerifier for Pair +where + Pair: CryptoType, + Public: PublicT, + Signature: SignatureT, + LeftPair::Seed: From + Into, + RightPair::Seed: From + Into, +{ + fn verify_proof_of_possession( + proof_of_possession: &Self::Signature, + allegedly_possessed_pubkey: &Self::Public, + ) -> bool { + let Ok(left_pub) = allegedly_possessed_pubkey.0[..LeftPair::Public::LEN].try_into() else { + return false + }; + let Ok(left_proof_of_possession) = + proof_of_possession.0[0..LeftPair::Signature::LEN].try_into() + else { + return false + }; + + if !LeftPair::verify_proof_of_possession(&left_proof_of_possession, &left_pub) { + return false + } + + let Ok(right_pub) = allegedly_possessed_pubkey.0[LeftPair::Public::LEN..].try_into() else { + return false + }; + let Ok(right_proof_of_possession) = + proof_of_possession.0[LeftPair::Signature::LEN..].try_into() + else { + return false + }; + RightPair::verify_proof_of_possession(&right_proof_of_possession, &right_pub) + } +} + // Test set exercising the (ECDSA,BLS12-377) implementation #[cfg(all(test, feature = "bls-experimental"))] mod tests { @@ -475,7 +556,7 @@ mod tests { ); let message = b""; let signature = -
array_bytes::hex2array_unchecked("3dde91174bd9359027be59a428b8146513df80a2a3c7eda2194f64de04a69ab97b753169e94db6ffd50921a2668a48b94ca11e3d32c1ff19cfe88890aa7e8f3c00d1e3013161991e142d8751017d4996209c2ff8a9ee160f373733eda3b4b785ba6edce9f45f87104bbe07aa6aa6eb2780aa705efb2c13d3b317d6409d159d23bdc7cdd5c2a832d1551cf49d811d49c901495e527dbd532e3a462335ce2686009104aba7bc11c5b22be78f3198d2727a0b" + array_bytes::hex2array_unchecked("3dde91174bd9359027be59a428b8146513df80a2a3c7eda2194f64de04a69ab97b753169e94db6ffd50921a2668a48b94ca11e3d32c1ff19cfe88890aa7e8f3c00124571b4bf23083b5d07e720fde0a984d4d592868156ece77487e97a1ba4b29397dbdc454f13e3aed1ad4b6a99af2501c68ab88ec0495f962a4f55c7c460275a8d356cfa344c27778ca4c641bd9a3604ce5c28f9ed566e1d29bf3b5d3591e46ae28be3ece035e8e4db53a40fc5826002" ); let signature = Signature::unchecked_from(signature); assert!(pair.sign(&message[..]) == signature); @@ -628,4 +709,16 @@ mod tests { let decoded_signature = Signature::decode(&mut encoded_signature.as_slice()).unwrap(); assert_eq!(signature, decoded_signature) } + + #[test] + fn good_proof_of_possession_should_work_bad_proof_of_possession_should_fail() { + let mut pair = Pair::from_seed(b"12345678901234567890123456789012"); + let other_pair = Pair::from_seed(b"23456789012345678901234567890123"); + let proof_of_possession = pair.generate_proof_of_possession(); + assert!(Pair::verify_proof_of_possession(&proof_of_possession, &pair.public())); + assert_eq!( + Pair::verify_proof_of_possession(&proof_of_possession, &other_pair.public()), + false + ); + } } diff --git a/substrate/primitives/core/src/proof_of_possession.rs b/substrate/primitives/core/src/proof_of_possession.rs new file mode 100644 index 0000000000000..b1df00e1dc0c3 --- /dev/null +++ b/substrate/primitives/core/src/proof_of_possession.rs @@ -0,0 +1,128 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Utilities for proving possession of a particular public key + +use crate::crypto::{CryptoType, Pair}; +use sp_std::vec::Vec; + +/// Pair which is able to generate proof of possession. +/// +/// This is implemented in different trait to provide default behavior. +pub trait ProofOfPossessionGenerator: Pair +where + Self::Public: CryptoType, +{ + /// Generate proof of possession. + /// + /// The proof of possession generator is supposed to + /// produce a "signature" with unique hash context that should + /// never be used in other signatures. This proves that + /// the secret key is known to the prover. While prevent + /// malicious actors to trick an honest party to sign an + /// unpossessed public key resulting in a rogue key attack (See: Section 4.3 of + /// - Ristenpart, T., & Yilek, S. (2007). The power of proofs-of-possession: Securing multiparty + /// signatures against rogue-key attacks. In , Annual {{International Conference}} on the + /// {{Theory}} and {{Applications}} of {{Cryptographic Techniques} (pp. 228–245). 
Springer). + #[cfg(feature = "full_crypto")] + fn generate_proof_of_possession(&mut self) -> Self::Signature; +} + +/// Pair which is able to verify a proof of possession. +/// +/// While you don't need a keypair to verify a proof of possession (a public key suffices), +/// we constrain on Pair to use the Public and Signature types associated with Pair. +/// This is implemented in a separate trait (rather than on the public key) to provide default behavior. +pub trait ProofOfPossessionVerifier: Pair +where + Self::Public: CryptoType, +{ + /// Verify proof of possession. + /// + /// The proof of possession verifier is supposed to verify a signature with a unique hash + /// context that is produced solely for this purpose. This proves that the secret key is + /// known to the prover. + fn verify_proof_of_possession( + proof_of_possession: &Self::Signature, + allegedly_possessed_pubkey: &Self::Public, + ) -> bool; +} + +/// Marker trait to identify whether the scheme is not aggregatable. +/// +/// Aggregatable schemes may change/optimize implementation parts such as the proof of possession +/// or other specifics. +/// +/// This is specifically because the implementation of proof of possession for aggregatable schemes +/// is security critical. +/// +/// We would like to prevent aggregatable schemes from unknowingly generating signatures +/// which aggregate to a false albeit valid proof of possession, a.k.a. a rogue key attack. +/// We ensure that by separating signing and generating a proof_of_possession at the API level. +/// +/// A rogue key attack, however, is not immediately applicable to non-aggregatable schemes: +/// even if an honest signing oracle is tricked into signing a rogue proof_of_possession, it is not +/// possible to aggregate it into a valid proof for a key the attacker does not +/// possess. Therefore we do not require non-aggregatable schemes to prevent proof_of_possession +/// confirming signatures at the API level. +pub trait NonAggregatable: Pair { + /// Default proof_of_possession statement. + fn proof_of_possession_statement(pk: &impl crate::Public) -> Vec { + /// The context attached to the proof_of_possession message to attest its purpose. + const PROOF_OF_POSSESSION_CONTEXT_TAG: &[u8; 4] = b"POP_"; + [PROOF_OF_POSSESSION_CONTEXT_TAG, pk.to_raw_vec().as_slice()].concat() + } +} + +impl ProofOfPossessionVerifier for T +where + T: NonAggregatable, +{ + /// Default implementation for non-aggregatable signatures. + /// + /// While we enforce hash context separation at the library level in aggregatable schemes, + /// it remains advisory for this default implementation, which goes through the plain + /// signature API of non-aggregatable schemes. + fn verify_proof_of_possession( + proof_of_possession: &Self::Signature, + allegedly_possessed_pubkey: &Self::Public, + ) -> bool { + let proof_of_possession_statement = + Self::proof_of_possession_statement(allegedly_possessed_pubkey); + Self::verify( + &proof_of_possession, + proof_of_possession_statement, + allegedly_possessed_pubkey, + ) + } +} + +impl ProofOfPossessionGenerator for T +where + T: NonAggregatable, +{ + /// Default implementation for non-aggregatable signatures.
+ /// + /// While we enforce hash context separation at the library level in aggregatable schemes, + /// it remains advisory for this default implementation, which goes through the plain + /// signature API of non-aggregatable schemes. + #[cfg(feature = "full_crypto")] + fn generate_proof_of_possession(&mut self) -> Self::Signature { + let proof_of_possession_statement = Self::proof_of_possession_statement(&self.public()); + self.sign(proof_of_possession_statement.as_slice()) + } +} diff --git a/substrate/primitives/core/src/sr25519.rs b/substrate/primitives/core/src/sr25519.rs index 5541b0d0e8281..e65892e2495fb 100644 --- a/substrate/primitives/core/src/sr25519.rs +++ b/substrate/primitives/core/src/sr25519.rs @@ -22,9 +22,11 @@ #[cfg(feature = "serde")] use crate::crypto::Ss58Codec; -use crate::crypto::{ - CryptoBytes, DeriveError, DeriveJunction, Pair as TraitPair, SecretStringError, +use crate::{ + crypto::{CryptoBytes, DeriveError, DeriveJunction, Pair as TraitPair, SecretStringError}, + proof_of_possession::NonAggregatable, }; + use alloc::vec::Vec; #[cfg(feature = "full_crypto")] use schnorrkel::signing_context; @@ -296,6 +298,8 @@ impl CryptoType for Pair { type Pair = Pair; } +impl NonAggregatable for Pair {} + /// Schnorrkel VRF related types and operations. pub mod vrf { use super::*; @@ -587,6 +591,7 @@ mod tests { use super::{vrf::*, *}; use crate::{ crypto::{Ss58Codec, VrfPublic, VrfSecret, DEV_ADDRESS, DEV_PHRASE}, + proof_of_possession::{ProofOfPossessionGenerator, ProofOfPossessionVerifier}, ByteArray as _, }; use serde_json; @@ -915,4 +920,13 @@ mod tests { assert!(public.vrf_verify(&data, &signature2)); assert_eq!(signature.pre_output, signature2.pre_output); } + + #[test] + fn good_proof_of_possession_should_work_bad_proof_of_possession_should_fail() { + let mut pair = Pair::from_seed(b"12345678901234567890123456789012"); + let other_pair = Pair::from_seed(b"23456789012345678901234567890123"); + let proof_of_possession = pair.generate_proof_of_possession(); + assert!(Pair::verify_proof_of_possession(&proof_of_possession, &pair.public())); + assert!(!Pair::verify_proof_of_possession(&proof_of_possession, &other_pair.public())); + } } diff --git a/substrate/primitives/io/src/lib.rs b/substrate/primitives/io/src/lib.rs index 7b94a87e59a4d..e9f0dae59a07e 100644 --- a/substrate/primitives/io/src/lib.rs +++ b/substrate/primitives/io/src/lib.rs @@ -1364,8 +1364,25 @@ pub trait Crypto { .expect("`bls381_generate` failed") } - /// Generate an `(ecdsa,bls12-381)` key for the given key type using an optional `seed` and - /// store it in the keystore. + /// Generate a `bls12-381` proof of possession for the corresponding public key. + /// + /// Returns the proof of possession as an option of the [`bls381::Signature`] type, + /// or `None` if an error occurs. + #[cfg(feature = "bls-experimental")] + fn bls381_generate_proof_of_possession( + &mut self, + id: PassPointerAndReadCopy, + pub_key: PassPointerAndRead<&bls381::Public, 144>, + ) -> AllocateAndReturnByCodec> { + self.extension::() + .expect("No `keystore` associated for the current context!") + .bls381_generate_proof_of_possession(id, pub_key) + .ok() + .flatten() + } + + /// Generate a combination `ecdsa & bls12-381` key for the given key type using an optional `seed` + /// and store it in the keystore. /// /// The `seed` needs to be a valid utf8.
/// @@ -1401,6 +1418,24 @@ pub trait Crypto { .bandersnatch_generate_new(id, seed) .expect("`bandernatch_generate` failed") } + + /// Sign the given `msg` with the `bandersnatch` key that corresponds to the given public key + /// and key type in the keystore. + /// + /// Returns the signature or `None` if an error occurred. + #[cfg(feature = "bandersnatch-experimental")] + fn bandersnatch_sign( + &mut self, + id: PassPointerAndReadCopy, + pub_key: PassPointerAndRead<&bandersnatch::Public, 32>, + msg: PassFatPointerAndRead<&[u8]>, + ) -> AllocateAndReturnByCodec> { + self.extension::() + .expect("No `keystore` associated for the current context!") + .bandersnatch_sign(id, pub_key, msg) + .ok() + .flatten() + } } /// Interface that provides functions for hashing with different algorithms. diff --git a/substrate/primitives/keystore/src/lib.rs b/substrate/primitives/keystore/src/lib.rs index 42ad2c600d02b..182724ebb0d14 100644 --- a/substrate/primitives/keystore/src/lib.rs +++ b/substrate/primitives/keystore/src/lib.rs @@ -322,6 +322,21 @@ pub trait Keystore: Send + Sync { msg: &[u8], ) -> Result, Error>; + /// Generate a bls381 proof of possession for a given public key. + /// + /// Receives a [`KeyTypeId`] and a [`bls381::Public`] key to be able to map + /// them to a private key that exists in the keystore. + /// + /// Returns a [`bls381::Signature`], or `None` in case the given `key_type` + /// and `public` combination doesn't exist in the keystore. + /// An `Err` will be returned if generating the proof of possession itself failed. + #[cfg(feature = "bls-experimental")] + fn bls381_generate_proof_of_possession( + &self, + key_type: KeyTypeId, + public: &bls381::Public, + ) -> Result, Error>; + /// Generate a (ecdsa,bls381) signature pair for a given message.
/// /// Receives [`KeyTypeId`] and a [`ecdsa_bls381::Public`] key to be able to map @@ -620,6 +635,15 @@ impl Keystore for Arc { (**self).bls381_sign(key_type, public, msg) } + #[cfg(feature = "bls-experimental")] + fn bls381_generate_proof_of_possession( + &self, + key_type: KeyTypeId, + public: &bls381::Public, + ) -> Result, Error> { + (**self).bls381_generate_proof_of_possession(key_type, public) + } + #[cfg(feature = "bls-experimental")] fn ecdsa_bls381_sign( &self, diff --git a/substrate/primitives/keystore/src/testing.rs b/substrate/primitives/keystore/src/testing.rs index 7939ee81005a0..1148620850695 100644 --- a/substrate/primitives/keystore/src/testing.rs +++ b/substrate/primitives/keystore/src/testing.rs @@ -22,7 +22,9 @@ use crate::{Error, Keystore, KeystorePtr}; #[cfg(feature = "bandersnatch-experimental")] use sp_core::bandersnatch; #[cfg(feature = "bls-experimental")] -use sp_core::{bls381, ecdsa_bls381, KeccakHasher}; +use sp_core::{ + bls381, ecdsa_bls381, proof_of_possession::ProofOfPossessionGenerator, KeccakHasher, +}; use sp_core::{ crypto::{ByteArray, KeyTypeId, Pair, VrfSecret}, ecdsa, ed25519, sr25519, @@ -57,9 +59,12 @@ impl MemoryKeystore { .read() .get(&key_type) .map(|keys| { - keys.values() - .map(|s| T::from_string(s, None).expect("seed slice is valid")) - .map(|p| p.public()) + keys.iter() + .filter_map(|(raw_pubkey, s)| { + let pair = T::from_string(s, None).expect("seed slice is valid"); + let pubkey = pair.public(); + (pubkey.as_slice() == raw_pubkey).then_some(pubkey) + }) .collect() }) .unwrap_or_default() @@ -122,6 +127,18 @@ impl MemoryKeystore { let pre_output = self.pair::(key_type, public).map(|pair| pair.vrf_pre_output(input)); Ok(pre_output) } + + #[cfg(feature = "bls-experimental")] + fn generate_proof_of_possession( + &self, + key_type: KeyTypeId, + public: &T::Public, + ) -> Result, Error> { + let proof_of_possession = self + .pair::(key_type, public) + .map(|mut pair| pair.generate_proof_of_possession()); + Ok(proof_of_possession) + } } impl Keystore for MemoryKeystore { @@ -298,6 +315,15 @@ impl Keystore for MemoryKeystore { self.sign::(key_type, public, msg) } + #[cfg(feature = "bls-experimental")] + fn bls381_generate_proof_of_possession( + &self, + key_type: KeyTypeId, + public: &bls381::Public, + ) -> Result, Error> { + self.generate_proof_of_possession::(key_type, public) + } + #[cfg(feature = "bls-experimental")] fn ecdsa_bls381_public_keys(&self, key_type: KeyTypeId) -> Vec { self.public_keys::(key_type) @@ -309,7 +335,24 @@ impl Keystore for MemoryKeystore { key_type: KeyTypeId, seed: Option<&str>, ) -> Result { - self.generate_new::(key_type, seed) + let pubkey = self.generate_new::(key_type, seed)?; + + let s: String = self + .keys + .read() + .get(&key_type) + .and_then(|inner| inner.get(pubkey.as_slice()).map(|s| s.to_string())) + .expect("can retrieve the seed of a key that was just generated"); + + // This is done to give the keystore access to the individual keys; it is necessary to avoid + // redundant host functions for paired keys and to re-use the host functions implemented for each + // element of the pair.
+ self.generate_new::(key_type, Some(&s)) + .expect("seed slice is valid"); + self.generate_new::(key_type, Some(&s)) + .expect("seed slice is valid"); + + Ok(pubkey) } #[cfg(feature = "bls-experimental")] @@ -514,6 +557,63 @@ mod tests { )); } + #[test] + #[cfg(feature = "bls-experimental")] + fn ecdsa_bls381_generate_with_none_works() { + use sp_core::testing::ECDSA_BLS381; + + let store = MemoryKeystore::new(); + let ecdsa_bls381_key = + store.ecdsa_bls381_generate_new(ECDSA_BLS381, None).expect("Can generate key.."); + + let ecdsa_keys = store.ecdsa_public_keys(ECDSA_BLS381); + let bls381_keys = store.bls381_public_keys(ECDSA_BLS381); + let ecdsa_bls381_keys = store.ecdsa_bls381_public_keys(ECDSA_BLS381); + + assert_eq!(ecdsa_keys.len(), 1); + assert_eq!(bls381_keys.len(), 1); + assert_eq!(ecdsa_bls381_keys.len(), 1); + + let ecdsa_key = ecdsa_keys[0]; + let bls381_key = bls381_keys[0]; + + let mut combined_key_raw = [0u8; ecdsa_bls381::PUBLIC_KEY_LEN]; + combined_key_raw[..ecdsa::PUBLIC_KEY_SERIALIZED_SIZE].copy_from_slice(ecdsa_key.as_ref()); + combined_key_raw[ecdsa::PUBLIC_KEY_SERIALIZED_SIZE..].copy_from_slice(bls381_key.as_ref()); + let combined_key = ecdsa_bls381::Public::from_raw(combined_key_raw); + + assert_eq!(combined_key, ecdsa_bls381_key); + } + + #[test] + #[cfg(feature = "bls-experimental")] + fn ecdsa_bls381_generate_with_seed_works() { + use sp_core::testing::ECDSA_BLS381; + + let store = MemoryKeystore::new(); + let ecdsa_bls381_key = store + .ecdsa_bls381_generate_new(ECDSA_BLS381, Some("//Alice")) + .expect("Can generate key.."); + + let ecdsa_keys = store.ecdsa_public_keys(ECDSA_BLS381); + let bls381_keys = store.bls381_public_keys(ECDSA_BLS381); + let ecdsa_bls381_keys = store.ecdsa_bls381_public_keys(ECDSA_BLS381); + + assert_eq!(ecdsa_keys.len(), 1); + assert_eq!(bls381_keys.len(), 1); + assert_eq!(ecdsa_bls381_keys.len(), 1); + + let ecdsa_key = ecdsa_keys[0]; + let bls381_key = bls381_keys[0]; + + let mut combined_key_raw = [0u8; ecdsa_bls381::PUBLIC_KEY_LEN]; + combined_key_raw[..ecdsa::PUBLIC_KEY_SERIALIZED_SIZE].copy_from_slice(ecdsa_key.as_ref()); + combined_key_raw[ecdsa::PUBLIC_KEY_SERIALIZED_SIZE..].copy_from_slice(bls381_key.as_ref()); + let combined_key = ecdsa_bls381::Public::from_raw(combined_key_raw); + + assert_eq!(combined_key, ecdsa_bls381_key); + } + #[test] #[cfg(feature = "bandersnatch-experimental")] fn bandersnatch_vrf_sign() { diff --git a/substrate/primitives/npos-elections/src/lib.rs b/substrate/primitives/npos-elections/src/lib.rs index 01dab56237e62..2d3841747819c 100644 --- a/substrate/primitives/npos-elections/src/lib.rs +++ b/substrate/primitives/npos-elections/src/lib.rs @@ -138,6 +138,10 @@ pub enum Error { TooManyVoters, /// Some bounds were exceeded when converting election types. BoundsExceeded, + /// A duplicate voter was detected. + DuplicateVoter, + /// A duplicate target was detected. 
+ DuplicateTarget, } /// A type which is used in the API of this crate as a numeric weight of a vote, most often the diff --git a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs index 6744b96fa910d..48ee1b19fbe27 100644 --- a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs +++ b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs @@ -20,8 +20,8 @@ use crate::{ generic::{CheckedExtrinsic, ExtrinsicFormat}, traits::{ - self, transaction_extension::TransactionExtension, Checkable, Dispatchable, ExtrinsicLike, - ExtrinsicMetadata, IdentifyAccount, MaybeDisplay, Member, SignaturePayload, + self, transaction_extension::TransactionExtension, Checkable, Dispatchable, ExtrinsicCall, + ExtrinsicLike, ExtrinsicMetadata, IdentifyAccount, MaybeDisplay, Member, SignaturePayload, }, transaction_validity::{InvalidTransaction, TransactionValidityError}, OpaqueExtrinsic, @@ -29,7 +29,10 @@ use crate::{ #[cfg(all(not(feature = "std"), feature = "serde"))] use alloc::format; use alloc::{vec, vec::Vec}; -use codec::{Compact, Decode, DecodeWithMemTracking, Encode, EncodeLike, Error, Input}; +use codec::{ + Compact, CountedInput, Decode, DecodeWithMemLimit, DecodeWithMemTracking, Encode, EncodeLike, + Input, +}; use core::fmt; use scale_info::{build::Fields, meta_type, Path, StaticTypeInfo, Type, TypeInfo, TypeParameter}; use sp_io::hashing::blake2_256; @@ -59,6 +62,9 @@ pub const LEGACY_EXTRINSIC_FORMAT_VERSION: ExtrinsicVersion = 4; /// [UncheckedExtrinsic] implementation. const EXTENSION_VERSION: ExtensionVersion = 0; +/// Maximum decoded heap size for a runtime call (in bytes). +pub const DEFAULT_MAX_CALL_SIZE: usize = 16 * 1024 * 1024; // 16 MiB + /// The `SignaturePayload` of `UncheckedExtrinsic`. pub type UncheckedSignaturePayload = (Address, Signature, Extension); @@ -102,7 +108,7 @@ where Signature: Decode, Extension: Decode, { - fn decode(input: &mut I) -> Result { + fn decode(input: &mut I) -> Result { let version_and_type = input.read_byte()?; let version = version_and_type & VERSION_MASK; @@ -226,7 +232,13 @@ where Signature: DecodeWithMemTracking, Extension: DecodeWithMemTracking) )] -pub struct UncheckedExtrinsic { +pub struct UncheckedExtrinsic< + Address, + Call, + Signature, + Extension, + const MAX_CALL_SIZE: usize = DEFAULT_MAX_CALL_SIZE, +> { /// Information regarding the type of extrinsic this is (inherent or transaction) as well as /// associated extension (`Extension`) data if it's a transaction and a possible signature. pub preamble: Preamble, @@ -295,12 +307,12 @@ impl UncheckedExtrinsic Self { - Self { preamble: Preamble::Bare(EXTRINSIC_FORMAT_VERSION), function } + Self::from_parts(function, Preamble::Bare(EXTRINSIC_FORMAT_VERSION)) } /// New instance of a bare (ne unsigned) extrinsic on extrinsic format version 4. pub fn new_bare_legacy(function: Call) -> Self { - Self { preamble: Preamble::Bare(LEGACY_EXTRINSIC_FORMAT_VERSION), function } + Self::from_parts(function, Preamble::Bare(LEGACY_EXTRINSIC_FORMAT_VERSION)) } /// New instance of an old-school signed transaction on extrinsic format version 4. @@ -310,24 +322,34 @@ impl UncheckedExtrinsic Self { - Self { preamble: Preamble::Signed(signed, signature, tx_ext), function } + Self::from_parts(function, Preamble::Signed(signed, signature, tx_ext)) } /// New instance of an new-school unsigned transaction. 
pub fn new_transaction(function: Call, tx_ext: Extension) -> Self { - Self { preamble: Preamble::General(EXTENSION_VERSION, tx_ext), function } + Self::from_parts(function, Preamble::General(EXTENSION_VERSION, tx_ext)) } } -impl ExtrinsicLike for UncheckedExtrinsic { + fn is_signed(&self) -> Option { + Some(matches!(self.preamble, Preamble::Signed(..))) + } + fn is_bare(&self) -> bool { matches!(self.preamble, Preamble::Bare(_)) } +} - fn is_signed(&self) -> Option { - Some(matches!(self.preamble, Preamble::Signed(..))) +impl ExtrinsicCall + for UncheckedExtrinsic +{ + type Call = Call; + + fn call(&self) -> &Call { + &self.function } } @@ -412,32 +434,28 @@ impl Decode - for UncheckedExtrinsic +impl Decode + for UncheckedExtrinsic where Address: Decode, Signature: Decode, - Call: Decode, + Call: DecodeWithMemTracking, Extension: Decode, { - fn decode(input: &mut I) -> Result { + fn decode(input: &mut I) -> Result { // This is a little more complicated than usual since the binary format must be compatible // with SCALE's generic `Vec` type. Basically this just means accepting that there // will be a prefix of vector length. let expected_length: Compact = Decode::decode(input)?; - let before_length = input.remaining_len()?; + let mut input = CountedInput::new(input); - let preamble = Decode::decode(input)?; - let function = Decode::decode(input)?; + let preamble = Decode::decode(&mut input)?; + // Add 1 byte to `MAX_CALL_SIZE`, since decoding fails exactly at the given value and a + // call of exactly the maximum size should still fit. + let function = Call::decode_with_mem_limit(&mut input, MAX_CALL_SIZE.saturating_add(1))?; - if let Some((before_length, after_length)) = - input.remaining_len()?.and_then(|a| before_length.map(|b| (b, a))) - { - let length = before_length.saturating_sub(after_length); - - if length != expected_length.0 as usize { - return Err("Invalid length prefix".into()) - } + if input.count() != expected_length.0 as u64 { + return Err("Invalid length prefix".into()) } Ok(Self { preamble, function }) @@ -491,8 +509,8 @@ impl serde: } #[cfg(feature = "serde")] -impl<'a, Address: Decode, Signature: Decode, Call: Decode, Extension: Decode> serde::Deserialize<'a> - for UncheckedExtrinsic +impl<'a, Address: Decode, Signature: Decode, Call: DecodeWithMemTracking, Extension: Decode> + serde::Deserialize<'a> for UncheckedExtrinsic { fn deserialize(de: D) -> Result where @@ -1005,4 +1023,21 @@ mod tests { decoded_old_ux.check(&IdentityLookup::::default()).unwrap(); assert_eq!(new_checked, old_checked); } + + #[test] + fn max_call_heap_size_should_be_checked() { + // Should be able to decode an `UncheckedExtrinsic` that contains a call with + // heap size <= `DEFAULT_MAX_CALL_SIZE` + let ux = Ex::new_bare(vec![0u8; DEFAULT_MAX_CALL_SIZE].into()); + let encoded = ux.encode(); + assert_eq!(Ex::decode(&mut &encoded[..]), Ok(ux)); + + // Decoding a call above the limit should fail + let ux = Ex::new_bare(vec![0u8; DEFAULT_MAX_CALL_SIZE + 1].into()); + let encoded = ux.encode(); + assert_eq!( + Ex::decode(&mut &encoded[..]).unwrap_err().to_string(), + "Could not decode `FakeDispatchable.0`:\n\tHeap memory limit exceeded while decoding\n" + ); + } } diff --git a/substrate/primitives/runtime/src/testing.rs b/substrate/primitives/runtime/src/testing.rs index 8084ff53e634e..1104435f278a6 100644 --- a/substrate/primitives/runtime/src/testing.rs +++ b/substrate/primitives/runtime/src/testing.rs @@ -124,6 +124,14 @@ impl sp_application_crypto::RuntimeAppPublic for UintAuthorityId {
traits::Verify::verify(signature, msg.as_ref(), &self.0) } + fn generate_proof_of_possession(&mut self) -> Option { + None + } + + fn verify_proof_of_possession(&self, _pop: &Self::Signature) -> bool { + false + } + fn to_raw_vec(&self) -> Vec { AsRef::<[u8]>::as_ref(self).to_vec() } diff --git a/substrate/primitives/runtime/src/traits/mod.rs b/substrate/primitives/runtime/src/traits/mod.rs index 0cda70efa8672..b049e54a33341 100644 --- a/substrate/primitives/runtime/src/traits/mod.rs +++ b/substrate/primitives/runtime/src/traits/mod.rs @@ -1402,6 +1402,15 @@ where } } +/// An extrinsic on which we can get access to the call. +pub trait ExtrinsicCall: ExtrinsicLike { + /// The type of the call. + type Call; + + /// Get the call of the extrinsic. + fn call(&self) -> &Self::Call; +} + /// Something that acts like a [`SignaturePayload`](Extrinsic::SignaturePayload) of an /// [`Extrinsic`]. pub trait SignaturePayload { @@ -1569,7 +1578,7 @@ impl Dispatchable for () { } /// Dispatchable impl containing an arbitrary value which panics if it actually is dispatched. -#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] +#[derive(Clone, Eq, PartialEq, Encode, Decode, DecodeWithMemTracking, RuntimeDebug, TypeInfo)] pub struct FakeDispatchable(pub Inner); impl From for FakeDispatchable { fn from(inner: Inner) -> Self { diff --git a/substrate/test-utils/runtime/Cargo.toml b/substrate/test-utils/runtime/Cargo.toml index 7af692b437f63..11a246ba75d41 100644 --- a/substrate/test-utils/runtime/Cargo.toml +++ b/substrate/test-utils/runtime/Cargo.toml @@ -35,6 +35,7 @@ sp-consensus-babe = { features = ["serde"], workspace = true } sp-consensus-grandpa = { features = ["serde"], workspace = true } sp-core = { features = ["serde"], workspace = true } sp-crypto-hashing = { workspace = true } +sp-debug-derive = { workspace = true, default-features = false, features = ["force-debug"] } sp-externalities = { workspace = true } sp-genesis-builder = { workspace = true } sp-inherents = { workspace = true } @@ -98,6 +99,7 @@ std = [ "sp-consensus-grandpa/std", "sp-core/std", "sp-crypto-hashing/std", + "sp-debug-derive/std", "sp-externalities/std", "sp-genesis-builder/std", "sp-inherents/std", @@ -118,3 +120,8 @@ std = [ # Special feature to disable logging disable-logging = ["sp-api/disable-logging"] + +# This feature adds BLS crypto primitives. +# It should not be used in production since the implementation and interface may still +# be subject to significant changes.
+bls-experimental = ["sp-application-crypto/bls-experimental"] diff --git a/substrate/test-utils/runtime/client/Cargo.toml b/substrate/test-utils/runtime/client/Cargo.toml index 5dd3c304f4a8e..8aa87aad2859b 100644 --- a/substrate/test-utils/runtime/client/Cargo.toml +++ b/substrate/test-utils/runtime/client/Cargo.toml @@ -26,3 +26,6 @@ sp-core = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } substrate-test-client = { workspace = true } substrate-test-runtime = { workspace = true } + +[features] +bls-experimental = ["substrate-test-runtime/bls-experimental"] diff --git a/substrate/test-utils/runtime/src/lib.rs b/substrate/test-utils/runtime/src/lib.rs index 5636007136338..34e5facbf6532 100644 --- a/substrate/test-utils/runtime/src/lib.rs +++ b/substrate/test-utils/runtime/src/lib.rs @@ -50,6 +50,10 @@ use sp_application_crypto::Ss58Codec; use sp_keyring::Sr25519Keyring; use sp_application_crypto::{ecdsa, ed25519, sr25519, RuntimeAppPublic}; + +#[cfg(feature = "bls-experimental")] +use sp_application_crypto::{bls381, ecdsa_bls381}; + use sp_core::{OpaqueMetadata, RuntimeDebug}; use sp_trie::{ trie_types::{TrieDBBuilder, TrieDBMutBuilderV1}, @@ -184,6 +188,27 @@ pub type Header = sp_runtime::generic::Header; /// Balance of an account. pub type Balance = u64; +#[cfg(feature = "bls-experimental")] +mod bls { + use sp_application_crypto::{bls381, ecdsa_bls381}; + pub type Bls381Public = bls381::AppPublic; + pub type Bls381Pop = bls381::AppSignature; + pub type EcdsaBls381Public = ecdsa_bls381::AppPublic; + pub type EcdsaBls381Pop = ecdsa_bls381::AppSignature; +} +#[cfg(not(feature = "bls-experimental"))] +mod bls { + pub type Bls381Public = (); + pub type Bls381Pop = (); + pub type EcdsaBls381Public = (); + pub type EcdsaBls381Pop = (); +} +pub use bls::*; + +pub type EcdsaPop = ecdsa::AppSignature; +pub type Sr25519Pop = sr25519::AppSignature; +pub type Ed25519Pop = ed25519::AppSignature; + decl_runtime_apis! { #[api_version(2)] pub trait TestAPI { @@ -209,19 +234,26 @@ decl_runtime_apis! { fn vec_with_capacity(size: u32) -> Vec; /// Returns the initialized block number. fn get_block_number() -> u64; - /// Test that `ed25519` crypto works in the runtime. /// - /// Returns the signature generated for the message `ed25519` and the public key. - fn test_ed25519_crypto() -> (ed25519::AppSignature, ed25519::AppPublic); + /// Returns the signature generated for the message `ed25519`, the public key, and a proof of possession. + fn test_ed25519_crypto() -> (ed25519::AppSignature, ed25519::AppPublic, Ed25519Pop); /// Test that `sr25519` crypto works in the runtime. /// - /// Returns the signature generated for the message `sr25519`. - fn test_sr25519_crypto() -> (sr25519::AppSignature, sr25519::AppPublic); + /// Returns the signature generated for the message `sr25519`, the public key, and a proof of possession. + fn test_sr25519_crypto() -> (sr25519::AppSignature, sr25519::AppPublic, Sr25519Pop); /// Test that `ecdsa` crypto works in the runtime. /// - /// Returns the signature generated for the message `ecdsa`. - fn test_ecdsa_crypto() -> (ecdsa::AppSignature, ecdsa::AppPublic); + /// Returns the signature generated for the message `ecdsa`, the public key, and a proof of possession. + fn test_ecdsa_crypto() -> (ecdsa::AppSignature, ecdsa::AppPublic, EcdsaPop); + /// Test that `bls381` crypto works in the runtime. + /// + /// Returns both the proof of possession and public key.
+ fn test_bls381_crypto() -> (Bls381Pop, Bls381Public); + /// Test that `ecdsa_bls381_crypto` works in the runtime. + /// + /// Returns both the proof of possession and public key. + fn test_ecdsa_bls381_crypto() -> (EcdsaBls381Pop, EcdsaBls381Public); /// Run various tests against storage. fn test_storage(); /// Check a witness. @@ -574,18 +606,38 @@ impl_runtime_apis! { System::block_number() } - fn test_ed25519_crypto() -> (ed25519::AppSignature, ed25519::AppPublic) { + fn test_ed25519_crypto() -> (ed25519::AppSignature, ed25519::AppPublic, Ed25519Pop) { test_ed25519_crypto() } - fn test_sr25519_crypto() -> (sr25519::AppSignature, sr25519::AppPublic) { + fn test_sr25519_crypto() -> (sr25519::AppSignature, sr25519::AppPublic, Sr25519Pop) { test_sr25519_crypto() } - fn test_ecdsa_crypto() -> (ecdsa::AppSignature, ecdsa::AppPublic) { + fn test_ecdsa_crypto() -> (ecdsa::AppSignature, ecdsa::AppPublic, EcdsaPop) { test_ecdsa_crypto() } + #[cfg(feature = "bls-experimental")] + fn test_bls381_crypto() -> (Bls381Pop, Bls381Public) { + test_bls381_crypto() + } + + #[cfg(feature = "bls-experimental")] + fn test_ecdsa_bls381_crypto() -> (EcdsaBls381Pop, EcdsaBls381Public) { + test_ecdsa_bls381_crypto() + } + + #[cfg(not(feature = "bls-experimental"))] + fn test_bls381_crypto() -> (Bls381Pop, Bls381Public) { + ((), ()) + } + + #[cfg(not(feature = "bls-experimental"))] + fn test_ecdsa_bls381_crypto() -> (EcdsaBls381Pop, EcdsaBls381Public) { + ((), ()) + } + fn test_storage() { test_read_storage(); test_read_child_storage(); @@ -765,8 +817,8 @@ impl_runtime_apis! { } } -fn test_ed25519_crypto() -> (ed25519::AppSignature, ed25519::AppPublic) { - let public0 = ed25519::AppPublic::generate_pair(None); +fn test_ed25519_crypto() -> (ed25519::AppSignature, ed25519::AppPublic, Ed25519Pop) { + let mut public0 = ed25519::AppPublic::generate_pair(None); let public1 = ed25519::AppPublic::generate_pair(None); let public2 = ed25519::AppPublic::generate_pair(None); @@ -775,13 +827,18 @@ fn test_ed25519_crypto() -> (ed25519::AppSignature, ed25519::AppPublic) { assert!(all.contains(&public1)); assert!(all.contains(&public2)); + let proof_of_possession = public0 + .generate_proof_of_possession() + .expect("Can't generate proof_of_possession for ed25519"); + assert!(public0.verify_proof_of_possession(&proof_of_possession)); + let signature = public0.sign(&"ed25519").expect("Generates a valid `ed25519` signature."); assert!(public0.verify(&"ed25519", &signature)); - (signature, public0) + (signature, public0, proof_of_possession) } -fn test_sr25519_crypto() -> (sr25519::AppSignature, sr25519::AppPublic) { - let public0 = sr25519::AppPublic::generate_pair(None); +fn test_sr25519_crypto() -> (sr25519::AppSignature, sr25519::AppPublic, Sr25519Pop) { + let mut public0 = sr25519::AppPublic::generate_pair(None); let public1 = sr25519::AppPublic::generate_pair(None); let public2 = sr25519::AppPublic::generate_pair(None); @@ -790,13 +847,18 @@ fn test_sr25519_crypto() -> (sr25519::AppSignature, sr25519::AppPublic) { assert!(all.contains(&public1)); assert!(all.contains(&public2)); + let proof_of_possession = public0 + .generate_proof_of_possession() + .expect("Can't generate proof_of_possession for sr25519"); + assert!(public0.verify_proof_of_possession(&proof_of_possession)); + let signature = public0.sign(&"sr25519").expect("Generates a valid `sr25519` signature."); assert!(public0.verify(&"sr25519", &signature)); - (signature, public0) + (signature, public0, proof_of_possession) } -fn test_ecdsa_crypto() -> 
(ecdsa::AppSignature, ecdsa::AppPublic) { - let public0 = ecdsa::AppPublic::generate_pair(None); +fn test_ecdsa_crypto() -> (ecdsa::AppSignature, ecdsa::AppPublic, EcdsaPop) { + let mut public0 = ecdsa::AppPublic::generate_pair(None); let public1 = ecdsa::AppPublic::generate_pair(None); let public2 = ecdsa::AppPublic::generate_pair(None); @@ -805,10 +867,39 @@ fn test_ecdsa_crypto() -> (ecdsa::AppSignature, ecdsa::AppPublic) { assert!(all.contains(&public1)); assert!(all.contains(&public2)); + let proof_of_possession = public0 + .generate_proof_of_possession() + .expect("Can't generate proof_of_possession for ecdsa"); + assert!(public0.verify_proof_of_possession(&proof_of_possession)); + let signature = public0.sign(&"ecdsa").expect("Generates a valid `ecdsa` signature."); assert!(public0.verify(&"ecdsa", &signature)); - (signature, public0) + (signature, public0, proof_of_possession) +} + +#[cfg(feature = "bls-experimental")] +fn test_bls381_crypto() -> (Bls381Pop, Bls381Public) { + let mut public0 = bls381::AppPublic::generate_pair(None); + + let proof_of_possession = public0 + .generate_proof_of_possession() + .expect("Can't generate proof_of_possession for bls381"); + assert!(public0.verify_proof_of_possession(&proof_of_possession)); + + (proof_of_possession, public0) +} + +#[cfg(feature = "bls-experimental")] +fn test_ecdsa_bls381_crypto() -> (EcdsaBls381Pop, EcdsaBls381Public) { + let mut public0 = ecdsa_bls381::AppPublic::generate_pair(None); + + let proof_of_possession = public0 + .generate_proof_of_possession() + .expect("Can't generate proof_of_possession for ecdsa_bls381"); + assert!(public0.verify_proof_of_possession(&proof_of_possession)); + + (proof_of_possession, public0) } fn test_read_storage() { diff --git a/substrate/utils/frame/benchmarking-cli/Cargo.toml b/substrate/utils/frame/benchmarking-cli/Cargo.toml index 540a08a98953d..b903175a38149 100644 --- a/substrate/utils/frame/benchmarking-cli/Cargo.toml +++ b/substrate/utils/frame/benchmarking-cli/Cargo.toml @@ -25,6 +25,7 @@ comfy-table = { workspace = true } cumulus-client-parachain-inherent = { workspace = true, default-features = true } cumulus-primitives-proof-size-hostfunction = { workspace = true, default-features = true } frame-benchmarking = { workspace = true, default-features = true } +frame-storage-access-test-runtime = { workspace = true, default-features = true } frame-support = { workspace = true, default-features = true } frame-system = { workspace = true, default-features = true } gethostname = { workspace = true } @@ -42,6 +43,8 @@ sc-cli = { workspace = true, default-features = false } sc-client-api = { workspace = true, default-features = true } sc-client-db = { workspace = true, default-features = false } sc-executor = { workspace = true, default-features = true } +sc-executor-common = { workspace = true } +sc-executor-wasmtime = { workspace = true } sc-runtime-utilities = { workspace = true, default-features = true } sc-service = { workspace = true, default-features = false } sc-sysinfo = { workspace = true, default-features = true } @@ -79,6 +82,7 @@ westend-runtime = { workspace = true, default-features = true } default = [] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", + "frame-storage-access-test-runtime/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", diff --git a/substrate/utils/frame/benchmarking-cli/src/overhead/remark_builder.rs
b/substrate/utils/frame/benchmarking-cli/src/overhead/remark_builder.rs index 65f0fcc32a690..c5546de232b6a 100644 --- a/substrate/utils/frame/benchmarking-cli/src/overhead/remark_builder.rs +++ b/substrate/utils/frame/benchmarking-cli/src/overhead/remark_builder.rs @@ -117,8 +117,9 @@ impl ExtrinsicBuilder for DynamicRemarkBuilder { let transaction = self .offline_client .tx() - .create_signed_offline(&dynamic_tx, &signer, params) - .unwrap(); + .create_partial_offline(&dynamic_tx, params) + .unwrap() + .sign(&signer); let mut encoded = transaction.into_encoded(); OpaqueExtrinsic::from_bytes(&mut encoded).map_err(|_| "Unable to construct OpaqueExtrinsic") diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs index af9118140d91c..39a2070b7b4ef 100644 --- a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs +++ b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs @@ -365,7 +365,8 @@ impl PalletCmd { let mut timer = time::SystemTime::now(); // Maps (pallet, extrinsic) to its component ranges. let mut component_ranges = HashMap::<(String, String), Vec>::new(); - let pov_modes = Self::parse_pov_modes(&benchmarks_to_run)?; + let pov_modes = + Self::parse_pov_modes(&benchmarks_to_run, &storage_info, self.ignore_unknown_pov_mode)?; let mut failed = Vec::<(String, String)>::new(); 'outer: for (i, SelectedBenchmark { pallet, instance, extrinsic, components, .. }) in @@ -469,12 +470,12 @@ impl PalletCmd { "dispatch a benchmark", ) { Err(e) => { - log::error!(target: LOG_TARGET, "Error executing and verifying runtime benchmark: {}", e); + log::error!(target: LOG_TARGET, "Benchmark {pallet}::{extrinsic} failed: {e}"); failed.push((pallet.clone(), extrinsic.clone())); continue 'outer }, Ok(Err(e)) => { - log::error!(target: LOG_TARGET, "Error executing and verifying runtime benchmark: {}", e); + log::error!(target: LOG_TARGET, "Benchmark {pallet}::{extrinsic} failed: {e}"); failed.push((pallet.clone(), extrinsic.clone())); continue 'outer }, @@ -490,7 +491,7 @@ impl PalletCmd { _, >( StateMachine::new( - state, // todo remove tracking + state, &mut Default::default(), &executor, "Benchmark_dispatch_benchmark", @@ -502,12 +503,12 @@ impl PalletCmd { "dispatch a benchmark", ) { Err(e) => { - log::error!(target: LOG_TARGET, "Error executing runtime benchmark: {}", e); + log::error!(target: LOG_TARGET, "Benchmark {pallet}::{extrinsic} failed: {e}"); failed.push((pallet.clone(), extrinsic.clone())); continue 'outer }, Ok(Err(e)) => { - log::error!(target: LOG_TARGET, "Benchmark {pallet}::{extrinsic} failed: {e}",); + log::error!(target: LOG_TARGET, "Benchmark {pallet}::{extrinsic} failed: {e}"); failed.push((pallet.clone(), extrinsic.clone())); continue 'outer }, @@ -537,11 +538,13 @@ impl PalletCmd { "dispatch a benchmark", ) { Err(e) => { - return Err(format!("Error executing runtime benchmark: {e}",).into()); + return Err( + format!("Benchmark {pallet}::{extrinsic} failed: {e}").into() + ); }, Ok(Err(e)) => { return Err( - format!("Benchmark {pallet}::{extrinsic} failed: {e}",).into() + format!("Benchmark {pallet}::{extrinsic} failed: {e}").into() ); }, Ok(Ok(b)) => b, @@ -589,20 +592,11 @@ impl PalletCmd { } fn select_benchmarks_to_run(&self, list: Vec) -> Result> { - let extrinsic = self.extrinsic.clone().unwrap_or_default(); - let extrinsic_split: Vec<&str> = extrinsic.split(',').collect(); - let extrinsics: Vec<_> = extrinsic_split.iter().map(|x| x.trim().as_bytes()).collect(); - // Use the benchmark 
diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs
index af9118140d91c..39a2070b7b4ef 100644
--- a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs
+++ b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs
@@ -365,7 +365,8 @@ impl PalletCmd {
         let mut timer = time::SystemTime::now();
         // Maps (pallet, extrinsic) to its component ranges.
         let mut component_ranges = HashMap::<(String, String), Vec<ComponentRange>>::new();
-        let pov_modes = Self::parse_pov_modes(&benchmarks_to_run)?;
+        let pov_modes =
+            Self::parse_pov_modes(&benchmarks_to_run, &storage_info, self.ignore_unknown_pov_mode)?;
         let mut failed = Vec::<(String, String)>::new();
 
         'outer: for (i, SelectedBenchmark { pallet, instance, extrinsic, components, .. }) in
@@ -469,12 +470,12 @@ impl PalletCmd {
                             "dispatch a benchmark",
                         ) {
                             Err(e) => {
-                                log::error!(target: LOG_TARGET, "Error executing and verifying runtime benchmark: {}", e);
+                                log::error!(target: LOG_TARGET, "Benchmark {pallet}::{extrinsic} failed: {e}");
                                 failed.push((pallet.clone(), extrinsic.clone()));
                                 continue 'outer
                             },
                             Ok(Err(e)) => {
-                                log::error!(target: LOG_TARGET, "Error executing and verifying runtime benchmark: {}", e);
+                                log::error!(target: LOG_TARGET, "Benchmark {pallet}::{extrinsic} failed: {e}");
                                 failed.push((pallet.clone(), extrinsic.clone()));
                                 continue 'outer
                             },
@@ -490,7 +491,7 @@ impl PalletCmd {
                             _,
                         >(
                             StateMachine::new(
-                                state, // todo remove tracking
+                                state,
                                 &mut Default::default(),
                                 &executor,
                                 "Benchmark_dispatch_benchmark",
@@ -502,12 +503,12 @@ impl PalletCmd {
                             "dispatch a benchmark",
                         ) {
                             Err(e) => {
-                                log::error!(target: LOG_TARGET, "Error executing runtime benchmark: {}", e);
+                                log::error!(target: LOG_TARGET, "Benchmark {pallet}::{extrinsic} failed: {e}");
                                 failed.push((pallet.clone(), extrinsic.clone()));
                                 continue 'outer
                             },
                             Ok(Err(e)) => {
-                                log::error!(target: LOG_TARGET, "Benchmark {pallet}::{extrinsic} failed: {e}",);
+                                log::error!(target: LOG_TARGET, "Benchmark {pallet}::{extrinsic} failed: {e}");
                                 failed.push((pallet.clone(), extrinsic.clone()));
                                 continue 'outer
                             },
@@ -537,11 +538,13 @@ impl PalletCmd {
                         "dispatch a benchmark",
                     ) {
                         Err(e) => {
-                            return Err(format!("Error executing runtime benchmark: {e}",).into());
+                            return Err(
+                                format!("Benchmark {pallet}::{extrinsic} failed: {e}").into()
+                            );
                         },
                         Ok(Err(e)) => {
                             return Err(
-                                format!("Benchmark {pallet}::{extrinsic} failed: {e}",).into()
+                                format!("Benchmark {pallet}::{extrinsic} failed: {e}").into()
                             );
                         },
                         Ok(Ok(b)) => b,
@@ -589,20 +592,11 @@ impl PalletCmd {
     }
 
     fn select_benchmarks_to_run(&self, list: Vec<BenchmarkList>) -> Result<Vec<SelectedBenchmark>> {
-        let extrinsic = self.extrinsic.clone().unwrap_or_default();
-        let extrinsic_split: Vec<&str> = extrinsic.split(',').collect();
-        let extrinsics: Vec<_> = extrinsic_split.iter().map(|x| x.trim().as_bytes()).collect();
-
         // Use the benchmark list and the user input to determine the set of benchmarks to run.
         let mut benchmarks_to_run = Vec::new();
         list.iter().filter(|item| self.pallet_selected(&item.pallet)).for_each(|item| {
             for benchmark in &item.benchmarks {
-                let benchmark_name = &benchmark.name;
-                if extrinsic.is_empty() ||
-                    extrinsic.as_bytes() == &b"*"[..] ||
-                    extrinsic.as_bytes() == &b"all"[..] ||
-                    extrinsics.contains(&&benchmark_name[..])
-                {
+                if self.extrinsic_selected(&item.pallet, &benchmark.name) {
                     benchmarks_to_run.push((
                         item.pallet.clone(),
                         item.instance.clone(),
@@ -638,7 +632,7 @@ impl PalletCmd {
             .collect();
 
         if benchmarks_to_run.is_empty() {
-            return Err("No benchmarks found which match your input.".into())
+            return Err("No benchmarks found which match your input. Try `--list --all` to list all available benchmarks.".into())
         }
 
         Ok(benchmarks_to_run)
@@ -646,17 +640,54 @@ impl PalletCmd {
 
     /// Whether this pallet should be run.
     fn pallet_selected(&self, pallet: &Vec<u8>) -> bool {
-        let include = self.pallet.clone().unwrap_or_default();
+        let include = self.pallets.clone();
 
         let included = include.is_empty() ||
-            include == "*" ||
-            include == "all" ||
-            include.as_bytes() == pallet;
+            include.iter().any(|p| p.as_bytes() == pallet) ||
+            include.iter().any(|p| p == "*") ||
+            include.iter().any(|p| p == "all");
 
         let excluded = self.exclude_pallets.iter().any(|p| p.as_bytes() == pallet);
 
         included && !excluded
     }
 
+    /// Whether this extrinsic should be run.
+    fn extrinsic_selected(&self, pallet: &Vec<u8>, extrinsic: &Vec<u8>) -> bool {
+        if !self.pallet_selected(pallet) {
+            return false;
+        }
+
+        let extrinsic_filter = self.extrinsic.clone().unwrap_or_default();
+        let extrinsic_split: Vec<&str> = extrinsic_filter.split(',').collect();
+        let extrinsics: Vec<_> = extrinsic_split.iter().map(|x| x.trim().as_bytes()).collect();
+
+        let included = extrinsic_filter.is_empty() ||
+            extrinsic_filter == "*" ||
+            extrinsics.contains(&&extrinsic[..]);
+
+        let excluded = self
+            .excluded_extrinsics()
+            .iter()
+            .any(|(p, e)| p.as_bytes() == pallet && e.as_bytes() == extrinsic);
+
+        included && !excluded
+    }
+
+    /// All `(pallet, extrinsic)` tuples that are excluded from the benchmarks.
+    fn excluded_extrinsics(&self) -> Vec<(String, String)> {
+        let mut excluded = Vec::new();
+
+        for e in &self.exclude_extrinsics {
+            let splits = e.split("::").collect::<Vec<_>>();
+            if splits.len() != 2 {
+                panic!("Invalid argument for '--exclude-extrinsics'. Expected format: 'pallet::extrinsic' but got '{}'", e);
+            }
+            excluded.push((splits[0].to_string(), splits[1].to_string()));
+        }
+
+        excluded
+    }
+
     /// Execute a state machine and decode its return value as `R`.
     fn exec_state_machine<R: Decode, H: Hash, Exec: CodeExecutor>(
         mut machine: StateMachine<BenchmarkingState<H>, H, Exec>,
@@ -912,14 +943,20 @@ impl PalletCmd {
     }
 
     /// Parses the PoV modes per benchmark that were specified by the `#[pov_mode]` attribute.
-    fn parse_pov_modes(benchmarks: &Vec<SelectedBenchmark>) -> Result<PovModesMap> {
+    fn parse_pov_modes(
+        benchmarks: &Vec<SelectedBenchmark>,
+        storage_info: &[StorageInfo],
+        ignore_unknown_pov_mode: bool,
+    ) -> Result<PovModesMap> {
        use std::collections::hash_map::Entry;
        let mut parsed = PovModesMap::new();
 
        for SelectedBenchmark { pallet, extrinsic, pov_modes, .. } in benchmarks {
            for (pallet_storage, mode) in pov_modes {
                let mode = PovEstimationMode::from_str(&mode)?;
+               let pallet_storage = pallet_storage.replace(" ", "");
                let splits = pallet_storage.split("::").collect::<Vec<_>>();
+
                if splits.is_empty() || splits.len() > 2 {
                    return Err(format!(
                        "Expected 'Pallet::Storage' as storage name but got: {}",
@@ -927,7 +964,8 @@ impl PalletCmd {
                    )
                    .into())
                }
-               let (pov_pallet, pov_storage) = (splits[0], splits.get(1).unwrap_or(&"ALL"));
+               let (pov_pallet, pov_storage) =
+                   (splits[0].trim(), splits.get(1).unwrap_or(&"ALL").trim());
 
                match parsed
                    .entry((pallet.clone(), extrinsic.clone()))
@@ -946,9 +984,43 @@ impl PalletCmd {
                }
            }
        }
+       log::debug!("Parsed PoV modes: {:?}", parsed);
+       Self::check_pov_modes(&parsed, storage_info, ignore_unknown_pov_mode)?;
+
        Ok(parsed)
    }
 
+    fn check_pov_modes(
+        pov_modes: &PovModesMap,
+        storage_info: &[StorageInfo],
+        ignore_unknown_pov_mode: bool,
+    ) -> Result<()> {
+        // Check that all PoV modes are valid pallet storage keys
+        for (pallet, storage) in pov_modes.values().flat_map(|i| i.keys()) {
+            let (mut found_pallet, mut found_storage) = (false, false);
+
+            for info in storage_info {
+                if pallet == "ALL" || info.pallet_name == pallet.as_bytes() {
+                    found_pallet = true;
+                }
+                if storage == "ALL" || info.storage_name == storage.as_bytes() {
+                    found_storage = true;
+                }
+            }
+            if !found_pallet || !found_storage {
+                let err = format!("The PoV mode references an unknown storage item or pallet: `{}::{}`. You can ignore this warning by specifying `--ignore-unknown-pov-mode`", pallet, storage);
+
+                if ignore_unknown_pov_mode {
+                    log::warn!(target: LOG_TARGET, "Error demoted to warning due to `--ignore-unknown-pov-mode`: {}", err);
+                } else {
+                    return Err(err.into());
+                }
+            }
+        }
+
+        Ok(())
+    }
+
     /// Sanity check the CLI arguments.
     fn check_args(
         &self,
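The new `--exclude-extrinsics` flag (defined in the next diff) accepts comma separated `pallet::extrinsic` pairs; `excluded_extrinsics` above panics on malformed input. A self-contained sketch of the same parsing that returns a `Result` instead (helper name and error type are illustrative, not the code above):

```rust
// Parse `pallet::extrinsic` pairs, rejecting anything that does not have
// exactly one `::` separator. Alternative to the panicking version above.
fn excluded_extrinsics(raw: &[String]) -> Result<Vec<(String, String)>, String> {
    raw.iter()
        .map(|e| {
            let mut it = e.split("::");
            match (it.next(), it.next(), it.next()) {
                (Some(p), Some(x), None) => Ok((p.to_string(), x.to_string())),
                _ => Err(format!("Expected format 'pallet::extrinsic' but got '{e}'")),
            }
        })
        .collect()
}

fn main() {
    let ok = excluded_extrinsics(&["frame_system::remark".to_string()]).unwrap();
    assert_eq!(ok, vec![("frame_system".to_string(), "remark".to_string())]);
    assert!(excluded_extrinsics(&["bad".to_string()]).is_err());
}
```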
diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/mod.rs b/substrate/utils/frame/benchmarking-cli/src/pallet/mod.rs
index caa999c3a6c58..5cb353059988c 100644
--- a/substrate/utils/frame/benchmarking-cli/src/pallet/mod.rs
+++ b/substrate/utils/frame/benchmarking-cli/src/pallet/mod.rs
@@ -46,9 +46,10 @@ pub enum ListOutput {
 /// Benchmark the extrinsic weight of FRAME Pallets.
 #[derive(Debug, clap::Parser)]
 pub struct PalletCmd {
-    /// Select a FRAME Pallet to benchmark, or `*` for all (in which case `extrinsic` must be `*`).
-    #[arg(short, long, value_parser = parse_pallet_name, required_unless_present_any = ["list", "json_input", "all"], default_value_if("all", "true", Some("*".into())))]
-    pub pallet: Option<String>,
+    /// Select the FRAME Pallets to benchmark, or `*` for all (in which case `extrinsic` must be
+    /// `*`).
+    #[arg(short, long, alias = "pallet", num_args = 1.., value_delimiter = ',', value_parser = parse_pallet_name, required_unless_present_any = ["list", "json_input", "all"], default_value_if("all", "true", Some("*".into())))]
+    pub pallets: Vec<String>,
 
     /// Select an extrinsic inside the pallet to benchmark, or `*` or 'all' for all.
     #[arg(short, long, required_unless_present_any = ["list", "json_input", "all"], default_value_if("all", "true", Some("*".into())))]
@@ -58,6 +59,12 @@ pub struct PalletCmd {
     #[arg(long, value_parser, num_args = 1.., value_delimiter = ',')]
     pub exclude_pallets: Vec<String>,
 
+    /// Comma separated list of `pallet::extrinsic` combinations that should not be run.
+    ///
+    /// Example: `frame_system::remark,pallet_balances::transfer_keep_alive`
+    #[arg(long, value_parser, num_args = 1.., value_delimiter = ',')]
+    pub exclude_extrinsics: Vec<String>,
+
     /// Run benchmarks for all pallets and extrinsics.
     ///
     /// This is equivalent to running `--pallet * --extrinsic *`.
@@ -133,6 +140,10 @@ pub struct PalletCmd {
     #[arg(long, default_value("max-encoded-len"), value_enum)]
     pub default_pov_mode: command::PovEstimationMode,
 
+    /// Ignore the error when PoV modes reference unknown storage items or pallets.
+    #[arg(long)]
+    pub ignore_unknown_pov_mode: bool,
+
     /// Set the heap pages while running benchmarks. If not set, the default value from the client
     /// is used.
     #[arg(long)]
diff --git a/substrate/utils/frame/benchmarking-cli/src/storage/cmd.rs b/substrate/utils/frame/benchmarking-cli/src/storage/cmd.rs
index da791c25d228f..68a81fb51524a 100644
--- a/substrate/utils/frame/benchmarking-cli/src/storage/cmd.rs
+++ b/substrate/utils/frame/benchmarking-cli/src/storage/cmd.rs
@@ -26,7 +26,7 @@ use sp_runtime::traits::{Block as BlockT, HashingFor};
 use sp_state_machine::Storage;
 use sp_storage::{ChildInfo, ChildType, PrefixedStorageKey, StateVersion};
 
-use clap::{Args, Parser};
+use clap::{Args, Parser, ValueEnum};
 use log::info;
 use rand::prelude::*;
 use serde::Serialize;
@@ -36,6 +36,16 @@ use std::{fmt::Debug, path::PathBuf, sync::Arc};
 use super::template::TemplateData;
 use crate::shared::{new_rng, HostInfoParams, WeightParams};
 
+/// The mode in which to run the storage benchmark.
+#[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Serialize, ValueEnum)]
+pub enum StorageBenchmarkMode {
+    /// Run the benchmark for block import.
+    #[default]
+    ImportBlock,
+    /// Run the benchmark for block validation.
+    ValidateBlock,
+}
+
 /// Benchmark the storage speed of a chain snapshot.
 #[derive(Debug, Parser)]
 pub struct StorageCmd {
@@ -129,13 +139,36 @@ pub struct StorageParams {
     #[arg(long, default_value = "false")]
     pub disable_pov_recorder: bool,
 
-    /// The batch size for the write benchmark.
+    /// The batch size for the read/write benchmark.
     ///
     /// Since the write size needs to also include the cost of computing the storage root, which is
    /// done once at the end of the block, the batch size is used to simulate multiple writes in a
    /// block.
     #[arg(long, default_value_t = 100_000)]
     pub batch_size: usize,
+
+    /// The mode in which to run the storage benchmark.
+    ///
+    /// The PoV recorder must be activated to provide a storage proof for block validation at runtime.
+    #[arg(long, value_enum, default_value_t = StorageBenchmarkMode::ImportBlock)]
+    pub mode: StorageBenchmarkMode,
+
+    /// Number of rounds to execute block validation during the benchmark.
+    ///
+    /// We need to run the benchmark several times to avoid fluctuations during runtime setup.
+    /// This is only used when `mode` is `validate-block`.
+    #[arg(long, default_value_t = 20)]
+    pub validate_block_rounds: u32,
+}
+
+impl StorageParams {
+    pub fn is_import_block_mode(&self) -> bool {
+        matches!(self.mode, StorageBenchmarkMode::ImportBlock)
+    }
+
+    pub fn is_validate_block_mode(&self) -> bool {
+        matches!(self.mode, StorageBenchmarkMode::ValidateBlock)
+    }
 }
 
 impl StorageCmd {
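`StorageBenchmarkMode` derives clap's `ValueEnum`, so the CLI accepts the kebab-case values `import-block` and `validate-block`. A minimal standalone sketch of that behavior, assuming clap 4 with the `derive` feature (names here are toy stand-ins):

```rust
// Demonstrates ValueEnum's variant-name-to-kebab-case mapping and
// `default_value_t` on a value enum, as used by the new `--mode` flag.
use clap::{Parser, ValueEnum};

#[derive(Default, Debug, Clone, Copy, PartialEq, Eq, ValueEnum)]
enum Mode {
    #[default]
    ImportBlock,
    ValidateBlock,
}

#[derive(Parser)]
struct Opts {
    #[arg(long, value_enum, default_value_t = Mode::ImportBlock)]
    mode: Mode,
}

fn main() {
    // `ValidateBlock` is spelled `validate-block` on the command line.
    let opts = Opts::parse_from(["bench", "--mode", "validate-block"]);
    assert_eq!(opts.mode, Mode::ValidateBlock);

    // Omitting the flag falls back to the declared default.
    let opts = Opts::parse_from(["bench"]);
    assert_eq!(opts.mode, Mode::ImportBlock);
}
```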
diff --git a/substrate/utils/frame/benchmarking-cli/src/storage/mod.rs b/substrate/utils/frame/benchmarking-cli/src/storage/mod.rs
index 188cc5e3d4e41..ef04fde9a8d51 100644
--- a/substrate/utils/frame/benchmarking-cli/src/storage/mod.rs
+++ b/substrate/utils/frame/benchmarking-cli/src/storage/mod.rs
@@ -21,3 +21,37 @@ pub mod template;
 pub mod write;
 
 pub use cmd::StorageCmd;
+
+/// Empirically, the maximum batch size for block validation should be no more than 10,000.
+/// Bigger sizes may cause problems with runtime memory allocation.
+pub(crate) const MAX_BATCH_SIZE_FOR_BLOCK_VALIDATION: usize = 10_000;
+
+pub(crate) fn get_wasm_module() -> Box<dyn sc_executor_common::wasm_runtime::WasmModule> {
+    let blob = sc_executor_common::runtime_blob::RuntimeBlob::uncompress_if_needed(
+        frame_storage_access_test_runtime::WASM_BINARY
+            .expect("You need to build the WASM binaries to run the benchmark!"),
+    )
+    .expect("Failed to create runtime blob");
+    let config = sc_executor_wasmtime::Config {
+        allow_missing_func_imports: true,
+        cache_path: None,
+        semantics: sc_executor_wasmtime::Semantics {
+            heap_alloc_strategy: sc_executor_common::wasm_runtime::HeapAllocStrategy::Dynamic {
+                maximum_pages: Some(4096),
+            },
+            instantiation_strategy: sc_executor::WasmtimeInstantiationStrategy::PoolingCopyOnWrite,
+            deterministic_stack_limit: None,
+            canonicalize_nans: false,
+            parallel_compilation: false,
+            wasm_multi_value: false,
+            wasm_bulk_memory: false,
+            wasm_reference_types: false,
+            wasm_simd: false,
+        },
+    };
+
+    Box::new(
+        sc_executor_wasmtime::create_runtime::<sp_io::SubstrateHostFunctions>(blob, config)
+            .expect("Unable to create wasm module."),
+    )
+}
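`get_wasm_module` compiles the test runtime through `sc_executor_wasmtime` so that `validate_block` can be called as an export. The sketch below shows the same compile-instantiate-call cycle with the plain `wasmtime` crate (an assumption for illustration, not the Substrate executor wrapper used here):

```rust
// Compile a wasm module once, instantiate it, and call one of its exports.
// Toy module and export name; the real code calls `validate_block` on the
// frame-storage-access-test-runtime binary.
use wasmtime::{Engine, Instance, Module, Store};

fn main() -> anyhow::Result<()> {
    let engine = Engine::default();
    // Trivial module exporting a `run` entry point that returns 1,
    // mirroring the `validate_block` success return value.
    let module = Module::new(
        &engine,
        r#"(module (func (export "run") (result i32) i32.const 1))"#,
    )?;
    let mut store = Store::new(&engine, ());
    let instance = Instance::new(&mut store, &module, &[])?;
    let run = instance.get_typed_func::<(), i32>(&mut store, "run")?;
    assert_eq!(run.call(&mut store, ())?, 1);
    Ok(())
}
```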
diff --git a/substrate/utils/frame/benchmarking-cli/src/storage/read.rs b/substrate/utils/frame/benchmarking-cli/src/storage/read.rs
index 126eb815f75b5..7a222d7167a70 100644
--- a/substrate/utils/frame/benchmarking-cli/src/storage/read.rs
+++ b/substrate/utils/frame/benchmarking-cli/src/storage/read.rs
@@ -15,16 +15,20 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use log::info;
+use codec::Encode;
+use frame_storage_access_test_runtime::StorageAccessParams;
+use log::{debug, info};
 use rand::prelude::*;
 use sc_cli::{Error, Result};
 use sc_client_api::{Backend as ClientBackend, StorageProvider, UsageProvider};
 use sp_api::CallApiAt;
 use sp_runtime::traits::{Block as BlockT, HashingFor, Header as HeaderT};
 use sp_state_machine::{backend::AsTrieBackend, Backend};
+use sp_storage::ChildInfo;
+use sp_trie::StorageProof;
 use std::{fmt::Debug, sync::Arc, time::Instant};
 
-use super::cmd::StorageCmd;
+use super::{cmd::StorageCmd, get_wasm_module, MAX_BATCH_SIZE_FOR_BLOCK_VALIDATION};
 use crate::shared::{new_rng, BenchRecord};
 
 impl StorageCmd {
@@ -41,6 +45,15 @@ impl StorageCmd {
         BA: ClientBackend<B>,
         <<B as BlockT>::Header as HeaderT>::Number: From<u32>,
     {
+        if self.params.is_validate_block_mode() && self.params.disable_pov_recorder {
+            return Err("PoV recorder must be activated to provide a storage proof for block validation at runtime. Remove `--disable-pov-recorder` from the command line.".into())
+        }
+        if self.params.is_validate_block_mode() &&
+            self.params.batch_size > MAX_BATCH_SIZE_FOR_BLOCK_VALIDATION
+        {
+            return Err(format!("Batch size is too large. This may cause problems with runtime memory allocation. Better set `--batch-size {}` or less.", MAX_BATCH_SIZE_FOR_BLOCK_VALIDATION).into())
+        }
+
         let mut record = BenchRecord::default();
         let best_hash = client.usage_info().chain.best_hash;
 
@@ -49,6 +62,9 @@ impl StorageCmd {
         let mut keys: Vec<_> = client.storage_keys(best_hash, None, None)?.collect();
         let (mut rng, _) = new_rng(None);
         keys.shuffle(&mut rng);
+        if keys.is_empty() {
+            return Err("Can't process benchmarking with empty storage".into())
+        }
 
         let mut child_nodes = Vec::new();
         // Interesting part here:
@@ -57,85 +73,191 @@ impl StorageCmd {
         // Read using the same TrieBackend and recorder for up to `batch_size` keys.
         // This would allow us to measure the amortized cost of reading a key.
-        let recorder = (!self.params.disable_pov_recorder).then(|| Default::default());
-
-        let mut state = client
+        let state = client
             .state_at(best_hash)
             .map_err(|_err| Error::Input("State not found".into()))?;
-        let mut as_trie_backend = state.as_trie_backend();
-        let mut backend = sp_state_machine::TrieBackendBuilder::wrap(&as_trie_backend)
-            .with_optional_recorder(recorder)
-            .build();
+        // We reassign the backend and recorder for every batch.
+        // Using a new recorder for every read vs using the same one for the entire batch
+        // produces significantly different results. Since in the real use case we use a
+        // single recorder per block, simulate the same behavior by creating a new
+        // recorder every batch size, so that the amortized cost of reading a key is
+        // measured in conditions closer to the real world.
+        let (mut backend, mut recorder) = self.create_backend::<B, C>(&state);
+
+        let mut read_in_batch = 0;
+        let mut on_validation_batch = vec![];
+        let mut on_validation_size = 0;
+
+        let last_key = keys.last().expect("Checked above to be non-empty");
 
         for key in keys.as_slice() {
             match (self.params.include_child_trees, self.is_child_key(key.clone().0)) {
                 (true, Some(info)) => {
                     // child tree key
                     for ck in client.child_storage_keys(best_hash, info.clone(), None, None)? {
-                        child_nodes.push((ck.clone(), info.clone()));
+                        child_nodes.push((ck, info.clone()));
                     }
                 },
                 _ => {
                     // regular key
+                    on_validation_batch.push((key.0.clone(), None));
                     let start = Instant::now();
-
                     let v = backend
                         .storage(key.0.as_ref())
                         .expect("Checked above to exist")
                         .ok_or("Value unexpectedly empty")?;
-                    record.append(v.len(), start.elapsed())?;
+                    on_validation_size += v.len();
+                    if self.params.is_import_block_mode() {
+                        record.append(v.len(), start.elapsed())?;
+                    }
                 },
             }
             read_in_batch += 1;
-            if read_in_batch >= self.params.batch_size {
-                // Using a new recorder for every read vs using the same for the entire batch
-                // produces significant different results. Since in the real use case we use a
-                // single recorder per block, simulate the same behavior by creating a new
-                // recorder every batch size, so that the amortized cost of reading a key is
-                // measured in conditions closer to the real world.
-                let recorder = (!self.params.disable_pov_recorder).then(|| Default::default());
-                state = client
-                    .state_at(best_hash)
-                    .map_err(|_err| Error::Input("State not found".to_string()))?;
-                as_trie_backend = state.as_trie_backend();
-                backend = sp_state_machine::TrieBackendBuilder::wrap(&as_trie_backend)
-                    .with_optional_recorder(recorder)
-                    .build();
+            let is_batch_full = read_in_batch >= self.params.batch_size || key == last_key;
+
+            // Read keys on block validation
+            if is_batch_full && self.params.is_validate_block_mode() {
+                let root = backend.root();
+                let storage_proof = recorder
+                    .clone()
+                    .map(|r| r.drain_storage_proof())
+                    .expect("Storage proof must exist for block validation");
+                let elapsed = measure_block_validation::<B>(
+                    *root,
+                    storage_proof,
+                    on_validation_batch.clone(),
+                    self.params.validate_block_rounds,
+                );
+                record.append(on_validation_size / on_validation_batch.len(), elapsed)?;
+
+                on_validation_batch = vec![];
+                on_validation_size = 0;
+            }
+
+            // Reload recorder
+            if is_batch_full {
+                (backend, recorder) = self.create_backend::<B, C>(&state);
                 read_in_batch = 0;
             }
         }
 
-        if self.params.include_child_trees {
+        if self.params.include_child_trees && !child_nodes.is_empty() {
             child_nodes.shuffle(&mut rng);
 
             info!("Reading {} child keys", child_nodes.len());
+            let (last_child_key, last_child_info) =
+                child_nodes.last().expect("Checked above to be non-empty");
 
             for (key, info) in child_nodes.as_slice() {
+                on_validation_batch.push((key.0.clone(), Some(info.clone())));
                 let start = Instant::now();
                 let v = backend
                     .child_storage(info, key.0.as_ref())
                     .expect("Checked above to exist")
                     .ok_or("Value unexpectedly empty")?;
-                record.append(v.len(), start.elapsed())?;
-
+                on_validation_size += v.len();
+                if self.params.is_import_block_mode() {
+                    record.append(v.len(), start.elapsed())?;
+                }
                 read_in_batch += 1;
-                if read_in_batch >= self.params.batch_size {
-                    // Using a new recorder for every read vs using the same for the entire batch
-                    // produces significant different results. Since in the real use case we use a
-                    // single recorder per block, simulate the same behavior by creating a new
-                    // recorder every batch size, so that the amortized cost of reading a key is
-                    // measured in conditions closer to the real world.
-                    let recorder = (!self.params.disable_pov_recorder).then(|| Default::default());
-                    state = client
-                        .state_at(best_hash)
-                        .map_err(|_err| Error::Input("State not found".to_string()))?;
-                    as_trie_backend = state.as_trie_backend();
-                    backend = sp_state_machine::TrieBackendBuilder::wrap(&as_trie_backend)
-                        .with_optional_recorder(recorder)
-                        .build();
+                let is_batch_full = read_in_batch >= self.params.batch_size ||
+                    (last_child_key == key && last_child_info == info);
+
+                // Read child keys on block validation
+                if is_batch_full && self.params.is_validate_block_mode() {
+                    let root = backend.root();
+                    let storage_proof = recorder
+                        .clone()
+                        .map(|r| r.drain_storage_proof())
+                        .expect("Storage proof must exist for block validation");
+                    let elapsed = measure_block_validation::<B>(
+                        *root,
+                        storage_proof,
+                        on_validation_batch.clone(),
+                        self.params.validate_block_rounds,
+                    );
+                    record.append(on_validation_size / on_validation_batch.len(), elapsed)?;
+
+                    on_validation_batch = vec![];
+                    on_validation_size = 0;
+                }
+
+                // Reload recorder
+                if is_batch_full {
+                    (backend, recorder) = self.create_backend::<B, C>(&state);
                     read_in_batch = 0;
                 }
             }
         }
+
         Ok(record)
     }
+
+    fn create_backend<'a, B, C>(
+        &self,
+        state: &'a C::StateBackend,
+    ) -> (
+        sp_state_machine::TrieBackend<
+            &'a <C::StateBackend as AsTrieBackend<HashingFor<B>>>::TrieBackendStorage,
+            HashingFor<B>,
+            &'a sp_trie::cache::LocalTrieCache<HashingFor<B>>,
+        >,
+        Option<sp_trie::recorder::Recorder<HashingFor<B>>>,
+    )
+    where
+        C: CallApiAt<B>,
+        B: BlockT + Debug,
+    {
+        let recorder = (!self.params.disable_pov_recorder).then(|| Default::default());
+        let backend = sp_state_machine::TrieBackendBuilder::wrap(state.as_trie_backend())
+            .with_optional_recorder(recorder.clone())
+            .build();
+
+        (backend, recorder)
+    }
+}
+
+fn measure_block_validation<B: BlockT>(
+    root: B::Hash,
+    storage_proof: StorageProof,
+    on_validation_batch: Vec<(Vec<u8>, Option<ChildInfo>)>,
+    rounds: u32,
+) -> std::time::Duration {
+    debug!(
+        "POV: len {:?} {:?}",
+        storage_proof.len(),
+        storage_proof.clone().encoded_compact_size::<HashingFor<B>>(root)
+    );
+
+    let batch_size = on_validation_batch.len();
+    let wasm_module = get_wasm_module();
+    let mut instance = wasm_module.new_instance().expect("Failed to create wasm instance");
+    let params = StorageAccessParams::<B>::new_read(root, storage_proof, on_validation_batch);
+    let dry_run_encoded = params.as_dry_run().encode();
+    let encoded = params.encode();
+
+    let mut durations_in_nanos = Vec::new();
+
+    for i in 1..=rounds {
+        info!("validate_block with {} keys, round {}/{}", batch_size, i, rounds);
+
+        // Dry run to get the time it takes without storage access
+        let dry_run_start = Instant::now();
+        instance
+            .call_export("validate_block", &dry_run_encoded)
+            .expect("Failed to call validate_block");
+        let dry_run_elapsed = dry_run_start.elapsed();
+        debug!("validate_block dry-run time {:?}", dry_run_elapsed);
+
+        let start = Instant::now();
+        instance
+            .call_export("validate_block", &encoded)
+            .expect("Failed to call validate_block");
+        let elapsed = start.elapsed();
+        debug!("validate_block time {:?}", elapsed);
+
+        durations_in_nanos
+            .push(elapsed.saturating_sub(dry_run_elapsed).as_nanos() as u64 / batch_size as u64);
+    }
+
+    std::time::Duration::from_nanos(
+        durations_in_nanos.iter().sum::<u64>() / durations_in_nanos.len() as u64,
+    )
}
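Both the read and write validation benchmarks use the same trick: run `validate_block` once as a dry run (decode the params and set up the backend, but skip the storage access), run it again with the access enabled, subtract, and average per key over several rounds. A generic sketch of that measurement loop, independent of the wasm executor:

```rust
use std::time::{Duration, Instant};

// Time the full run and the dry run, subtract the setup cost, and amortize
// over the batch. `dry_run`/`full_run` stand in for the two `call_export`
// invocations above.
fn measure_amortized<F: FnMut(), G: FnMut()>(
    mut dry_run: F,
    mut full_run: G,
    batch_size: u64,
    rounds: u32,
) -> Duration {
    let mut per_item_nanos = Vec::new();
    for _ in 0..rounds {
        let t0 = Instant::now();
        dry_run(); // setup cost only
        let dry = t0.elapsed();

        let t1 = Instant::now();
        full_run(); // setup + storage access
        let full = t1.elapsed();

        per_item_nanos.push(full.saturating_sub(dry).as_nanos() as u64 / batch_size);
    }
    Duration::from_nanos(per_item_nanos.iter().sum::<u64>() / per_item_nanos.len() as u64)
}

fn main() {
    let d = measure_amortized(|| {}, || std::thread::sleep(Duration::from_millis(1)), 10, 3);
    assert!(d > Duration::ZERO);
}
```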
diff --git a/substrate/utils/frame/benchmarking-cli/src/storage/template.rs b/substrate/utils/frame/benchmarking-cli/src/storage/template.rs
index 43aea75b47711..03802dedc38ac 100644
--- a/substrate/utils/frame/benchmarking-cli/src/storage/template.rs
+++ b/substrate/utils/frame/benchmarking-cli/src/storage/template.rs
@@ -74,7 +74,11 @@ impl TemplateData {
             .unwrap_or_default();
 
         Ok(TemplateData {
-            db_name: format!("{}", cfg.database),
+            db_name: if params.is_validate_block_mode() {
+                String::from("InMemoryDb")
+            } else {
+                format!("{}", cfg.database)
+            },
             runtime_name: cfg.chain_spec.name().into(),
             version: VERSION.into(),
             date: chrono::Utc::now().format("%Y-%m-%d (Y/M/D)").to_string(),
diff --git a/substrate/utils/frame/benchmarking-cli/src/storage/weights.hbs b/substrate/utils/frame/benchmarking-cli/src/storage/weights.hbs
index 135b18b193746..33e3eea2e7e21 100644
--- a/substrate/utils/frame/benchmarking-cli/src/storage/weights.hbs
+++ b/substrate/utils/frame/benchmarking-cli/src/storage/weights.hbs
@@ -22,7 +22,11 @@ pub mod constants {
     use sp_weights::RuntimeDbWeight;
 
     parameter_types! {
-        {{#if (eq db_name "ParityDb")}}
+        {{#if (eq db_name "InMemoryDb")}}
+        /// `InMemoryDb` weights are measured in the context of the validation functions.
+        /// To avoid submitting overweight blocks to the relay chain this is the configuration
+        /// parachains should use.
+        {{else if (eq db_name "ParityDb")}}
         /// `ParityDB` can be enabled with a feature flag, but is still experimental. These weights
         /// are available for brave runtime engineers who may want to try this out as default.
         {{else}}
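With `--mode validate-block` the template reports the database as `InMemoryDb`, and the weights template above gains a matching branch. The generated constants would look roughly like this (shape only; the local struct stands in for `sp_weights::RuntimeDbWeight` and the numbers are invented for the example):

```rust
// Illustrative shape of the generated weights file for `InMemoryDb`.
// Not real measurements; a parachain would regenerate these values.
pub mod constants {
    pub struct RuntimeDbWeight {
        pub read: u64,
        pub write: u64,
    }

    /// `InMemoryDb` weights are measured in the context of the validation functions.
    pub const IN_MEMORY_DB_WEIGHT: RuntimeDbWeight =
        RuntimeDbWeight { read: 8_000, write: 50_000 };
}

fn main() {
    // Writes also pay for the storage-root computation, so they dominate reads.
    assert!(constants::IN_MEMORY_DB_WEIGHT.write > constants::IN_MEMORY_DB_WEIGHT.read);
}
```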
diff --git a/substrate/utils/frame/benchmarking-cli/src/storage/write.rs b/substrate/utils/frame/benchmarking-cli/src/storage/write.rs
index 4a56ee3f9d8a1..fa818b847f124 100644
--- a/substrate/utils/frame/benchmarking-cli/src/storage/write.rs
+++ b/substrate/utils/frame/benchmarking-cli/src/storage/write.rs
@@ -15,6 +15,10 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+use codec::Encode;
+use frame_storage_access_test_runtime::StorageAccessParams;
+use log::{debug, info, trace, warn};
+use rand::prelude::*;
 use sc_cli::Result;
 use sc_client_api::{Backend as ClientBackend, StorageProvider, UsageProvider};
 use sc_client_db::{DbHash, DbState, DbStateBuilder};
@@ -22,23 +26,25 @@ use sp_blockchain::HeaderBackend;
 use sp_database::{ColumnId, Transaction};
 use sp_runtime::traits::{Block as BlockT, HashingFor, Header as HeaderT};
 use sp_state_machine::Backend as StateBackend;
-use sp_trie::PrefixedMemoryDB;
-
-use log::{info, trace};
-use rand::prelude::*;
 use sp_storage::{ChildInfo, StateVersion};
+use sp_trie::{recorder::Recorder, PrefixedMemoryDB};
 use std::{
     fmt::Debug,
     sync::Arc,
     time::{Duration, Instant},
 };
 
-use super::cmd::StorageCmd;
+use super::{cmd::StorageCmd, get_wasm_module, MAX_BATCH_SIZE_FOR_BLOCK_VALIDATION};
 use crate::shared::{new_rng, BenchRecord};
 
 impl StorageCmd {
     /// Benchmarks the time it takes to write a single Storage item.
+    ///
     /// Uses the latest state that is available for the given client.
+    ///
+    /// Unlike the read benchmark, where we read every single key, here we write a batch of keys
+    /// at once. Writing the remaining keys, which number far fewer than the batch size, can
+    /// dramatically distort the results. To avoid this, we skip them.
     pub(crate) fn bench_write<Block, BA, H, C>(
         &self,
         client: Arc<C>,
@@ -52,6 +58,15 @@ impl StorageCmd {
         BA: ClientBackend<Block>,
         C: UsageProvider<Block> + HeaderBackend<Block> + StorageProvider<Block, BA>,
     {
+        if self.params.is_validate_block_mode() && self.params.disable_pov_recorder {
+            return Err("PoV recorder must be activated to provide a storage proof for block validation at runtime. Remove `--disable-pov-recorder`.".into())
+        }
+        if self.params.is_validate_block_mode() &&
+            self.params.batch_size > MAX_BATCH_SIZE_FOR_BLOCK_VALIDATION
+        {
+            return Err(format!("Batch size is too large. This may cause problems with runtime memory allocation. Better set `--batch-size {}` or less.", MAX_BATCH_SIZE_FOR_BLOCK_VALIDATION).into())
+        }
+
         // Store the time that it took to write each value.
         let mut record = BenchRecord::default();
 
@@ -59,28 +74,26 @@ impl StorageCmd {
         let header = client.header(best_hash)?.ok_or("Header not found")?;
         let original_root = *header.state_root();
 
-        info!("Preparing keys from block {}", best_hash);
-        let build_trie_backend = |storage: Arc<
-            dyn sp_state_machine::Storage<HashingFor<Block>>,
-        >,
-                                  original_root,
-                                  enable_pov_recorder: bool| {
-            let pov_recorder = enable_pov_recorder.then(|| Default::default());
-
-            DbStateBuilder::<HashingFor<Block>>::new(storage.clone(), original_root)
-                .with_optional_cache(shared_trie_cache.as_ref().map(|c| c.local_cache_trusted()))
-                .with_optional_recorder(pov_recorder)
-                .build()
-        };
-
-        let trie =
-            build_trie_backend(storage.clone(), original_root, !self.params.disable_pov_recorder);
+        let (trie, _) = self.create_trie_backend::<Block, H>(
+            original_root,
+            &storage,
+            shared_trie_cache.as_ref(),
+        );
 
+        info!("Preparing keys from block {}", best_hash);
         // Load all KV pairs and randomly shuffle them.
         let mut kvs: Vec<_> = trie.pairs(Default::default())?.collect();
         let (mut rng, _) = new_rng(None);
         kvs.shuffle(&mut rng);
-        info!("Writing {} keys", kvs.len());
+        if kvs.is_empty() {
+            return Err("Can't process benchmarking with empty storage".into())
+        }
+
+        info!("Writing {} keys in batches of {}", kvs.len(), self.params.batch_size);
+        let remainder = kvs.len() % self.params.batch_size;
+        if self.params.is_validate_block_mode() && remainder != 0 {
+            info!("Remaining `{remainder}` keys will be skipped");
+        }
 
         let mut child_nodes = Vec::new();
         let mut batched_keys = Vec::new();
@@ -91,11 +104,10 @@ impl StorageCmd {
             let (k, original_v) = key_value?;
             match (self.params.include_child_trees, self.is_child_key(k.to_vec())) {
                 (true, Some(info)) => {
-                    let child_keys =
-                        client.child_storage_keys(best_hash, info.clone(), None, None)?;
-                    for ck in child_keys {
-                        child_nodes.push((ck.clone(), info.clone()));
-                    }
+                    let child_keys = client
+                        .child_storage_keys(best_hash, info.clone(), None, None)?
+                        .collect::<Vec<_>>();
+                    child_nodes.push((child_keys, info.clone()));
                 },
                 _ => {
                     // regular key
@@ -124,81 +136,248 @@ impl StorageCmd {
                         continue
                     }
 
-                    // For every batched write use a different trie instance and recorder, so we
-                    // don't benefit from past runs.
-                    let trie = build_trie_backend(
-                        storage.clone(),
-                        original_root,
-                        !self.params.disable_pov_recorder,
-                    );
                     // Write each value in one commit.
-                    let (size, duration) = measure_per_key_amortised_write_cost::<Block>(
-                        db.clone(),
-                        &trie,
-                        batched_keys.clone(),
-                        self.state_version(),
-                        state_col,
-                        None,
-                    )?;
+                    let (size, duration) = if self.params.is_validate_block_mode() {
+                        self.measure_per_key_amortised_validate_block_write_cost::<Block, H>(
+                            original_root,
+                            &storage,
+                            shared_trie_cache.as_ref(),
+                            batched_keys.clone(),
+                            None,
+                        )?
+                    } else {
+                        self.measure_per_key_amortised_import_block_write_cost::<Block, H>(
+                            original_root,
+                            &storage,
+                            shared_trie_cache.as_ref(),
+                            db.clone(),
+                            batched_keys.clone(),
+                            self.state_version(),
+                            state_col,
+                            None,
+                        )?
+                    };
                     record.append(size, duration)?;
                     batched_keys.clear();
                 },
             }
         }
 
-        if self.params.include_child_trees {
-            child_nodes.shuffle(&mut rng);
-            info!("Writing {} child keys", child_nodes.len());
+        if self.params.include_child_trees && !child_nodes.is_empty() {
+            info!("Writing {} child keys", child_nodes.iter().map(|(c, _)| c.len()).sum::<usize>());
+
+            for (mut child_keys, info) in child_nodes {
+                if child_keys.len() < self.params.batch_size {
+                    warn!(
+                        "{} child keys will be skipped because it's less than batch size",
+                        child_keys.len()
+                    );
+                    continue;
+                }
 
-            for (key, info) in child_nodes {
-                if let Some(original_v) = client
-                    .child_storage(best_hash, &info.clone(), &key)
-                    .expect("Checked above to exist")
-                {
-                    let mut new_v = vec![0; original_v.0.len()];
+                child_keys.shuffle(&mut rng);
 
-                    loop {
-                        rng.fill_bytes(&mut new_v[..]);
-                        if check_new_value::<Block>(
-                            db.clone(),
-                            &trie,
-                            &key.0,
-                            &new_v,
-                            self.state_version(),
-                            state_col,
-                            Some(&info),
-                        ) {
-                            break
+                for key in child_keys {
+                    if let Some(original_v) = client
+                        .child_storage(best_hash, &info, &key)
+                        .expect("Checked above to exist")
+                    {
+                        let mut new_v = vec![0; original_v.0.len()];
+
+                        loop {
+                            rng.fill_bytes(&mut new_v[..]);
+                            if check_new_value::<Block>(
+                                db.clone(),
+                                &trie,
+                                &key.0,
+                                &new_v,
+                                self.state_version(),
+                                state_col,
+                                Some(&info),
+                            ) {
+                                break
+                            }
+                        }
+                        batched_keys.push((key.0, new_v.to_vec()));
+                        if batched_keys.len() < self.params.batch_size {
+                            continue
                         }
-                    }
-                    batched_keys.push((key.0, new_v.to_vec()));
-                    if batched_keys.len() < self.params.batch_size {
-                        continue
+                        let (size, duration) = if self.params.is_validate_block_mode() {
+                            self.measure_per_key_amortised_validate_block_write_cost::<Block, H>(
+                                original_root,
+                                &storage,
+                                shared_trie_cache.as_ref(),
+                                batched_keys.clone(),
+                                None,
+                            )?
+                        } else {
+                            self.measure_per_key_amortised_import_block_write_cost::<Block, H>(
+                                original_root,
+                                &storage,
+                                shared_trie_cache.as_ref(),
+                                db.clone(),
+                                batched_keys.clone(),
+                                self.state_version(),
+                                state_col,
+                                Some(&info),
+                            )?
+                        };
+                        record.append(size, duration)?;
+                        batched_keys.clear();
                     }
-
-                    let trie = build_trie_backend(
-                        storage.clone(),
-                        original_root,
-                        !self.params.disable_pov_recorder,
-                    );
-
-                    let (size, duration) = measure_per_key_amortised_write_cost::<Block>(
-                        db.clone(),
-                        &trie,
-                        batched_keys.clone(),
-                        self.state_version(),
-                        state_col,
-                        Some(&info),
-                    )?;
-                    record.append(size, duration)?;
-                    batched_keys.clear();
                 }
             }
         }
 
         Ok(record)
     }
+
+    fn create_trie_backend<Block, H>(
+        &self,
+        original_root: Block::Hash,
+        storage: &Arc<dyn sp_state_machine::Storage<HashingFor<Block>>>,
+        shared_trie_cache: Option<&sp_trie::cache::SharedTrieCache<HashingFor<Block>>>,
+    ) -> (DbState<HashingFor<Block>>, Option<Recorder<HashingFor<Block>>>)
+    where
+        Block: BlockT<Header = H, Hash = DbHash> + Debug,
+        H: HeaderT<Hashing = HashingFor<Block>>,
+    {
+        let recorder = (!self.params.disable_pov_recorder).then(|| Default::default());
+        let trie = DbStateBuilder::<HashingFor<Block>>::new(storage.clone(), original_root)
+            .with_optional_cache(shared_trie_cache.map(|c| c.local_cache_trusted()))
+            .with_optional_recorder(recorder.clone())
+            .build();
+
+        (trie, recorder)
+    }
+
+    /// Measures the write benchmark.
+    /// If `child_info` exists, then this is a child tree key.
+    fn measure_per_key_amortised_import_block_write_cost<Block, H>(
+        &self,
+        original_root: Block::Hash,
+        storage: &Arc<dyn sp_state_machine::Storage<HashingFor<Block>>>,
+        shared_trie_cache: Option<&sp_trie::cache::SharedTrieCache<HashingFor<Block>>>,
+        db: Arc<dyn sp_database::Database<DbHash>>,
+        changes: Vec<(Vec<u8>, Vec<u8>)>,
+        version: StateVersion,
+        col: ColumnId,
+        child_info: Option<&ChildInfo>,
+    ) -> Result<(usize, Duration)>
+    where
+        Block: BlockT<Header = H, Hash = DbHash> + Debug,
+        H: HeaderT<Hashing = HashingFor<Block>>,
+    {
+        let batch_size = changes.len();
+        let average_len = changes.iter().map(|(_, v)| v.len()).sum::<usize>() / batch_size;
+        // For every batched write use a different trie instance and recorder, so we
+        // don't benefit from past runs.
+        let (trie, _recorder) =
+            self.create_trie_backend::<Block, H>(original_root, storage, shared_trie_cache);
+
+        let start = Instant::now();
+        // Create a TX that will modify the Trie in the DB and
+        // calculate the root hash of the Trie after the modification.
+        let replace = changes
+            .iter()
+            .map(|(key, new_v)| (key.as_ref(), Some(new_v.as_ref())))
+            .collect::<Vec<_>>();
+        let stx = match child_info {
+            Some(info) => trie.child_storage_root(info, replace.iter().cloned(), version).2,
+            None => trie.storage_root(replace.iter().cloned(), version).1,
+        };
+        // Only keep the insertions, since we do not want to benchmark pruning.
+        let tx = convert_tx::<Block>(db.clone(), stx.clone(), false, col);
+        db.commit(tx).map_err(|e| format!("Writing to the Database: {}", e))?;
+        let result = (average_len, start.elapsed() / batch_size as u32);
+
+        // Now undo the changes by removing what was added.
+        let tx = convert_tx::<Block>(db.clone(), stx.clone(), true, col);
+        db.commit(tx).map_err(|e| format!("Writing to the Database: {}", e))?;
+
+        Ok(result)
+    }
+
+    /// Measures the write benchmark on block validation.
+    /// If `child_info` exists, then this is a child tree key.
+    fn measure_per_key_amortised_validate_block_write_cost<Block, H>(
+        &self,
+        original_root: Block::Hash,
+        storage: &Arc<dyn sp_state_machine::Storage<HashingFor<Block>>>,
+        shared_trie_cache: Option<&sp_trie::cache::SharedTrieCache<HashingFor<Block>>>,
+        changes: Vec<(Vec<u8>, Vec<u8>)>,
+        maybe_child_info: Option<&ChildInfo>,
+    ) -> Result<(usize, Duration)>
+    where
+        Block: BlockT<Header = H, Hash = DbHash> + Debug,
+        H: HeaderT<Hashing = HashingFor<Block>>,
+    {
+        let batch_size = changes.len();
+        let average_len = changes.iter().map(|(_, v)| v.len()).sum::<usize>() / batch_size;
+        let (trie, recorder) =
+            self.create_trie_backend::<Block, H>(original_root, storage, shared_trie_cache);
+        for (key, _) in changes.iter() {
+            let _v = trie
+                .storage(key)
+                .expect("Checked above to exist")
+                .ok_or("Value unexpectedly empty")?;
+        }
+        let storage_proof = recorder
+            .map(|r| r.drain_storage_proof())
+            .expect("Storage proof must exist for block validation");
+        let root = trie.root();
+        debug!(
+            "POV: len {:?} {:?}",
+            storage_proof.len(),
+            storage_proof.clone().encoded_compact_size::<HashingFor<Block>>(*root)
+        );
+        let params = StorageAccessParams::<Block>::new_write(
+            *root,
+            storage_proof,
+            (changes, maybe_child_info.cloned()),
+        );
+
+        let mut durations_in_nanos = Vec::new();
+        let wasm_module = get_wasm_module();
+        let mut instance = wasm_module.new_instance().expect("Failed to create wasm instance");
+        let dry_run_encoded = params.as_dry_run().encode();
+        let encoded = params.encode();
+
+        for i in 1..=self.params.validate_block_rounds {
+            info!(
+                "validate_block with {} keys, round {}/{}",
+                batch_size, i, self.params.validate_block_rounds
+            );
+
+            // Dry run to get the time it takes without storage access
+            let dry_run_start = Instant::now();
+            instance
+                .call_export("validate_block", &dry_run_encoded)
+                .expect("Failed to call validate_block");
+            let dry_run_elapsed = dry_run_start.elapsed();
+            debug!("validate_block dry-run time {:?}", dry_run_elapsed);
+
+            let start = Instant::now();
+            instance
+                .call_export("validate_block", &encoded)
+                .expect("Failed to call validate_block");
+            let elapsed = start.elapsed();
+            debug!("validate_block time {:?}", elapsed);
+
+            durations_in_nanos.push(
+                elapsed.saturating_sub(dry_run_elapsed).as_nanos() as u64 / batch_size as u64,
+            );
+        }
+
+        let result = (
+            average_len,
+            std::time::Duration::from_nanos(
+                durations_in_nanos.iter().sum::<u64>() / durations_in_nanos.len() as u64,
+            ),
+        );
+
+        Ok(result)
+    }
 }
 
 /// Converts a Trie transaction into a DB transaction.
@@ -227,39 +406,6 @@ fn convert_tx(
     ret
 }
 
-/// Measures write benchmark
-/// if `child_info` exist then it means this is a child tree key
-fn measure_per_key_amortised_write_cost<Block: BlockT>(
-    db: Arc<dyn sp_database::Database<DbHash>>,
-    trie: &DbState<HashingFor<Block>>,
-    changes: Vec<(Vec<u8>, Vec<u8>)>,
-    version: StateVersion,
-    col: ColumnId,
-    child_info: Option<&ChildInfo>,
-) -> Result<(usize, Duration)> {
-    let start = Instant::now();
-    // Create a TX that will modify the Trie in the DB and
-    // calculate the root hash of the Trie after the modification.
-    let average_len = changes.iter().map(|(_, v)| v.len()).sum::<usize>() / changes.len();
-    let replace = changes
-        .iter()
-        .map(|(key, new_v)| (key.as_ref(), Some(new_v.as_ref())))
-        .collect::<Vec<_>>();
-    let stx = match child_info {
-        Some(info) => trie.child_storage_root(info, replace.iter().cloned(), version).2,
-        None => trie.storage_root(replace.iter().cloned(), version).1,
-    };
-    // Only the keep the insertions, since we do not want to benchmark pruning.
-    let tx = convert_tx::<Block>(db.clone(), stx.clone(), false, col);
-    db.commit(tx).map_err(|e| format!("Writing to the Database: {}", e))?;
-    let result = (average_len, start.elapsed() / changes.len() as u32);
-
-    // Now undo the changes by removing what was added.
-    let tx = convert_tx::<Block>(db.clone(), stx.clone(), true, col);
-    db.commit(tx).map_err(|e| format!("Writing to the Database: {}", e))?;
-    Ok(result)
-}
-
 /// Checks if a new value causes any collision in tree updates
 /// returns true if there is no collision
 /// if `child_info` exist then it means this is a child tree key
diff --git a/substrate/utils/frame/storage-access-test-runtime/Cargo.toml b/substrate/utils/frame/storage-access-test-runtime/Cargo.toml
new file mode 100644
index 0000000000000..5093653d2dc83
--- /dev/null
+++ b/substrate/utils/frame/storage-access-test-runtime/Cargo.toml
@@ -0,0 +1,42 @@
+[package]
+name = "frame-storage-access-test-runtime"
+description = "A runtime for testing storage access on block validation"
+version = "0.1.0"
+authors.workspace = true
+edition.workspace = true
+build = "build.rs"
+license = "Apache-2.0"
+homepage.workspace = true
+repository.workspace = true
+publish = true
+
+[lints]
+workspace = true
+
+[dependencies]
+codec = { features = ["derive"], workspace = true }
+cumulus-pallet-parachain-system = { workspace = true, optional = true }
+sp-core = { workspace = true }
+sp-runtime = { workspace = true }
+sp-state-machine = { workspace = true }
+sp-trie = { workspace = true }
+
+[build-dependencies]
+substrate-wasm-builder = { optional = true, workspace = true, default-features = true }
+
+[features]
+default = ["std"]
+no_std = []
+std = [
+    "codec/std",
+    "cumulus-pallet-parachain-system/std",
+    "sp-core/std",
+    "sp-runtime/std",
+    "sp-state-machine/std",
+    "sp-trie/std",
+    "substrate-wasm-builder",
+]
+runtime-benchmarks = [
+    "cumulus-pallet-parachain-system/runtime-benchmarks",
+    "sp-runtime/runtime-benchmarks",
+]
diff --git a/substrate/frame/revive/rpc/src/fee_history.rs b/substrate/utils/frame/storage-access-test-runtime/build.rs
similarity index 72%
rename from substrate/frame/revive/rpc/src/fee_history.rs
rename to substrate/utils/frame/storage-access-test-runtime/build.rs
index cf16213c18ce7..651f57388e0d0 100644
--- a/substrate/frame/revive/rpc/src/fee_history.rs
+++ b/substrate/utils/frame/storage-access-test-runtime/build.rs
@@ -15,13 +15,14 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-pub struct FeeHistoryCacheItem {
-    pub base_fee: u64,
-    pub gas_used_ratio: f64,
-    pub rewards: Vec,
-}
-
-pub struct FeeHistoryProvider {
-    pub client: Arc,
-    pub fee_history_cache: RwLock>,
+fn main() {
+    #[cfg(feature = "std")]
+    {
+        substrate_wasm_builder::WasmBuilder::new()
+            .with_current_project()
+            .export_heap_base()
+            .import_memory()
+            .disable_runtime_version_section_check()
+            .build();
+    }
 }
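The host side encodes `StorageAccessParams` with SCALE and the test runtime (next diff) decodes it inside `validate_block`. A minimal round-trip on a stand-in type, using `parity-scale-codec` under its conventional `codec` alias:

```rust
// SCALE encode-decode round trip, the same mechanism used to pass the
// benchmark params into the wasm runtime. `Params` is a toy stand-in for
// `StorageAccessParams`.
use codec::{Decode, Encode};

#[derive(Encode, Decode, PartialEq, Debug)]
struct Params {
    state_root: [u8; 32],
    is_dry_run: bool,
}

fn main() {
    let params = Params { state_root: [7u8; 32], is_dry_run: true };
    let encoded = params.encode();
    // The runtime sees only bytes and decodes them back into the struct.
    let decoded = Params::decode(&mut &encoded[..]).unwrap();
    assert_eq!(params, decoded);
}
```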
diff --git a/substrate/utils/frame/storage-access-test-runtime/src/lib.rs b/substrate/utils/frame/storage-access-test-runtime/src/lib.rs
new file mode 100644
index 0000000000000..4b1ab69b14268
--- /dev/null
+++ b/substrate/utils/frame/storage-access-test-runtime/src/lib.rs
@@ -0,0 +1,181 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Test runtime to benchmark storage access on block validation.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+extern crate alloc;
+
+use alloc::vec::Vec;
+use codec::{Decode, Encode};
+use sp_core::storage::ChildInfo;
+use sp_runtime::traits;
+use sp_trie::StorageProof;
+
+#[cfg(all(not(feature = "std"), feature = "runtime-benchmarks"))]
+use {
+    cumulus_pallet_parachain_system::validate_block::{
+        trie_cache::CacheProvider, trie_recorder::SizeOnlyRecorderProvider,
+    },
+    sp_core::storage::StateVersion,
+    sp_runtime::{generic, OpaqueExtrinsic},
+    sp_state_machine::{Backend, TrieBackendBuilder},
+};
+
+// Include the WASM binary
+#[cfg(feature = "std")]
+include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs"));
+
+/// Parameters for benchmarking storage access on block validation.
+///
+/// On dry-run, the storage access is not performed, to measure the cost of the runtime call alone.
+#[derive(Decode, Clone)]
+#[cfg_attr(feature = "std", derive(Encode))]
+pub struct StorageAccessParams<B: traits::Block> {
+    pub state_root: B::Hash,
+    pub storage_proof: StorageProof,
+    pub payload: StorageAccessPayload,
+    /// On dry-run, we don't read/write to the storage.
+    pub is_dry_run: bool,
+}
+
+/// Payload for benchmarking read and write operations on block validation.
+#[derive(Debug, Clone, Decode, Encode)]
+pub enum StorageAccessPayload {
+    // Storage keys with optional child info.
+    Read(Vec<(Vec<u8>, Option<ChildInfo>)>),
+    // Storage key-value pairs with optional child info.
+    Write((Vec<(Vec<u8>, Vec<u8>)>, Option<ChildInfo>)),
+}
+
+impl<B: traits::Block> StorageAccessParams<B> {
+    /// Create new params for reading from the storage.
+    pub fn new_read(
+        state_root: B::Hash,
+        storage_proof: StorageProof,
+        payload: Vec<(Vec<u8>, Option<ChildInfo>)>,
+    ) -> Self {
+        Self {
+            state_root,
+            storage_proof,
+            payload: StorageAccessPayload::Read(payload),
+            is_dry_run: false,
+        }
+    }
+
+    /// Create new params for writing to the storage.
+    pub fn new_write(
+        state_root: B::Hash,
+        storage_proof: StorageProof,
+        payload: (Vec<(Vec<u8>, Vec<u8>)>, Option<ChildInfo>),
+    ) -> Self {
+        Self {
+            state_root,
+            storage_proof,
+            payload: StorageAccessPayload::Write(payload),
+            is_dry_run: false,
+        }
+    }
+
+    /// Create a dry-run version of the params.
+    pub fn as_dry_run(&self) -> Self {
+        Self {
+            state_root: self.state_root,
+            storage_proof: self.storage_proof.clone(),
+            payload: self.payload.clone(),
+            is_dry_run: true,
+        }
+    }
+}
+
+/// Imitates `cumulus_pallet_parachain_system::validate_block::implementation::validate_block`.
+///
+/// Only performs the storage access; used to benchmark the storage access cost.
+#[doc(hidden)]
+#[cfg(all(not(feature = "std"), feature = "runtime-benchmarks"))]
+pub fn proceed_storage_access<B: traits::Block>(mut params: &[u8]) {
+    let StorageAccessParams { state_root, storage_proof, payload, is_dry_run } =
+        StorageAccessParams::<B>::decode(&mut params)
+            .expect("Invalid arguments to `validate_block`.");
+
+    let db = storage_proof.into_memory_db();
+    let recorder = SizeOnlyRecorderProvider::<traits::HashingFor<B>>::default();
+    let cache_provider = CacheProvider::new();
+    let backend = TrieBackendBuilder::new_with_cache(db, state_root, cache_provider)
+        .with_recorder(recorder)
+        .build();
+
+    if is_dry_run {
+        return;
+    }
+
+    match payload {
+        StorageAccessPayload::Read(keys) =>
+            for (key, maybe_child_info) in keys {
+                match maybe_child_info {
+                    Some(child_info) => {
+                        let _ = backend
+                            .child_storage(&child_info, key.as_ref())
+                            .expect("Key not found")
+                            .ok_or("Value unexpectedly empty");
+                    },
+                    None => {
+                        let _ = backend
+                            .storage(key.as_ref())
+                            .expect("Key not found")
+                            .ok_or("Value unexpectedly empty");
+                    },
+                }
+            },
+        StorageAccessPayload::Write((changes, maybe_child_info)) => {
+            let delta = changes.iter().map(|(key, value)| (key.as_ref(), Some(value.as_ref())));
+            match maybe_child_info {
+                Some(child_info) => {
+                    backend.child_storage_root(&child_info, delta, StateVersion::V1);
+                },
+                None => {
+                    backend.storage_root(delta, StateVersion::V1);
+                },
+            }
+        },
+    }
+}
+
+/// Wasm binary unwrapped. If built with `SKIP_WASM_BUILD`, the function panics.
+#[cfg(feature = "std")]
+pub fn wasm_binary_unwrap() -> &'static [u8] {
+    WASM_BINARY.expect(
+        "Development wasm binary is not available. Unset SKIP_WASM_BUILD and compile the runtime again.",
+    )
+}
+
+#[cfg(enable_alloc_error_handler)]
+#[alloc_error_handler]
+#[no_mangle]
+pub fn oom(_: core::alloc::Layout) -> ! {
+    core::intrinsics::abort();
+}
+
+#[cfg(all(not(feature = "std"), feature = "runtime-benchmarks"))]
+#[no_mangle]
+pub extern "C" fn validate_block(params: *const u8, len: usize) -> u64 {
+    type Block = generic::Block<generic::Header<u32, traits::BlakeTwo256>, OpaqueExtrinsic>;
+    let params = unsafe { alloc::slice::from_raw_parts(params, len) };
+    proceed_storage_access::<Block>(params);
+    1
+}
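The exported `validate_block` above receives a raw pointer/length pair from the host and rebuilds a byte slice before SCALE-decoding. A tiny safe-to-run demonstration of that calling convention (toy entry point, not the runtime's):

```rust
// Rebuild a byte slice from a pointer/length pair, as the wasm entry point
// does before decoding. The caller must guarantee the pointer is valid for
// `len` bytes, which the executor does when passing the encoded params.
fn entry(params: *const u8, len: usize) -> u64 {
    let bytes = unsafe { std::slice::from_raw_parts(params, len) };
    // Stand-in for `proceed_storage_access`: just consume the bytes.
    bytes.iter().map(|b| *b as u64).sum()
}

fn main() {
    let payload = vec![1u8, 2, 3];
    assert_eq!(entry(payload.as_ptr(), payload.len()), 6);
}
```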