diff --git a/.github/scripts/cmd/cmd.py b/.github/scripts/cmd/cmd.py index 2c017b7d0c3ef..e65f10a97b4e8 100755 --- a/.github/scripts/cmd/cmd.py +++ b/.github/scripts/cmd/cmd.py @@ -44,34 +44,6 @@ def setup_logging(): BENCH """ -bench_example = '''**Examples**: - Runs all benchmarks - %(prog)s - - Runs benchmarks for pallet_balances and pallet_multisig for all runtimes which have these pallets. **--quiet** makes it to output nothing to PR but reactions - %(prog)s --pallet pallet_balances pallet_xcm_benchmarks::generic --quiet - - Runs bench for all pallets for westend runtime and fails fast on first failed benchmark - %(prog)s --runtime westend --fail-fast - - Does not output anything and cleans up the previous bot's & author command triggering comments in PR - %(prog)s --runtime westend rococo --pallet pallet_balances pallet_multisig --quiet --clean -''' - -parser_bench = subparsers.add_parser('bench', help='Runs benchmarks (old CLI)', epilog=bench_example, formatter_class=argparse.RawDescriptionHelpFormatter) - -for arg, config in common_args.items(): - parser_bench.add_argument(arg, **config) - -parser_bench.add_argument('--runtime', help='Runtime(s) space separated', choices=runtimeNames, nargs='*', default=runtimeNames) -parser_bench.add_argument('--pallet', help='Pallet(s) space separated', nargs='*', default=[]) -parser_bench.add_argument('--fail-fast', help='Fail fast on first failed benchmark', action='store_true') - - -""" -BENCH OMNI -""" - bench_example = '''**Examples**: Runs all benchmarks %(prog)s @@ -86,14 +58,14 @@ def setup_logging(): %(prog)s --runtime westend rococo --pallet pallet_balances pallet_multisig --quiet --clean ''' -parser_bench_old = subparsers.add_parser('bench-omni', help='Runs benchmarks (frame omni bencher)', epilog=bench_example, formatter_class=argparse.RawDescriptionHelpFormatter) +parser_bench = subparsers.add_parser('bench', aliases=['bench-omni'], help='Runs benchmarks (frame omni bencher)', epilog=bench_example, 
formatter_class=argparse.RawDescriptionHelpFormatter) for arg, config in common_args.items(): - parser_bench_old.add_argument(arg, **config) + parser_bench.add_argument(arg, **config) -parser_bench_old.add_argument('--runtime', help='Runtime(s) space separated', choices=runtimeNames, nargs='*', default=runtimeNames) -parser_bench_old.add_argument('--pallet', help='Pallet(s) space separated', nargs='*', default=[]) -parser_bench_old.add_argument('--fail-fast', help='Fail fast on first failed benchmark', action='store_true') +parser_bench.add_argument('--runtime', help='Runtime(s) space separated', choices=runtimeNames, nargs='*', default=runtimeNames) +parser_bench.add_argument('--pallet', help='Pallet(s) space separated', nargs='*', default=[]) +parser_bench.add_argument('--fail-fast', help='Fail fast on first failed benchmark', action='store_true') """ @@ -127,7 +99,7 @@ def main(): print(f'args: {args}') - if args.command == 'bench-omni': + if args.command == 'bench' or args.command == 'bench-omni': runtime_pallets_map = {} failed_benchmarks = {} successful_benchmarks = {} @@ -140,11 +112,23 @@ def main(): runtimesMatrix = {x['name']: x for x in runtimesMatrix} print(f'Filtered out runtimes: {runtimesMatrix}') + compile_bencher = os.system(f"cargo install --path substrate/utils/frame/omni-bencher --locked --profile {profile}") + if compile_bencher != 0: + print_and_log('❌ Failed to compile frame-omni-bencher') + sys.exit(1) + # loop over remaining runtimes to collect available pallets for runtime in runtimesMatrix.values(): build_command = f"forklift cargo build -p {runtime['package']} --profile {profile} --features={runtime['bench_features']}" print(f'-- building "{runtime["name"]}" with `{build_command}`') - os.system(build_command) + build_status = os.system(build_command) + if build_status != 0: + print_and_log(f'❌ Failed to build {runtime["name"]}') + if args.fail_fast: + sys.exit(1) + else: + continue + print(f'-- listing pallets for benchmark for 
{runtime["name"]}') wasm_file = f"target/{profile}/wbuild/{runtime['package']}/{runtime['package'].replace('-', '_')}.wasm" list_command = f"frame-omni-bencher v1 benchmark pallet " \ @@ -219,12 +203,15 @@ def main(): # TODO: we can remove once all pallets in dev runtime are migrated to polkadot-sdk-frame try: uses_polkadot_sdk_frame = "true" in os.popen(f"cargo metadata --locked --format-version 1 --no-deps | jq -r '.packages[] | select(.name == \"{pallet.replace('_', '-')}\") | .dependencies | any(.name == \"polkadot-sdk-frame\")'").read() + print(f'uses_polkadot_sdk_frame: {uses_polkadot_sdk_frame}') # Empty output from the previous os.popen command except StopIteration: + print(f'Error: {pallet} not found in dev runtime') uses_polkadot_sdk_frame = False template = config['template'] if uses_polkadot_sdk_frame and re.match(r"frame-(:?umbrella-)?weight-template\.hbs", os.path.normpath(template).split(os.path.sep)[-1]): template = "substrate/.maintain/frame-umbrella-weight-template.hbs" + print(f'template: {template}') else: default_path = f"./{config['path']}/src/weights" xcm_path = f"./{config['path']}/src/weights/xcm" @@ -270,149 +257,6 @@ def main(): print_and_log('✅ Successful benchmarks of runtimes/pallets:') for runtime, pallets in successful_benchmarks.items(): print_and_log(f'-- {runtime}: {pallets}') - - if args.command == 'bench': - runtime_pallets_map = {} - failed_benchmarks = {} - successful_benchmarks = {} - - profile = "production" - - print(f'Provided runtimes: {args.runtime}') - # convert to mapped dict - runtimesMatrix = list(filter(lambda x: x['name'] in args.runtime, runtimesMatrix)) - runtimesMatrix = {x['name']: x for x in runtimesMatrix} - print(f'Filtered out runtimes: {runtimesMatrix}') - - # loop over remaining runtimes to collect available pallets - for runtime in runtimesMatrix.values(): - build_command = f"forklift cargo build -p {runtime['old_package']} --profile {profile} --features={runtime['bench_features']} --locked" - print(f'-- 
building {runtime["name"]} with `{build_command}`') - os.system(build_command) - - chain = runtime['name'] if runtime['name'] == 'dev' else f"{runtime['name']}-dev" - - machine_test = f"target/{profile}/{runtime['old_bin']} benchmark machine --chain={chain}" - print(f"Running machine test for `{machine_test}`") - os.system(machine_test) - - print(f'-- listing pallets for benchmark for {chain}') - list_command = f"target/{profile}/{runtime['old_bin']} " \ - f"benchmark pallet " \ - f"--no-csv-header " \ - f"--no-storage-info " \ - f"--no-min-squares " \ - f"--no-median-slopes " \ - f"--all " \ - f"--list " \ - f"--chain={chain}" - print(f'-- running: {list_command}') - output = os.popen(list_command).read() - raw_pallets = output.strip().split('\n') - - all_pallets = set() - for pallet in raw_pallets: - if pallet: - all_pallets.add(pallet.split(',')[0].strip()) - - pallets = list(all_pallets) - print(f'Pallets in {runtime["name"]}: {pallets}') - runtime_pallets_map[runtime['name']] = pallets - - print(f'\n') - - # filter out only the specified pallets from collected runtimes/pallets - if args.pallet: - print(f'Pallets: {args.pallet}') - new_pallets_map = {} - # keep only specified pallets if they exist in the runtime - for runtime in runtime_pallets_map: - if set(args.pallet).issubset(set(runtime_pallets_map[runtime])): - new_pallets_map[runtime] = args.pallet - - runtime_pallets_map = new_pallets_map - - print(f'Filtered out runtimes & pallets: {runtime_pallets_map}\n') - - if not runtime_pallets_map: - if args.pallet and not args.runtime: - print(f"No pallets {args.pallet} found in any runtime") - elif args.runtime and not args.pallet: - print(f"{args.runtime} runtime does not have any pallets") - elif args.runtime and args.pallet: - print(f"No pallets {args.pallet} found in {args.runtime}") - else: - print('No runtimes found') - sys.exit(1) - - for runtime in runtime_pallets_map: - for pallet in runtime_pallets_map[runtime]: - config = runtimesMatrix[runtime] - 
header_path = os.path.abspath(config['header']) - template = None - - chain = config['name'] if runtime == 'dev' else f"{config['name']}-dev" - - print(f'-- config: {config}') - if runtime == 'dev': - # to support sub-modules (https://github.com/paritytech/command-bot/issues/275) - search_manifest_path = f"cargo metadata --locked --format-version 1 --no-deps | jq -r '.packages[] | select(.name == \"{pallet.replace('_', '-')}\") | .manifest_path'" - print(f'-- running: {search_manifest_path}') - manifest_path = os.popen(search_manifest_path).read() - if not manifest_path: - print(f'-- pallet {pallet} not found in dev runtime') - if args.fail_fast: - print_and_log(f'Error: {pallet} not found in dev runtime') - sys.exit(1) - package_dir = os.path.dirname(manifest_path) - print(f'-- package_dir: {package_dir}') - print(f'-- manifest_path: {manifest_path}') - output_path = os.path.join(package_dir, "src", "weights.rs") - template = config['template'] - else: - default_path = f"./{config['path']}/src/weights" - xcm_path = f"./{config['path']}/src/weights/xcm" - output_path = default_path - if pallet.startswith("pallet_xcm_benchmarks"): - template = config['template'] - output_path = xcm_path - - print(f'-- benchmarking {pallet} in {runtime} into {output_path}') - cmd = f"target/{profile}/{config['old_bin']} benchmark pallet " \ - f"--extrinsic=* " \ - f"--chain={chain} " \ - f"--pallet={pallet} " \ - f"--header={header_path} " \ - f"--output={output_path} " \ - f"--wasm-execution=compiled " \ - f"--steps=50 " \ - f"--repeat=20 " \ - f"--heap-pages=4096 " \ - f"{f'--template={template} ' if template else ''}" \ - f"--no-storage-info --no-min-squares --no-median-slopes " - print(f'-- Running: {cmd} \n') - status = os.system(cmd) - - if status != 0 and args.fail_fast: - print_and_log(f'❌ Failed to benchmark {pallet} in {runtime}') - sys.exit(1) - - # Otherwise collect failed benchmarks and print them at the end - # push failed pallets to failed_benchmarks - if status != 0: 
- failed_benchmarks[f'{runtime}'] = failed_benchmarks.get(f'{runtime}', []) + [pallet] - else: - successful_benchmarks[f'{runtime}'] = successful_benchmarks.get(f'{runtime}', []) + [pallet] - - if failed_benchmarks: - print_and_log('❌ Failed benchmarks of runtimes/pallets:') - for runtime, pallets in failed_benchmarks.items(): - print_and_log(f'-- {runtime}: {pallets}') - - if successful_benchmarks: - print_and_log('✅ Successful benchmarks of runtimes/pallets:') - for runtime, pallets in successful_benchmarks.items(): - print_and_log(f'-- {runtime}: {pallets}') elif args.command == 'fmt': command = f"cargo +nightly fmt" diff --git a/.github/workflows/bench-all-runtimes.yml b/.github/workflows/bench-all-runtimes.yml index a24a7095d9801..fa36a6c249776 100644 --- a/.github/workflows/bench-all-runtimes.yml +++ b/.github/workflows/bench-all-runtimes.yml @@ -4,7 +4,11 @@ on: # schedule: # - cron: '0 1 * * 0' # weekly on Sunday night 01:00 UTC workflow_dispatch: - # pull_request: + inputs: + draft: + type: boolean + default: false + description: "Whether to create a draft PR" permissions: # allow the action to create a PR contents: write @@ -22,12 +26,18 @@ jobs: timeout-minutes: 30 outputs: runtime: ${{ steps.runtime.outputs.runtime }} + branch: ${{ steps.branch.outputs.branch }} + date: ${{ steps.branch.outputs.date }} container: image: ${{ needs.preflight.outputs.IMAGE }} name: Extract runtimes from matrix steps: - uses: actions/checkout@v4 - - id: runtime + with: + ref: master + + - name: Extract runtimes + id: runtime run: | RUNTIMES=$(jq '[.[] | select(.package != null)]' .github/workflows/runtimes-matrix.json) @@ -35,6 +45,20 @@ jobs: echo "runtime=$RUNTIMES" echo "runtime=$RUNTIMES" >> $GITHUB_OUTPUT + - name: Create branch + id: branch + run: | + DATE=$(date +'%Y-%m-%d-%s') + BRANCH="update-weights-weekly-$DATE" + # Fixes "detected dubious ownership" error in the ci + git config --global --add safe.directory $GITHUB_WORKSPACE + + git checkout -b $BRANCH + git 
push --set-upstream origin $BRANCH + + echo "date=$DATE" >> $GITHUB_OUTPUT + echo "branch=$BRANCH" >> $GITHUB_OUTPUT + run-frame-omni-bencher: needs: [preflight, runtime-matrix] runs-on: ${{ needs.preflight.outputs.RUNNER_WEIGHTS }} @@ -58,11 +82,12 @@ jobs: uses: actions/checkout@v4 with: fetch-depth: 0 - ref: master + ref: ${{ needs.runtime-matrix.outputs.branch }} # checkout always from the initially created branch to avoid conflicts - name: script id: required run: | + git --version # Fixes "detected dubious ownership" error in the ci git config --global --add safe.directory $GITHUB_WORKSPACE git remote -v @@ -94,21 +119,18 @@ jobs: apply-diff-commit: runs-on: ubuntu-latest - needs: [run-frame-omni-bencher] + needs: [runtime-matrix, run-frame-omni-bencher] steps: - name: Checkout uses: actions/checkout@v4 with: fetch-depth: 0 - ref: master + ref: ${{ needs.runtime-matrix.outputs.branch }} - name: Download all artifacts uses: actions/download-artifact@v4 with: - path: patches - - - name: Install subweight - run: cargo install subweight + path: patches # needs to be able to trigger CI - uses: actions/create-github-app-token@v1 @@ -120,28 +142,65 @@ jobs: - name: Apply diff and create PR env: GH_TOKEN: ${{ steps.generate_token.outputs.token }} + BRANCH: ${{ needs.runtime-matrix.outputs.branch }} + DATE: ${{ needs.runtime-matrix.outputs.date }} run: | - DATE=$(date +'%Y-%m-%d-%s') - BRANCH="update-weights-weekly-$DATE" - + git --version git config user.name "github-actions[bot]" git config user.email "41898282+github-actions[bot]@users.noreply.github.com" + + git status - git switch -c "$BRANCH" - + # Apply all patches for file in patches/diff-*/diff-*.patch; do if [ -f "$file" ] && [ -s "$file" ]; then echo "Applying $file" - git apply "$file" --unidiff-zero --allow-empty || echo "Failed to apply $file" + # using --3way and --ours for conflicts resolution. 
Requires git 2.47+ + git apply "$file" --unidiff-zero --allow-empty --3way --ours || echo "Failed to apply $file" else echo "Skipping empty or non-existent patch file: $file" fi done + rm -rf patches + + # Get release tags from 1 and 3 months ago + ONE_MONTH_AGO=$(date -d "1 month ago" +%Y-%m-%d) + THREE_MONTHS_AGO=$(date -d "3 months ago" +%Y-%m-%d) + + # Get tags with their dates + ONE_MONTH_INFO=$(git for-each-ref --sort=-creatordate --format '%(refname:short)|%(creatordate:iso-strict-local)' 'refs/tags/polkadot-v*' | awk -v date="$ONE_MONTH_AGO" -F'|' '$2 <= date {print $0; exit}') + THREE_MONTHS_INFO=$(git for-each-ref --sort=-creatordate --format '%(refname:short)|%(creatordate:iso-strict-local)' 'refs/tags/polkadot-v*' | awk -v date="$THREE_MONTHS_AGO" -F'|' '$2 <= date {print $0; exit}') + + # Split into tag and date + ONE_MONTH_TAG=$(echo "$ONE_MONTH_INFO" | cut -d'|' -f1) + ONE_MONTH_DATE=$(echo "$ONE_MONTH_INFO" | cut -d'|' -f2 | cut -d'T' -f1) + THREE_MONTHS_TAG=$(echo "$THREE_MONTHS_INFO" | cut -d'|' -f1) + THREE_MONTHS_DATE=$(echo "$THREE_MONTHS_INFO" | cut -d'|' -f2 | cut -d'T' -f1) + + # Base URL for Subweight comparisons + BASE_URL="https://weights.tasty.limo/compare?repo=polkadot-sdk&threshold=5&path_pattern=.%2F**%2Fweights%2F**%2F*.rs%2C.%2F**%2Fweights.rs&method=asymptotic&ignore_errors=true&unit=time" + + # Generate comparison links + MASTER_LINK="${BASE_URL}&old=master&new=${BRANCH}" + ONE_MONTH_LINK="${BASE_URL}&old=${ONE_MONTH_TAG}&new=${BRANCH}" + THREE_MONTHS_LINK="${BASE_URL}&old=${THREE_MONTHS_TAG}&new=${BRANCH}" + + # Create PR body with all links in a temporary file + cat > /tmp/pr_body.md << EOF + Auto-update of all weights for ${DATE}. + + Subweight results: + - [now vs master](${MASTER_LINK}) + - [now vs ${ONE_MONTH_TAG} (${ONE_MONTH_DATE})](${ONE_MONTH_LINK}) + - [now vs ${THREE_MONTHS_TAG} (${THREE_MONTHS_DATE})](${THREE_MONTHS_LINK}) + EOF git add . 
git commit -m "Update all weights weekly for $DATE" git push --set-upstream origin "$BRANCH" + + MAYBE_DRAFT=${{ inputs.draft && '--draft' || '' }} PR_TITLE="Auto-update of all weights for $DATE" gh pr create \ @@ -150,16 +209,6 @@ jobs: --base "master" \ --reviewer paritytech/ci \ --reviewer paritytech/release-engineering \ - --draft \ + $MAYBE_DRAFT \ --label "R0-silent" \ - --body "$PR_TITLE" - - subweight compare commits \ - --path-pattern "./**/weights/**/*.rs,./**/weights.rs" \ - --method asymptotic \ - --format markdown \ - --no-color \ - --change added changed \ - --ignore-errors \ - --threshold 2 \ - origin/master $BRANCH \ No newline at end of file + --body "$(cat /tmp/pr_body.md)" \ No newline at end of file diff --git a/.github/workflows/check-runtime-migration.yml b/.github/workflows/check-runtime-migration.yml index 9866ae18b98ac..e935f1cb44981 100644 --- a/.github/workflows/check-runtime-migration.yml +++ b/.github/workflows/check-runtime-migration.yml @@ -16,6 +16,8 @@ concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true +permissions: {} + jobs: preflight: uses: ./.github/workflows/reusable-preflight.yml diff --git a/.github/workflows/check-semver.yml b/.github/workflows/check-semver.yml index 43c70d6abc78b..df1a1c8be6038 100644 --- a/.github/workflows/check-semver.yml +++ b/.github/workflows/check-semver.yml @@ -76,6 +76,7 @@ jobs: if: ${{ !contains(github.event.pull_request.labels.*.name, 'R0-silent') }} run: | rustup default $TOOLCHAIN + rustup target add wasm32-unknown-unknown --toolchain $TOOLCHAIN rustup component add rust-src --toolchain $TOOLCHAIN - name: install parity-publish diff --git a/.github/workflows/cmd.yml b/.github/workflows/cmd.yml index 247fc34f1b18d..44a9a9f061193 100644 --- a/.github/workflows/cmd.yml +++ b/.github/workflows/cmd.yml @@ -344,11 +344,6 @@ jobs: else echo "arg=" >> $GITHUB_OUTPUT fi - - - name: Install dependencies for bench - if: 
startsWith(needs.get-pr-info.outputs.CMD, 'bench') - run: | - cargo install --path substrate/utils/frame/omni-bencher --locked --profile production - name: Run cmd id: cmd diff --git a/.github/workflows/runtimes-matrix.json b/.github/workflows/runtimes-matrix.json index ff16b7397247f..747b2bb4ac8fb 100644 --- a/.github/workflows/runtimes-matrix.json +++ b/.github/workflows/runtimes-matrix.json @@ -8,8 +8,6 @@ "bench_features": "runtime-benchmarks", "bench_flags": "--genesis-builder-policy=none --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic,pallet_nomination_pools,pallet_remark,pallet_transaction_storage", "uri": null, - "old_package": "staging-node-cli", - "old_bin": "substrate-node", "is_relay": false }, { @@ -21,8 +19,6 @@ "bench_flags": "", "bench_features": "runtime-benchmarks", "uri": "wss://try-runtime-westend.polkadot.io:443", - "old_package": "polkadot", - "old_bin": "polkadot", "is_relay": true }, { @@ -34,8 +30,6 @@ "bench_features": "runtime-benchmarks", "bench_flags": "", "uri": "wss://try-runtime-rococo.polkadot.io:443", - "old_package": "polkadot", - "old_bin": "polkadot", "is_relay": true }, { @@ -47,8 +41,6 @@ "bench_features": "runtime-benchmarks", "bench_flags": "", "uri": "wss://westend-asset-hub-rpc.polkadot.io:443", - "old_package": "polkadot-parachain-bin", - "old_bin": "polkadot-parachain", "is_relay": false }, { @@ -60,8 +52,6 @@ "bench_features": "runtime-benchmarks", "bench_flags": "", "uri": "wss://rococo-asset-hub-rpc.polkadot.io:443", - "old_package": "polkadot-parachain-bin", - "old_bin": "polkadot-parachain", "is_relay": false }, { @@ -73,8 +63,6 @@ "bench_features": "runtime-benchmarks", "bench_flags": "", "uri": "wss://rococo-bridge-hub-rpc.polkadot.io:443", - "old_package": "polkadot-parachain-bin", - "old_bin": "polkadot-parachain", "is_relay": false }, { @@ -86,8 +74,6 @@ "bench_features": "runtime-benchmarks", "bench_flags": "", "uri": "wss://westend-bridge-hub-rpc.polkadot.io:443", - 
"old_package": "polkadot-parachain-bin", - "old_bin": "polkadot-parachain", "is_relay": false }, { @@ -99,8 +85,6 @@ "bench_features": "runtime-benchmarks", "bench_flags": "", "uri": "wss://westend-collectives-rpc.polkadot.io:443", - "old_package": "polkadot-parachain-bin", - "old_bin": "polkadot-parachain", "is_relay": false }, { @@ -112,8 +96,6 @@ "bench_features": "runtime-benchmarks", "bench_flags": "--genesis-builder-policy=none --exclude-pallets=pallet_xcm", "uri": "wss://rococo-contracts-rpc.polkadot.io:443", - "old_package": "polkadot-parachain-bin", - "old_bin": "polkadot-parachain", "is_relay": false }, { @@ -125,8 +107,6 @@ "bench_features": "runtime-benchmarks", "bench_flags": "--genesis-builder-policy=none --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic", "uri": "wss://rococo-coretime-rpc.polkadot.io:443", - "old_package": "polkadot-parachain-bin", - "old_bin": "polkadot-parachain", "is_relay": false }, { @@ -138,8 +118,6 @@ "bench_features": "runtime-benchmarks", "bench_flags": "--genesis-builder-policy=none --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic", "uri": "wss://westend-coretime-rpc.polkadot.io:443", - "old_package": "polkadot-parachain-bin", - "old_bin": "polkadot-parachain", "is_relay": false }, { @@ -151,8 +129,6 @@ "bench_features": "runtime-benchmarks", "bench_flags": "--genesis-builder-policy=none", "uri": null, - "old_package": "polkadot-parachain-bin", - "old_bin": "polkadot-parachain", "is_relay": false }, { @@ -164,8 +140,6 @@ "bench_features": "runtime-benchmarks", "bench_flags": "--genesis-builder-policy=none --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic", "uri": "wss://rococo-people-rpc.polkadot.io:443", - "old_package": "polkadot-parachain-bin", - "old_bin": "polkadot-parachain", "is_relay": false }, { @@ -177,8 +151,6 @@ "bench_features": "runtime-benchmarks", "bench_flags": 
"--genesis-builder-policy=none --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic", "uri": "wss://westend-people-rpc.polkadot.io:443", - "old_package": "polkadot-parachain-bin", - "old_bin": "polkadot-parachain", "is_relay": false } ] diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml index 1fddb697de008..25ff7feb78fdc 100644 --- a/.gitlab/pipeline/zombienet/polkadot.yml +++ b/.gitlab/pipeline/zombienet/polkadot.yml @@ -456,3 +456,19 @@ zombienet-polkadot-functional-async-backing-6-seconds-rate: - unset NEXTEST_FAILURE_OUTPUT - unset NEXTEST_SUCCESS_OUTPUT - cargo nextest run --archive-file ./artifacts/polkadot-zombienet-tests.tar.zst --no-capture -- functional::async_backing_6_seconds_rate::async_backing_6_seconds_rate_test + +zombienet-polkadot-functional-duplicate-collations: + extends: + - .zombienet-polkadot-common + needs: + - job: build-polkadot-zombienet-tests + artifacts: true + before_script: + - !reference [ ".zombienet-polkadot-common", "before_script" ] + - export POLKADOT_IMAGE="${ZOMBIENET_INTEGRATION_TEST_IMAGE}" + - export X_INFRA_INSTANCE=spot # use spot by default + script: + # we want to use `--no-capture` in zombienet tests. 
+ - unset NEXTEST_FAILURE_OUTPUT + - unset NEXTEST_SUCCESS_OUTPUT + - cargo nextest run --archive-file ./artifacts/polkadot-zombienet-tests.tar.zst --no-capture -- functional::duplicate_collations::duplicate_collations_test diff --git a/Cargo.lock b/Cargo.lock index 73f4e104de504..30aa07dd6e586 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -36,12 +36,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" -[[package]] -name = "adler2" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" - [[package]] name = "adler32" version = "1.2.0" @@ -118,9 +112,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.1.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +checksum = "6748e8def348ed4d14996fa801f4122cd763fff530258cdc03f64b25f89d3a5a" dependencies = [ "memchr", ] @@ -369,24 +363,23 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.18" +version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" +checksum = "6e2e1ebcb11de5c03c67de28a7df593d32191b44939c482e97702baaaa6ab6a5" dependencies = [ "anstyle", "anstyle-parse", "anstyle-query", "anstyle-wincon", "colorchoice", - "is_terminal_polyfill", "utf8parse", ] [[package]] name = "anstyle" -version = "1.0.10" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" +checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" [[package]] name = "anstyle-parse" @@ -408,12 +401,12 @@ dependencies = [ [[package]] name = "anstyle-wincon" -version 
= "3.0.6" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2109dbce0e72be3ec00bed26e6a7479ca384ad226efdd66db8fa2e3a38c83125" +checksum = "f0699d10d2f4d628a98ee7b57b289abbc98ff3bad977cb3152709d4bf2330628" dependencies = [ "anstyle", - "windows-sys 0.59.0", + "windows-sys 0.48.0", ] [[package]] @@ -1685,7 +1678,7 @@ dependencies = [ "cc", "cfg-if", "libc", - "miniz_oxide 0.7.1", + "miniz_oxide", "object 0.32.2", "rustc-demangle", ] @@ -2647,7 +2640,6 @@ dependencies = [ "pallet-bridge-messages 0.7.0", "pallet-message-queue 31.0.0", "pallet-xcm 7.0.0", - "pallet-xcm-bridge-hub 0.2.0", "parachains-common 7.0.0", "parity-scale-codec", "rococo-system-emulated-network", @@ -2780,7 +2772,6 @@ dependencies = [ "bp-relayers 0.7.0", "bp-runtime 0.7.0", "bp-test-utils 0.7.0", - "bp-xcm-bridge-hub 0.2.0", "cumulus-pallet-parachain-system 0.7.0", "cumulus-pallet-xcmp-queue 0.7.0", "frame-support 28.0.0", @@ -2803,6 +2794,7 @@ dependencies = [ "sp-io 30.0.0", "sp-keyring 31.0.0", "sp-runtime 31.0.1", + "sp-std 14.0.0", "sp-tracing 16.0.0", "staging-xcm 7.0.0", "staging-xcm-builder 7.0.0", @@ -2886,7 +2878,6 @@ dependencies = [ "pallet-bridge-messages 0.7.0", "pallet-message-queue 31.0.0", "pallet-xcm 7.0.0", - "pallet-xcm-bridge-hub 0.2.0", "parachains-common 7.0.0", "parity-scale-codec", "rococo-westend-system-emulated-network", @@ -3084,12 +3075,12 @@ dependencies = [ [[package]] name = "bstr" -version = "1.11.3" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "531a9155a481e2ee699d4f98f43c0ca4ff8ee1bfd55c31e9e98fb29d2b176fe0" +checksum = "6798148dccfbff0fae41c7574d2fa8f1ef3492fba0face179de5d8d447d67b05" dependencies = [ "memchr", - "regex-automata 0.4.8", + "regex-automata 0.3.6", "serde", ] @@ -3192,9 +3183,9 @@ dependencies = [ [[package]] name = "cargo-platform" -version = "0.1.9" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e35af189006b9c0f00a064685c727031e3ed2d8020f7ba284d78cc2671bd36ea" +checksum = "2cfa25e60aea747ec7e1124f238816749faa93759c6ff5b31f1ccdda137f4479" dependencies = [ "serde", ] @@ -3207,7 +3198,7 @@ checksum = "eee4243f1f26fc7a42710e7439c149e2b10b05472f88090acce52632f231a73a" dependencies = [ "camino", "cargo-platform", - "semver 1.0.24", + "semver 1.0.18", "serde", "serde_json", "thiserror", @@ -3492,12 +3483,12 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.26" +version = "4.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8eb5e908ef3a6efbe1ed62520fb7287959888c88485abe072543190ecc66783" +checksum = "0fbb260a053428790f3de475e304ff84cdbc4face759ea7a3e64c1edd938a7fc" dependencies = [ "clap_builder", - "clap_derive 4.5.24", + "clap_derive 4.5.13", ] [[package]] @@ -3511,24 +3502,24 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.26" +version = "4.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96b01801b5fc6a0a232407abc821660c9c6d25a1cafc0d4f85f29fb8d9afc121" +checksum = "64b17d7ea74e9f833c7dbf2cbe4fb12ff26783eda4782a8975b72f895c9b4d99" dependencies = [ "anstream", "anstyle", - "clap_lex 0.7.4", + "clap_lex 0.7.0", "strsim 0.11.1", "terminal_size", ] [[package]] name = "clap_complete" -version = "4.5.42" +version = "4.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33a7e468e750fa4b6be660e8b5651ad47372e8fb114030b594c2d75d48c5ffd0" +checksum = "aa3c596da3cf0983427b0df0dba359df9182c13bd5b519b585a482b0c351f4e8" dependencies = [ - "clap 4.5.26", + "clap 4.5.13", ] [[package]] @@ -3546,9 +3537,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.24" +version = "4.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54b755194d6389280185988721fffba69495eed5ee9feeee9a599b53db80318c" +checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0" dependencies = [ "heck 
0.5.0", "proc-macro2 1.0.86", @@ -3567,9 +3558,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.7.4" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" +checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" [[package]] name = "cmd_lib" @@ -3756,23 +3747,23 @@ dependencies = [ [[package]] name = "color-print" -version = "0.3.7" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3aa954171903797d5623e047d9ab69d91b493657917bdfb8c2c80ecaf9cdb6f4" +checksum = "f2a5e6504ed8648554968650feecea00557a3476bc040d0ffc33080e66b646d0" dependencies = [ "color-print-proc-macro", ] [[package]] name = "color-print-proc-macro" -version = "0.3.7" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "692186b5ebe54007e45a59aea47ece9eb4108e141326c304cdc91699a7118a22" +checksum = "d51beaa537d73d2d1ff34ee70bc095f170420ab2ec5d687ecd3ec2b0d092514b" dependencies = [ "nom", "proc-macro2 1.0.86", "quote 1.0.37", - "syn 2.0.87", + "syn 1.0.109", ] [[package]] @@ -4437,7 +4428,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.5.26", + "clap 4.5.13", "criterion-plot", "futures", "is-terminal", @@ -4582,7 +4573,7 @@ dependencies = [ name = "cumulus-client-cli" version = "0.7.0" dependencies = [ - "clap 4.5.26", + "clap 4.5.13", "parity-scale-codec", "sc-chain-spec", "sc-cli", @@ -5112,7 +5103,7 @@ version = "1.0.0" dependencies = [ "cumulus-primitives-proof-size-hostfunction 0.2.0", "cumulus-primitives-storage-weight-reclaim 1.0.0", - "derivative", + "derive-where", "docify", "frame-benchmarking 28.0.0", "frame-support 28.0.0", @@ -5242,7 +5233,7 @@ name = "cumulus-pov-validator" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.26", + "clap 4.5.13", "parity-scale-codec", "polkadot-node-primitives", "polkadot-parachain-primitives 6.0.0", @@ 
-5680,7 +5671,7 @@ name = "cumulus-test-service" version = "0.1.0" dependencies = [ "async-trait", - "clap 4.5.26", + "clap 4.5.13", "criterion", "cumulus-client-cli", "cumulus-client-collator", @@ -5770,9 +5761,9 @@ dependencies = [ [[package]] name = "curl-sys" -version = "0.4.78+curl-8.11.0" +version = "0.4.72+curl-8.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8eec768341c5c7789611ae51cf6c459099f22e64a5d5d0ce4892434e33821eaf" +checksum = "29cbdc8314c447d11e8fd156dcdd031d9e02a7a976163e396b548c03153bc9ea" dependencies = [ "cc", "libc", @@ -6923,14 +6914,14 @@ dependencies = [ [[package]] name = "filetime" -version = "0.2.25" +version = "0.2.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35c0522e981e68cbfa8c3f978441a5f34b30b96e146b33cd3359176b50fe8586" +checksum = "d4029edd3e734da6fe05b6cd7bd2960760a616bd2ddd0d59a0124746d6272af0" dependencies = [ "cfg-if", "libc", - "libredox", - "windows-sys 0.59.0", + "redox_syscall 0.3.5", + "windows-sys 0.48.0", ] [[package]] @@ -7007,12 +6998,12 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flate2" -version = "1.0.35" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c936bfdafb507ebbf50b8074c54fa31c5be9a1e7e5f467dd659697041407d07c" +checksum = "c6c98ee8095e9d1dcbf2fcc6d95acccb90d1c81db1e44725c6a984b1dbdfb010" dependencies = [ "crc32fast", - "miniz_oxide 0.8.2", + "miniz_oxide", ] [[package]] @@ -7165,7 +7156,7 @@ dependencies = [ "Inflector", "array-bytes", "chrono", - "clap 4.5.26", + "clap 4.5.13", "comfy-table", "cumulus-client-parachain-inherent", "cumulus-primitives-proof-size-hostfunction 0.2.0", @@ -7331,7 +7322,7 @@ dependencies = [ name = "frame-election-solution-type-fuzzer" version = "2.0.0-alpha.5" dependencies = [ - "clap 4.5.26", + "clap 4.5.13", "frame-election-provider-solution-type 13.0.0", "frame-election-provider-support 28.0.0", 
"frame-support 28.0.0", @@ -7464,7 +7455,7 @@ name = "frame-omni-bencher" version = "0.1.0" dependencies = [ "assert_cmd", - "clap 4.5.26", + "clap 4.5.13", "cumulus-primitives-proof-size-hostfunction 0.2.0", "cumulus-test-runtime", "frame-benchmarking-cli", @@ -8715,7 +8706,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.9", + "socket2 0.5.7", "tokio", "tower-service", "tracing", @@ -9179,12 +9170,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "is_terminal_polyfill" -version = "1.70.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" - [[package]] name = "isahc" version = "1.7.2" @@ -10276,17 +10261,6 @@ dependencies = [ "yamux 0.13.3", ] -[[package]] -name = "libredox" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" -dependencies = [ - "bitflags 2.6.0", - "libc", - "redox_syscall 0.5.8", -] - [[package]] name = "librocksdb-sys" version = "0.11.0+8.1.1" @@ -10363,9 +10337,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.21" +version = "1.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df9b68e50e6e0b26f672573834882eb57759f6db9b3be2ea3c35c91188bb4eaa" +checksum = "d97137b25e321a73eef1418d1d5d2eda4d77e12813f8e6dead84bc52c5870a7b" dependencies = [ "cc", "libc", @@ -10834,7 +10808,7 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" name = "minimal-template-node" version = "0.0.0" dependencies = [ - "clap 4.5.26", + "clap 4.5.13", "docify", "futures", "futures-timer", @@ -10864,15 +10838,6 @@ dependencies = [ "adler", ] -[[package]] -name = "miniz_oxide" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ffbe83022cedc1d264172192511ae958937694cd57ce297164951b8b3568394" -dependencies = [ - 
"adler2", -] - [[package]] name = "mio" version = "1.0.2" @@ -11350,7 +11315,7 @@ version = "0.9.0-dev" dependencies = [ "array-bytes", "async-trait", - "clap 4.5.26", + "clap 4.5.13", "derive_more 0.99.17", "fs_extra", "futures", @@ -11426,7 +11391,7 @@ dependencies = [ name = "node-runtime-generate-bags" version = "3.0.0" dependencies = [ - "clap 4.5.26", + "clap 4.5.13", "generate-bags", "kitchensink-runtime", ] @@ -11435,7 +11400,7 @@ dependencies = [ name = "node-template-release" version = "3.0.0" dependencies = [ - "clap 4.5.26", + "clap 4.5.13", "flate2", "fs_extra", "glob", @@ -13635,6 +13600,24 @@ dependencies = [ "sp-runtime 31.0.1", ] +[[package]] +name = "pallet-example-view-functions" +version = "1.0.0" +dependencies = [ + "frame-benchmarking 28.0.0", + "frame-metadata 18.0.0", + "frame-support 28.0.0", + "frame-system 28.0.0", + "log", + "parity-scale-codec", + "pretty_assertions", + "scale-info", + "sp-core 28.0.0", + "sp-io 30.0.0", + "sp-metadata-ir 0.6.0", + "sp-runtime 31.0.1", +] + [[package]] name = "pallet-examples" version = "4.0.0-dev" @@ -13649,6 +13632,7 @@ dependencies = [ "pallet-example-single-block-migrations", "pallet-example-split", "pallet-example-tasks", + "pallet-example-view-functions", ] [[package]] @@ -14850,6 +14834,9 @@ dependencies = [ "serde_json", "sp-api 26.0.0", "sp-arithmetic 23.0.0", + "sp-consensus-aura 0.32.0", + "sp-consensus-babe 0.32.0", + "sp-consensus-slots 0.32.0", "sp-core 28.0.0", "sp-io 30.0.0", "sp-keystore 0.34.0", @@ -14896,7 +14883,7 @@ name = "pallet-revive-eth-rpc" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.5.26", + "clap 4.5.13", "env_logger 0.11.3", "ethabi", "futures", @@ -16239,7 +16226,7 @@ dependencies = [ name = "parachain-template-node" version = "0.0.0" dependencies = [ - "clap 4.5.26", + "clap 4.5.13", "color-print", "docify", "futures", @@ -16417,7 +16404,7 @@ checksum = "4e69bf016dc406eff7d53a7d3f7cf1c2e72c82b9088aac1118591e36dd2cd3e9" dependencies = [ "bitcoin_hashes 0.13.0", 
"rand", - "rand_core 0.5.1", + "rand_core 0.6.4", "serde", "unicode-normalization", ] @@ -17227,7 +17214,7 @@ name = "polkadot-cli" version = "7.0.0" dependencies = [ "cfg-if", - "clap 4.5.26", + "clap 4.5.13", "frame-benchmarking-cli", "futures", "log", @@ -18096,7 +18083,7 @@ version = "0.1.0" dependencies = [ "assert_cmd", "async-trait", - "clap 4.5.26", + "clap 4.5.13", "color-print", "cumulus-client-cli", "cumulus-client-collator", @@ -19612,7 +19599,7 @@ dependencies = [ "async-trait", "bincode", "bitvec", - "clap 4.5.26", + "clap 4.5.13", "clap-num", "color-eyre", "colored", @@ -19714,7 +19701,7 @@ version = "1.0.0" dependencies = [ "assert_matches", "async-trait", - "clap 4.5.26", + "clap 4.5.13", "color-eyre", "futures", "futures-timer", @@ -19854,7 +19841,7 @@ dependencies = [ name = "polkadot-voter-bags" version = "7.0.0" dependencies = [ - "clap 4.5.26", + "clap 4.5.13", "generate-bags", "sp-io 30.0.0", "westend-runtime", @@ -20670,7 +20657,7 @@ checksum = "f8650aabb6c35b860610e9cff5dc1af886c9e25073b7b1712a68972af4281302" dependencies = [ "bytes", "heck 0.5.0", - "itertools 0.12.1", + "itertools 0.13.0", "log", "multimap", "once_cell", @@ -20716,7 +20703,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "acf0c195eebb4af52c752bec4f52f645da98b6e92077a04110c7f349477ae5ac" dependencies = [ "anyhow", - "itertools 0.12.1", + "itertools 0.13.0", "proc-macro2 1.0.86", "quote 1.0.37", "syn 2.0.87", @@ -21178,6 +21165,12 @@ dependencies = [ "regex-syntax 0.6.29", ] +[[package]] +name = "regex-automata" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69" + [[package]] name = "regex-automata" version = "0.4.8" @@ -21276,7 +21269,7 @@ dependencies = [ name = "remote-ext-tests-bags-list" version = "1.0.0" dependencies = [ - "clap 4.5.26", + "clap 4.5.13", "frame-system 28.0.0", "log", "pallet-bags-list-remote-tests", @@ 
-21885,7 +21878,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.24", + "semver 1.0.18", ] [[package]] @@ -22304,7 +22297,7 @@ name = "sc-chain-spec" version = "28.0.0" dependencies = [ "array-bytes", - "clap 4.5.26", + "clap 4.5.13", "docify", "log", "memmap2 0.9.3", @@ -22347,7 +22340,7 @@ version = "0.36.0" dependencies = [ "array-bytes", "chrono", - "clap 4.5.26", + "clap 4.5.13", "fdlimit", "futures", "futures-timer", @@ -23687,7 +23680,7 @@ dependencies = [ name = "sc-storage-monitor" version = "0.16.0" dependencies = [ - "clap 4.5.26", + "clap 4.5.13", "fs4", "log", "sp-core 28.0.0", @@ -23826,6 +23819,7 @@ dependencies = [ "thiserror", "tokio", "tokio-stream", + "tracing", ] [[package]] @@ -24254,9 +24248,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.24" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cb6eb87a131f756572d7fb904f6e7b68633f09cca868c5df1c4b8d1a694bbba" +checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918" dependencies = [ "serde", ] @@ -25558,7 +25552,7 @@ dependencies = [ name = "solochain-template-node" version = "0.0.0" dependencies = [ - "clap 4.5.26", + "clap 4.5.13", "frame-benchmarking-cli", "frame-metadata-hash-extension 0.1.0", "frame-system 28.0.0", @@ -26999,7 +26993,7 @@ dependencies = [ name = "sp-npos-elections-fuzzer" version = "2.0.0-alpha.5" dependencies = [ - "clap 4.5.26", + "clap 4.5.13", "honggfuzz", "rand", "sp-npos-elections 26.0.0", @@ -28256,7 +28250,7 @@ checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" name = "staging-chain-spec-builder" version = "1.6.1" dependencies = [ - "clap 4.5.26", + "clap 4.5.13", "cmd_lib", "docify", "log", @@ -28273,7 +28267,7 @@ version = "3.0.0-dev" dependencies = [ "array-bytes", "assert_cmd", - "clap 4.5.26", + "clap 
4.5.13", "clap_complete", "criterion", "futures", @@ -28309,7 +28303,7 @@ dependencies = [ name = "staging-node-inspect" version = "0.12.0" dependencies = [ - "clap 4.5.26", + "clap 4.5.13", "parity-scale-codec", "sc-cli", "sc-client-api", @@ -28358,7 +28352,7 @@ version = "7.0.0" dependencies = [ "array-bytes", "bounded-collections", - "derivative", + "derive-where", "environmental", "frame-support 28.0.0", "hex", @@ -28653,7 +28647,7 @@ dependencies = [ name = "subkey" version = "9.0.0" dependencies = [ - "clap 4.5.26", + "clap 4.5.13", "sc-cli", ] @@ -29072,7 +29066,7 @@ dependencies = [ "rand", "reqwest 0.12.9", "scale-info", - "semver 1.0.24", + "semver 1.0.18", "serde", "serde_json", "sp-version 35.0.0", @@ -29086,9 +29080,9 @@ dependencies = [ [[package]] name = "subxt" -version = "0.38.1" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c17d7ec2359d33133b63c97e28c8b7cd3f0a5bc6ce567ae3aef9d9e85be3433" +checksum = "c53029d133e4e0cb7933f1fe06f2c68804b956de9bb8fa930ffca44e9e5e4230" dependencies = [ "async-trait", "derive-where", @@ -29485,9 +29479,9 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tar" -version = "0.4.43" +version = "0.4.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c65998313f8e17d0d553d28f91a0df93e4dbbbf770279c7bc21ca0f09ea1a1f6" +checksum = "b16afcea1f22891c49a00c751c7b63b2233284064f11a200fc624137c51e2ddb" dependencies = [ "filetime", "libc", @@ -29535,12 +29529,12 @@ dependencies = [ [[package]] name = "terminal_size" -version = "0.4.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5352447f921fda68cf61b4101566c0bdb5104eff6804d0678e5227580ab6a4e9" +checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" dependencies = [ "rustix 0.38.42", - "windows-sys 0.59.0", + "windows-sys 0.48.0", ] [[package]] @@ -29587,7 +29581,7 @@ dependencies 
= [ name = "test-parachain-adder-collator" version = "1.0.0" dependencies = [ - "clap 4.5.26", + "clap 4.5.13", "futures", "futures-timer", "log", @@ -29624,6 +29618,7 @@ dependencies = [ "log", "parity-scale-codec", "polkadot-parachain-primitives 6.0.0", + "polkadot-primitives 7.0.0", "sp-io 30.0.0", "substrate-wasm-builder 17.0.0", "tiny-keccak", @@ -29633,12 +29628,13 @@ dependencies = [ name = "test-parachain-undying-collator" version = "1.0.0" dependencies = [ - "clap 4.5.26", + "clap 4.5.13", "futures", "futures-timer", "log", "parity-scale-codec", "polkadot-cli", + "polkadot-erasure-coding", "polkadot-node-core-pvf", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -29647,6 +29643,7 @@ dependencies = [ "polkadot-service", "polkadot-test-service", "sc-cli", + "sc-client-api", "sc-service", "sp-core 28.0.0", "sp-keyring 31.0.0", @@ -31902,13 +31899,11 @@ dependencies = [ [[package]] name = "xattr" -version = "1.4.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e105d177a3871454f754b33bb0ee637ecaaac997446375fd3e5d43a2ed00c909" +checksum = "f4686009f71ff3e5c4dbcf1a282d0a44db3f021ba69350cd42086b3e5f1c6985" dependencies = [ "libc", - "linux-raw-sys 0.4.14", - "rustix 0.38.42", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 7a906e7c0d64f..0d415fe4fdbd4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -363,6 +363,7 @@ members = [ "substrate/frame/examples/single-block-migrations", "substrate/frame/examples/split", "substrate/frame/examples/tasks", + "substrate/frame/examples/view-functions", "substrate/frame/executive", "substrate/frame/fast-unstake", "substrate/frame/glutton", @@ -739,8 +740,8 @@ cumulus-test-relay-sproof-builder = { path = "cumulus/test/relay-sproof-builder" cumulus-test-runtime = { path = "cumulus/test/runtime" } cumulus-test-service = { path = "cumulus/test/service" } curve25519-dalek = { version = "4.1.3" } -derivative = { version = "2.2.0", default-features = false } 
derive-syn-parse = { version = "0.2.0" } +derive-where = { version = "1.2.7" } derive_more = { version = "0.99.17", default-features = false } digest = { version = "0.10.3", default-features = false } directories = { version = "5.0.1" } @@ -941,6 +942,7 @@ pallet-example-offchain-worker = { path = "substrate/frame/examples/offchain-wor pallet-example-single-block-migrations = { path = "substrate/frame/examples/single-block-migrations", default-features = false } pallet-example-split = { path = "substrate/frame/examples/split", default-features = false } pallet-example-tasks = { path = "substrate/frame/examples/tasks", default-features = false } +pallet-example-view-functions = { path = "substrate/frame/examples/view-functions", default-features = false } pallet-examples = { path = "substrate/frame/examples" } pallet-fast-unstake = { path = "substrate/frame/fast-unstake", default-features = false } pallet-glutton = { path = "substrate/frame/glutton", default-features = false } diff --git a/bridges/modules/xcm-bridge-hub/src/lib.rs b/bridges/modules/xcm-bridge-hub/src/lib.rs index 682db811efa77..1633e99d7f303 100644 --- a/bridges/modules/xcm-bridge-hub/src/lib.rs +++ b/bridges/modules/xcm-bridge-hub/src/lib.rs @@ -145,8 +145,10 @@ use bp_messages::{LaneState, MessageNonce}; use bp_runtime::{AccountIdOf, BalanceOf, RangeInclusiveExt}; -pub use bp_xcm_bridge_hub::{Bridge, BridgeId, BridgeState, LocalXcmChannelManager}; -use bp_xcm_bridge_hub::{BridgeLocations, BridgeLocationsError}; +use bp_xcm_bridge_hub::BridgeLocationsError; +pub use bp_xcm_bridge_hub::{ + Bridge, BridgeId, BridgeLocations, BridgeState, LocalXcmChannelManager, +}; use frame_support::{traits::fungible::MutateHold, DefaultNoBound}; use frame_system::Config as SystemConfig; use pallet_bridge_messages::{Config as BridgeMessagesConfig, LanesManagerError}; diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index fa754ea29ccf5..624f91e7fdfb3 100644 --- 
a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -309,8 +309,12 @@ pub mod pallet { >::kill(); let relay_upgrade_go_ahead = >::take(); - let vfp = >::get() - .expect("set_validation_data inherent needs to be present in every block!"); + let vfp = >::get().expect( + r"Missing required set_validation_data inherent. This inherent must be + present in every block. This error typically occurs when the set_validation_data + execution failed and was rejected by the block builder. Check earlier log entries + for the specific cause of the failure.", + ); LastRelayChainBlockNumber::::put(vfp.relay_parent_number); diff --git a/cumulus/pallets/session-benchmarking/src/inner.rs b/cumulus/pallets/session-benchmarking/src/inner.rs index 8d5954304878d..6c5188921362e 100644 --- a/cumulus/pallets/session-benchmarking/src/inner.rs +++ b/cumulus/pallets/session-benchmarking/src/inner.rs @@ -14,29 +14,49 @@ // limitations under the License. //! Benchmarking setup for pallet-session. +#![cfg(feature = "runtime-benchmarks")] use alloc::{vec, vec::Vec}; use codec::Decode; -use frame_benchmarking::{benchmarks, whitelisted_caller}; +use frame_benchmarking::v2::*; use frame_system::RawOrigin; use pallet_session::*; pub struct Pallet(pallet_session::Pallet); pub trait Config: pallet_session::Config {} -benchmarks! 
{ - set_keys { +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn set_keys() -> Result<(), BenchmarkError> { let caller: T::AccountId = whitelisted_caller(); frame_system::Pallet::::inc_providers(&caller); let keys = T::Keys::decode(&mut sp_runtime::traits::TrailingZeroInput::zeroes()).unwrap(); - let proof: Vec = vec![0,1,2,3]; - }: _(RawOrigin::Signed(caller), keys, proof) + let proof: Vec = vec![0, 1, 2, 3]; + + #[extrinsic_call] + _(RawOrigin::Signed(caller), keys, proof); + + Ok(()) + } - purge_keys { + #[benchmark] + fn purge_keys() -> Result<(), BenchmarkError> { let caller: T::AccountId = whitelisted_caller(); frame_system::Pallet::::inc_providers(&caller); let keys = T::Keys::decode(&mut sp_runtime::traits::TrailingZeroInput::zeroes()).unwrap(); - let proof: Vec = vec![0,1,2,3]; - let _t = pallet_session::Pallet::::set_keys(RawOrigin::Signed(caller.clone()).into(), keys, proof); - }: _(RawOrigin::Signed(caller)) + let proof: Vec = vec![0, 1, 2, 3]; + let _t = pallet_session::Pallet::::set_keys( + RawOrigin::Signed(caller.clone()).into(), + keys, + proof, + ); + + #[extrinsic_call] + _(RawOrigin::Signed(caller)); + + Ok(()) + } } diff --git a/cumulus/pallets/weight-reclaim/Cargo.toml b/cumulus/pallets/weight-reclaim/Cargo.toml index 8bde6abaff6a1..d412a9b105d98 100644 --- a/cumulus/pallets/weight-reclaim/Cargo.toml +++ b/cumulus/pallets/weight-reclaim/Cargo.toml @@ -27,7 +27,7 @@ frame-system = { workspace = true } # Other dependencies codec = { features = ["derive"], workspace = true } -derivative = { features = ["use_core"], workspace = true } +derive-where = { workspace = true } docify = { workspace = true } log = { workspace = true, default-features = true } scale-info = { features = ["derive"], workspace = true } diff --git a/cumulus/pallets/weight-reclaim/src/lib.rs b/cumulus/pallets/weight-reclaim/src/lib.rs index bd9929033af14..7bbd2cf29d831 100644 --- a/cumulus/pallets/weight-reclaim/src/lib.rs +++ 
b/cumulus/pallets/weight-reclaim/src/lib.rs @@ -29,7 +29,7 @@ extern crate alloc; use alloc::vec::Vec; use codec::{Decode, Encode}; use cumulus_primitives_storage_weight_reclaim::get_proof_size; -use derivative::Derivative; +use derive_where::derive_where; use frame_support::{ dispatch::{DispatchInfo, PostDispatchInfo}, pallet_prelude::Weight, @@ -83,13 +83,8 @@ pub mod pallet { /// calculates the unused weight using the post information and reclaim the unused weight. /// So this extension can be used as a drop-in replacement for `WeightReclaim` extension for /// parachains. -#[derive(Encode, Decode, TypeInfo, Derivative)] -#[derivative( - Clone(bound = "S: Clone"), - Eq(bound = "S: Eq"), - PartialEq(bound = "S: PartialEq"), - Default(bound = "S: Default") -)] +#[derive(Encode, Decode, TypeInfo)] +#[derive_where(Clone, Eq, PartialEq, Default; S)] #[scale_info(skip_type_params(T))] pub struct StorageWeightReclaim(pub S, core::marker::PhantomData); diff --git a/cumulus/pallets/weight-reclaim/src/tests.rs b/cumulus/pallets/weight-reclaim/src/tests.rs index b87c107c7ec71..ce647445b3327 100644 --- a/cumulus/pallets/weight-reclaim/src/tests.rs +++ b/cumulus/pallets/weight-reclaim/src/tests.rs @@ -89,7 +89,8 @@ mod runtime { RuntimeHoldReason, RuntimeSlashReason, RuntimeLockId, - RuntimeTask + RuntimeTask, + RuntimeViewFunction )] pub struct Test; diff --git a/cumulus/pallets/xcmp-queue/src/bridging.rs b/cumulus/pallets/xcmp-queue/src/bridging.rs index 8ed11505a27a9..355691a41659d 100644 --- a/cumulus/pallets/xcmp-queue/src/bridging.rs +++ b/cumulus/pallets/xcmp-queue/src/bridging.rs @@ -45,12 +45,11 @@ impl bp_xcm_bridge_hub_router::XcmChannelStatusProvider } } -/// Adapter implementation for `bp_xcm_bridge_hub_router::XcmChannelStatusProvider` which checks -/// only `OutboundXcmpStatus` for defined `SiblingParaId` if is suspended. 
+/// Adapter implementation for `bp_xcm_bridge::ChannelStatusProvider` and/or +/// `bp_xcm_bridge_hub_router::XcmChannelStatusProvider` which checks only `OutboundXcmpStatus` +/// for defined `Location` if is suspended. pub struct OutXcmpChannelStatusProvider(core::marker::PhantomData); -impl bp_xcm_bridge_hub_router::XcmChannelStatusProvider - for OutXcmpChannelStatusProvider -{ +impl OutXcmpChannelStatusProvider { fn is_congested(with: &Location) -> bool { // handle congestion only for a sibling parachain locations. let sibling_para_id: ParaId = match with.unpack() { @@ -88,6 +87,14 @@ impl bp_xcm_bridge_hub_router::XcmChannelStatusProvider } } +impl bp_xcm_bridge_hub_router::XcmChannelStatusProvider + for OutXcmpChannelStatusProvider +{ + fn is_congested(with: &Location) -> bool { + Self::is_congested(with) + } +} + #[cfg(feature = "runtime-benchmarks")] pub fn suspend_channel_for_benchmarks(target: ParaId) { pallet::Pallet::::suspend_channel(target) diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml index 7bb7277df45c0..35ceffe4c6953 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml @@ -32,7 +32,6 @@ xcm-runtime-apis = { workspace = true } # Bridges pallet-bridge-messages = { workspace = true } -pallet-xcm-bridge-hub = { workspace = true } # Cumulus cumulus-pallet-xcmp-queue = { workspace = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml index dc3bbb269d70e..f718e7e77f597 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml +++ 
b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml @@ -33,7 +33,6 @@ xcm-runtime-apis = { workspace = true } # Bridges pallet-bridge-messages = { workspace = true } -pallet-xcm-bridge-hub = { workspace = true } # Cumulus asset-hub-westend-runtime = { workspace = true } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index ecbe1fb0e62af..3a7e3ef131c3f 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -1081,6 +1081,7 @@ impl pallet_revive::Config for Runtime { type ChainId = ConstU64<420_420_421>; type NativeToEthRatio = ConstU32<1_000_000>; // 10^(18 - 12) Eth is 10^18, Native is 10^12. type EthGasEncoder = (); + type FindAuthor = ::FindAuthor; } impl TryFrom for pallet_revive::Call { diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 3824a4e9a7cb3..36b565bdca1c9 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -394,10 +394,14 @@ parameter_types! { impl pallet_message_queue::Config for Runtime { type RuntimeEvent = RuntimeEvent; type WeightInfo = weights::pallet_message_queue::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] + // Use the NoopMessageProcessor exclusively for benchmarks, not for tests with the + // runtime-benchmarks feature as tests require the BridgeHubMessageRouter to process messages. + // The "test" feature flag doesn't work, hence the reliance on the "std" feature, which is + // enabled during tests. 
+ #[cfg(all(not(feature = "std"), feature = "runtime-benchmarks"))] type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor; - #[cfg(not(feature = "runtime-benchmarks"))] + #[cfg(any(feature = "std", not(feature = "runtime-benchmarks")))] type MessageProcessor = bridge_hub_common::BridgeHubMessageRouter< xcm_builder::ProcessXcmMessage< AggregateMessageOrigin, diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml index 132e42deea4a0..dc390d48cc777 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml @@ -26,6 +26,7 @@ sp-core = { workspace = true } sp-io = { workspace = true } sp-keyring = { workspace = true, default-features = true } sp-runtime = { workspace = true } +sp-std = { workspace = true } sp-tracing = { workspace = true, default-features = true } # Cumulus @@ -49,7 +50,6 @@ bp-polkadot-core = { workspace = true } bp-relayers = { workspace = true } bp-runtime = { workspace = true } bp-test-utils = { workspace = true } -bp-xcm-bridge-hub = { workspace = true } pallet-bridge-grandpa = { workspace = true } pallet-bridge-messages = { features = ["test-helpers"], workspace = true } pallet-bridge-parachains = { workspace = true } @@ -67,7 +67,6 @@ std = [ "bp-relayers/std", "bp-runtime/std", "bp-test-utils/std", - "bp-xcm-bridge-hub/std", "codec/std", "cumulus-pallet-parachain-system/std", "cumulus-pallet-xcmp-queue/std", @@ -88,6 +87,7 @@ std = [ "sp-core/std", "sp-io/std", "sp-runtime/std", + "sp-std/std", "xcm-builder/std", "xcm-executor/std", "xcm/std", diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/lib.rs index bc28df0eb829c..240aac6c40635 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/lib.rs 
@@ -24,7 +24,7 @@ extern crate alloc; pub use bp_test_utils::test_header; pub use parachains_runtimes_test_utils::*; use sp_runtime::Perbill; -pub use test_cases::helpers::{ +pub use test_cases::helpers::for_pallet_xcm_bridge_hub::{ ensure_opened_bridge, open_bridge_with_extrinsic, open_bridge_with_storage, }; diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_grandpa_chain.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_grandpa_chain.rs index 358c184c815d9..4a7975b2d9f28 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_grandpa_chain.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_grandpa_chain.rs @@ -20,13 +20,13 @@ use crate::{ test_cases::{bridges_prelude::*, helpers, run_test}, test_data, + test_data::XcmAsPlainPayload, }; use alloc::{boxed::Box, vec}; use bp_header_chain::ChainWithGrandpa; use bp_messages::UnrewardedRelayersState; use bp_relayers::{RewardsAccountOwner, RewardsAccountParams}; -use bp_xcm_bridge_hub::XcmAsPlainPayload; use frame_support::traits::{OnFinalize, OnInitialize}; use frame_system::pallet_prelude::BlockNumberFor; use pallet_bridge_messages::{BridgedChainOf, LaneIdOf, ThisChainOf}; diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_parachain.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_parachain.rs index d8fff55b4b50a..7e87d703888a3 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_parachain.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_parachain.rs @@ -20,6 +20,7 @@ use crate::{ test_cases::{bridges_prelude::*, helpers, run_test}, test_data, + test_data::XcmAsPlainPayload, }; use alloc::{boxed::Box, vec}; @@ -28,7 +29,6 @@ use bp_messages::UnrewardedRelayersState; use bp_polkadot_core::parachains::ParaHash; use bp_relayers::{RewardsAccountOwner, RewardsAccountParams}; use 
bp_runtime::{Chain, Parachain}; -use bp_xcm_bridge_hub::XcmAsPlainPayload; use frame_support::traits::{OnFinalize, OnInitialize}; use frame_system::pallet_prelude::BlockNumberFor; use pallet_bridge_messages::{BridgedChainOf, LaneIdOf, ThisChainOf}; diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs index a99bda5bfdf47..505babdb64155 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs @@ -23,7 +23,6 @@ use bp_messages::MessageNonce; use bp_polkadot_core::parachains::{ParaHash, ParaId}; use bp_relayers::RewardsAccountParams; use bp_runtime::Chain; -use bp_xcm_bridge_hub::BridgeLocations; use codec::Decode; use core::marker::PhantomData; use frame_support::{ @@ -388,203 +387,210 @@ fn execute_and_verify_calls( } } -/// Helper function to open the bridge/lane for `source` and `destination` while ensuring all -/// required balances are placed into the SA of the source. 
-pub fn ensure_opened_bridge< - Runtime, - XcmOverBridgePalletInstance, - LocationToAccountId, - TokenLocation> -(source: Location, destination: InteriorLocation, is_paid_xcm_execution: bool, bridge_opener: impl Fn(BridgeLocations, Option)) -> (BridgeLocations, pallet_xcm_bridge_hub::LaneIdOf) -where - Runtime: BasicParachainRuntime + BridgeXcmOverBridgeConfig, - XcmOverBridgePalletInstance: 'static, - ::RuntimeCall: GetDispatchInfo + From>, - ::Balance: From<<>::BridgeMessagesPalletInstance>>::ThisChain as bp_runtime::Chain>::Balance>, - ::Balance: From, - LocationToAccountId: ConvertLocation>, -TokenLocation: Get{ - // construct expected bridge configuration - let locations = - pallet_xcm_bridge_hub::Pallet::::bridge_locations( - source.clone().into(), - destination.clone().into(), +pub(crate) mod for_pallet_xcm_bridge_hub { + use super::{super::for_pallet_xcm_bridge_hub::*, *}; + + /// Helper function to open the bridge/lane for `source` and `destination` while ensuring all + /// required balances are placed into the SA of the source. 
+ pub fn ensure_opened_bridge< + Runtime, + XcmOverBridgePalletInstance, + LocationToAccountId, + TokenLocation> + (source: Location, destination: InteriorLocation, is_paid_xcm_execution: bool, bridge_opener: impl Fn(pallet_xcm_bridge_hub::BridgeLocations, Option)) -> (pallet_xcm_bridge_hub::BridgeLocations, pallet_xcm_bridge_hub::LaneIdOf) + where + Runtime: BasicParachainRuntime + BridgeXcmOverBridgeConfig, + XcmOverBridgePalletInstance: 'static, + ::RuntimeCall: GetDispatchInfo + From>, + ::Balance: From<<>::BridgeMessagesPalletInstance>>::ThisChain as bp_runtime::Chain>::Balance>, + ::Balance: From, + LocationToAccountId: ConvertLocation>, + TokenLocation: Get + { + // construct expected bridge configuration + let locations = + pallet_xcm_bridge_hub::Pallet::::bridge_locations( + source.clone().into(), + destination.clone().into(), + ) + .expect("valid bridge locations"); + assert!(pallet_xcm_bridge_hub::Bridges::::get( + locations.bridge_id() ) - .expect("valid bridge locations"); - assert!(pallet_xcm_bridge_hub::Bridges::::get( - locations.bridge_id() - ) - .is_none()); + .is_none()); + + // SA of source location needs to have some required balance + if !>::AllowWithoutBridgeDeposit::contains(&source) { + // required balance: ED + fee + BridgeDeposit + let bridge_deposit = + >::BridgeDeposit::get(); + let balance_needed = ::ExistentialDeposit::get() + bridge_deposit.into(); + + let source_account_id = LocationToAccountId::convert_location(&source).expect("valid location"); + let _ = >::mint_into(&source_account_id, balance_needed) + .expect("mint_into passes"); + }; + + let maybe_paid_execution = if is_paid_xcm_execution { + // random high enough value for `BuyExecution` fees + let buy_execution_fee_amount = 5_000_000_000_000_u128; + let buy_execution_fee = (TokenLocation::get(), buy_execution_fee_amount).into(); + + let balance_needed = ::ExistentialDeposit::get() + + buy_execution_fee_amount.into(); + let source_account_id = + 
LocationToAccountId::convert_location(&source).expect("valid location"); + let _ = + >::mint_into(&source_account_id, balance_needed) + .expect("mint_into passes"); + Some(buy_execution_fee) + } else { + None + }; + + // call the bridge opener + bridge_opener(*locations.clone(), maybe_paid_execution); + + // check opened bridge + let bridge = pallet_xcm_bridge_hub::Bridges::::get( + locations.bridge_id(), + ) + .expect("opened bridge"); - // SA of source location needs to have some required balance - if !>::AllowWithoutBridgeDeposit::contains(&source) { - // required balance: ED + fee + BridgeDeposit - let bridge_deposit = - >::BridgeDeposit::get( - ); - let balance_needed = ::ExistentialDeposit::get() + bridge_deposit.into(); - - let source_account_id = LocationToAccountId::convert_location(&source).expect("valid location"); - let _ = >::mint_into(&source_account_id, balance_needed) - .expect("mint_into passes"); - }; - - let maybe_paid_execution = if is_paid_xcm_execution { - // random high enough value for `BuyExecution` fees - let buy_execution_fee_amount = 5_000_000_000_000_u128; - let buy_execution_fee = (TokenLocation::get(), buy_execution_fee_amount).into(); - - let balance_needed = ::ExistentialDeposit::get() + - buy_execution_fee_amount.into(); - let source_account_id = - LocationToAccountId::convert_location(&source).expect("valid location"); - let _ = >::mint_into(&source_account_id, balance_needed) - .expect("mint_into passes"); - Some(buy_execution_fee) - } else { - None - }; - - // call the bridge opener - bridge_opener(*locations.clone(), maybe_paid_execution); - - // check opened bridge - let bridge = pallet_xcm_bridge_hub::Bridges::::get( - locations.bridge_id(), - ) - .expect("opened bridge"); + // check state + assert_ok!( + pallet_xcm_bridge_hub::Pallet::::do_try_state() + ); - // check state - assert_ok!( - pallet_xcm_bridge_hub::Pallet::::do_try_state() - ); + // return locations + (*locations, bridge.lane_id) + } - // return locations - 
(*locations, bridge.lane_id) -} + /// Utility for opening bridge with dedicated `pallet_xcm_bridge_hub`'s extrinsic. + pub fn open_bridge_with_extrinsic( + (origin, origin_kind): (Location, OriginKind), + bridge_destination_universal_location: InteriorLocation, + maybe_paid_execution: Option, + ) where + Runtime: frame_system::Config + + pallet_xcm_bridge_hub::Config + + cumulus_pallet_parachain_system::Config + + pallet_xcm::Config, + XcmOverBridgePalletInstance: 'static, + ::RuntimeCall: + GetDispatchInfo + From>, + { + // open bridge with `Transact` call + let open_bridge_call = RuntimeCallOf::::from(BridgeXcmOverBridgeCall::< + Runtime, + XcmOverBridgePalletInstance, + >::open_bridge { + bridge_destination_universal_location: Box::new( + bridge_destination_universal_location.clone().into(), + ), + }); + + // execute XCM as source origin would do with `Transact -> Origin::Xcm` + assert_ok!(RuntimeHelper::::execute_as_origin( + (origin, origin_kind), + open_bridge_call, + maybe_paid_execution + ) + .ensure_complete()); + } -/// Utility for opening bridge with dedicated `pallet_xcm_bridge_hub`'s extrinsic. 
-pub fn open_bridge_with_extrinsic( - (origin, origin_kind): (Location, OriginKind), - bridge_destination_universal_location: InteriorLocation, - maybe_paid_execution: Option, -) where - Runtime: frame_system::Config - + pallet_xcm_bridge_hub::Config - + cumulus_pallet_parachain_system::Config - + pallet_xcm::Config, - XcmOverBridgePalletInstance: 'static, - ::RuntimeCall: - GetDispatchInfo + From>, -{ - // open bridge with `Transact` call - let open_bridge_call = RuntimeCallOf::::from(BridgeXcmOverBridgeCall::< - Runtime, - XcmOverBridgePalletInstance, - >::open_bridge { - bridge_destination_universal_location: Box::new( - bridge_destination_universal_location.clone().into(), - ), - }); - - // execute XCM as source origin would do with `Transact -> Origin::Xcm` - assert_ok!(RuntimeHelper::::execute_as_origin( - (origin, origin_kind), - open_bridge_call, - maybe_paid_execution - ) - .ensure_complete()); -} + /// Utility for opening bridge directly inserting data to the `pallet_xcm_bridge_hub`'s storage + /// (used only for legacy purposes). + pub fn open_bridge_with_storage( + locations: pallet_xcm_bridge_hub::BridgeLocations, + lane_id: pallet_xcm_bridge_hub::LaneIdOf, + ) where + Runtime: pallet_xcm_bridge_hub::Config, + XcmOverBridgePalletInstance: 'static, + { + // insert bridge data directly to the storage + assert_ok!( + pallet_xcm_bridge_hub::Pallet::::do_open_bridge( + Box::new(locations), + lane_id, + true + ) + ); + } -/// Utility for opening bridge directly inserting data to the storage (used only for legacy -/// purposes). -pub fn open_bridge_with_storage( - locations: BridgeLocations, - lane_id: pallet_xcm_bridge_hub::LaneIdOf, -) where - Runtime: pallet_xcm_bridge_hub::Config, - XcmOverBridgePalletInstance: 'static, -{ - // insert bridge data directly to the storage - assert_ok!( - pallet_xcm_bridge_hub::Pallet::::do_open_bridge( - Box::new(locations), - lane_id, - true + /// Helper function to close the bridge/lane for `source` and `destination`. 
+ pub fn close_bridge( + expected_source: Location, + bridge_destination_universal_location: InteriorLocation, + (origin, origin_kind): (Location, OriginKind), + is_paid_xcm_execution: bool + ) where + Runtime: BasicParachainRuntime + BridgeXcmOverBridgeConfig, + XcmOverBridgePalletInstance: 'static, + ::RuntimeCall: GetDispatchInfo + From>, + ::Balance: From<<>::BridgeMessagesPalletInstance>>::ThisChain as bp_runtime::Chain>::Balance>, + ::Balance: From, + LocationToAccountId: ConvertLocation>, + TokenLocation: Get + { + // construct expected bridge configuration + let locations = + pallet_xcm_bridge_hub::Pallet::::bridge_locations( + expected_source.clone().into(), + bridge_destination_universal_location.clone().into(), + ) + .expect("valid bridge locations"); + assert!(pallet_xcm_bridge_hub::Bridges::::get( + locations.bridge_id() ) - ); -} + .is_some()); -/// Helper function to close the bridge/lane for `source` and `destination`. -pub fn close_bridge( - expected_source: Location, - bridge_destination_universal_location: InteriorLocation, - (origin, origin_kind): (Location, OriginKind), - is_paid_xcm_execution: bool -) where - Runtime: BasicParachainRuntime + BridgeXcmOverBridgeConfig, - XcmOverBridgePalletInstance: 'static, - ::RuntimeCall: GetDispatchInfo + From>, - ::Balance: From<<>::BridgeMessagesPalletInstance>>::ThisChain as bp_runtime::Chain>::Balance>, - ::Balance: From, - LocationToAccountId: ConvertLocation>, -TokenLocation: Get{ - // construct expected bridge configuration - let locations = - pallet_xcm_bridge_hub::Pallet::::bridge_locations( - expected_source.clone().into(), - bridge_destination_universal_location.clone().into(), + // required balance: ED + fee + BridgeDeposit + let maybe_paid_execution = if is_paid_xcm_execution { + // random high enough value for `BuyExecution` fees + let buy_execution_fee_amount = 2_500_000_000_000_u128; + let buy_execution_fee = (TokenLocation::get(), buy_execution_fee_amount).into(); + + let balance_needed = 
::ExistentialDeposit::get() + + buy_execution_fee_amount.into(); + let source_account_id = + LocationToAccountId::convert_location(&expected_source).expect("valid location"); + let _ = + >::mint_into(&source_account_id, balance_needed) + .expect("mint_into passes"); + Some(buy_execution_fee) + } else { + None + }; + + // close bridge with `Transact` call + let close_bridge_call = RuntimeCallOf::::from(BridgeXcmOverBridgeCall::< + Runtime, + XcmOverBridgePalletInstance, + >::close_bridge { + bridge_destination_universal_location: Box::new( + bridge_destination_universal_location.into(), + ), + may_prune_messages: 16, + }); + + // execute XCM as source origin would do with `Transact -> Origin::Xcm` + assert_ok!(RuntimeHelper::::execute_as_origin( + (origin, origin_kind), + close_bridge_call, + maybe_paid_execution ) - .expect("valid bridge locations"); - assert!(pallet_xcm_bridge_hub::Bridges::::get( - locations.bridge_id() - ) - .is_some()); - - // required balance: ED + fee + BridgeDeposit - let maybe_paid_execution = if is_paid_xcm_execution { - // random high enough value for `BuyExecution` fees - let buy_execution_fee_amount = 2_500_000_000_000_u128; - let buy_execution_fee = (TokenLocation::get(), buy_execution_fee_amount).into(); - - let balance_needed = ::ExistentialDeposit::get() + - buy_execution_fee_amount.into(); - let source_account_id = - LocationToAccountId::convert_location(&expected_source).expect("valid location"); - let _ = >::mint_into(&source_account_id, balance_needed) - .expect("mint_into passes"); - Some(buy_execution_fee) - } else { - None - }; - - // close bridge with `Transact` call - let close_bridge_call = RuntimeCallOf::::from(BridgeXcmOverBridgeCall::< - Runtime, - XcmOverBridgePalletInstance, - >::close_bridge { - bridge_destination_universal_location: Box::new( - bridge_destination_universal_location.into(), - ), - may_prune_messages: 16, - }); - - // execute XCM as source origin would do with `Transact -> Origin::Xcm` - 
assert_ok!(RuntimeHelper::::execute_as_origin( - (origin, origin_kind), - close_bridge_call, - maybe_paid_execution - ) - .ensure_complete()); + .ensure_complete()); - // bridge is closed - assert!(pallet_xcm_bridge_hub::Bridges::::get( - locations.bridge_id() - ) - .is_none()); + // bridge is closed + assert!(pallet_xcm_bridge_hub::Bridges::::get( + locations.bridge_id() + ) + .is_none()); - // check state - assert_ok!( - pallet_xcm_bridge_hub::Pallet::::do_try_state() - ); + // check state + assert_ok!( + pallet_xcm_bridge_hub::Pallet::::do_try_state() + ); + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/mod.rs index f96d0bf405b9c..fa0229ce06881 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/mod.rs @@ -32,7 +32,6 @@ use bp_messages::{ LaneState, MessageKey, MessagesOperatingMode, OutboundLaneData, }; use bp_runtime::BasicOperatingMode; -use bp_xcm_bridge_hub::{Bridge, BridgeState, XcmAsPlainPayload}; use codec::Encode; use frame_support::{ assert_ok, @@ -63,12 +62,11 @@ pub(crate) mod bridges_prelude { pub use pallet_bridge_parachains::{ Call as BridgeParachainsCall, Config as BridgeParachainsConfig, }; - pub use pallet_xcm_bridge_hub::{ - Call as BridgeXcmOverBridgeCall, Config as BridgeXcmOverBridgeConfig, LanesManagerOf, - XcmBlobMessageDispatchResult, - }; } +// Re-export test-case +pub use for_pallet_xcm_bridge_hub::open_and_close_bridge_works; + // Re-export test_case from assets pub use asset_test_utils::include_teleports_for_native_asset_works; use pallet_bridge_messages::LaneIdOf; @@ -77,7 +75,6 @@ pub type RuntimeHelper = parachains_runtimes_test_utils::RuntimeHelper; // Re-export test_case from `parachains-runtimes-test-utils` -use crate::test_cases::helpers::open_bridge_with_extrinsic; pub use 
parachains_runtimes_test_utils::test_cases::{ change_storage_constant_by_governance_works, set_storage_keys_by_governance_works, }; @@ -439,7 +436,7 @@ pub fn message_dispatch_routing_works< ) where Runtime: BasicParachainRuntime + cumulus_pallet_xcmp_queue::Config - + BridgeMessagesConfig, + + BridgeMessagesConfig, AllPalletsWithoutSystem: OnInitialize> + OnFinalize>, AccountIdOf: From @@ -459,9 +456,15 @@ pub fn message_dispatch_routing_works< Location::new(C::get(), [GlobalConsensus(N::get())]) } } - assert_ne!(runtime_para_id, sibling_parachain_id); + #[derive(Debug)] + enum XcmBlobMessageDispatchResult { + Dispatched, + #[allow(dead_code)] + NotDispatched(Option), + } + run_test::(collator_session_key, runtime_para_id, vec![], || { prepare_configuration(); @@ -650,139 +653,150 @@ where estimated_fee.into() } -/// Test-case makes sure that `Runtime` can open/close bridges. -pub fn open_and_close_bridge_works( - collator_session_key: CollatorSessionKeys, - runtime_para_id: u32, - expected_source: Location, - destination: InteriorLocation, - origin_with_origin_kind: (Location, OriginKind), - is_paid_xcm_execution: bool, -) where - Runtime: BasicParachainRuntime + BridgeXcmOverBridgeConfig, - XcmOverBridgePalletInstance: 'static, - ::RuntimeCall: GetDispatchInfo + From>, - ::Balance: From<<>::BridgeMessagesPalletInstance>>::ThisChain as bp_runtime::Chain>::Balance>, - ::Balance: From, - <>::BridgeMessagesPalletInstance>>::ThisChain as bp_runtime::Chain>::AccountId: From<::AccountId>, - LocationToAccountId: ConvertLocation>, - TokenLocation: Get, -{ - run_test::(collator_session_key, runtime_para_id, vec![], || { - // construct expected bridge configuration - let locations = pallet_xcm_bridge_hub::Pallet::::bridge_locations( - expected_source.clone().into(), - destination.clone().into(), - ).expect("valid bridge locations"); - let expected_lane_id = - locations.calculate_lane_id(xcm::latest::VERSION).expect("valid laneId"); - let lanes_manager = 
LanesManagerOf::::new(); - - let expected_deposit = if >::AllowWithoutBridgeDeposit::contains( - locations.bridge_origin_relative_location() - ) { - Zero::zero() - } else { - ( + collator_session_key: CollatorSessionKeys, + runtime_para_id: u32, + expected_source: Location, + destination: InteriorLocation, + origin_with_origin_kind: (Location, OriginKind), + is_paid_xcm_execution: bool, + ) where + Runtime: BasicParachainRuntime + BridgeXcmOverBridgeConfig, + XcmOverBridgePalletInstance: 'static, + ::RuntimeCall: GetDispatchInfo + From>, + ::Balance: From<<>::BridgeMessagesPalletInstance>>::ThisChain as bp_runtime::Chain>::Balance>, + ::Balance: From, + <>::BridgeMessagesPalletInstance>>::ThisChain as bp_runtime::Chain>::AccountId: From<::AccountId>, + LocationToAccountId: ConvertLocation>, + TokenLocation: Get, + { + run_test::(collator_session_key, runtime_para_id, vec![], || { + // construct expected bridge configuration + let locations = pallet_xcm_bridge_hub::Pallet::::bridge_locations( + expected_source.clone().into(), + destination.clone().into(), + ).expect("valid bridge locations"); + let expected_lane_id = + locations.calculate_lane_id(xcm::latest::VERSION).expect("valid laneId"); + let lanes_manager = LanesManagerOf::::new(); + + let expected_deposit = if >::BridgeDeposit::get() - }; + >>::AllowWithoutBridgeDeposit::contains( + locations.bridge_origin_relative_location() + ) { + Zero::zero() + } else { + >::BridgeDeposit::get() + }; - // check bridge/lane DOES not exist - assert_eq!( - pallet_xcm_bridge_hub::Bridges::::get( - locations.bridge_id() - ), - None - ); - assert_eq!( - lanes_manager.active_inbound_lane(expected_lane_id).map(drop), - Err(LanesManagerError::UnknownInboundLane) - ); - assert_eq!( - lanes_manager.active_outbound_lane(expected_lane_id).map(drop), - Err(LanesManagerError::UnknownOutboundLane) - ); + // check bridge/lane DOES not exist + assert_eq!( + pallet_xcm_bridge_hub::Bridges::::get( + locations.bridge_id() + ), + None + ); + 
assert_eq!( + lanes_manager.active_inbound_lane(expected_lane_id).map(drop), + Err(LanesManagerError::UnknownInboundLane) + ); + assert_eq!( + lanes_manager.active_outbound_lane(expected_lane_id).map(drop), + Err(LanesManagerError::UnknownOutboundLane) + ); - // open bridge with Transact call - assert_eq!( - helpers::ensure_opened_bridge::< - Runtime, - XcmOverBridgePalletInstance, - LocationToAccountId, - TokenLocation, - >( - expected_source.clone(), - destination.clone(), - is_paid_xcm_execution, - |locations, maybe_paid_execution| open_bridge_with_extrinsic::< + // open bridge with Transact call + assert_eq!( + ensure_opened_bridge::< Runtime, XcmOverBridgePalletInstance, + LocationToAccountId, + TokenLocation, >( - origin_with_origin_kind.clone(), - locations.bridge_destination_universal_location().clone(), - maybe_paid_execution + expected_source.clone(), + destination.clone(), + is_paid_xcm_execution, + |locations, maybe_paid_execution| open_bridge_with_extrinsic::< + Runtime, + XcmOverBridgePalletInstance, + >( + origin_with_origin_kind.clone(), + locations.bridge_destination_universal_location().clone(), + maybe_paid_execution + ) ) - ) - .0 - .bridge_id(), - locations.bridge_id() - ); - - // check bridge/lane DOES exist - assert_eq!( - pallet_xcm_bridge_hub::Bridges::::get( + .0 + .bridge_id(), locations.bridge_id() - ), - Some(Bridge { - bridge_origin_relative_location: Box::new(expected_source.clone().into()), - bridge_origin_universal_location: Box::new( - locations.bridge_origin_universal_location().clone().into() - ), - bridge_destination_universal_location: Box::new( - locations.bridge_destination_universal_location().clone().into() + ); + + // check bridge/lane DOES exist + assert_eq!( + pallet_xcm_bridge_hub::Bridges::::get( + locations.bridge_id() ), - state: BridgeState::Opened, - bridge_owner_account: LocationToAccountId::convert_location(&expected_source) - .expect("valid location") - .into(), - deposit: expected_deposit, - lane_id: 
expected_lane_id - }) - ); - assert_eq!( - lanes_manager.active_inbound_lane(expected_lane_id).map(|lane| lane.state()), - Ok(LaneState::Opened) - ); - assert_eq!( - lanes_manager.active_outbound_lane(expected_lane_id).map(|lane| lane.state()), - Ok(LaneState::Opened) - ); + Some(Bridge { + bridge_origin_relative_location: Box::new(expected_source.clone().into()), + bridge_origin_universal_location: Box::new( + locations.bridge_origin_universal_location().clone().into() + ), + bridge_destination_universal_location: Box::new( + locations.bridge_destination_universal_location().clone().into() + ), + state: BridgeState::Opened, + bridge_owner_account: LocationToAccountId::convert_location(&expected_source) + .expect("valid location") + .into(), + deposit: expected_deposit, + lane_id: expected_lane_id, + }) + ); + assert_eq!( + lanes_manager.active_inbound_lane(expected_lane_id).map(|lane| lane.state()), + Ok(LaneState::Opened) + ); + assert_eq!( + lanes_manager.active_outbound_lane(expected_lane_id).map(|lane| lane.state()), + Ok(LaneState::Opened) + ); - // close bridge with Transact call - helpers::close_bridge::< - Runtime, - XcmOverBridgePalletInstance, - LocationToAccountId, - TokenLocation, - >(expected_source, destination, origin_with_origin_kind, is_paid_xcm_execution); + // close bridge with Transact call + close_bridge::( + expected_source, + destination, + origin_with_origin_kind, + is_paid_xcm_execution, + ); - // check bridge/lane DOES not exist - assert_eq!( - pallet_xcm_bridge_hub::Bridges::::get( - locations.bridge_id() - ), - None - ); - assert_eq!( - lanes_manager.active_inbound_lane(expected_lane_id).map(drop), - Err(LanesManagerError::UnknownInboundLane) - ); - assert_eq!( - lanes_manager.active_outbound_lane(expected_lane_id).map(drop), - Err(LanesManagerError::UnknownOutboundLane) - ); - }); + // check bridge/lane DOES not exist + assert_eq!( + pallet_xcm_bridge_hub::Bridges::::get( + locations.bridge_id() + ), + None + ); + assert_eq!( + 
lanes_manager.active_inbound_lane(expected_lane_id).map(drop), + Err(LanesManagerError::UnknownInboundLane) + ); + assert_eq!( + lanes_manager.active_outbound_lane(expected_lane_id).map(drop), + Err(LanesManagerError::UnknownOutboundLane) + ); + }); + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_grandpa_chain.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_grandpa_chain.rs index 7461085330f27..37605350b8e64 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_grandpa_chain.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_grandpa_chain.rs @@ -16,7 +16,7 @@ //! Generating test data for bridges with remote GRANDPA chains. -use crate::test_data::prepare_inbound_xcm; +use crate::test_data::{prepare_inbound_xcm, XcmAsPlainPayload}; use bp_messages::{ source_chain::FromBridgedChainMessagesDeliveryProof, @@ -25,7 +25,6 @@ use bp_messages::{ }; use bp_runtime::{AccountIdOf, BlockNumberOf, Chain, HeaderOf, UnverifiedStorageProofParams}; use bp_test_utils::make_default_justification; -use bp_xcm_bridge_hub::XcmAsPlainPayload; use codec::Encode; use pallet_bridge_grandpa::{BridgedChain, BridgedHeader}; use sp_runtime::traits::Header as HeaderT; diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_parachain.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_parachain.rs index a6659b8241dfd..4d91c82158800 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_parachain.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_parachain.rs @@ -16,7 +16,10 @@ //! Generating test data for bridges with remote parachains. 
-use super::{from_grandpa_chain::make_complex_bridged_grandpa_header_proof, prepare_inbound_xcm}; +use super::{ + from_grandpa_chain::make_complex_bridged_grandpa_header_proof, prepare_inbound_xcm, + XcmAsPlainPayload, +}; use bp_messages::{ source_chain::FromBridgedChainMessagesDeliveryProof, @@ -28,7 +31,6 @@ use bp_runtime::{ AccountIdOf, BlockNumberOf, Chain, HeaderOf, Parachain, UnverifiedStorageProofParams, }; use bp_test_utils::prepare_parachain_heads_proof; -use bp_xcm_bridge_hub::XcmAsPlainPayload; use codec::Encode; use pallet_bridge_grandpa::BridgedHeader; use sp_runtime::traits::Header as HeaderT; diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/mod.rs index c34188af50689..cef3c84b81785 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/mod.rs @@ -35,6 +35,8 @@ use xcm::GetVersion; use xcm_builder::{BridgeMessage, HaulBlob, HaulBlobError, HaulBlobExporter}; use xcm_executor::traits::{validate_export, ExportXcm}; +pub(crate) type XcmAsPlainPayload = sp_std::vec::Vec; + pub fn prepare_inbound_xcm(xcm_message: Xcm<()>, destination: InteriorLocation) -> Vec { let location = xcm::VersionedInteriorLocation::from(destination); let xcm = xcm::VersionedXcm::<()>::from(xcm_message); diff --git a/docs/sdk/src/reference_docs/chain_spec_runtime/src/runtime.rs b/docs/sdk/src/reference_docs/chain_spec_runtime/src/runtime.rs index 282fc1ff489c0..3ffa3d61263f2 100644 --- a/docs/sdk/src/reference_docs/chain_spec_runtime/src/runtime.rs +++ b/docs/sdk/src/reference_docs/chain_spec_runtime/src/runtime.rs @@ -57,7 +57,14 @@ type SignedExtra = (); mod runtime { /// The main runtime type. 
#[runtime::runtime] - #[runtime::derive(RuntimeCall, RuntimeEvent, RuntimeError, RuntimeOrigin, RuntimeTask)] + #[runtime::derive( + RuntimeCall, + RuntimeEvent, + RuntimeError, + RuntimeOrigin, + RuntimeTask, + RuntimeViewFunction + )] pub struct Runtime; /// Mandatory system pallet that should always be included in a FRAME runtime. diff --git a/polkadot/node/primitives/src/lib.rs b/polkadot/node/primitives/src/lib.rs index 1e5ce6489bc82..845daa2679850 100644 --- a/polkadot/node/primitives/src/lib.rs +++ b/polkadot/node/primitives/src/lib.rs @@ -59,7 +59,7 @@ pub use disputes::{ /// relatively rare. /// /// The associated worker binaries should use the same version as the node that spawns them. -pub const NODE_VERSION: &'static str = "1.17.0"; +pub const NODE_VERSION: &'static str = "1.17.1"; // For a 16-ary Merkle Prefix Trie, we can expect at most 16 32-byte hashes per node // plus some overhead: diff --git a/polkadot/node/test/service/src/chain_spec.rs b/polkadot/node/test/service/src/chain_spec.rs index ae4e84b7725e5..ef83c4795dc68 100644 --- a/polkadot/node/test/service/src/chain_spec.rs +++ b/polkadot/node/test/service/src/chain_spec.rs @@ -18,7 +18,8 @@ use pallet_staking::Forcing; use polkadot_primitives::{ - AccountId, AssignmentId, SchedulerParams, ValidatorId, MAX_CODE_SIZE, MAX_POV_SIZE, + node_features, AccountId, AssignmentId, NodeFeatures, SchedulerParams, ValidatorId, + MAX_CODE_SIZE, MAX_POV_SIZE, }; use polkadot_service::chain_spec::Extensions; use polkadot_test_runtime::BABE_GENESIS_EPOCH_CONFIG; @@ -110,6 +111,11 @@ fn polkadot_testnet_genesis( const ENDOWMENT: u128 = 1_000_000 * DOTS; const STASH: u128 = 100 * DOTS; + // Prepare node features with V2 receipts enabled. 
+ let mut node_features = NodeFeatures::new(); + node_features.resize(node_features::FeatureIndex::CandidateReceiptV2 as usize + 1, false); + node_features.set(node_features::FeatureIndex::CandidateReceiptV2 as u8 as usize, true); + serde_json::json!({ "balances": { "balances": endowed_accounts.iter().map(|k| (k.clone(), ENDOWMENT)).collect::>(), @@ -158,6 +164,7 @@ fn polkadot_testnet_genesis( no_show_slots: 10, minimum_validation_upgrade_delay: 5, max_downward_message_size: 1024, + node_features, scheduler_params: SchedulerParams { group_rotation_frequency: 20, paras_availability_period: 4, diff --git a/polkadot/parachain/test-parachains/undying/Cargo.toml b/polkadot/parachain/test-parachains/undying/Cargo.toml index 43b5a3352434f..f8dfc8936c0ef 100644 --- a/polkadot/parachain/test-parachains/undying/Cargo.toml +++ b/polkadot/parachain/test-parachains/undying/Cargo.toml @@ -16,6 +16,7 @@ codec = { features = ["derive"], workspace = true } dlmalloc = { features = ["global"], workspace = true } log = { workspace = true } polkadot-parachain-primitives = { features = ["wasm-api"], workspace = true } +polkadot-primitives = { workspace = true, default-features = false } tiny-keccak = { features = ["keccak"], workspace = true } # We need to make sure the global allocator is disabled until we have support of full substrate externalities @@ -30,5 +31,6 @@ std = [ "codec/std", "log/std", "polkadot-parachain-primitives/std", + "polkadot-primitives/std", "sp-io/std", ] diff --git a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml index f4e6d4e585427..e26b9f59acd4b 100644 --- a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml +++ b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml @@ -22,6 +22,7 @@ futures-timer = { workspace = true } log = { workspace = true, default-features = true } polkadot-cli = { workspace = true, default-features = true } +polkadot-erasure-coding = 
{ workspace = true, default-features = true } polkadot-node-primitives = { workspace = true, default-features = true } polkadot-node-subsystem = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } @@ -29,6 +30,7 @@ polkadot-service = { features = ["rococo-native"], workspace = true, default-fea test-parachain-undying = { workspace = true } sc-cli = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } sc-service = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } diff --git a/polkadot/parachain/test-parachains/undying/collator/src/cli.rs b/polkadot/parachain/test-parachains/undying/collator/src/cli.rs index 9572887a51a2a..a3de7c80d214a 100644 --- a/polkadot/parachain/test-parachains/undying/collator/src/cli.rs +++ b/polkadot/parachain/test-parachains/undying/collator/src/cli.rs @@ -61,6 +61,15 @@ pub struct ExportGenesisWasmCommand { pub output: Option, } +/// Enum representing different types of malicious behaviors for collators. +#[derive(Debug, Parser, Clone, PartialEq, clap::ValueEnum)] +pub enum MalusType { + /// No malicious behavior. + None, + /// Submit the same collations to all assigned cores. + DuplicateCollations, +} + #[allow(missing_docs)] #[derive(Debug, Parser)] #[group(skip)] @@ -81,6 +90,10 @@ pub struct RunCmd { /// we compute per block. #[arg(long, default_value_t = 1)] pub pvf_complexity: u32, + + /// Specifies the malicious behavior of the collator. 
+ #[arg(long, value_enum, default_value_t = MalusType::None)] + pub malus_type: MalusType, } #[allow(missing_docs)] diff --git a/polkadot/parachain/test-parachains/undying/collator/src/lib.rs b/polkadot/parachain/test-parachains/undying/collator/src/lib.rs index 448c181ae062b..3d5724ae79ba8 100644 --- a/polkadot/parachain/test-parachains/undying/collator/src/lib.rs +++ b/polkadot/parachain/test-parachains/undying/collator/src/lib.rs @@ -17,14 +17,25 @@ //! Collator for the `Undying` test parachain. use codec::{Decode, Encode}; -use futures::channel::oneshot; +use futures::{channel::oneshot, StreamExt}; use futures_timer::Delay; +use polkadot_cli::ProvideRuntimeApi; use polkadot_node_primitives::{ - maybe_compress_pov, Collation, CollationResult, CollationSecondedSignal, CollatorFn, - MaybeCompressedPoV, PoV, Statement, + maybe_compress_pov, AvailableData, Collation, CollationResult, CollationSecondedSignal, + CollatorFn, MaybeCompressedPoV, PoV, Statement, UpwardMessages, }; -use polkadot_primitives::{CollatorId, CollatorPair, Hash}; +use polkadot_node_subsystem::messages::CollatorProtocolMessage; +use polkadot_primitives::{ + vstaging::{ + CandidateDescriptorV2, CandidateReceiptV2, ClaimQueueOffset, DEFAULT_CLAIM_QUEUE_OFFSET, + }, + CandidateCommitments, CollatorId, CollatorPair, CoreIndex, Hash, Id as ParaId, + OccupiedCoreAssumption, +}; +use polkadot_service::{Handle, NewFull, ParachainHost}; +use sc_client_api::client::BlockchainEvents; use sp_core::Pair; + use std::{ collections::HashMap, sync::{ @@ -37,6 +48,8 @@ use test_parachain_undying::{ execute, hash_state, BlockData, GraveyardState, HeadData, StateMismatch, }; +pub const LOG_TARGET: &str = "parachain::undying-collator"; + /// Default PoV size which also drives state size. const DEFAULT_POV_SIZE: usize = 1000; /// Default PVF time complexity - 1 signature per block. 
@@ -52,19 +65,20 @@ fn calculate_head_and_state_for_number( let mut graveyard = vec![0u8; graveyard_size * graveyard_size]; let zombies = 0; let seal = [0u8; 32]; + let core_selector_number = 0; // Ensure a larger compressed PoV. graveyard.iter_mut().enumerate().for_each(|(i, grave)| { *grave = i as u8; }); - let mut state = GraveyardState { index, graveyard, zombies, seal }; + let mut state = GraveyardState { index, graveyard, zombies, seal, core_selector_number }; let mut head = HeadData { number: 0, parent_hash: Hash::default().into(), post_state: hash_state(&state) }; while head.number < number { let block = BlockData { state, tombstones: 1_000, iterations: pvf_complexity }; - let (new_head, new_state) = execute(head.hash(), head.clone(), block)?; + let (new_head, new_state, _) = execute(head.hash(), head.clone(), block)?; head = new_head; state = new_state; } @@ -99,13 +113,14 @@ impl State { let mut graveyard = vec![0u8; graveyard_size * graveyard_size]; let zombies = 0; let seal = [0u8; 32]; + let core_selector_number = 0; // Ensure a larger compressed PoV. graveyard.iter_mut().enumerate().for_each(|(i, grave)| { *grave = i as u8; }); - let state = GraveyardState { index, graveyard, zombies, seal }; + let state = GraveyardState { index, graveyard, zombies, seal, core_selector_number }; let head_data = HeadData { number: 0, parent_hash: Default::default(), post_state: hash_state(&state) }; @@ -123,7 +138,10 @@ impl State { /// Advance the state and produce a new block based on the given `parent_head`. /// /// Returns the new [`BlockData`] and the new [`HeadData`]. 
- fn advance(&mut self, parent_head: HeadData) -> Result<(BlockData, HeadData), StateMismatch> { + fn advance( + &mut self, + parent_head: HeadData, + ) -> Result<(BlockData, HeadData, UpwardMessages), StateMismatch> { self.best_block = parent_head.number; let state = if let Some(state) = self @@ -144,14 +162,15 @@ impl State { // Start with prev state and transaction to execute (place 1000 tombstones). let block = BlockData { state, tombstones: 1000, iterations: self.pvf_complexity }; - let (new_head, new_state) = execute(parent_head.hash(), parent_head, block.clone())?; + let (new_head, new_state, upward_messages) = + execute(parent_head.hash(), parent_head, block.clone())?; let new_head_arc = Arc::new(new_head.clone()); self.head_to_state.insert(new_head_arc.clone(), new_state); self.number_to_head.insert(new_head.number, new_head_arc); - Ok((block, new_head)) + Ok((block, new_head, upward_messages)) } } @@ -175,13 +194,18 @@ impl Collator { let graveyard_size = ((pov_size / std::mem::size_of::()) as f64).sqrt().ceil() as usize; log::info!( + target: LOG_TARGET, "PoV target size: {} bytes. 
Graveyard size: ({} x {})", pov_size, graveyard_size, - graveyard_size + graveyard_size, ); - log::info!("PVF time complexity: {}", pvf_complexity); + log::info!( + target: LOG_TARGET, + "PVF time complexity: {}", + pvf_complexity, + ); Self { state: Arc::new(Mutex::new(State::genesis(graveyard_size, pvf_complexity))), @@ -232,21 +256,32 @@ impl Collator { Box::new(move |relay_parent, validation_data| { let parent = match HeadData::decode(&mut &validation_data.parent_head.0[..]) { Err(err) => { - log::error!("Requested to build on top of malformed head-data: {:?}", err); + log::error!( + target: LOG_TARGET, + "Requested to build on top of malformed head-data: {:?}", + err, + ); return futures::future::ready(None).boxed() }, Ok(p) => p, }; - let (block_data, head_data) = match state.lock().unwrap().advance(parent.clone()) { - Err(err) => { - log::error!("Unable to build on top of {:?}: {:?}", parent, err); - return futures::future::ready(None).boxed() - }, - Ok(x) => x, - }; + let (block_data, head_data, upward_messages) = + match state.lock().unwrap().advance(parent.clone()) { + Err(err) => { + log::error!( + target: LOG_TARGET, + "Unable to build on top of {:?}: {:?}", + parent, + err, + ); + return futures::future::ready(None).boxed() + }, + Ok(x) => x, + }; log::info!( + target: LOG_TARGET, "created a new collation on relay-parent({}): {:?}", relay_parent, head_data, @@ -256,7 +291,7 @@ impl Collator { let pov = PoV { block_data: block_data.encode().into() }; let collation = Collation { - upward_messages: Default::default(), + upward_messages, horizontal_messages: Default::default(), new_validation_code: None, head_data: head_data.encode().into(), @@ -265,10 +300,15 @@ impl Collator { hrmp_watermark: validation_data.relay_parent_number, }; - log::info!("Raw PoV size for collation: {} bytes", pov.block_data.0.len(),); + log::info!( + target: LOG_TARGET, + "Raw PoV size for collation: {} bytes", + pov.block_data.0.len(), + ); let compressed_pov = 
maybe_compress_pov(pov); log::info!( + target: LOG_TARGET, "Compressed PoV size for collation: {} bytes", compressed_pov.block_data.0.len(), ); @@ -285,8 +325,9 @@ impl Collator { Statement::Seconded(s) if s.descriptor.pov_hash() == compressed_pov.hash(), ) { log::error!( + target: LOG_TARGET, "Seconded statement should match our collation: {:?}", - res.statement.payload() + res.statement.payload(), ); } @@ -330,6 +371,259 @@ impl Collator { } } } + + pub fn send_same_collations_to_all_assigned_cores( + &self, + full_node: &NewFull, + mut overseer_handle: Handle, + para_id: ParaId, + ) { + let client = full_node.client.clone(); + + let collation_function = + self.create_collation_function(full_node.task_manager.spawn_handle()); + + full_node + .task_manager + .spawn_handle() + .spawn("malus-undying-collator", None, async move { + // Subscribe to relay chain block import notifications. In each iteration, build a + // collation in response to a block import notification and submits it to all cores + // assigned to the parachain. + let mut import_notifications = client.import_notification_stream(); + + while let Some(notification) = import_notifications.next().await { + let relay_parent = notification.hash; + + // Get the list of cores assigned to the parachain. 
+ let claim_queue = match client.runtime_api().claim_queue(relay_parent) { + Ok(claim_queue) => claim_queue, + Err(error) => { + log::error!( + target: LOG_TARGET, + "Failed to query claim queue runtime API: {error:?}", + ); + continue; + }, + }; + + let claim_queue_offset = ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET); + + let scheduled_cores: Vec<CoreIndex> = claim_queue + .iter() + .filter_map(move |(core_index, paras)| { + paras.get(claim_queue_offset.0 as usize).and_then(|core_para_id| { + (core_para_id == &para_id).then_some(*core_index) + }) + }) + .collect(); + + if scheduled_cores.is_empty() { + log::info!( + target: LOG_TARGET, + "Scheduled cores is empty.", + ); + continue; + } + + if scheduled_cores.len() == 1 { + log::info!( + target: LOG_TARGET, + "Malus collator configured with duplicate collations, but only 1 core assigned. \ + Collator will not do anything malicious.", + ); + } + + // Fetch validation data for the collation. + let validation_data = match client.runtime_api().persisted_validation_data( + relay_parent, + para_id, + OccupiedCoreAssumption::Included, + ) { + Ok(Some(validation_data)) => validation_data, + Ok(None) => { + log::info!( + target: LOG_TARGET, + "Persisted validation data is None.", + ); + continue; + }, + Err(error) => { + log::error!( + target: LOG_TARGET, + "Failed to query persisted validation data runtime API: {error:?}", + ); + continue; + }, + }; + + // Generate the collation. + let collation = + match collation_function(relay_parent, &validation_data).await { + Some(collation) => collation, + None => { + log::info!( + target: LOG_TARGET, + "Collation result is None.", + ); + continue; + }, + } + .collation; + + // Fetch the validation code hash.
+ let validation_code_hash = match client.runtime_api().validation_code_hash( + relay_parent, + para_id, + OccupiedCoreAssumption::Included, + ) { + Ok(Some(validation_code_hash)) => validation_code_hash, + Ok(None) => { + log::info!( + target: LOG_TARGET, + "Validation code hash is None.", + ); + continue; + }, + Err(error) => { + log::error!( + target: LOG_TARGET, + "Failed to query validation code hash runtime API: {error:?}", + ); + continue; + }, + }; + + // Fetch the session index. + let session_index = + match client.runtime_api().session_index_for_child(relay_parent) { + Ok(session_index) => session_index, + Err(error) => { + log::error!( + target: LOG_TARGET, + "Failed to query session index for child runtime API: {error:?}", + ); + continue; + }, + }; + + let persisted_validation_data_hash = validation_data.hash(); + let parent_head_data = validation_data.parent_head.clone(); + let parent_head_data_hash = validation_data.parent_head.hash(); + + // Apply compression to the block data. + let pov = { + let pov = collation.proof_of_validity.into_compressed(); + let encoded_size = pov.encoded_size(); + let max_pov_size = validation_data.max_pov_size as usize; + + // As long as `POV_BOMB_LIMIT` is at least `max_pov_size`, this ensures + // that honest collators never produce a PoV which is uncompressed. + // + // As such, honest collators never produce an uncompressed PoV which starts + // with a compression magic number, which would lead validators to + // reject the collation. + if encoded_size > max_pov_size { + log::error!( + target: LOG_TARGET, + "PoV size {encoded_size} exceeded maximum size of {max_pov_size}", + ); + continue; + } + + pov + }; + + let pov_hash = pov.hash(); + + // Fetch the session info. 
+ let session_info = + match client.runtime_api().session_info(relay_parent, session_index) { + Ok(Some(session_info)) => session_info, + Ok(None) => { + log::info!( + target: LOG_TARGET, + "Session info is None.", + ); + continue; + }, + Err(error) => { + log::error!( + target: LOG_TARGET, + "Failed to query session info runtime API: {error:?}", + ); + continue; + }, + }; + + let n_validators = session_info.validators.len(); + + let available_data = + AvailableData { validation_data, pov: Arc::new(pov.clone()) }; + let chunks = match polkadot_erasure_coding::obtain_chunks_v1( + n_validators, + &available_data, + ) { + Ok(chunks) => chunks, + Err(error) => { + log::error!( + target: LOG_TARGET, + "Failed to obtain chunks v1: {error:?}", + ); + continue; + }, + }; + let erasure_root = polkadot_erasure_coding::branches(&chunks).root(); + + let commitments = CandidateCommitments { + upward_messages: collation.upward_messages, + horizontal_messages: collation.horizontal_messages, + new_validation_code: collation.new_validation_code, + head_data: collation.head_data, + processed_downward_messages: collation.processed_downward_messages, + hrmp_watermark: collation.hrmp_watermark, + }; + + // Submit the same collation to all assigned cores. + for core_index in &scheduled_cores { + let candidate_receipt = CandidateReceiptV2 { + descriptor: CandidateDescriptorV2::new( + para_id, + relay_parent, + *core_index, + session_index, + persisted_validation_data_hash, + pov_hash, + erasure_root, + commitments.head_data.hash(), + validation_code_hash, + ), + commitments_hash: commitments.hash(), + }; + + // We cannot use SubmitCollation here because it includes an additional + // check for the core index by calling `check_core_index`. This check + // enforces that the parachain always selects the correct core by comparing + // the descriptor and commitments core indexes. 
To bypass this check, we are + // simulating the behavior of SubmitCollation while skipping the core index + // validation. + overseer_handle + .send_msg( + CollatorProtocolMessage::DistributeCollation { + candidate_receipt, + parent_head_data_hash, + pov: pov.clone(), + parent_head_data: parent_head_data.clone(), + result_sender: None, + core_index: *core_index, + }, + "Collator", + ) + .await; + } + } + }); + } } use sp_core::traits::SpawnNamed; diff --git a/polkadot/parachain/test-parachains/undying/collator/src/main.rs b/polkadot/parachain/test-parachains/undying/collator/src/main.rs index 017eefe5ee31e..9d993dd818b2f 100644 --- a/polkadot/parachain/test-parachains/undying/collator/src/main.rs +++ b/polkadot/parachain/test-parachains/undying/collator/src/main.rs @@ -29,7 +29,7 @@ use std::{ use test_parachain_undying_collator::Collator; mod cli; -use cli::Cli; +use cli::{Cli, MalusType}; fn main() -> Result<()> { let cli = Cli::from_args(); @@ -105,6 +105,7 @@ fn main() -> Result<()> { .map_err(|e| e.to_string())?; let mut overseer_handle = full_node .overseer_handle + .clone() .expect("Overseer handle should be initialized for collators"); let genesis_head_hex = @@ -120,9 +121,16 @@ fn main() -> Result<()> { let config = CollationGenerationConfig { key: collator.collator_key(), - collator: Some( - collator.create_collation_function(full_node.task_manager.spawn_handle()), - ), + // If the collator is malicious, disable the collation function + // (set to None) and manually handle collation submission later. + collator: if cli.run.malus_type == MalusType::None { + Some( + collator + .create_collation_function(full_node.task_manager.spawn_handle()), + ) + } else { + None + }, para_id, }; overseer_handle @@ -133,6 +141,16 @@ fn main() -> Result<()> { .send_msg(CollatorProtocolMessage::CollateOn(para_id), "Collator") .await; + // If the collator is configured to behave maliciously, simulate the specified + // malicious behavior. 
+ if cli.run.malus_type == MalusType::DuplicateCollations { + collator.send_same_collations_to_all_assigned_cores( + &full_node, + overseer_handle, + para_id, + ); + } + Ok(full_node.task_manager) }) }, diff --git a/polkadot/parachain/test-parachains/undying/collator/tests/integration.rs b/polkadot/parachain/test-parachains/undying/collator/tests/integration.rs index b8e32b13bc9c7..866b2f888f84e 100644 --- a/polkadot/parachain/test-parachains/undying/collator/tests/integration.rs +++ b/polkadot/parachain/test-parachains/undying/collator/tests/integration.rs @@ -19,6 +19,12 @@ // If this test is failing, make sure to run all tests with the `real-overseer` feature being // enabled. + +use polkadot_node_subsystem::TimeoutExt; +use std::time::Duration; + +const TIMEOUT: Duration = Duration::from_secs(120); + #[tokio::test(flavor = "multi_thread")] async fn collating_using_undying_collator() { use polkadot_primitives::Id as ParaId; @@ -82,8 +88,16 @@ async fn collating_using_undying_collator() { .await; // Wait until the parachain has 4 blocks produced. - collator.wait_for_blocks(4).await; + collator + .wait_for_blocks(4) + .timeout(TIMEOUT) + .await + .expect("Timed out waiting for 4 produced blocks"); // Wait until the collator received `12` seconded statements for its collations. 
- collator.wait_for_seconded_collations(12).await; + collator + .wait_for_seconded_collations(12) + .timeout(TIMEOUT) + .await + .expect("Timed out waiting for 12 seconded collations"); } diff --git a/polkadot/parachain/test-parachains/undying/src/lib.rs b/polkadot/parachain/test-parachains/undying/src/lib.rs index e4ec7e99346bb..4f014320d09bb 100644 --- a/polkadot/parachain/test-parachains/undying/src/lib.rs +++ b/polkadot/parachain/test-parachains/undying/src/lib.rs @@ -22,6 +22,10 @@ extern crate alloc; use alloc::vec::Vec; use codec::{Decode, Encode}; +use polkadot_parachain_primitives::primitives::UpwardMessages; +use polkadot_primitives::vstaging::{ + ClaimQueueOffset, CoreSelector, UMPSignal, DEFAULT_CLAIM_QUEUE_OFFSET, UMP_SEPARATOR, +}; use tiny_keccak::{Hasher as _, Keccak}; #[cfg(not(feature = "std"))] @@ -86,6 +90,8 @@ pub struct GraveyardState { pub zombies: u64, // Grave seal. pub seal: [u8; 32], + // Increasing sequence number for core selector. + pub core_selector_number: u8, } /// Block data for this parachain. @@ -119,6 +125,7 @@ pub fn execute_transaction(mut block_data: BlockData) -> GraveyardState { // Chain hash the seals and burn CPU. 
block_data.state.seal = hash_state(&block_data.state); } + block_data.state.core_selector_number = block_data.state.core_selector_number.wrapping_add(1); block_data.state } @@ -133,7 +140,7 @@ pub fn execute( parent_hash: [u8; 32], parent_head: HeadData, block_data: BlockData, -) -> Result<(HeadData, GraveyardState), StateMismatch> { +) -> Result<(HeadData, GraveyardState, UpwardMessages), StateMismatch> { assert_eq!(parent_hash, parent_head.hash()); if hash_state(&block_data.state) != parent_head.post_state { @@ -146,6 +153,16 @@ pub fn execute( return Err(StateMismatch) } + let mut upward_messages: UpwardMessages = Default::default(); + upward_messages.force_push(UMP_SEPARATOR); + upward_messages.force_push( + UMPSignal::SelectCore( + CoreSelector(block_data.state.core_selector_number), + ClaimQueueOffset(DEFAULT_CLAIM_QUEUE_OFFSET), + ) + .encode(), + ); + // We need to clone the block data as the fn will mutate it's state. let new_state = execute_transaction(block_data.clone()); @@ -156,5 +173,6 @@ pub fn execute( post_state: hash_state(&new_state), }, new_state, + upward_messages, )) } diff --git a/polkadot/parachain/test-parachains/undying/src/wasm_validation.rs b/polkadot/parachain/test-parachains/undying/src/wasm_validation.rs index 46b66aa518e49..42917484cfdc2 100644 --- a/polkadot/parachain/test-parachains/undying/src/wasm_validation.rs +++ b/polkadot/parachain/test-parachains/undying/src/wasm_validation.rs @@ -31,13 +31,13 @@ pub extern "C" fn validate_block(params: *const u8, len: usize) -> u64 { let parent_hash = crate::keccak256(¶ms.parent_head.0[..]); - let (new_head, _) = + let (new_head, _, upward_messages) = crate::execute(parent_hash, parent_head, block_data).expect("Executes block"); polkadot_parachain_primitives::write_result(&ValidationResult { head_data: GenericHeadData(new_head.encode()), new_validation_code: None, - upward_messages: alloc::vec::Vec::new().try_into().expect("empty vec fits within bounds"), + upward_messages, 
horizontal_messages: alloc::vec::Vec::new() .try_into() .expect("empty vec fits within bounds"), diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 935b62c23388e..4d5b56bcd911a 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -1613,7 +1613,8 @@ mod runtime { RuntimeHoldReason, RuntimeSlashReason, RuntimeLockId, - RuntimeTask + RuntimeTask, + RuntimeViewFunction )] pub struct Runtime; @@ -1975,6 +1976,12 @@ sp_api::impl_runtime_apis! { } } + impl frame_support::view_functions::runtime_api::RuntimeViewFunction for Runtime { + fn execute_view_function(id: frame_support::view_functions::ViewFunctionId, input: Vec) -> Result, frame_support::view_functions::ViewFunctionDispatchError> { + Runtime::execute_view_function(id, input) + } + } + impl sp_block_builder::BlockBuilder for Runtime { fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { Executive::apply_extrinsic(extrinsic) diff --git a/polkadot/xcm/Cargo.toml b/polkadot/xcm/Cargo.toml index e90354e4e6ac7..f5f824ee409f0 100644 --- a/polkadot/xcm/Cargo.toml +++ b/polkadot/xcm/Cargo.toml @@ -15,7 +15,7 @@ workspace = true array-bytes = { workspace = true, default-features = true } bounded-collections = { features = ["serde"], workspace = true } codec = { features = ["derive", "max-encoded-len"], workspace = true } -derivative = { features = ["use_core"], workspace = true } +derive-where = { workspace = true } environmental = { workspace = true } frame-support = { workspace = true } hex-literal = { workspace = true, default-features = true } diff --git a/polkadot/xcm/src/lib.rs b/polkadot/xcm/src/lib.rs index a41a8e797b0f7..2271835a9a5e8 100644 --- a/polkadot/xcm/src/lib.rs +++ b/polkadot/xcm/src/lib.rs @@ -25,7 +25,7 @@ extern crate alloc; use codec::{Decode, DecodeLimit, Encode, Error as CodecError, Input, MaxEncodedLen}; -use derivative::Derivative; +use derive_where::derive_where; use 
frame_support::dispatch::GetDispatchInfo; use scale_info::TypeInfo; @@ -88,13 +88,7 @@ macro_rules! versioned_type { $(#[$index5:meta])+ V5($v5:ty), }) => { - #[derive(Derivative, Encode, Decode, TypeInfo)] - #[derivative( - Clone(bound = ""), - Eq(bound = ""), - PartialEq(bound = ""), - Debug(bound = "") - )] + #[derive(Clone, Eq, PartialEq, Debug, Encode, Decode, TypeInfo)] #[codec(encode_bound())] #[codec(decode_bound())] #[scale_info(replace_segment("staging_xcm", "xcm"))] @@ -311,8 +305,8 @@ versioned_type! { } /// A single XCM message, together with its version code. -#[derive(Derivative, Encode, Decode, TypeInfo)] -#[derivative(Clone(bound = ""), Eq(bound = ""), PartialEq(bound = ""), Debug(bound = ""))] +#[derive(Encode, Decode, TypeInfo)] +#[derive_where(Clone, Eq, PartialEq, Debug)] #[codec(encode_bound())] #[codec(decode_bound())] #[scale_info(bounds(), skip_type_params(RuntimeCall))] diff --git a/polkadot/xcm/src/v3/mod.rs b/polkadot/xcm/src/v3/mod.rs index b60209a440c62..6ae987a9830f1 100644 --- a/polkadot/xcm/src/v3/mod.rs +++ b/polkadot/xcm/src/v3/mod.rs @@ -28,7 +28,7 @@ use codec::{ MaxEncodedLen, }; use core::{fmt::Debug, result}; -use derivative::Derivative; +use derive_where::derive_where; use scale_info::TypeInfo; mod junction; @@ -57,8 +57,8 @@ pub const VERSION: super::Version = 3; /// An identifier for a query. pub type QueryId = u64; -#[derive(Derivative, Default, Encode, TypeInfo)] -#[derivative(Clone(bound = ""), Eq(bound = ""), PartialEq(bound = ""), Debug(bound = ""))] +#[derive(Default, Encode, TypeInfo)] +#[derive_where(Clone, Eq, PartialEq, Debug)] #[codec(encode_bound())] #[scale_info(bounds(), skip_type_params(Call))] #[scale_info(replace_segment("staging_xcm", "xcm"))] @@ -474,15 +474,8 @@ impl XcmContext { /// /// This is the inner XCM format and is version-sensitive. Messages are typically passed using the /// outer XCM format, known as `VersionedXcm`. 
-#[derive( - Derivative, - Encode, - Decode, - TypeInfo, - xcm_procedural::XcmWeightInfoTrait, - xcm_procedural::Builder, -)] -#[derivative(Clone(bound = ""), Eq(bound = ""), PartialEq(bound = ""), Debug(bound = ""))] +#[derive(Encode, Decode, TypeInfo, xcm_procedural::XcmWeightInfoTrait, xcm_procedural::Builder)] +#[derive_where(Clone, Eq, PartialEq, Debug)] #[codec(encode_bound())] #[codec(decode_bound())] #[scale_info(bounds(), skip_type_params(Call))] diff --git a/polkadot/xcm/src/v4/mod.rs b/polkadot/xcm/src/v4/mod.rs index a0ce551b7608c..66816e2fb6e7e 100644 --- a/polkadot/xcm/src/v4/mod.rs +++ b/polkadot/xcm/src/v4/mod.rs @@ -35,7 +35,7 @@ use codec::{ MaxEncodedLen, }; use core::{fmt::Debug, result}; -use derivative::Derivative; +use derive_where::derive_where; use frame_support::dispatch::GetDispatchInfo; use scale_info::TypeInfo; @@ -65,8 +65,8 @@ pub const VERSION: super::Version = 4; /// An identifier for a query. pub type QueryId = u64; -#[derive(Derivative, Default, Encode, TypeInfo)] -#[derivative(Clone(bound = ""), Eq(bound = ""), PartialEq(bound = ""), Debug(bound = ""))] +#[derive(Default, Encode, TypeInfo)] +#[derive_where(Clone, Eq, PartialEq, Debug)] #[codec(encode_bound())] #[codec(decode_bound())] #[scale_info(bounds(), skip_type_params(Call))] @@ -436,15 +436,8 @@ impl XcmContext { /// /// This is the inner XCM format and is version-sensitive. Messages are typically passed using the /// outer XCM format, known as `VersionedXcm`. 
-#[derive( - Derivative, - Encode, - Decode, - TypeInfo, - xcm_procedural::XcmWeightInfoTrait, - xcm_procedural::Builder, -)] -#[derivative(Clone(bound = ""), Eq(bound = ""), PartialEq(bound = ""), Debug(bound = ""))] +#[derive(Encode, Decode, TypeInfo, xcm_procedural::XcmWeightInfoTrait, xcm_procedural::Builder)] +#[derive_where(Clone, Eq, PartialEq, Debug)] #[codec(encode_bound())] #[codec(decode_bound())] #[scale_info(bounds(), skip_type_params(Call))] diff --git a/polkadot/xcm/src/v5/mod.rs b/polkadot/xcm/src/v5/mod.rs index 21845d07529ef..51f6d839e972a 100644 --- a/polkadot/xcm/src/v5/mod.rs +++ b/polkadot/xcm/src/v5/mod.rs @@ -29,7 +29,7 @@ use codec::{ MaxEncodedLen, }; use core::{fmt::Debug, result}; -use derivative::Derivative; +use derive_where::derive_where; use scale_info::TypeInfo; mod asset; @@ -59,8 +59,8 @@ pub const VERSION: super::Version = 5; /// An identifier for a query. pub type QueryId = u64; -#[derive(Derivative, Default, Encode, TypeInfo)] -#[derivative(Clone(bound = ""), Eq(bound = ""), PartialEq(bound = ""), Debug(bound = ""))] +#[derive(Default, Encode, TypeInfo)] +#[derive_where(Clone, Eq, PartialEq, Debug)] #[codec(encode_bound())] #[codec(decode_bound())] #[scale_info(bounds(), skip_type_params(Call))] @@ -378,15 +378,8 @@ impl XcmContext { /// /// This is the inner XCM format and is version-sensitive. Messages are typically passed using the /// outer XCM format, known as `VersionedXcm`. 
-#[derive( - Derivative, - Encode, - Decode, - TypeInfo, - xcm_procedural::XcmWeightInfoTrait, - xcm_procedural::Builder, -)] -#[derivative(Clone(bound = ""), Eq(bound = ""), PartialEq(bound = ""), Debug(bound = ""))] +#[derive(Encode, Decode, TypeInfo, xcm_procedural::XcmWeightInfoTrait, xcm_procedural::Builder)] +#[derive_where(Clone, Eq, PartialEq, Debug)] #[codec(encode_bound())] #[codec(decode_bound())] #[scale_info(bounds(), skip_type_params(Call))] diff --git a/polkadot/zombienet-sdk-tests/tests/functional/duplicate_collations.rs b/polkadot/zombienet-sdk-tests/tests/functional/duplicate_collations.rs new file mode 100644 index 0000000000000..43420692d32ed --- /dev/null +++ b/polkadot/zombienet-sdk-tests/tests/functional/duplicate_collations.rs @@ -0,0 +1,154 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Test that a parachain using a malus undying collator, sending the same collation to all assigned +// cores, does not break the relay chain and that blocks are included, backed by a normal collator. 
+ +use anyhow::anyhow; + +use crate::helpers::{ + assert_para_throughput, rococo, + rococo::runtime_types::{ + pallet_broker::coretime_interface::CoreAssignment, + polkadot_runtime_parachains::assigner_coretime::PartsOf57600, + }, +}; +use polkadot_primitives::Id as ParaId; +use serde_json::json; +use subxt::{OnlineClient, PolkadotConfig}; +use subxt_signer::sr25519::dev; +use zombienet_sdk::NetworkConfigBuilder; + +const VALIDATOR_COUNT: u8 = 3; + +#[tokio::test(flavor = "multi_thread")] +async fn duplicate_collations_test() -> Result<(), anyhow::Error> { + let _ = env_logger::try_init_from_env( + env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), + ); + + let images = zombienet_sdk::environment::get_images_from_env(); + + let config = NetworkConfigBuilder::new() + .with_relaychain(|r| { + let r = r + .with_chain("rococo-local") + .with_default_command("polkadot") + .with_default_image(images.polkadot.as_str()) + .with_default_args(vec![("-lparachain=debug").into()]) + .with_genesis_overrides(json!({ + "configuration": { + "config": { + "scheduler_params": { + "num_cores": 2 + }, + "async_backing_params": { + "max_candidate_depth": 6 + } + } + } + })) + // Have to set a `with_node` outside of the loop below, so that `r` has the right + // type. 
+ .with_node(|node| node.with_name("validator-0")); + + (1..VALIDATOR_COUNT) + .fold(r, |acc, i| acc.with_node(|node| node.with_name(&format!("validator-{i}")))) + }) + .with_parachain(|p| { + p.with_id(2000) + .with_default_command("undying-collator") + .cumulus_based(false) + .with_default_image( + std::env::var("COL_IMAGE") + .unwrap_or("docker.io/paritypr/colander:latest".to_string()) + .as_str(), + ) + .with_collator(|n| { + n.with_name("normal-collator").with_args(vec![("-lparachain=debug").into()]) + }) + .with_collator(|n| { + n.with_name("malus-collator").with_args(vec![ + ("-lparachain=debug").into(), + ("--malus-type=duplicate-collations").into(), + ]) + }) + }) + .build() + .map_err(|e| { + let errs = e.into_iter().map(|e| e.to_string()).collect::>().join(" "); + anyhow!("config errs: {errs}") + })?; + + let spawn_fn = zombienet_sdk::environment::get_spawn_fn(); + let network = spawn_fn(config).await?; + + let relay_node = network.get_node("validator-0")?; + + let relay_client: OnlineClient = relay_node.wait_client().await?; + let alice = dev::alice(); + + // Assign two extra cores to parachain-2000. + relay_client + .tx() + .sign_and_submit_then_watch_default( + &rococo::tx() + .sudo() + .sudo(rococo::runtime_types::rococo_runtime::RuntimeCall::Utility( + rococo::runtime_types::pallet_utility::pallet::Call::batch { + calls: vec![ + rococo::runtime_types::rococo_runtime::RuntimeCall::Coretime( + rococo::runtime_types::polkadot_runtime_parachains::coretime::pallet::Call::assign_core { + core: 0, + begin: 0, + assignment: vec![(CoreAssignment::Task(2000), PartsOf57600(57600))], + end_hint: None + } + ), + rococo::runtime_types::rococo_runtime::RuntimeCall::Coretime( + rococo::runtime_types::polkadot_runtime_parachains::coretime::pallet::Call::assign_core { + core: 1, + begin: 0, + assignment: vec![(CoreAssignment::Task(2000), PartsOf57600(57600))], + end_hint: None + } + ), + ], + }, + )), + &alice, + ) + .await? 
+ .wait_for_finalized_success() + .await?; + + log::info!("2 more cores assigned to parachain-2000"); + + assert_para_throughput(&relay_client, 15, [(ParaId::from(2000), 40..46)].into_iter().collect()) + .await?; + + // Verify that all validators detect the malicious collator by checking their logs. This check + // must be performed after the para throughput check because the validator group needs to rotate + // at least once. This ensures that all validators have had a chance to detect the malicious + // behavior. + for i in 0..VALIDATOR_COUNT { + let validator_name = &format!("validator-{}", i); + let validator_node = network.get_node(validator_name)?; + validator_node + .wait_log_line_count_with_timeout( + "Candidate core index is invalid: The core index in commitments doesn't match the one in descriptor", + false, + 1_usize, + // Since we have this check after the para throughput check, all validators + // should have already detected the malicious collator, and all expected logs + // should have already appeared, so there is no need to wait more than 1 second. 
+ 1_u64, + ) + .await + .unwrap_or_else(|error| panic!("Expected log not found for {}: {:?}", validator_name, error)); + } + + log::info!("Test finished successfully"); + + Ok(()) +} diff --git a/polkadot/zombienet-sdk-tests/tests/functional/mod.rs b/polkadot/zombienet-sdk-tests/tests/functional/mod.rs index ecdab38e1d286..7e5d313ff68dd 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/mod.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/mod.rs @@ -2,4 +2,5 @@ // SPDX-License-Identifier: Apache-2.0 mod async_backing_6_seconds_rate; +mod duplicate_collations; mod sync_backing; diff --git a/polkadot/zombienet_tests/misc/0002-upgrade-node.toml b/polkadot/zombienet_tests/misc/0002-upgrade-node.toml index 1edb18abcecec..5e5e3719936ab 100644 --- a/polkadot/zombienet_tests/misc/0002-upgrade-node.toml +++ b/polkadot/zombienet_tests/misc/0002-upgrade-node.toml @@ -30,7 +30,7 @@ addToGenesis = true [parachains.collator] name = "collator01" image = "{{COL_IMAGE}}" - command = "undying-collator" + command = "adder-collator" args = ["-lparachain=debug"] [[parachains]] @@ -40,7 +40,7 @@ addToGenesis = true [parachains.collator] name = "collator02" image = "{{COL_IMAGE}}" - command = "undying-collator" + command = "adder-collator" args = ["-lparachain=debug"] [types.Header] diff --git a/prdoc/pr_4722.prdoc b/prdoc/pr_4722.prdoc new file mode 100644 index 0000000000000..a5bdbbeb3df9a --- /dev/null +++ b/prdoc/pr_4722.prdoc @@ -0,0 +1,33 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Implement pallet view functions + +doc: + - audience: Runtime Dev + description: | + Read-only view functions can now be defined on pallets. These functions provide an interface for querying state, + from both outside and inside the runtime. Common queries can be defined on pallets, without users having to + access the storage directly. 
+ + - audience: Runtime User + description: | + Querying the runtime state is now easier with the introduction of pallet view functions. Clients can call commonly + defined view functions rather than accessing the storage directly. These are similar to the Runtime APIs, but + are defined within the runtime itself. + +crates: + - name: frame-support + bump: minor + - name: sp-metadata-ir + bump: major + - name: frame-support-procedural + bump: patch + - name: pallet-example-view-functions + bump: patch + - name: cumulus-pov-validator + bump: none + - name: cumulus-pallet-weight-reclaim + bump: patch + - name: westend-runtime + bump: minor \ No newline at end of file diff --git a/prdoc/pr_6897.prdoc b/prdoc/pr_6897.prdoc new file mode 100644 index 0000000000000..38fd9417f48ab --- /dev/null +++ b/prdoc/pr_6897.prdoc @@ -0,0 +1,7 @@ +title: 'Tracing Log for fork-aware transaction pool' +doc: +- audience: Node Dev + description: Replacement of log crate with tracing crate for better logging. +crates: +- name: sc-transaction-pool + bump: minor \ No newline at end of file diff --git a/prdoc/pr_6924.prdoc b/prdoc/pr_6924.prdoc new file mode 100644 index 0000000000000..dc27bb9adfcba --- /dev/null +++ b/prdoc/pr_6924.prdoc @@ -0,0 +1,19 @@ +title: "malus-collator: implement malicious collator submitting same collation to all backing groups" + +doc: + - audience: Node Dev + description: | + This PR modifies the undying collator to include a malus mode, + enabling it to submit the same collation to all assigned backing groups. + + It also includes a test that spawns a network with the malus collator + and verifies that everything functions correctly. 
+ +crates: + - name: polkadot + bump: none + validate: false + - name: test-parachain-undying + bump: patch + - name: test-parachain-undying-collator + bump: patch diff --git a/prdoc/pr_7198.prdoc b/prdoc/pr_7198.prdoc new file mode 100644 index 0000000000000..15478d9341d68 --- /dev/null +++ b/prdoc/pr_7198.prdoc @@ -0,0 +1,12 @@ +title: '[pallet-revive] implement the block author API ' +doc: +- audience: Runtime Dev + description: This PR implements the block author API method. Runtimes ought to implement + it such that it corresponds to the `coinbase` EVM opcode. +crates: +- name: pallet-revive + bump: major +- name: pallet-revive-fixtures + bump: minor +- name: pallet-revive-uapi + bump: minor diff --git a/prdoc/pr_7324.prdoc b/prdoc/pr_7324.prdoc new file mode 100644 index 0000000000000..e4fb7db781766 --- /dev/null +++ b/prdoc/pr_7324.prdoc @@ -0,0 +1,17 @@ +title: Replace derivative dependency with derive-where +author: conr2d +topic: runtime + +doc: +- audience: Runtime Dev + description: |- + The `derivative` crate, previously used to derive basic traits for structs with + generics or enums, is no longer actively maintained. It has been replaced with + the `derive-where` crate, which offers a more straightforward syntax while + providing the same features as `derivative`. + +crates: + - name: cumulus-pallet-weight-reclaim + bump: patch + - name: staging-xcm + bump: patch diff --git a/prdoc/pr_7338.prdoc b/prdoc/pr_7338.prdoc new file mode 100644 index 0000000000000..20948eb0d52f6 --- /dev/null +++ b/prdoc/pr_7338.prdoc @@ -0,0 +1,10 @@ +title: '[net/libp2p] Use raw `Identify` observed addresses to discover external addresses' +doc: +- audience: Node Dev + description: |- + Instead of using libp2p-provided external address candidates, susceptible to address translation issues, use litep2p-backend approach based on confirming addresses observed by multiple peers as external. + + Fixes https://github.com/paritytech/polkadot-sdk/issues/7207. 
+crates: +- name: sc-network + bump: major diff --git a/prdoc/pr_7359.prdoc b/prdoc/pr_7359.prdoc new file mode 100644 index 0000000000000..e54fcb877d1de --- /dev/null +++ b/prdoc/pr_7359.prdoc @@ -0,0 +1,7 @@ +title: Improve `set_validation_data` error message. +doc: +- audience: Runtime Dev + description: Adds a more elaborate error message to the error that appears when `set_validation_data` is missing in a parachain block. +crates: +- name: cumulus-pallet-parachain-system + bump: patch diff --git a/prdoc/pr_7365.prdoc b/prdoc/pr_7365.prdoc new file mode 100644 index 0000000000000..dcee76e01c789 --- /dev/null +++ b/prdoc/pr_7365.prdoc @@ -0,0 +1,12 @@ +title: Use checked math in frame-balances named_reserve +doc: +- audience: Runtime Dev + description: |- + This PR modifies `named_reserve()` in frame-balances to use checked math instead of defensive saturating math. + + The use of saturating math relies on the assumption that the value will always fit in `u128::MAX`. However, there is nothing preventing the implementing pallet from passing a larger value which overflows. This can happen if the implementing pallet does not validate user input and instead relies on `named_reserve()` to return an error (this saves an additional read) + + This is not a security concern, as the method will subsequently return an error thanks to `>::reserve(who, value)?;`. However, the `defensive_saturating_add` will panic in `--all-features`, creating false positive crashes in fuzzing operations. 
+crates: +- name: pallet-balances + bump: patch diff --git a/prdoc/pr_7378.prdoc b/prdoc/pr_7378.prdoc new file mode 100644 index 0000000000000..8754966d3e82c --- /dev/null +++ b/prdoc/pr_7378.prdoc @@ -0,0 +1,13 @@ +title: fix pre-dispatch PoV underweight for ParasInherent +doc: +- audience: Runtime Dev + description: |- + This should fix the error log related to PoV pre-dispatch weight being lower than post-dispatch for `ParasInherent`: + ``` + ERROR tokio-runtime-worker runtime::frame-support: Post dispatch weight is greater than pre dispatch weight. Pre dispatch weight may underestimating the actual weight. Greater post dispatch weight components are ignored. + Pre dispatch weight: Weight { ref_time: 47793353978, proof_size: 1019 }, + Post dispatch weight: Weight { ref_time: 5030321719, proof_size: 135395 } + ``` +crates: +- name: polkadot-runtime-parachains + bump: patch diff --git a/prdoc/pr_7379.prdoc b/prdoc/pr_7379.prdoc new file mode 100644 index 0000000000000..0bd904346d68d --- /dev/null +++ b/prdoc/pr_7379.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Add support for feature pallet_balances/insecure_zero_ed in benchmarks and testing" + +doc: + - audience: Runtime Dev + description: | + Currently benchmarks and tests on pallet_balances would fail when the feature insecure_zero_ed is enabled. This PR allows to run such benchmark and tests keeping into account the fact that accounts would not be deleted when their balance goes below a threshold. 
+ +crates: + - name: pallet-balances + bump: patch diff --git a/prdoc/pr_7383.prdoc b/prdoc/pr_7383.prdoc new file mode 100644 index 0000000000000..bd421d73ab202 --- /dev/null +++ b/prdoc/pr_7383.prdoc @@ -0,0 +1,12 @@ +title: Bridges small nits/improvements +doc: +- audience: Runtime Dev + description: 'This PR contains small fixes and backwards compatibility issues identified + during work on the larger PR: https://github.com/paritytech/polkadot-sdk/issues/6906.' +crates: +- name: cumulus-pallet-xcmp-queue + bump: patch +- name: pallet-xcm-bridge-hub + bump: minor +- name: bridge-hub-test-utils + bump: minor diff --git a/prdoc/pr_6463.prdoc b/prdoc/stable2412-1/pr_6463.prdoc similarity index 100% rename from prdoc/pr_6463.prdoc rename to prdoc/stable2412-1/pr_6463.prdoc diff --git a/prdoc/pr_6807.prdoc b/prdoc/stable2412-1/pr_6807.prdoc similarity index 100% rename from prdoc/pr_6807.prdoc rename to prdoc/stable2412-1/pr_6807.prdoc diff --git a/prdoc/pr_6825.prdoc b/prdoc/stable2412-1/pr_6825.prdoc similarity index 100% rename from prdoc/pr_6825.prdoc rename to prdoc/stable2412-1/pr_6825.prdoc diff --git a/prdoc/pr_6855.prdoc b/prdoc/stable2412-1/pr_6855.prdoc similarity index 100% rename from prdoc/pr_6855.prdoc rename to prdoc/stable2412-1/pr_6855.prdoc diff --git a/prdoc/pr_6971.prdoc b/prdoc/stable2412-1/pr_6971.prdoc similarity index 100% rename from prdoc/pr_6971.prdoc rename to prdoc/stable2412-1/pr_6971.prdoc diff --git a/prdoc/pr_6973.prdoc b/prdoc/stable2412-1/pr_6973.prdoc similarity index 100% rename from prdoc/pr_6973.prdoc rename to prdoc/stable2412-1/pr_6973.prdoc diff --git a/prdoc/pr_7013.prdoc b/prdoc/stable2412-1/pr_7013.prdoc similarity index 100% rename from prdoc/pr_7013.prdoc rename to prdoc/stable2412-1/pr_7013.prdoc diff --git a/prdoc/pr_7028.prdoc b/prdoc/stable2412-1/pr_7028.prdoc similarity index 100% rename from prdoc/pr_7028.prdoc rename to prdoc/stable2412-1/pr_7028.prdoc diff --git a/prdoc/pr_7050.prdoc 
b/prdoc/stable2412-1/pr_7050.prdoc similarity index 100% rename from prdoc/pr_7050.prdoc rename to prdoc/stable2412-1/pr_7050.prdoc diff --git a/prdoc/stable2412-1/pr_7067.prdoc b/prdoc/stable2412-1/pr_7067.prdoc new file mode 100644 index 0000000000000..ead918fc2e007 --- /dev/null +++ b/prdoc/stable2412-1/pr_7067.prdoc @@ -0,0 +1,25 @@ +title: 'Fix implication order in implementation of `TransactionExtension` for tuple' +doc: +- audience: + - Runtime Dev + - Runtime User + description: |- + Before this PR, the implications were different in the pipeline `(A, B, C)` and `((A, B), C)`. + This PR fixes this behavior and make nested tuple transparant, the implication order of tuple of + tuple is now the same as in a single tuple. + + For runtime users this mean that the implication can be breaking depending on the pipeline used + in the runtime. + + For runtime developers this breaks usage of `TransactionExtension::validate`. + When calling `TransactionExtension::validate` the implication must now implement `Implication` + trait, you can use `TxBaseImplication` to wrap the type and use it as the base implication. + E.g. instead of `&(extension_version, call),` you can write `&TxBaseImplication((extension_version, call))`. 
+ +crates: +- name: sp-runtime + bump: major +- name: pallet-skip-feeless-payment + bump: major +- name: frame-system + bump: major diff --git a/prdoc/pr_7074.prdoc b/prdoc/stable2412-1/pr_7074.prdoc similarity index 100% rename from prdoc/pr_7074.prdoc rename to prdoc/stable2412-1/pr_7074.prdoc diff --git a/prdoc/stable2412-1/pr_7090.prdoc b/prdoc/stable2412-1/pr_7090.prdoc new file mode 100644 index 0000000000000..a665115ce6c72 --- /dev/null +++ b/prdoc/stable2412-1/pr_7090.prdoc @@ -0,0 +1,16 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Snowbridge - Support bridging native ETH + +doc: + - audience: Runtime User + description: + Support Native ETH as an asset type instead of only supporting WETH. WETH is still supported, but adds + support for ETH in the inbound and outbound routers. + +crates: + - name: snowbridge-router-primitives + bump: minor + - name: snowbridge-pallet-inbound-queue-fixtures + bump: minor diff --git a/prdoc/pr_7099.prdoc b/prdoc/stable2412-1/pr_7099.prdoc similarity index 100% rename from prdoc/pr_7099.prdoc rename to prdoc/stable2412-1/pr_7099.prdoc diff --git a/prdoc/pr_7116.prdoc b/prdoc/stable2412-1/pr_7116.prdoc similarity index 100% rename from prdoc/pr_7116.prdoc rename to prdoc/stable2412-1/pr_7116.prdoc diff --git a/prdoc/pr_7133.prdoc b/prdoc/stable2412-1/pr_7133.prdoc similarity index 100% rename from prdoc/pr_7133.prdoc rename to prdoc/stable2412-1/pr_7133.prdoc diff --git a/prdoc/pr_7158.prdoc b/prdoc/stable2412-1/pr_7158.prdoc similarity index 100% rename from prdoc/pr_7158.prdoc rename to prdoc/stable2412-1/pr_7158.prdoc diff --git a/prdoc/pr_7205.prdoc b/prdoc/stable2412-1/pr_7205.prdoc similarity index 100% rename from prdoc/pr_7205.prdoc rename to prdoc/stable2412-1/pr_7205.prdoc diff --git a/prdoc/pr_7222.prdoc b/prdoc/stable2412-1/pr_7222.prdoc similarity index 100% rename from prdoc/pr_7222.prdoc 
rename to prdoc/stable2412-1/pr_7222.prdoc diff --git a/prdoc/pr_7322.prdoc b/prdoc/stable2412-1/pr_7322.prdoc similarity index 100% rename from prdoc/pr_7322.prdoc rename to prdoc/stable2412-1/pr_7322.prdoc diff --git a/prdoc/pr_7344.prdoc b/prdoc/stable2412-1/pr_7344.prdoc similarity index 100% rename from prdoc/pr_7344.prdoc rename to prdoc/stable2412-1/pr_7344.prdoc diff --git a/substrate/.maintain/frame-weight-template.hbs b/substrate/.maintain/frame-weight-template.hbs index b174823b38403..ec9eee205cee3 100644 --- a/substrate/.maintain/frame-weight-template.hbs +++ b/substrate/.maintain/frame-weight-template.hbs @@ -17,7 +17,8 @@ #![allow(unused_imports)] #![allow(missing_docs)] -use frame::weights_prelude::*; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; /// Weight functions needed for `{{pallet}}`. pub trait WeightInfo { diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 220929fdfd838..6b9080c773a04 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -1495,6 +1495,7 @@ impl pallet_revive::Config for Runtime { type ChainId = ConstU64<420_420_420>; type NativeToEthRatio = ConstU32<1_000_000>; // 10^(18 - 12) Eth is 10^18, Native is 10^12. type EthGasEncoder = (); + type FindAuthor = ::FindAuthor; } impl pallet_sudo::Config for Runtime { @@ -2459,7 +2460,8 @@ mod runtime { RuntimeHoldReason, RuntimeSlashReason, RuntimeLockId, - RuntimeTask + RuntimeTask, + RuntimeViewFunction )] pub struct Runtime; @@ -3013,6 +3015,12 @@ impl_runtime_apis! 
{ } } + impl frame_support::view_functions::runtime_api::RuntimeViewFunction for Runtime { + fn execute_view_function(id: frame_support::view_functions::ViewFunctionId, input: Vec) -> Result, frame_support::view_functions::ViewFunctionDispatchError> { + Runtime::execute_view_function(id, input) + } + } + impl sp_block_builder::BlockBuilder for Runtime { fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { Executive::apply_extrinsic(extrinsic) diff --git a/substrate/client/network/src/behaviour.rs b/substrate/client/network/src/behaviour.rs index e2a91e9616688..0f6b1ab345078 100644 --- a/substrate/client/network/src/behaviour.rs +++ b/substrate/client/network/src/behaviour.rs @@ -184,6 +184,7 @@ impl Behaviour { request_response_protocols: Vec, peer_store_handle: Arc, external_addresses: Arc>>, + public_addresses: Vec, connection_limits: ConnectionLimits, ) -> Result { Ok(Self { @@ -192,6 +193,7 @@ impl Behaviour { user_agent, local_public_key, external_addresses, + public_addresses, ), discovery: disco_config.finish(), request_responses: request_responses::RequestResponsesBehaviour::new( diff --git a/substrate/client/network/src/lib.rs b/substrate/client/network/src/lib.rs index 9300cbccc9ad3..f19c4dd2191a1 100644 --- a/substrate/client/network/src/lib.rs +++ b/substrate/client/network/src/lib.rs @@ -291,6 +291,9 @@ pub use service::{ }; pub use types::ProtocolName; +/// Log target for `sc-network`. +const LOG_TARGET: &str = "sub-libp2p"; + /// The maximum allowed number of established connections per peer. 
/// /// Typically, and by design of the network behaviours in this crate, diff --git a/substrate/client/network/src/litep2p/discovery.rs b/substrate/client/network/src/litep2p/discovery.rs index eb571804f30e6..48ec0684e763c 100644 --- a/substrate/client/network/src/litep2p/discovery.rs +++ b/substrate/client/network/src/litep2p/discovery.rs @@ -50,6 +50,7 @@ use schnellru::{ByLength, LruMap}; use std::{ cmp, collections::{HashMap, HashSet, VecDeque}, + iter, num::NonZeroUsize, pin::Pin, sync::Arc, @@ -72,11 +73,9 @@ const GET_RECORD_REDUNDANCY_FACTOR: usize = 4; /// The maximum number of tracked external addresses we allow. const MAX_EXTERNAL_ADDRESSES: u32 = 32; -/// Minimum number of confirmations received before an address is verified. -/// -/// Note: all addresses are confirmed by libp2p on the first encounter. This aims to make -/// addresses a bit more robust. -const MIN_ADDRESS_CONFIRMATIONS: usize = 2; +/// Number of times observed address is received from different peers before it is confirmed as +/// external. +const MIN_ADDRESS_CONFIRMATIONS: usize = 3; /// Discovery events. #[derive(Debug)] @@ -509,7 +508,7 @@ impl Discovery { .flatten() .flatten(); - self.address_confirmations.insert(address.clone(), Default::default()); + self.address_confirmations.insert(address.clone(), iter::once(peer).collect()); return (false, oldest) }, diff --git a/substrate/client/network/src/peer_info.rs b/substrate/client/network/src/peer_info.rs index a673f06fd6225..29544b8be70aa 100644 --- a/substrate/client/network/src/peer_info.rs +++ b/substrate/client/network/src/peer_info.rs @@ -19,7 +19,7 @@ //! [`PeerInfoBehaviour`] is implementation of `NetworkBehaviour` that holds information about peers //! in cache. 
-use crate::utils::interval; +use crate::{utils::interval, LOG_TARGET}; use either::Either; use fnv::FnvHashMap; @@ -31,24 +31,26 @@ use libp2p::{ Info as IdentifyInfo, }, identity::PublicKey, + multiaddr::Protocol, ping::{Behaviour as Ping, Config as PingConfig, Event as PingEvent}, swarm::{ behaviour::{ - AddressChange, ConnectionClosed, ConnectionEstablished, DialFailure, - ExternalAddrConfirmed, FromSwarm, ListenFailure, + AddressChange, ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm, + ListenFailure, }, ConnectionDenied, ConnectionHandler, ConnectionHandlerSelect, ConnectionId, - NetworkBehaviour, NewExternalAddrCandidate, THandler, THandlerInEvent, THandlerOutEvent, - ToSwarm, + NetworkBehaviour, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }, Multiaddr, PeerId, }; -use log::{debug, error, trace}; +use log::{debug, error, trace, warn}; use parking_lot::Mutex; +use schnellru::{ByLength, LruMap}; use smallvec::SmallVec; use std::{ collections::{hash_map::Entry, HashSet, VecDeque}, + iter, pin::Pin, sync::Arc, task::{Context, Poll}, @@ -59,6 +61,11 @@ use std::{ const CACHE_EXPIRE: Duration = Duration::from_secs(10 * 60); /// Interval at which we perform garbage collection on the node info. const GARBAGE_COLLECT_INTERVAL: Duration = Duration::from_secs(2 * 60); +/// The maximum number of tracked external addresses we allow. +const MAX_EXTERNAL_ADDRESSES: u32 = 32; +/// Number of times observed address is received from different peers before it is confirmed as +/// external. +const MIN_ADDRESS_CONFIRMATIONS: usize = 3; /// Implementation of `NetworkBehaviour` that holds information about peers in cache. pub struct PeerInfoBehaviour { @@ -70,7 +77,16 @@ pub struct PeerInfoBehaviour { nodes_info: FnvHashMap, /// Interval at which we perform garbage collection in `nodes_info`. garbage_collect: Pin + Send>>, + /// PeerId of the local node. + local_peer_id: PeerId, + /// Public addresses supplied by the operator. Never expire. 
+ public_addresses: Vec, + /// Listen addresses. External addresses matching listen addresses never expire. + listen_addresses: HashSet, + /// External address confirmations. + address_confirmations: LruMap>, /// Record keeping of external addresses. Data is queried by the `NetworkService`. + /// The addresses contain the `/p2p/...` part with local peer ID. external_addresses: ExternalAddresses, /// Pending events to emit to [`Swarm`](libp2p::swarm::Swarm). pending_actions: VecDeque>>, @@ -106,13 +122,13 @@ pub struct ExternalAddresses { impl ExternalAddresses { /// Add an external address. - pub fn add(&mut self, addr: Multiaddr) { - self.addresses.lock().insert(addr); + pub fn add(&mut self, addr: Multiaddr) -> bool { + self.addresses.lock().insert(addr) } /// Remove an external address. - pub fn remove(&mut self, addr: &Multiaddr) { - self.addresses.lock().remove(addr); + pub fn remove(&mut self, addr: &Multiaddr) -> bool { + self.addresses.lock().remove(addr) } } @@ -122,9 +138,10 @@ impl PeerInfoBehaviour { user_agent: String, local_public_key: PublicKey, external_addresses: Arc>>, + public_addresses: Vec, ) -> Self { let identify = { - let cfg = IdentifyConfig::new("/substrate/1.0".to_string(), local_public_key) + let cfg = IdentifyConfig::new("/substrate/1.0".to_string(), local_public_key.clone()) .with_agent_version(user_agent) // We don't need any peer information cached. 
.with_cache_size(0); @@ -136,6 +153,10 @@ impl PeerInfoBehaviour { identify, nodes_info: FnvHashMap::default(), garbage_collect: Box::pin(interval(GARBAGE_COLLECT_INTERVAL)), + local_peer_id: local_public_key.to_peer_id(), + public_addresses, + listen_addresses: HashSet::new(), + address_confirmations: LruMap::new(ByLength::new(MAX_EXTERNAL_ADDRESSES)), external_addresses: ExternalAddresses { addresses: external_addresses }, pending_actions: Default::default(), } @@ -158,25 +179,137 @@ impl PeerInfoBehaviour { ping_time: Duration, connection: ConnectionId, ) { - trace!(target: "sub-libp2p", "Ping time with {:?} via {:?}: {:?}", peer_id, connection, ping_time); + trace!(target: LOG_TARGET, "Ping time with {:?} via {:?}: {:?}", peer_id, connection, ping_time); if let Some(entry) = self.nodes_info.get_mut(peer_id) { entry.latest_ping = Some(ping_time); } else { - error!(target: "sub-libp2p", + error!(target: LOG_TARGET, "Received ping from node we're not connected to {:?} via {:?}", peer_id, connection); } } - /// Inserts an identify record in the cache. Has no effect if we don't have any entry for that - /// node, which shouldn't happen. + /// Ensure address has the `/p2p/...` part with local peer id. Returns `Err` if the address + /// already contains a different peer id. + fn with_local_peer_id(&self, address: Multiaddr) -> Result { + if let Some(Protocol::P2p(peer_id)) = address.iter().last() { + if peer_id == self.local_peer_id { + Ok(address) + } else { + Err(address) + } + } else { + Ok(address.with(Protocol::P2p(self.local_peer_id))) + } + } + + /// Inserts an identify record in the cache & discovers external addresses when multiple + /// peers report the same address as observed. 
fn handle_identify_report(&mut self, peer_id: &PeerId, info: &IdentifyInfo) { - trace!(target: "sub-libp2p", "Identified {:?} => {:?}", peer_id, info); + trace!(target: LOG_TARGET, "Identified {:?} => {:?}", peer_id, info); if let Some(entry) = self.nodes_info.get_mut(peer_id) { entry.client_version = Some(info.agent_version.clone()); } else { - error!(target: "sub-libp2p", - "Received pong from node we're not connected to {:?}", peer_id); + error!(target: LOG_TARGET, + "Received identify message from node we're not connected to {peer_id:?}"); + } + // Discover external addresses. + match self.with_local_peer_id(info.observed_addr.clone()) { + Ok(observed_addr) => { + let (is_new, expired) = self.is_new_external_address(&observed_addr, *peer_id); + if is_new && self.external_addresses.add(observed_addr.clone()) { + trace!( + target: LOG_TARGET, + "Observed address reported by Identify confirmed as external {}", + observed_addr, + ); + self.pending_actions.push_back(ToSwarm::ExternalAddrConfirmed(observed_addr)); + } + if let Some(expired) = expired { + trace!(target: LOG_TARGET, "Removing replaced external address: {expired}"); + self.external_addresses.remove(&expired); + self.pending_actions.push_back(ToSwarm::ExternalAddrExpired(expired)); + } + }, + Err(addr) => { + warn!( + target: LOG_TARGET, + "Identify reported observed address for a peer that is not us: {addr}", + ); + }, + } + } + + /// Check if addresses are equal taking into account they can contain or not contain + /// the `/p2p/...` part. + fn is_same_address(left: &Multiaddr, right: &Multiaddr) -> bool { + let mut left = left.iter(); + let mut right = right.iter(); + + loop { + match (left.next(), right.next()) { + (None, None) => return true, + (None, Some(Protocol::P2p(_))) => return true, + (Some(Protocol::P2p(_)), None) => return true, + (left, right) if left != right => return false, + _ => {}, + } + } + } + + /// Check if `address` can be considered a new external address. 
+ /// + /// If this address replaces an older address, the expired address is returned. + fn is_new_external_address( + &mut self, + address: &Multiaddr, + peer_id: PeerId, + ) -> (bool, Option) { + trace!(target: LOG_TARGET, "Verify new external address: {address}"); + + // Public and listen addresses don't count towards discovered external addresses + // and are always confirmed. + // Because they are not kept in the LRU, they are never replaced by discovered + // external addresses. + if self + .listen_addresses + .iter() + .chain(self.public_addresses.iter()) + .any(|known_address| PeerInfoBehaviour::is_same_address(&known_address, &address)) + { + return (true, None) } + + match self.address_confirmations.get(address) { + Some(confirmations) => { + confirmations.insert(peer_id); + + if confirmations.len() >= MIN_ADDRESS_CONFIRMATIONS { + return (true, None) + } + }, + None => { + let oldest = (self.address_confirmations.len() >= + self.address_confirmations.limiter().max_length() as usize) + .then(|| { + self.address_confirmations.pop_oldest().map(|(address, peers)| { + if peers.len() >= MIN_ADDRESS_CONFIRMATIONS { + return Some(address) + } else { + None + } + }) + }) + .flatten() + .flatten(); + + self.address_confirmations + .insert(address.clone(), iter::once(peer_id).collect()); + + return (false, oldest) + }, + } + + (false, None) } } @@ -346,7 +479,7 @@ impl NetworkBehaviour for PeerInfoBehaviour { } entry.endpoints.retain(|ep| ep != endpoint) } else { - error!(target: "sub-libp2p", + error!(target: LOG_TARGET, "Unknown connection to {:?} closed: {:?}", peer_id, endpoint); } }, @@ -400,28 +533,36 @@ impl NetworkBehaviour for PeerInfoBehaviour { self.ping.on_swarm_event(FromSwarm::NewListener(e)); self.identify.on_swarm_event(FromSwarm::NewListener(e)); }, + FromSwarm::NewListenAddr(e) => { + self.ping.on_swarm_event(FromSwarm::NewListenAddr(e)); + self.identify.on_swarm_event(FromSwarm::NewListenAddr(e)); + self.listen_addresses.insert(e.addr.clone()); 
+ }, FromSwarm::ExpiredListenAddr(e) => { self.ping.on_swarm_event(FromSwarm::ExpiredListenAddr(e)); self.identify.on_swarm_event(FromSwarm::ExpiredListenAddr(e)); - self.external_addresses.remove(e.addr); + self.listen_addresses.remove(e.addr); + // Remove matching external address. + match self.with_local_peer_id(e.addr.clone()) { + Ok(addr) => { + self.external_addresses.remove(&addr); + self.pending_actions.push_back(ToSwarm::ExternalAddrExpired(addr)); + }, + Err(addr) => { + warn!( + target: LOG_TARGET, + "Listen address expired with peer ID that is not us: {addr}", + ); + }, + } }, - FromSwarm::NewExternalAddrCandidate(e @ NewExternalAddrCandidate { addr }) => { + FromSwarm::NewExternalAddrCandidate(e) => { self.ping.on_swarm_event(FromSwarm::NewExternalAddrCandidate(e)); self.identify.on_swarm_event(FromSwarm::NewExternalAddrCandidate(e)); - - // Manually confirm all external address candidates. - // TODO: consider adding [AutoNAT protocol](https://docs.rs/libp2p/0.52.3/libp2p/autonat/index.html) - // (must go through the polkadot protocol spec) or implemeting heuristics for - // approving external address candidates. This can be done, for example, by - // approving only addresses reported by multiple peers. - // See also https://github.com/libp2p/rust-libp2p/pull/4721 introduced - // in libp2p v0.53 for heuristics approach. - self.pending_actions.push_back(ToSwarm::ExternalAddrConfirmed(addr.clone())); }, - FromSwarm::ExternalAddrConfirmed(e @ ExternalAddrConfirmed { addr }) => { + FromSwarm::ExternalAddrConfirmed(e) => { self.ping.on_swarm_event(FromSwarm::ExternalAddrConfirmed(e)); self.identify.on_swarm_event(FromSwarm::ExternalAddrConfirmed(e)); - self.external_addresses.add(addr.clone()); }, FromSwarm::AddressChange(e @ AddressChange { peer_id, old, new, .. 
}) => { self.ping.on_swarm_event(FromSwarm::AddressChange(e)); @@ -431,20 +572,16 @@ impl NetworkBehaviour for PeerInfoBehaviour { if let Some(endpoint) = entry.endpoints.iter_mut().find(|e| e == &old) { *endpoint = new.clone(); } else { - error!(target: "sub-libp2p", + error!(target: LOG_TARGET, "Unknown address change for peer {:?} from {:?} to {:?}", peer_id, old, new); } } else { - error!(target: "sub-libp2p", + error!(target: LOG_TARGET, "Unknown peer {:?} to change address from {:?} to {:?}", peer_id, old, new); } }, - FromSwarm::NewListenAddr(e) => { - self.ping.on_swarm_event(FromSwarm::NewListenAddr(e)); - self.identify.on_swarm_event(FromSwarm::NewListenAddr(e)); - }, event => { - debug!(target: "sub-libp2p", "New unknown `FromSwarm` libp2p event: {event:?}"); + debug!(target: LOG_TARGET, "New unknown `FromSwarm` libp2p event: {event:?}"); self.ping.on_swarm_event(event); self.identify.on_swarm_event(event); }, @@ -497,7 +634,7 @@ impl NetworkBehaviour for PeerInfoBehaviour { }, IdentifyEvent::Error { connection_id, peer_id, error } => { debug!( - target: "sub-libp2p", + target: LOG_TARGET, "Identification with peer {peer_id:?}({connection_id}) failed => {error}" ); }, diff --git a/substrate/client/network/src/service.rs b/substrate/client/network/src/service.rs index 751183ae19a9d..b4463ad480891 100644 --- a/substrate/client/network/src/service.rs +++ b/substrate/client/network/src/service.rs @@ -516,6 +516,7 @@ where request_response_protocols, Arc::clone(&peer_store_handle), external_addresses.clone(), + network_config.public_addresses.iter().cloned().map(Into::into).collect(), ConnectionLimits::default() .with_max_established_per_peer(Some(crate::MAX_CONNECTIONS_PER_PEER as u32)) .with_max_established_incoming(Some( diff --git a/substrate/client/transaction-pool/Cargo.toml b/substrate/client/transaction-pool/Cargo.toml index 72586b984920b..26bbf58f1522d 100644 --- a/substrate/client/transaction-pool/Cargo.toml +++ 
b/substrate/client/transaction-pool/Cargo.toml @@ -40,6 +40,7 @@ sp-transaction-pool = { workspace = true, default-features = true } thiserror = { workspace = true } tokio = { workspace = true, default-features = true, features = ["macros", "time"] } tokio-stream = { workspace = true } +tracing = { workspace = true, default-features = true } [dev-dependencies] array-bytes = { workspace = true, default-features = true } diff --git a/substrate/client/transaction-pool/src/common/mod.rs b/substrate/client/transaction-pool/src/common/mod.rs index fb280e8780ad4..446a5c2ec0225 100644 --- a/substrate/client/transaction-pool/src/common/mod.rs +++ b/substrate/client/transaction-pool/src/common/mod.rs @@ -25,6 +25,7 @@ pub(crate) mod log_xt; pub(crate) mod metrics; #[cfg(test)] pub(crate) mod tests; +pub(crate) mod tracing_log_xt; use futures::StreamExt; use std::sync::Arc; diff --git a/substrate/client/transaction-pool/src/common/tracing_log_xt.rs b/substrate/client/transaction-pool/src/common/tracing_log_xt.rs new file mode 100644 index 0000000000000..4d1c5d09cc7ac --- /dev/null +++ b/substrate/client/transaction-pool/src/common/tracing_log_xt.rs @@ -0,0 +1,69 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! 
Utility for logging transaction collections with tracing crate. + +/// Logs every transaction from given `tx_collection` with given level. +macro_rules! log_xt { + (data: hash, target: $target:expr, $level:expr, $tx_collection:expr, $text_with_format:expr) => { + for tx in $tx_collection { + tracing::event!( + $level, + target = $target, + tx_hash = format!("{:?}", tx), + $text_with_format, + ); + } + }; + (data: hash, target: $target:expr, $level:expr, $tx_collection:expr, $text_with_format:expr, $($arg:expr),*) => { + for tx in $tx_collection { + tracing::event!( + $level, + target = $target, + tx_hash = format!("{:?}", tx), + $text_with_format, + $($arg),* + ); + } + }; + (data: tuple, target: $target:expr, $level:expr, $tx_collection:expr, $text_with_format:expr) => { + for tx in $tx_collection { + tracing::event!( + $level, + target = $target, + tx_hash = format!("{:?}", tx.0), + $text_with_format, + tx.1 + ); + } + }; +} +macro_rules! log_xt_trace { + (data: $datatype:ident, target: $target:expr, $($arg:tt)+) => { + $crate::common::tracing_log_xt::log_xt!(data: $datatype, target: $target, tracing::Level::TRACE, $($arg)+); + }; + (target: $target:expr, $tx_collection:expr, $text_with_format:expr) => { + $crate::common::tracing_log_xt::log_xt!(data: hash, target: $target, tracing::Level::TRACE, $tx_collection, $text_with_format); + }; + (target: $target:expr, $tx_collection:expr, $text_with_format:expr, $($arg:expr)*) => { + $crate::common::tracing_log_xt::log_xt!(data: hash, target: $target, tracing::Level::TRACE, $tx_collection, $text_with_format, $($arg)*); + }; +} + +pub(crate) use log_xt; +pub(crate) use log_xt_trace; diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs index bf61558b00b0d..3588645344ba5 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs +++ 
b/substrate/client/transaction-pool/src/fork_aware_txpool/dropped_watcher.rs @@ -22,13 +22,12 @@ //! by any view are detected and properly notified. use crate::{ - common::log_xt::log_xt_trace, + common::tracing_log_xt::log_xt_trace, fork_aware_txpool::stream_map_util::next_event, graph::{self, BlockHash, ExtrinsicHash}, LOG_TARGET, }; use futures::stream::StreamExt; -use log::{debug, trace}; use sc_transaction_pool_api::TransactionStatus; use sc_utils::mpsc; use sp_runtime::traits::Block as BlockT; @@ -41,6 +40,7 @@ use std::{ pin::Pin, }; use tokio_stream::StreamMap; +use tracing::{debug, trace}; /// Represents a transaction that was removed from the transaction pool, including the reason of its /// removal. @@ -225,7 +225,7 @@ where log_xt_trace!( target: LOG_TARGET, xts.clone(), - "[{:?}] dropped_watcher: finalized xt removed" + "dropped_watcher: finalized xt removed" ); xts.iter().for_each(|xt| { self.ready_transaction_views.remove(xt); @@ -279,7 +279,7 @@ where return Some(DroppedTransaction::new_enforced_by_limts(tx_hash)) } } else { - debug!("[{:?}] dropped_watcher: removing (non-tracked) tx", tx_hash); + debug!(target: LOG_TARGET, ?tx_hash, "dropped_watcher: removing (non-tracked) tx"); return Some(DroppedTransaction::new_enforced_by_limts(tx_hash)) } }, diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index 7660457182520..c609ee2da22e5 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -29,7 +29,7 @@ use super::{ }; use crate::{ api::FullChainApi, - common::log_xt::log_xt_trace, + common::tracing_log_xt::log_xt_trace, enactment_state::{EnactmentAction, EnactmentState}, fork_aware_txpool::{ dropped_watcher::{DroppedReason, DroppedTransaction}, @@ -70,6 +70,7 @@ use std::{ time::Instant, }; use tokio::select; +use 
tracing::{debug, info, trace, warn}; /// Fork aware transaction pool task, that needs to be polled. pub type ForkAwareTxPoolTask = Pin + Send>>; @@ -105,10 +106,10 @@ where /// /// `ready_iterator` is a closure that generates the result data to be sent to the pollers. fn trigger(&mut self, at: Block::Hash, ready_iterator: impl Fn() -> T) { - log::trace!(target: LOG_TARGET, "fatp::trigger {at:?} pending keys: {:?}", self.pollers.keys()); + trace!(target: LOG_TARGET, ?at, keys = ?self.pollers.keys(), "fatp::trigger"); let Some(pollers) = self.pollers.remove(&at) else { return }; pollers.into_iter().for_each(|p| { - log::debug!(target: LOG_TARGET, "trigger ready signal at block {}", at); + debug!(target: LOG_TARGET, "trigger ready signal at block {}", at); let _ = p.send(ready_iterator()); }); } @@ -265,11 +266,16 @@ where ) { loop { let Some(dropped) = dropped_stream.next().await else { - log::debug!(target: LOG_TARGET, "fatp::dropped_monitor_task: terminated..."); + debug!(target: LOG_TARGET, "fatp::dropped_monitor_task: terminated..."); break; }; - let dropped_tx_hash = dropped.tx_hash; - log::trace!(target: LOG_TARGET, "[{:?}] fatp::dropped notification {:?}, removing", dropped_tx_hash,dropped.reason); + let tx_hash = dropped.tx_hash; + trace!( + target: LOG_TARGET, + ?tx_hash, + reason = ?dropped.reason, + "fatp::dropped notification, removing" + ); match dropped.reason { DroppedReason::Usurped(new_tx_hash) => { if let Some(new_tx) = mempool.get_by_hash(new_tx_hash) { @@ -277,24 +283,24 @@ where .replace_transaction( new_tx.source(), new_tx.tx(), - dropped_tx_hash, + tx_hash, new_tx.is_watched(), ) .await; } else { - log::trace!( - target:LOG_TARGET, - "error: dropped_monitor_task: no entry in mempool for new transaction {:?}", - new_tx_hash, + trace!( + target: LOG_TARGET, + tx_hash = ?new_tx_hash, + "error: dropped_monitor_task: no entry in mempool for new transaction" ); } }, DroppedReason::LimitsEnforced => {}, }; - 
mempool.remove_transaction(&dropped_tx_hash); + mempool.remove_transaction(&tx_hash); view_store.listener.transaction_dropped(dropped); - import_notification_sink.clean_notified_items(&[dropped_tx_hash]); + import_notification_sink.clean_notified_items(&[tx_hash]); } } @@ -433,7 +439,11 @@ where pub async fn ready_at_light(&self, at: Block::Hash) -> ReadyIteratorFor { let start = Instant::now(); let api = self.api.clone(); - log::trace!(target: LOG_TARGET, "fatp::ready_at_light {:?}", at); + trace!( + target: LOG_TARGET, + ?at, + "fatp::ready_at_light" + ); let Ok(block_number) = self.api.resolve_block_number(at) else { return Box::new(std::iter::empty()) @@ -465,8 +475,12 @@ where let extrinsics = api .block_body(h.hash) .await - .unwrap_or_else(|e| { - log::warn!(target: LOG_TARGET, "Compute ready light transactions: error request: {}", e); + .unwrap_or_else(|error| { + warn!( + target: LOG_TARGET, + %error, + "Compute ready light transactions: error request" + ); None }) .unwrap_or_default() @@ -487,19 +501,25 @@ where let _ = tmp_view.pool.validated_pool().prune_tags(tags); let after_count = tmp_view.pool.validated_pool().status().ready; - log::debug!(target: LOG_TARGET, - "fatp::ready_at_light {} from {} before: {} to be removed: {} after: {} took:{:?}", - at, - best_view.at.hash, + debug!( + target: LOG_TARGET, + ?at, + best_view_hash = ?best_view.at.hash, before_count, - all_extrinsics.len(), + to_be_removed = all_extrinsics.len(), after_count, - start.elapsed() + duration = ?start.elapsed(), + "fatp::ready_at_light" ); Box::new(tmp_view.pool.validated_pool().ready()) } else { let empty: ReadyIteratorFor = Box::new(std::iter::empty()); - log::debug!(target: LOG_TARGET, "fatp::ready_at_light {} -> empty, took:{:?}", at, start.elapsed()); + debug!( + target: LOG_TARGET, + ?at, + duration = ?start.elapsed(), + "fatp::ready_at_light -> empty" + ); empty } } @@ -519,8 +539,12 @@ where at: Block::Hash, timeout: std::time::Duration, ) -> ReadyIteratorFor { - 
log::debug!(target: LOG_TARGET, "fatp::ready_at_with_timeout at {:?} allowed delay: {:?}", at, timeout); - + debug!( + target: LOG_TARGET, + ?at, + ?timeout, + "fatp::ready_at_with_timeout" + ); let timeout = futures_timer::Delay::new(timeout); let (view_already_exists, ready_at) = self.ready_at_internal(at); @@ -532,10 +556,10 @@ where select! { ready = ready_at => Some(ready), _ = timeout => { - log::warn!(target: LOG_TARGET, - "Timeout fired waiting for transaction pool at block: ({:?}). \ - Proceeding with production.", - at, + warn!( + target: LOG_TARGET, + ?at, + "Timeout fired waiting for transaction pool at block. Proceeding with production." ); None } @@ -555,7 +579,12 @@ where let mut ready_poll = self.ready_poll.lock(); if let Some((view, inactive)) = self.view_store.get_view_at(at, true) { - log::debug!(target: LOG_TARGET, "fatp::ready_at_internal {at:?} (inactive:{inactive:?})"); + debug!( + target: LOG_TARGET, + ?at, + ?inactive, + "fatp::ready_at_internal" + ); let iterator: ReadyIteratorFor = Box::new(view.pool.validated_pool().ready()); return (true, async move { iterator }.boxed()); } @@ -563,15 +592,21 @@ where let pending = ready_poll .add(at) .map(|received| { - received.unwrap_or_else(|e| { - log::warn!(target: LOG_TARGET, "Error receiving ready-set iterator: {:?}", e); + received.unwrap_or_else(|error| { + warn!( + target: LOG_TARGET, + %error, + "Error receiving ready-set iterator" + ); Box::new(std::iter::empty()) }) }) .boxed(); - log::debug!(target: LOG_TARGET, - "fatp::ready_at_internal {at:?} pending keys: {:?}", - ready_poll.pollers.keys() + debug!( + target: LOG_TARGET, + ?at, + pending_keys = ?ready_poll.pollers.keys(), + "fatp::ready_at_internal" ); (false, pending) } @@ -649,8 +684,13 @@ where xts: Vec>, ) -> Result, Self::Error>>, Self::Error> { let view_store = self.view_store.clone(); - log::debug!(target: LOG_TARGET, "fatp::submit_at count:{} views:{}", xts.len(), self.active_views_count()); - log_xt_trace!(target: LOG_TARGET, 
xts.iter().map(|xt| self.tx_hash(xt)), "[{:?}] fatp::submit_at"); + debug!( + target: LOG_TARGET, + count = xts.len(), + active_views_count = self.active_views_count(), + "fatp::submit_at" + ); + log_xt_trace!(target: LOG_TARGET, xts.iter().map(|xt| self.tx_hash(xt)), "fatp::submit_at"); let xts = xts.into_iter().map(Arc::from).collect::>(); let mempool_results = self.mempool.extend_unwatched(source, &xts); @@ -741,7 +781,12 @@ where source: TransactionSource, xt: TransactionFor, ) -> Result, Self::Error> { - log::trace!(target: LOG_TARGET, "[{:?}] fatp::submit_one views:{}", self.tx_hash(&xt), self.active_views_count()); + trace!( + target: LOG_TARGET, + tx_hash = ?self.tx_hash(&xt), + active_views_count = self.active_views_count(), + "fatp::submit_one" + ); match self.submit_at(_at, source, vec![xt]).await { Ok(mut v) => v.pop().expect("There is exactly one element in result of submit_at. qed."), @@ -759,7 +804,12 @@ where source: TransactionSource, xt: TransactionFor, ) -> Result>>, Self::Error> { - log::trace!(target: LOG_TARGET, "[{:?}] fatp::submit_and_watch views:{}", self.tx_hash(&xt), self.active_views_count()); + trace!( + target: LOG_TARGET, + tx_hash = ?self.tx_hash(&xt), + views = self.active_views_count(), + "fatp::submit_and_watch" + ); let xt = Arc::from(xt); let InsertionInfo { hash: xt_hash, source: timed_source, .. } = @@ -791,8 +841,7 @@ where // useful for verification for debugging purposes). 
fn remove_invalid(&self, hashes: &[TxHash]) -> Vec> { if !hashes.is_empty() { - log::debug!(target: LOG_TARGET, "fatp::remove_invalid {}", hashes.len()); - log_xt_trace!(target:LOG_TARGET, hashes, "[{:?}] fatp::remove_invalid"); + log_xt_trace!(target:LOG_TARGET, hashes, "fatp::remove_invalid"); self.metrics .report(|metrics| metrics.removed_invalid_txs.inc_by(hashes.len() as _)); } @@ -842,11 +891,12 @@ where let result = most_recent_view .map(|block_hash| self.view_store.ready_transaction(block_hash, tx_hash)) .flatten(); - log::trace!( + trace!( target: LOG_TARGET, - "[{tx_hash:?}] ready_transaction: {} {:?}", - result.is_some(), - most_recent_view + ?tx_hash, + is_ready = result.is_some(), + ?most_recent_view, + "ready_transaction" ); result } @@ -902,7 +952,11 @@ where _at: Block::Hash, xt: sc_transaction_pool_api::LocalTransactionFor, ) -> Result { - log::debug!(target: LOG_TARGET, "fatp::submit_local views:{}", self.active_views_count()); + debug!( + target: LOG_TARGET, + active_views_count = self.active_views_count(), + "fatp::submit_local" + ); let xt = Arc::from(xt); let result = @@ -947,20 +1001,20 @@ where let hash_and_number = match tree_route.last() { Some(hash_and_number) => hash_and_number, None => { - log::warn!( + warn!( target: LOG_TARGET, - "Skipping ChainEvent - no last block in tree route {:?}", - tree_route, + ?tree_route, + "Skipping ChainEvent - no last block in tree route" ); return }, }; if self.has_view(&hash_and_number.hash) { - log::trace!( + trace!( target: LOG_TARGET, - "view already exists for block: {:?}", - hash_and_number, + ?hash_and_number, + "view already exists for block" ); return } @@ -995,12 +1049,12 @@ where at: &HashAndNumber, tree_route: &TreeRoute, ) -> Option>> { - log::debug!( + debug!( target: LOG_TARGET, - "build_new_view: for: {:?} from: {:?} tree_route: {:?}", - at, - origin_view.as_ref().map(|v| v.at.clone()), - tree_route + ?at, + origin_view_at = ?origin_view.as_ref().map(|v| v.at.clone()), + ?tree_route, + 
"build_new_view" ); let mut view = if let Some(origin_view) = origin_view { let mut view = View::new_from_other(&origin_view, at); @@ -1009,7 +1063,11 @@ where } view } else { - log::debug!(target: LOG_TARGET, "creating non-cloned view: for: {at:?}"); + debug!( + target: LOG_TARGET, + ?at, + "creating non-cloned view" + ); View::new( self.api.clone(), at.clone(), @@ -1037,21 +1095,35 @@ where // sync the transactions statuses and referencing views in all the listeners with newly // cloned view. view.pool.validated_pool().retrigger_notifications(); - log::debug!(target: LOG_TARGET, "register_listeners: at {at:?} took {duration:?}"); + debug!( + target: LOG_TARGET, + ?at, + ?duration, + "register_listeners" + ); // 2. Handle transactions from the tree route. Pruning transactions from the view first // will make some space for mempool transactions in case we are at the view's limits. let start = Instant::now(); self.update_view_with_fork(&view, tree_route, at.clone()).await; let duration = start.elapsed(); - log::debug!(target: LOG_TARGET, "update_view_with_fork: at {at:?} took {duration:?}"); + debug!( + target: LOG_TARGET, + ?at, + ?duration, + "update_view_with_fork" + ); // 3. Finally, submit transactions from the mempool. 
let start = Instant::now(); self.update_view_with_mempool(&mut view, watched_xts).await; let duration = start.elapsed(); - log::debug!(target: LOG_TARGET, "update_view_with_mempool: at {at:?} took {duration:?}"); - + debug!( + target: LOG_TARGET, + ?at, + ?duration, + "update_view_with_mempool" + ); let view = Arc::from(view); self.view_store.insert_new_view(view.clone(), tree_route).await; Some(view) @@ -1074,8 +1146,12 @@ where for h in tree_route.enacted().iter().rev() { api.block_body(h.hash) .await - .unwrap_or_else(|e| { - log::warn!(target: LOG_TARGET, "Compute ready light transactions: error request: {}", e); + .unwrap_or_else(|error| { + warn!( + target: LOG_TARGET, + %error, + "Compute ready light transactions: error request" + ); None }) .unwrap_or_default() @@ -1086,12 +1162,13 @@ where }); } - log::debug!(target: LOG_TARGET, - "fatp::extrinsics_included_since_finalized {} from {} count: {} took:{:?}", - at, - recent_finalized_block, - all_extrinsics.len(), - start.elapsed() + debug!( + target: LOG_TARGET, + ?at, + ?recent_finalized_block, + extrinsics_count = all_extrinsics.len(), + duration = ?start.elapsed(), + "fatp::extrinsics_included_since_finalized" ); all_extrinsics } @@ -1106,12 +1183,12 @@ where &self, view: &View, ) -> Vec<(ExtrinsicHash, Arc>)> { - log::debug!( + debug!( target: LOG_TARGET, - "register_listeners: {:?} xts:{:?} v:{}", - view.at, - self.mempool.unwatched_and_watched_count(), - self.active_views_count() + view_at = ?view.at, + xts_count = ?self.mempool.unwatched_and_watched_count(), + active_views_count = self.active_views_count(), + "register_listeners" ); //todo [#5495]: maybe we don't need to register listener in view? 
We could use @@ -1124,7 +1201,12 @@ where let watcher = view.create_watcher(tx_hash); let at = view.at.clone(); async move { - log::trace!(target: LOG_TARGET, "[{:?}] adding watcher {:?}", tx_hash, at.hash); + trace!( + target: LOG_TARGET, + ?tx_hash, + at = ?at.hash, + "adding watcher" + ); self.view_store.listener.add_view_watcher_for_tx( tx_hash, at.hash, @@ -1156,12 +1238,12 @@ where view: &View, watched_xts: Vec<(ExtrinsicHash, Arc>)>, ) { - log::debug!( + debug!( target: LOG_TARGET, - "update_view_with_mempool: {:?} xts:{:?} v:{}", - view.at, - self.mempool.unwatched_and_watched_count(), - self.active_views_count() + view_at = ?view.at, + xts_count = ?self.mempool.unwatched_and_watched_count(), + active_views_count = self.active_views_count(), + "update_view_with_mempool" ); let included_xts = self.extrinsics_included_since_finalized(view.at.hash).await; @@ -1187,12 +1269,12 @@ where let submitted_count = watched_results.len(); - log::debug!( + debug!( target: LOG_TARGET, - "update_view_with_mempool: at {:?} submitted {}/{}", - view.at.hash, + view_at_hash = ?view.at.hash, submitted_count, - self.mempool.len() + mempool_len = self.mempool.len(), + "update_view_with_mempool" ); self.metrics @@ -1220,7 +1302,12 @@ where tree_route: &TreeRoute, hash_and_number: HashAndNumber, ) { - log::debug!(target: LOG_TARGET, "update_view_with_fork tree_route: {:?} {tree_route:?}", view.at); + debug!( + target: LOG_TARGET, + ?tree_route, + at = ?view.at, + "update_view_with_fork" + ); let api = self.api.clone(); // We keep track of everything we prune so that later we won't add @@ -1249,8 +1336,12 @@ where let block_transactions = api .block_body(hash) .await - .unwrap_or_else(|e| { - log::warn!(target: LOG_TARGET, "Failed to fetch block body: {}", e); + .unwrap_or_else(|error| { + warn!( + target: LOG_TARGET, + %error, + "Failed to fetch block body" + ); None }) .unwrap_or_default() @@ -1269,11 +1360,11 @@ where resubmitted_to_report += 1; if !contains { - log::trace!( + 
trace!( target: LOG_TARGET, - "[{:?}]: Resubmitting from retracted block {:?}", - tx_hash, - hash, + ?tx_hash, + ?hash, + "Resubmitting from retracted block" ); } !contains @@ -1307,8 +1398,13 @@ where /// - purging finalized transactions from the mempool and triggering mempool revalidation, async fn handle_finalized(&self, finalized_hash: Block::Hash, tree_route: &[Block::Hash]) { let finalized_number = self.api.block_id_to_number(&BlockId::Hash(finalized_hash)); - log::debug!(target: LOG_TARGET, "handle_finalized {finalized_number:?} tree_route: {tree_route:?} views_count:{}", self.active_views_count()); - + debug!( + target: LOG_TARGET, + ?finalized_number, + ?tree_route, + active_views_count = self.active_views_count(), + "handle_finalized" + ); let finalized_xts = self.view_store.handle_finalized(finalized_hash, tree_route).await; self.mempool.purge_finalized_transactions(&finalized_xts).await; @@ -1325,11 +1421,19 @@ where ) .await; } else { - log::trace!(target: LOG_TARGET, "purge_transactions_later skipped, cannot find block number {finalized_number:?}"); + trace!( + target: LOG_TARGET, + ?finalized_number, + "purge_transactions_later skipped, cannot find block number" + ); } self.ready_poll.lock().remove_cancelled(); - log::trace!(target: LOG_TARGET, "handle_finalized after views_count:{:?}", self.active_views_count()); + trace!( + target: LOG_TARGET, + active_views_count = self.active_views_count(), + "handle_finalized after" + ); } /// Computes a hash of the provided transaction @@ -1443,7 +1547,11 @@ where /// Executes the maintainance for the given chain event. 
async fn maintain(&self, event: ChainEvent) { let start = Instant::now(); - log::debug!(target: LOG_TARGET, "processing event: {event:?}"); + debug!( + target: LOG_TARGET, + ?event, + "processing event" + ); self.view_store.finish_background_revalidations().await; @@ -1467,8 +1575,12 @@ where .update(&event, &compute_tree_route, &block_id_to_number); match result { - Err(msg) => { - log::trace!(target: LOG_TARGET, "enactment_state::update error: {msg}"); + Err(error) => { + trace!( + target: LOG_TARGET, + %error, + "enactment_state::update error" + ); self.enactment_state.lock().force_update(&event); }, Ok(EnactmentAction::Skip) => return, @@ -1494,23 +1606,25 @@ where ChainEvent::Finalized { hash, ref tree_route } => { self.handle_finalized(hash, tree_route).await; - log::trace!( + trace!( target: LOG_TARGET, - "on-finalized enacted: {tree_route:?}, previously finalized: \ - {prev_finalized_block:?}", + ?tree_route, + ?prev_finalized_block, + "on-finalized enacted" ); }, } - let maintain_duration = start.elapsed(); + let duration = start.elapsed(); - log::info!( + info!( target: LOG_TARGET, - "maintain: txs:{:?} views:[{};{:?}] event:{event:?} took:{:?}", - self.mempool_len(), - self.active_views_count(), - self.views_stats(), - maintain_duration + mempool_len = format!("{:?}", self.mempool_len()), + active_views_count = self.active_views_count(), + views_stats = ?self.views_stats(), + ?event, + ?duration, + "maintain" ); self.metrics.report(|metrics| { @@ -1521,7 +1635,7 @@ where watched.try_into().map(|v| metrics.watched_txs.set(v)), unwatched.try_into().map(|v| metrics.unwatched_txs.set(v)), ); - metrics.maintain_duration.observe(maintain_duration.as_secs_f64()); + metrics.maintain_duration.observe(duration.as_secs_f64()); }); } } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/import_notification_sink.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/import_notification_sink.rs index f9a41673bb8fc..1ca287fa23715 100644 --- 
a/substrate/client/transaction-pool/src/fork_aware_txpool/import_notification_sink.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/import_notification_sink.rs @@ -27,7 +27,6 @@ use futures::{ stream::StreamExt, Future, FutureExt, }; -use log::trace; use parking_lot::RwLock; use sc_utils::mpsc; use std::{ @@ -38,6 +37,7 @@ use std::{ sync::Arc, }; use tokio_stream::StreamMap; +use tracing::trace; /// A type alias for a pinned, boxed stream of items of type `I`. /// This alias is particularly useful for defining the types of the incoming streams from various @@ -109,14 +109,22 @@ where cmd = ctx.command_receiver.next() => { match cmd? { Command::AddView(key,stream) => { - trace!(target: LOG_TARGET,"Command::AddView {key:?}"); + trace!( + target: LOG_TARGET, + ?key, + "Command::AddView" + ); ctx.stream_map.insert(key,stream); }, } }, Some(event) = next_event(&mut ctx.stream_map) => { - trace!(target: LOG_TARGET, "import_notification_sink: select_next_some -> {:?}", event); + trace!( + target: LOG_TARGET, + ?event, + "import_notification_sink: select_next_some" + ); return Some((event.1, ctx)); } } @@ -179,9 +187,17 @@ where async move { if already_notified_items.write().insert(event.clone()) { external_sinks.write().retain_mut(|sink| { - trace!(target: LOG_TARGET, "[{:?}] import_sink_worker sending out imported", event); - if let Err(e) = sink.try_send(event.clone()) { - trace!(target: LOG_TARGET, "import_sink_worker sending message failed: {e}"); + trace!( + target: LOG_TARGET, + ?event, + "import_sink_worker sending out imported" + ); + if let Err(error) = sink.try_send(event.clone()) { + trace!( + target: LOG_TARGET, + %error, + "import_sink_worker sending message failed" + ); false } else { true @@ -199,12 +215,17 @@ where /// The new view's stream is added to the internal aggregated stream context by sending command /// to its `command_receiver`. 
pub fn add_view(&self, key: K, view: StreamOf) { - let _ = self - .controller - .unbounded_send(Command::AddView(key.clone(), view)) - .map_err(|e| { - trace!(target: LOG_TARGET, "add_view {key:?} send message failed: {e}"); - }); + let _ = + self.controller + .unbounded_send(Command::AddView(key.clone(), view)) + .map_err(|error| { + trace!( + target: LOG_TARGET, + ?key, + %error, + "add_view send message failed" + ); + }); } /// Creates and returns a new external stream of ready transactions hashes notifications. diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs index a00234a998082..a513559a7cd53 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs @@ -26,7 +26,6 @@ use crate::{ LOG_TARGET, }; use futures::StreamExt; -use log::{debug, trace}; use sc_transaction_pool_api::{TransactionStatus, TransactionStatusStream, TxIndex}; use sc_utils::mpsc; use sp_runtime::traits::Block as BlockT; @@ -35,6 +34,7 @@ use std::{ pin::Pin, }; use tokio_stream::StreamMap; +use tracing::{debug, trace}; use super::dropped_watcher::{DroppedReason, DroppedTransaction}; @@ -182,9 +182,14 @@ where hash: BlockHash, ) -> Option, BlockHash>> { trace!( - target: LOG_TARGET, "[{:?}] mvl handle event from {hash:?}: {status:?} views:{:?}", self.tx_hash, - self.status_stream_map.keys().collect::>() + target: LOG_TARGET, + tx_hash = ?self.tx_hash, + ?hash, + ?status, + views = ?self.status_stream_map.keys().collect::>(), + "mvl handle event" ); + match status { TransactionStatus::Future => { self.views_keeping_tx_valid.insert(hash); @@ -238,8 +243,9 @@ where ); trace!( target: LOG_TARGET, - "[{:?}] got invalidate_transaction: views:{:?}", self.tx_hash, - self.status_stream_map.keys().collect::>() + tx_hash = ?self.tx_hash, + views = 
?self.status_stream_map.keys().collect::>(), + "got invalidate_transaction" ); if self.views_keeping_tx_valid.is_disjoint(&keys) { self.terminate = true; @@ -261,7 +267,13 @@ where /// the stream map. fn add_stream(&mut self, block_hash: BlockHash, stream: TxStatusStream) { self.status_stream_map.insert(block_hash, stream); - trace!(target: LOG_TARGET, "[{:?}] AddView view: {:?} views:{:?}", self.tx_hash, block_hash, self.status_stream_map.keys().collect::>()); + trace!( + target: LOG_TARGET, + tx_hash = ?self.tx_hash, + ?block_hash, + views = ?self.status_stream_map.keys().collect::>(), + "AddView view" + ); } /// Removes an existing transaction status stream. @@ -271,7 +283,13 @@ where fn remove_view(&mut self, block_hash: BlockHash) { self.status_stream_map.remove(&block_hash); self.views_keeping_tx_valid.remove(&block_hash); - trace!(target: LOG_TARGET, "[{:?}] RemoveView view: {:?} views:{:?}", self.tx_hash, block_hash, self.status_stream_map.keys().collect::>()); + trace!( + target: LOG_TARGET, + tx_hash = ?self.tx_hash, + ?block_hash, + views = ?self.status_stream_map.keys().collect::>(), + "RemoveView view" + ); } } @@ -306,8 +324,11 @@ where return None } - trace!(target: LOG_TARGET, "[{:?}] create_external_watcher_for_tx", tx_hash); - + trace!( + target: LOG_TARGET, + ?tx_hash, + "create_external_watcher_for_tx" + ); let (tx, rx) = mpsc::tracing_unbounded("txpool-multi-view-listener", 32); controllers.insert(tx_hash, tx); @@ -323,14 +344,21 @@ where biased; Some((view_hash, status)) = next_event(&mut ctx.status_stream_map) => { if let Some(new_status) = ctx.handle(status, view_hash) { - log::trace!(target: LOG_TARGET, "[{:?}] mvl sending out: {new_status:?}", ctx.tx_hash); - return Some((new_status, ctx)) + trace!( + target: LOG_TARGET, + tx_hash = ?ctx.tx_hash, + ?new_status, + "mvl sending out" + ); + return Some((new_status, ctx)) } }, cmd = ctx.command_receiver.next() => { - log::trace!(target: LOG_TARGET, "[{:?}] select::rx views:{:?}", - 
ctx.tx_hash, - ctx.status_stream_map.keys().collect::>() + trace!( + target: LOG_TARGET, + tx_hash = ?ctx.tx_hash, + views = ?ctx.status_stream_map.keys().collect::>(), + "select::rx" ); match cmd? { ControllerCommand::AddViewStream(h,stream) => { @@ -341,26 +369,52 @@ where }, ControllerCommand::TransactionInvalidated => { if ctx.handle_invalidate_transaction() { - log::trace!(target: LOG_TARGET, "[{:?}] mvl sending out: Invalid", ctx.tx_hash); + trace!( + target: LOG_TARGET, + tx_hash = ?ctx.tx_hash, + status = "Invalid", + "mvl sending out" + ); return Some((TransactionStatus::Invalid, ctx)) } }, ControllerCommand::FinalizeTransaction(block, index) => { - log::trace!(target: LOG_TARGET, "[{:?}] mvl sending out: Finalized", ctx.tx_hash); + trace!( + target: LOG_TARGET, + tx_hash = ?ctx.tx_hash, + status = "Finalized", + "mvl sending out" + ); ctx.terminate = true; return Some((TransactionStatus::Finalized((block, index)), ctx)) }, ControllerCommand::TransactionBroadcasted(peers) => { - log::trace!(target: LOG_TARGET, "[{:?}] mvl sending out: Broadcasted", ctx.tx_hash); + trace!( + target: LOG_TARGET, + tx_hash = ?ctx.tx_hash, + status = "Broadcasted", + "mvl sending out" + ); return Some((TransactionStatus::Broadcast(peers), ctx)) }, ControllerCommand::TransactionDropped(DroppedReason::LimitsEnforced) => { - log::trace!(target: LOG_TARGET, "[{:?}] mvl sending out: Dropped", ctx.tx_hash); + trace!( + target: LOG_TARGET, + tx_hash = ?ctx.tx_hash, + status = "Dropped", + "mvl sending out" + ); ctx.terminate = true; return Some((TransactionStatus::Dropped, ctx)) }, ControllerCommand::TransactionDropped(DroppedReason::Usurped(by)) => { - log::trace!(target: LOG_TARGET, "[{:?}] mvl sending out: Usurped({:?})", ctx.tx_hash, by); + trace!( + target: LOG_TARGET, + tx_hash = ?ctx.tx_hash, + status = "Usurped", + ?by, + "mvl sending out" + ); ctx.terminate = true; return Some((TransactionStatus::Usurped(by), ctx)) }, @@ -386,11 +440,16 @@ where let mut controllers = 
self.controllers.write(); if let Entry::Occupied(mut tx) = controllers.entry(tx_hash) { - if let Err(e) = tx + if let Err(error) = tx .get_mut() .unbounded_send(ControllerCommand::AddViewStream(block_hash, stream)) { - trace!(target: LOG_TARGET, "[{:?}] add_view_watcher_for_tx: send message failed: {:?}", tx_hash, e); + trace!( + target: LOG_TARGET, + ?tx_hash, + %error, + "add_view_watcher_for_tx: send message failed" + ); tx.remove(); } } @@ -404,9 +463,14 @@ where self.controllers.write().retain(|tx_hash, sender| { sender .unbounded_send(ControllerCommand::RemoveViewStream(block_hash)) - .map_err(|e| { - log::trace!(target: LOG_TARGET, "[{:?}] remove_view: send message failed: {:?}", tx_hash, e); - e + .map_err(|error| { + trace!( + target: LOG_TARGET, + ?tx_hash, + %error, + "remove_view: send message failed" + ); + error }) .is_ok() }); @@ -423,11 +487,20 @@ where let mut controllers = self.controllers.write(); invalid_hashes.iter().for_each(|tx_hash| { if let Entry::Occupied(mut tx) = controllers.entry(*tx_hash) { - trace!(target: LOG_TARGET, "[{:?}] invalidate_transaction", tx_hash); - if let Err(e) = + trace!( + target: LOG_TARGET, + ?tx_hash, + "invalidate_transaction" + ); + if let Err(error) = tx.get_mut().unbounded_send(ControllerCommand::TransactionInvalidated) { - trace!(target: LOG_TARGET, "[{:?}] invalidate_transaction: send message failed: {:?}", tx_hash, e); + trace!( + target: LOG_TARGET, + ?tx_hash, + %error, + "invalidate_transaction: send message failed" + ); tx.remove(); } } @@ -445,9 +518,20 @@ where let mut controllers = self.controllers.write(); propagated.into_iter().for_each(|(tx_hash, peers)| { if let Entry::Occupied(mut tx) = controllers.entry(tx_hash) { - trace!(target: LOG_TARGET, "[{:?}] transaction_broadcasted", tx_hash); - if let Err(e) = tx.get_mut().unbounded_send(ControllerCommand::TransactionBroadcasted(peers)) { - trace!(target: LOG_TARGET, "[{:?}] transactions_broadcasted: send message failed: {:?}", tx_hash, e); + trace!( + 
target: LOG_TARGET, + ?tx_hash, + "transaction_broadcasted" + ); + if let Err(error) = + tx.get_mut().unbounded_send(ControllerCommand::TransactionBroadcasted(peers)) + { + trace!( + target: LOG_TARGET, + ?tx_hash, + %error, + "transactions_broadcasted: send message failed" + ); tx.remove(); } } @@ -460,12 +544,25 @@ where /// transaction prompting and external `Broadcasted` event. pub(crate) fn transaction_dropped(&self, dropped: DroppedTransaction>) { let mut controllers = self.controllers.write(); - debug!(target: LOG_TARGET, "mvl::transaction_dropped: {:?}", dropped); + debug!( + target: LOG_TARGET, + ?dropped, + "mvl::transaction_dropped" + ); if let Some(tx) = controllers.remove(&dropped.tx_hash) { let DroppedTransaction { tx_hash, reason } = dropped; - debug!(target: LOG_TARGET, "[{:?}] transaction_dropped", tx_hash); - if let Err(e) = tx.unbounded_send(ControllerCommand::TransactionDropped(reason)) { - trace!(target: LOG_TARGET, "[{:?}] transaction_dropped: send message failed: {:?}", tx_hash, e); + debug!( + target: LOG_TARGET, + ?tx_hash, + "transaction_dropped" + ); + if let Err(error) = tx.unbounded_send(ControllerCommand::TransactionDropped(reason)) { + trace!( + target: LOG_TARGET, + ?tx_hash, + %error, + "transaction_dropped: send message failed" + ); }; } } @@ -481,9 +578,20 @@ where ) { let mut controllers = self.controllers.write(); if let Some(tx) = controllers.remove(&tx_hash) { - trace!(target: LOG_TARGET, "[{:?}] finalize_transaction", tx_hash); - if let Err(e) = tx.unbounded_send(ControllerCommand::FinalizeTransaction(block, idx)) { - trace!(target: LOG_TARGET, "[{:?}] finalize_transaction: send message failed: {:?}", tx_hash, e); + trace!( + target: LOG_TARGET, + ?tx_hash, + "finalize_transaction" + ); + if let Err(error) = + tx.unbounded_send(ControllerCommand::FinalizeTransaction(block, idx)) + { + trace!( + target: LOG_TARGET, + ?tx_hash, + %error, + "finalize_transaction: send message failed" + ); } }; } @@ -525,7 +633,7 @@ mod tests { 
let out = handle.await.unwrap(); assert_eq!(out, events); - log::debug!("out: {:#?}", out); + debug!("out: {:#?}", out); } #[tokio::test] @@ -560,7 +668,7 @@ mod tests { let out = handle.await.unwrap(); - log::debug!("out: {:#?}", out); + debug!("out: {:#?}", out); assert!(out.iter().all(|v| vec![ TransactionStatus::Future, TransactionStatus::Ready, @@ -600,7 +708,7 @@ mod tests { listener.invalidate_transactions(&[tx_hash]); let out = handle.await.unwrap(); - log::debug!("out: {:#?}", out); + debug!("out: {:#?}", out); assert!(out.iter().all(|v| vec![ TransactionStatus::Future, TransactionStatus::Ready, @@ -654,8 +762,8 @@ mod tests { let out_tx0 = handle0.await.unwrap(); let out_tx1 = handle1.await.unwrap(); - log::debug!("out_tx0: {:#?}", out_tx0); - log::debug!("out_tx1: {:#?}", out_tx1); + debug!("out_tx0: {:#?}", out_tx0); + debug!("out_tx1: {:#?}", out_tx1); assert!(out_tx0.iter().all(|v| vec![ TransactionStatus::Future, TransactionStatus::Ready, @@ -707,7 +815,7 @@ mod tests { listener.invalidate_transactions(&[tx_hash]); let out = handle.await.unwrap(); - log::debug!("out: {:#?}", out); + debug!("out: {:#?}", out); // invalid shall not be sent assert!(out.iter().all(|v| vec![ @@ -740,7 +848,7 @@ mod tests { listener.add_view_watcher_for_tx(tx_hash, block_hash0, view_stream0.boxed()); let out = handle.await.unwrap(); - log::debug!("out: {:#?}", out); + debug!("out: {:#?}", out); assert!(out.iter().all(|v| vec![TransactionStatus::Invalid].contains(v))); assert_eq!(out.len(), 1); diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/revalidation_worker.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/revalidation_worker.rs index e1c65a08a70ba..0025d3e9f2d42 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/revalidation_worker.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/revalidation_worker.rs @@ -30,6 +30,7 @@ use sp_runtime::traits::Block as BlockT; use super::tx_mem_pool::TxMemPool; use 
futures::prelude::*; +use tracing::{trace, warn}; use super::view::{FinishRevalidationWorkerChannels, View}; @@ -131,18 +132,22 @@ where view: Arc>, finish_revalidation_worker_channels: FinishRevalidationWorkerChannels, ) { - log::trace!( + trace!( target: LOG_TARGET, - "revalidation_queue::revalidate_view: Sending view to revalidation queue at {}", - view.at.hash + view_at_hash = ?view.at.hash, + "revalidation_queue::revalidate_view: Sending view to revalidation queue" ); if let Some(ref to_worker) = self.background { - if let Err(e) = to_worker.unbounded_send(WorkerPayload::RevalidateView( + if let Err(error) = to_worker.unbounded_send(WorkerPayload::RevalidateView( view, finish_revalidation_worker_channels, )) { - log::warn!(target: LOG_TARGET, "revalidation_queue::revalidate_view: Failed to update background worker: {:?}", e); + warn!( + target: LOG_TARGET, + ?error, + "revalidation_queue::revalidate_view: Failed to update background worker" + ); } } else { view.revalidate(finish_revalidation_worker_channels).await @@ -161,17 +166,21 @@ where mempool: Arc>, finalized_hash: HashAndNumber, ) { - log::trace!( + trace!( target: LOG_TARGET, - "Sent mempool to revalidation queue at hash: {:?}", - finalized_hash + ?finalized_hash, + "Sent mempool to revalidation queue" ); if let Some(ref to_worker) = self.background { - if let Err(e) = + if let Err(error) = to_worker.unbounded_send(WorkerPayload::RevalidateMempool(mempool, finalized_hash)) { - log::warn!(target: LOG_TARGET, "Failed to update background worker: {:?}", e); + warn!( + target: LOG_TARGET, + ?error, + "Failed to update background worker" + ); } } else { mempool.revalidate(finalized_hash).await diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs index c8a4d0c72dd36..440e77313d3e1 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs +++ 
b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs @@ -26,33 +26,38 @@ //! it), while on other forks tx can be valid. Depending on which view is chosen to be cloned, //! such transaction could not be present in the newly created view. -use super::{ - metrics::MetricsLink as PrometheusMetrics, multi_view_listener::MultiViewListener, - view_store::ViewStoreSubmitOutcome, -}; -use crate::{ - common::log_xt::log_xt_trace, - graph, - graph::{base_pool::TimedTransactionSource, tracked_map::Size, ExtrinsicFor, ExtrinsicHash}, - LOG_TARGET, +use std::{ + cmp::Ordering, + collections::HashMap, + sync::{ + atomic::{self, AtomicU64}, + Arc, + }, + time::Instant, }; + use futures::FutureExt; use itertools::Itertools; use parking_lot::RwLock; +use tracing::{debug, trace}; + use sc_transaction_pool_api::{TransactionPriority, TransactionSource}; use sp_blockchain::HashAndNumber; use sp_runtime::{ traits::Block as BlockT, transaction_validity::{InvalidTransaction, TransactionValidityError}, }; -use std::{ - cmp::Ordering, - collections::HashMap, - sync::{ - atomic::{self, AtomicU64}, - Arc, - }, - time::Instant, + +use crate::{ + common::tracing_log_xt::log_xt_trace, + graph, + graph::{base_pool::TimedTransactionSource, tracked_map::Size, ExtrinsicFor, ExtrinsicHash}, + LOG_TARGET, +}; + +use super::{ + metrics::MetricsLink as PrometheusMetrics, multi_view_listener::MultiViewListener, + view_store::ViewStoreSubmitOutcome, }; /// The minimum interval between single transaction revalidations. Given in blocks. @@ -324,7 +329,7 @@ where /// exceed the maximum allowed transaction count. 
fn try_insert( &self, - hash: ExtrinsicHash, + tx_hash: ExtrinsicHash, tx: TxInMemPool, ) -> Result>, sc_transaction_pool_api::error::Error> { let mut transactions = self.transactions.write(); @@ -333,19 +338,23 @@ where let result = match ( self.is_limit_exceeded(transactions.len() + 1, bytes + tx.bytes), - transactions.contains_key(&hash), + transactions.contains_key(&tx_hash), ) { (false, false) => { let source = tx.source(); - transactions.insert(hash, Arc::from(tx)); - Ok(InsertionInfo::new(hash, source)) + transactions.insert(tx_hash, Arc::from(tx)); + Ok(InsertionInfo::new(tx_hash, source)) }, (_, true) => - Err(sc_transaction_pool_api::error::Error::AlreadyImported(Box::new(hash))), + Err(sc_transaction_pool_api::error::Error::AlreadyImported(Box::new(tx_hash))), (true, _) => Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped), }; - log::trace!(target: LOG_TARGET, "[{:?}] mempool::try_insert: {:?}", hash, result.as_ref().map(|r| r.hash)); - + trace!( + target: LOG_TARGET, + ?tx_hash, + result_hash = ?result.as_ref().map(|r| r.hash), + "mempool::try_insert" + ); result } @@ -486,17 +495,21 @@ where /// Removes a transaction with given hash from the memory pool. pub(super) fn remove_transaction( &self, - hash: &ExtrinsicHash, + tx_hash: &ExtrinsicHash, ) -> Option>> { - log::debug!(target: LOG_TARGET, "[{hash:?}] mempool::remove_transaction"); - self.transactions.write().remove(hash) + debug!(target: LOG_TARGET, ?tx_hash, "mempool::remove_transaction"); + self.transactions.write().remove(tx_hash) } /// Revalidates a batch of transactions against the provided finalized block. /// /// Returns a vector of invalid transaction hashes. 
async fn revalidate_inner(&self, finalized_block: HashAndNumber) -> Vec { - log::trace!(target: LOG_TARGET, "mempool::revalidate at:{finalized_block:?}"); + trace!( + target: LOG_TARGET, + ?finalized_block, + "mempool::revalidate" + ); let start = Instant::now(); let (count, input) = { @@ -533,26 +546,31 @@ where let invalid_hashes = validation_results .into_iter() - .filter_map(|(xt_hash, validation_result)| match validation_result { + .filter_map(|(tx_hash, validation_result)| match validation_result { Ok(Ok(_)) | Ok(Err(TransactionValidityError::Invalid(InvalidTransaction::Future))) => None, Err(_) | Ok(Err(TransactionValidityError::Unknown(_))) | Ok(Err(TransactionValidityError::Invalid(_))) => { - log::trace!( + trace!( target: LOG_TARGET, - "[{:?}]: Purging: invalid: {:?}", - xt_hash, - validation_result, + ?tx_hash, + ?validation_result, + "Purging: invalid" ); - Some(xt_hash) + Some(tx_hash) }, }) .collect::>(); - log::debug!( + debug!( target: LOG_TARGET, - "mempool::revalidate: at {finalized_block:?} count:{input_len}/{count} invalid_hashes:{} took {duration:?}", invalid_hashes.len(), + ?finalized_block, + input_len, + count, + invalid_hashes = invalid_hashes.len(), + ?duration, + "mempool::revalidate" ); invalid_hashes @@ -563,8 +581,12 @@ where &self, finalized_xts: &Vec>, ) { - log::debug!(target: LOG_TARGET, "purge_finalized_transactions count:{:?}", finalized_xts.len()); - log_xt_trace!(target: LOG_TARGET, finalized_xts, "[{:?}] purged finalized transactions"); + debug!( + target: LOG_TARGET, + count = finalized_xts.len(), + "purge_finalized_transactions" + ); + log_xt_trace!(target: LOG_TARGET, finalized_xts, "purged finalized transactions"); let mut transactions = self.transactions.write(); finalized_xts.iter().for_each(|t| { transactions.remove(t); @@ -574,7 +596,11 @@ where /// Revalidates transactions in the memory pool against a given finalized block and removes /// invalid ones. 
pub(super) async fn revalidate(&self, finalized_block: HashAndNumber) { - log::trace!(target: LOG_TARGET, "purge_transactions at:{:?}", finalized_block); + trace!( + target: LOG_TARGET, + ?finalized_block, + "purge_transactions" + ); let invalid_hashes = self.revalidate_inner(finalized_block.clone()).await; self.metrics.report(|metrics| { @@ -602,10 +628,13 @@ where #[cfg(test)] mod tx_mem_pool_tests { - use super::*; - use crate::{common::tests::TestApi, graph::ChainApi}; use substrate_test_runtime::{AccountId, Extrinsic, ExtrinsicBuilder, Transfer, H256}; use substrate_test_runtime_client::Sr25519Keyring::*; + + use crate::{common::tests::TestApi, graph::ChainApi}; + + use super::*; + fn uxt(nonce: u64) -> Extrinsic { crate::common::tests::uxt(Transfer { from: Alice.into(), diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs index a35d68120a3ab..6324997da67b3 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs @@ -25,7 +25,7 @@ use super::metrics::MetricsLink as PrometheusMetrics; use crate::{ - common::log_xt::log_xt_trace, + common::tracing_log_xt::log_xt_trace, graph::{ self, base_pool::TimedTransactionSource, watcher::Watcher, ExtrinsicFor, ExtrinsicHash, IsValidator, ValidatedPoolSubmitOutcome, ValidatedTransaction, ValidatedTransactionFor, @@ -40,6 +40,7 @@ use sp_runtime::{ SaturatedConversion, }; use std::{collections::HashMap, sync::Arc, time::Instant}; +use tracing::{debug, trace}; pub(super) struct RevalidationResult { revalidated: HashMap, ValidatedTransactionFor>, @@ -159,9 +160,9 @@ where &self, xts: impl IntoIterator)>, ) -> Vec, ChainApi::Error>> { - if log::log_enabled!(target: LOG_TARGET, log::Level::Trace) { + if tracing::enabled!(target: LOG_TARGET, tracing::Level::TRACE) { let xts = xts.into_iter().collect::>(); - log_xt_trace!(target: LOG_TARGET, 
xts.iter().map(|(_,xt)| self.pool.validated_pool().api().hash_and_length(xt).0), "[{:?}] view::submit_many at:{}", self.at.hash); + log_xt_trace!(target: LOG_TARGET, xts.iter().map(|(_,xt)| self.pool.validated_pool().api().hash_and_length(xt).0), "view::submit_many at:{}", self.at.hash); self.pool.submit_at(&self.at, xts).await } else { self.pool.submit_at(&self.at, xts).await @@ -174,7 +175,12 @@ where source: TimedTransactionSource, xt: ExtrinsicFor, ) -> Result, ChainApi::Error> { - log::trace!(target: LOG_TARGET, "[{:?}] view::submit_and_watch at:{}", self.pool.validated_pool().api().hash_and_length(&xt).0, self.at.hash); + trace!( + target: LOG_TARGET, + tx_hash = ?self.pool.validated_pool().api().hash_and_length(&xt).0, + view_at_hash = ?self.at.hash, + "view::submit_and_watch" + ); self.pool.submit_and_watch(&self.at, source, xt).await } @@ -183,9 +189,13 @@ where &self, xt: ExtrinsicFor, ) -> Result, ChainApi::Error> { - let (hash, length) = self.pool.validated_pool().api().hash_and_length(&xt); - log::trace!(target: LOG_TARGET, "[{:?}] view::submit_local at:{}", hash, self.at.hash); - + let (tx_hash, length) = self.pool.validated_pool().api().hash_and_length(&xt); + trace!( + target: LOG_TARGET, + ?tx_hash, + view_at_hash = ?self.at.hash, + "view::submit_local" + ); let validity = self .pool .validated_pool() @@ -212,7 +222,7 @@ where let validated = ValidatedTransaction::valid_at( block_number.saturated_into::(), - hash, + tx_hash, TimedTransactionSource::new_local(true), Arc::from(xt), length, @@ -258,7 +268,11 @@ where revalidation_result_tx, } = finish_revalidation_worker_channels; - log::trace!(target:LOG_TARGET, "view::revalidate: at {} starting", self.at.hash); + trace!( + target: LOG_TARGET, + at_hash = ?self.at.hash, + "view::revalidate: at starting" + ); let start = Instant::now(); let validated_pool = self.pool.validated_pool(); let api = validated_pool.api(); @@ -279,7 +293,11 @@ where let mut should_break = false; tokio::select! 
{ _ = finish_revalidation_request_rx.recv() => { - log::trace!(target: LOG_TARGET, "view::revalidate: finish revalidation request received at {}.", self.at.hash); + trace!( + target: LOG_TARGET, + at_hash = ?self.at.hash, + "view::revalidate: finish revalidation request received" + ); break } _ = async { @@ -302,16 +320,15 @@ where self.metrics.report(|metrics| { metrics.view_revalidation_duration.observe(revalidation_duration.as_secs_f64()); }); - log::debug!( - target:LOG_TARGET, - "view::revalidate: at {:?} count: {}/{} took {:?}", - self.at.hash, - validation_results.len(), + debug!( + target: LOG_TARGET, + at_hash = ?self.at.hash, + count = validation_results.len(), batch_len, - revalidation_duration + duration = ?revalidation_duration, + "view::revalidate" ); - log_xt_trace!(data:tuple, target:LOG_TARGET, validation_results.iter().map(|x| (x.1, &x.0)), "[{:?}] view::revalidateresult: {:?}"); - + log_xt_trace!(data:tuple, target:LOG_TARGET, validation_results.iter().map(|x| (x.1, &x.0)), "view::revalidate result: {:?}"); for (validation_result, tx_hash, tx) in validation_results { match validation_result { Ok(Err(TransactionValidityError::Invalid(_))) => { @@ -330,33 +347,42 @@ where ), ); }, - Ok(Err(TransactionValidityError::Unknown(e))) => { - log::trace!( + Ok(Err(TransactionValidityError::Unknown(error))) => { + trace!( target: LOG_TARGET, - "[{:?}]: Removing. Cannot determine transaction validity: {:?}", - tx_hash, - e + ?tx_hash, + ?error, + "Removing. 
Cannot determine transaction validity" ); invalid_hashes.push(tx_hash); }, - Err(validation_err) => { - log::trace!( + Err(error) => { + trace!( target: LOG_TARGET, - "[{:?}]: Removing due to error during revalidation: {}", - tx_hash, - validation_err + ?tx_hash, + %error, + "Removing due to error during revalidation" ); invalid_hashes.push(tx_hash); }, } } - log::trace!(target:LOG_TARGET, "view::revalidate: sending revalidation result at {}", self.at.hash); - if let Err(e) = revalidation_result_tx + trace!( + target: LOG_TARGET, + at_hash = ?self.at.hash, + "view::revalidate: sending revalidation result" + ); + if let Err(error) = revalidation_result_tx .send(RevalidationResult { invalid_hashes, revalidated }) .await { - log::trace!(target:LOG_TARGET, "view::revalidate: sending revalidation_result at {} failed {:?}", self.at.hash, e); + trace!( + target: LOG_TARGET, + at_hash = ?self.at.hash, + ?error, + "view::revalidate: sending revalidation_result failed" + ); } } @@ -374,7 +400,11 @@ where super::revalidation_worker::RevalidationQueue, >, ) { - log::trace!(target:LOG_TARGET,"view::start_background_revalidation: at {}", view.at.hash); + trace!( + target: LOG_TARGET, + at_hash = ?view.at.hash, + "view::start_background_revalidation" + ); let (finish_revalidation_request_tx, finish_revalidation_request_rx) = tokio::sync::mpsc::channel(1); let (revalidation_result_tx, revalidation_result_rx) = tokio::sync::mpsc::channel(1); @@ -404,10 +434,14 @@ where /// /// Refer to [*View revalidation*](../index.html#view-revalidation) for more details. 
pub(super) async fn finish_revalidation(&self) { - log::trace!(target:LOG_TARGET,"view::finish_revalidation: at {}", self.at.hash); + trace!( + target: LOG_TARGET, + at_hash = ?self.at.hash, + "view::finish_revalidation" + ); let Some(revalidation_worker_channels) = self.revalidation_worker_channels.lock().take() else { - log::trace!(target:LOG_TARGET, "view::finish_revalidation: no finish_revalidation_request_tx"); + trace!(target:LOG_TARGET, "view::finish_revalidation: no finish_revalidation_request_tx"); return }; @@ -417,8 +451,13 @@ where } = revalidation_worker_channels; if let Some(finish_revalidation_request_tx) = finish_revalidation_request_tx { - if let Err(e) = finish_revalidation_request_tx.send(()).await { - log::trace!(target:LOG_TARGET, "view::finish_revalidation: sending cancellation request at {} failed {:?}", self.at.hash, e); + if let Err(error) = finish_revalidation_request_tx.send(()).await { + trace!( + target: LOG_TARGET, + at_hash = ?self.at.hash, + %error, + "view::finish_revalidation: sending cancellation request failed" + ); } } @@ -444,13 +483,13 @@ where ); }); - log::debug!( - target:LOG_TARGET, - "view::finish_revalidation: applying revalidation result invalid: {} revalidated: {} at {:?} took {:?}", - revalidation_result.invalid_hashes.len(), - revalidated_len, - self.at.hash, - start.elapsed() + debug!( + target: LOG_TARGET, + invalid = revalidation_result.invalid_hashes.len(), + revalidated = revalidated_len, + at_hash = ?self.at.hash, + duration = ?start.elapsed(), + "view::finish_revalidation: applying revalidation result" ); } } diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs index 43ed5bbf8869f..c4209a7d7f411 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs @@ -42,6 +42,7 @@ use std::{ sync::Arc, time::Instant, }; 
+use tracing::{trace, warn}; /// Helper struct to maintain the context for pending transaction submission, executed for /// newly inserted views. @@ -258,9 +259,14 @@ where .find_or_first(Result::is_ok); match result { - Some(Err(err)) => { - log::trace!(target: LOG_TARGET, "[{:?}] submit_local: err: {}", tx_hash, err); - Err(err) + Some(Err(error)) => { + trace!( + target: LOG_TARGET, + ?tx_hash, + %error, + "submit_local: err" + ); + Err(error) }, None => Ok(ViewStoreSubmitOutcome::new(tx_hash, None)), Some(Ok(r)) => Ok(r.into()), @@ -314,9 +320,14 @@ where .find_or_first(Result::is_ok); match result { - Some(Err(err)) => { - log::trace!(target: LOG_TARGET, "[{:?}] submit_and_watch: err: {}", tx_hash, err); - return Err(err); + Some(Err(error)) => { + trace!( + target: LOG_TARGET, + ?tx_hash, + %error, + "submit_and_watch: err" + ); + return Err(error); }, Some(Ok(result)) => Ok(ViewStoreSubmitOutcome::from(result).with_watcher(external_watcher)), @@ -422,8 +433,12 @@ where finalized_hash: Block::Hash, tree_route: &[Block::Hash], ) -> Vec> { - log::trace!(target: LOG_TARGET, "finalize_route finalized_hash:{finalized_hash:?} tree_route: {tree_route:?}"); - + trace!( + target: LOG_TARGET, + ?finalized_hash, + ?tree_route, + "finalize_route" + ); let mut finalized_transactions = Vec::new(); for block in tree_route.iter().chain(std::iter::once(&finalized_hash)) { @@ -431,8 +446,12 @@ where .api .block_body(*block) .await - .unwrap_or_else(|e| { - log::warn!(target: LOG_TARGET, "Finalize route: error request: {}", e); + .unwrap_or_else(|error| { + warn!( + target: LOG_TARGET, + %error, + "Finalize route: error request" + ); None }) .unwrap_or_default() @@ -500,7 +519,11 @@ where active_views.insert(view.at.hash, view.clone()); most_recent_view_lock.replace(view.at.hash); }; - log::trace!(target:LOG_TARGET,"insert_new_view: inactive_views: {:?}", self.inactive_views.read().keys()); + trace!( + target: LOG_TARGET, + inactive_views = ?self.inactive_views.read().keys(), + 
"insert_new_view" + ); } /// Returns an optional reference to the view at given hash. @@ -557,8 +580,11 @@ where .for_each(drop); } - log::trace!(target:LOG_TARGET,"handle_pre_finalized: removed_views: {:?}", removed_views); - + trace!( + target: LOG_TARGET, + ?removed_views, + "handle_pre_finalized" + ); removed_views.iter().for_each(|view| { self.dropped_stream_controller.remove_view(*view); }); @@ -613,10 +639,18 @@ where retain }); - log::trace!(target:LOG_TARGET,"handle_finalized: inactive_views: {:?}", inactive_views.keys()); + trace!( + target: LOG_TARGET, + inactive_views = ?inactive_views.keys(), + "handle_finalized" + ); } - log::trace!(target:LOG_TARGET,"handle_finalized: dropped_views: {:?}", dropped_views); + trace!( + target: LOG_TARGET, + ?dropped_views, + "handle_finalized" + ); self.listener.remove_stale_controllers(); self.dropped_stream_controller.remove_finalized_txs(finalized_xts.clone()); @@ -647,7 +681,11 @@ where .collect::>() }; futures::future::join_all(finish_revalidation_futures).await; - log::trace!(target:LOG_TARGET,"finish_background_revalidations took {:?}", start.elapsed()); + trace!( + target: LOG_TARGET, + duration = ?start.elapsed(), + "finish_background_revalidations" + ); } /// Replaces an existing transaction in the view_store with a new one. 
@@ -679,10 +717,16 @@ where return }; - let xt_hash = self.api.hash_and_length(&xt).0; - log::trace!(target:LOG_TARGET,"[{replaced:?}] replace_transaction wtih {xt_hash:?}, w:{watched}"); + let tx_hash = self.api.hash_and_length(&xt).0; + trace!( + target: LOG_TARGET, + ?replaced, + ?tx_hash, + watched, + "replace_transaction" + ); - self.replace_transaction_in_views(source, xt, xt_hash, replaced, watched).await; + self.replace_transaction_in_views(source, xt, tx_hash, replaced, watched).await; if let Some(replacement) = self.pending_txs_tasks.write().get_mut(&replaced) { replacement.mark_processed(); @@ -723,32 +767,36 @@ where view: Arc>, source: TimedTransactionSource, xt: ExtrinsicFor, - xt_hash: ExtrinsicHash, + tx_hash: ExtrinsicHash, watched: bool, ) { if watched { match view.submit_and_watch(source, xt).await { Ok(mut result) => { self.listener.add_view_watcher_for_tx( - xt_hash, + tx_hash, view.at.hash, result.expect_watcher().into_stream().boxed(), ); }, - Err(e) => { - log::trace!( - target:LOG_TARGET, - "[{:?}] replace_transaction: submit_and_watch to {} failed {}", - xt_hash, view.at.hash, e + Err(error) => { + trace!( + target: LOG_TARGET, + ?tx_hash, + at_hash = ?view.at.hash, + %error, + "replace_transaction: submit_and_watch failed" ); }, } } else { - if let Some(Err(e)) = view.submit_many(std::iter::once((source, xt))).await.pop() { - log::trace!( - target:LOG_TARGET, - "[{:?}] replace_transaction: submit to {} failed {}", - xt_hash, view.at.hash, e + if let Some(Err(error)) = view.submit_many(std::iter::once((source, xt))).await.pop() { + trace!( + target: LOG_TARGET, + ?tx_hash, + at_hash = ?view.at.hash, + %error, + "replace_transaction: submit failed" ); } } @@ -762,15 +810,15 @@ where &self, source: TimedTransactionSource, xt: ExtrinsicFor, - xt_hash: ExtrinsicHash, + tx_hash: ExtrinsicHash, replaced: ExtrinsicHash, watched: bool, ) { - if watched && !self.listener.contains_tx(&xt_hash) { - log::trace!( - target:LOG_TARGET, - "error: 
replace_transaction_in_views: no listener for watched transaction {:?}", - xt_hash, + if watched && !self.listener.contains_tx(&tx_hash) { + trace!( + target: LOG_TARGET, + ?tx_hash, + "error: replace_transaction_in_views: no listener for watched transaction" ); return; } @@ -787,7 +835,7 @@ where view.clone(), source.clone(), xt.clone(), - xt_hash, + tx_hash, watched, ) }) diff --git a/substrate/client/transaction-pool/src/graph/future.rs b/substrate/client/transaction-pool/src/graph/future.rs index 2c1e64c04b7f2..848893b026c5c 100644 --- a/substrate/client/transaction-pool/src/graph/future.rs +++ b/substrate/client/transaction-pool/src/graph/future.rs @@ -27,7 +27,7 @@ use sp_runtime::transaction_validity::TransactionTag as Tag; use std::time::Instant; use super::base_pool::Transaction; -use crate::{common::log_xt::log_xt_trace, LOG_TARGET}; +use crate::{common::tracing_log_xt::log_xt_trace, LOG_TARGET}; /// Transaction with partially satisfied dependencies. pub struct WaitingTransaction { @@ -184,7 +184,7 @@ impl }) .collect::>(); - log_xt_trace!(target: LOG_TARGET, &pruned, "[{:?}] FutureTransactions: removed while pruning tags."); + log_xt_trace!(target: LOG_TARGET, &pruned, "FutureTransactions: removed while pruning tags."); self.remove(&pruned) } diff --git a/substrate/client/transaction-pool/src/graph/pool.rs b/substrate/client/transaction-pool/src/graph/pool.rs index 403712662adae..52b12e3fabae6 100644 --- a/substrate/client/transaction-pool/src/graph/pool.rs +++ b/substrate/client/transaction-pool/src/graph/pool.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use crate::{common::log_xt::log_xt_trace, LOG_TARGET}; +use crate::{common::tracing_log_xt::log_xt_trace, LOG_TARGET}; use futures::{channel::mpsc::Receiver, Future}; use indexmap::IndexMap; use sc_transaction_pool_api::error; @@ -395,7 +395,7 @@ impl Pool { let pruned_hashes = reverified_transactions.keys().map(Clone::clone).collect::>(); log::debug!(target: LOG_TARGET, "Pruning at {:?}. Resubmitting transactions: {}, reverification took: {:?}", &at, reverified_transactions.len(), now.elapsed()); - log_xt_trace!(data: tuple, target: LOG_TARGET, &reverified_transactions, "[{:?}] Resubmitting transaction: {:?}"); + log_xt_trace!(data: tuple, target: LOG_TARGET, &reverified_transactions, "Resubmitting transaction: {:?}"); // And finally - submit reverified transactions back to the pool self.validated_pool.resubmit_pruned( diff --git a/substrate/client/transaction-pool/src/graph/validated_pool.rs b/substrate/client/transaction-pool/src/graph/validated_pool.rs index bc2b07896dba0..bbfcb9b40acab 100644 --- a/substrate/client/transaction-pool/src/graph/validated_pool.rs +++ b/substrate/client/transaction-pool/src/graph/validated_pool.rs @@ -21,7 +21,7 @@ use std::{ sync::Arc, }; -use crate::{common::log_xt::log_xt_trace, LOG_TARGET}; +use crate::{common::tracing_log_xt::log_xt_trace, LOG_TARGET}; use futures::channel::mpsc::{channel, Sender}; use parking_lot::{Mutex, RwLock}; use sc_transaction_pool_api::{error, PoolStatus, ReadyTransactions, TransactionPriority}; @@ -706,7 +706,7 @@ impl ValidatedPool { let invalid = self.pool.write().remove_subtree(hashes); log::trace!(target: LOG_TARGET, "Removed invalid transactions: {:?}", invalid.len()); - log_xt_trace!(target: LOG_TARGET, invalid.iter().map(|t| t.hash), "{:?} Removed invalid transaction"); + log_xt_trace!(target: LOG_TARGET, invalid.iter().map(|t| t.hash), "Removed invalid transaction"); let mut listener = self.listener.write(); for tx in &invalid { diff --git a/substrate/frame/balances/src/benchmarking.rs 
b/substrate/frame/balances/src/benchmarking.rs index c825300218d46..a761f8e2af828 100644 --- a/substrate/frame/balances/src/benchmarking.rs +++ b/substrate/frame/balances/src/benchmarking.rs @@ -65,7 +65,12 @@ mod benchmarks { #[extrinsic_call] _(RawOrigin::Signed(caller.clone()), recipient_lookup, transfer_amount); - assert_eq!(Balances::::free_balance(&caller), Zero::zero()); + if cfg!(feature = "insecure_zero_ed") { + assert_eq!(Balances::::free_balance(&caller), balance - transfer_amount); + } else { + assert_eq!(Balances::::free_balance(&caller), Zero::zero()); + } + assert_eq!(Balances::::free_balance(&recipient), transfer_amount); } @@ -173,7 +178,12 @@ mod benchmarks { #[extrinsic_call] _(RawOrigin::Root, source_lookup, recipient_lookup, transfer_amount); - assert_eq!(Balances::::free_balance(&source), Zero::zero()); + if cfg!(feature = "insecure_zero_ed") { + assert_eq!(Balances::::free_balance(&source), balance - transfer_amount); + } else { + assert_eq!(Balances::::free_balance(&source), Zero::zero()); + } + assert_eq!(Balances::::free_balance(&recipient), transfer_amount); } @@ -208,7 +218,12 @@ mod benchmarks { #[extrinsic_call] transfer_allow_death(RawOrigin::Signed(caller.clone()), recipient_lookup, transfer_amount); - assert_eq!(Balances::::free_balance(&caller), Zero::zero()); + if cfg!(feature = "insecure_zero_ed") { + assert_eq!(Balances::::free_balance(&caller), balance - transfer_amount); + } else { + assert_eq!(Balances::::free_balance(&caller), Zero::zero()); + } + assert_eq!(Balances::::free_balance(&recipient), transfer_amount); } @@ -308,7 +323,7 @@ mod benchmarks { /// Benchmark `burn` extrinsic with the worst possible condition - burn kills the account. 
#[benchmark] fn burn_allow_death() { - let existential_deposit = T::ExistentialDeposit::get(); + let existential_deposit: T::Balance = minimum_balance::(); let caller = whitelisted_caller(); // Give some multiple of the existential deposit @@ -321,13 +336,17 @@ mod benchmarks { #[extrinsic_call] burn(RawOrigin::Signed(caller.clone()), burn_amount, false); - assert_eq!(Balances::::free_balance(&caller), Zero::zero()); + if cfg!(feature = "insecure_zero_ed") { + assert_eq!(Balances::::free_balance(&caller), balance - burn_amount); + } else { + assert_eq!(Balances::::free_balance(&caller), Zero::zero()); + } } // Benchmark `burn` extrinsic with the case where account is kept alive. #[benchmark] fn burn_keep_alive() { - let existential_deposit = T::ExistentialDeposit::get(); + let existential_deposit: T::Balance = minimum_balance::(); let caller = whitelisted_caller(); // Give some multiple of the existential deposit diff --git a/substrate/frame/balances/src/impl_currency.rs b/substrate/frame/balances/src/impl_currency.rs index bc7e77c191db8..f453b23420c40 100644 --- a/substrate/frame/balances/src/impl_currency.rs +++ b/substrate/frame/balances/src/impl_currency.rs @@ -674,8 +674,10 @@ where Reserves::::try_mutate(who, |reserves| -> DispatchResult { match reserves.binary_search_by_key(id, |data| data.id) { Ok(index) => { - // this add can't overflow but just to be defensive. 
- reserves[index].amount = reserves[index].amount.defensive_saturating_add(value); + reserves[index].amount = reserves[index] + .amount + .checked_add(&value) + .ok_or(ArithmeticError::Overflow)?; }, Err(index) => { reserves diff --git a/substrate/frame/balances/src/tests/currency_tests.rs b/substrate/frame/balances/src/tests/currency_tests.rs index a6377c3ad72e8..0e5d7ccb46dee 100644 --- a/substrate/frame/balances/src/tests/currency_tests.rs +++ b/substrate/frame/balances/src/tests/currency_tests.rs @@ -24,7 +24,7 @@ use frame_support::{ BalanceStatus::{Free, Reserved}, Currency, ExistenceRequirement::{self, AllowDeath, KeepAlive}, - Hooks, InspectLockableCurrency, LockIdentifier, LockableCurrency, NamedReservableCurrency, + InspectLockableCurrency, LockIdentifier, LockableCurrency, NamedReservableCurrency, ReservableCurrency, WithdrawReasons, }, StorageNoopGuard, @@ -1136,7 +1136,9 @@ fn operations_on_dead_account_should_not_change_state() { #[test] #[should_panic = "The existential deposit must be greater than zero!"] +#[cfg(not(feature = "insecure_zero_ed"))] fn zero_ed_is_prohibited() { + use frame_support::traits::Hooks; // These functions all use `mutate_account` which may introduce a storage change when // the account never existed to begin with, and shouldn't exist in the end. 
ExtBuilder::default().existential_deposit(0).build_and_execute_with(|| { diff --git a/substrate/frame/examples/Cargo.toml b/substrate/frame/examples/Cargo.toml index 9eac53f0d98b0..40d6959378b87 100644 --- a/substrate/frame/examples/Cargo.toml +++ b/substrate/frame/examples/Cargo.toml @@ -26,6 +26,7 @@ pallet-example-offchain-worker = { workspace = true } pallet-example-single-block-migrations = { workspace = true } pallet-example-split = { workspace = true } pallet-example-tasks = { workspace = true } +pallet-example-view-functions = { workspace = true } [features] default = ["std"] @@ -40,6 +41,7 @@ std = [ "pallet-example-single-block-migrations/std", "pallet-example-split/std", "pallet-example-tasks/std", + "pallet-example-view-functions/std", ] try-runtime = [ "pallet-default-config-example/try-runtime", @@ -51,4 +53,5 @@ try-runtime = [ "pallet-example-single-block-migrations/try-runtime", "pallet-example-split/try-runtime", "pallet-example-tasks/try-runtime", + "pallet-example-view-functions/try-runtime", ] diff --git a/substrate/frame/examples/src/lib.rs b/substrate/frame/examples/src/lib.rs index d0d30830f2f04..200e92112a3f3 100644 --- a/substrate/frame/examples/src/lib.rs +++ b/substrate/frame/examples/src/lib.rs @@ -48,6 +48,9 @@ //! //! - [`pallet_example_tasks`]: This pallet demonstrates the use of `Tasks` to execute service work. //! +//! - [`pallet_example_view_functions`]: This pallet demonstrates the use of view functions to query +//! pallet state. +//! //! - [`pallet_example_authorization_tx_extension`]: An example `TransactionExtension` that //! authorizes a custom origin through signature validation, along with two support pallets to //! showcase the usage. 
diff --git a/substrate/frame/examples/view-functions/Cargo.toml b/substrate/frame/examples/view-functions/Cargo.toml new file mode 100644 index 0000000000000..b52ad4e06e9fa --- /dev/null +++ b/substrate/frame/examples/view-functions/Cargo.toml @@ -0,0 +1,61 @@ +[package] +name = "pallet-example-view-functions" +version = "1.0.0" +authors.workspace = true +edition.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = "Pallet to demonstrate the usage of view functions to query pallet state" + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", default-features = false, workspace = true } +frame-metadata = { features = ["current"], workspace = true } +log = { workspace = true } +scale-info = { default-features = false, features = ["derive"], workspace = true } + +frame-support = { default-features = false, workspace = true } +frame-system = { default-features = false, workspace = true } + +sp-core = { default-features = false, workspace = true } +sp-io = { default-features = false, workspace = true } +sp-metadata-ir = { default-features = false, workspace = true } +sp-runtime = { default-features = false, workspace = true } + +frame-benchmarking = { default-features = false, optional = true, workspace = true } + +[dev-dependencies] +pretty_assertions = { workspace = true } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-benchmarking?/std", + "frame-metadata/std", + "frame-support/std", + "frame-system/std", + "log/std", + "scale-info/std", + "sp-core/std", + "sp-io/std", + "sp-metadata-ir/std", + "sp-runtime/std", +] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + 
 "sp-runtime/try-runtime", +] diff --git a/substrate/frame/examples/view-functions/src/lib.rs b/substrate/frame/examples/view-functions/src/lib.rs new file mode 100644 index 0000000000000..e842a718ad334 --- /dev/null +++ b/substrate/frame/examples/view-functions/src/lib.rs @@ -0,0 +1,114 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! This pallet demonstrates the use of the `pallet::view_functions_experimental` api to query +//! pallet state. 
+#![cfg_attr(not(feature = "std"), no_std)] + +pub mod tests; + +use frame_support::Parameter; +use scale_info::TypeInfo; + +pub struct SomeType1; +impl From for u64 { + fn from(_t: SomeType1) -> Self { + 0u64 + } +} + +pub trait SomeAssociation1 { + type _1: Parameter + codec::MaxEncodedLen + TypeInfo; +} +impl SomeAssociation1 for u64 { + type _1 = u64; +} + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + + #[pallet::error] + pub enum Error {} + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::storage] + pub type SomeValue = StorageValue<_, u32>; + + #[pallet::storage] + pub type SomeMap = StorageMap<_, Twox64Concat, u32, u32, OptionQuery>; + + #[pallet::view_functions_experimental] + impl Pallet + where + T::AccountId: From + SomeAssociation1, + { + /// Query value no args. + pub fn get_value() -> Option { + SomeValue::::get() + } + + /// Query value with args. + pub fn get_value_with_arg(key: u32) -> Option { + SomeMap::::get(key) + } + } +} + +#[frame_support::pallet] +pub mod pallet2 { + use super::*; + use frame_support::pallet_prelude::*; + + #[pallet::error] + pub enum Error {} + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(PhantomData<(T, I)>); + + #[pallet::storage] + pub type SomeValue, I: 'static = ()> = StorageValue<_, u32>; + + #[pallet::storage] + pub type SomeMap, I: 'static = ()> = + StorageMap<_, Twox64Concat, u32, u32, OptionQuery>; + + #[pallet::view_functions_experimental] + impl, I: 'static> Pallet + where + T::AccountId: From + SomeAssociation1, + { + /// Query value no args. + pub fn get_value() -> Option { + SomeValue::::get() + } + + /// Query value with args. 
+ pub fn get_value_with_arg(key: u32) -> Option { + SomeMap::::get(key) + } + } +} diff --git a/substrate/frame/examples/view-functions/src/tests.rs b/substrate/frame/examples/view-functions/src/tests.rs new file mode 100644 index 0000000000000..25f5f094651d6 --- /dev/null +++ b/substrate/frame/examples/view-functions/src/tests.rs @@ -0,0 +1,188 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests for `pallet-example-view-functions`. 
+#![cfg(test)] + +use crate::{ + pallet::{self, Pallet}, + pallet2, +}; +use codec::{Decode, Encode}; +use scale_info::{form::PortableForm, meta_type}; + +use frame_support::{derive_impl, pallet_prelude::PalletInfoAccess, view_functions::ViewFunction}; +use sp_io::hashing::twox_128; +use sp_metadata_ir::{ViewFunctionArgMetadataIR, ViewFunctionGroupIR, ViewFunctionMetadataIR}; +use sp_runtime::testing::TestXt; + +pub type AccountId = u32; +pub type Balance = u32; + +type Block = frame_system::mocking::MockBlock; +frame_support::construct_runtime!( + pub enum Runtime { + System: frame_system, + ViewFunctionsExample: pallet, + ViewFunctionsInstance: pallet2, + ViewFunctionsInstance1: pallet2::, + } +); + +pub type Extrinsic = TestXt; + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] +impl frame_system::Config for Runtime { + type Block = Block; +} + +impl pallet::Config for Runtime {} +impl pallet2::Config for Runtime {} + +impl pallet2::Config for Runtime {} + +pub fn new_test_ext() -> sp_io::TestExternalities { + use sp_runtime::BuildStorage; + + let t = RuntimeGenesisConfig { system: Default::default() }.build_storage().unwrap(); + t.into() +} + +#[test] +fn pallet_get_value_query() { + new_test_ext().execute_with(|| { + let some_value = Some(99); + pallet::SomeValue::::set(some_value); + assert_eq!(some_value, Pallet::::get_value()); + + let query = pallet::GetValueViewFunction::::new(); + test_dispatch_view_function(&query, some_value); + }); +} + +#[test] +fn pallet_get_value_with_arg_query() { + new_test_ext().execute_with(|| { + let some_key = 1u32; + let some_value = Some(123); + pallet::SomeMap::::set(some_key, some_value); + assert_eq!(some_value, Pallet::::get_value_with_arg(some_key)); + + let query = pallet::GetValueWithArgViewFunction::::new(some_key); + test_dispatch_view_function(&query, some_value); + }); +} + +#[test] +fn pallet_multiple_instances() { + use pallet2::Instance1; + + new_test_ext().execute_with(|| { + let 
instance_value = Some(123); + let instance1_value = Some(456); + + pallet2::SomeValue::::set(instance_value); + pallet2::SomeValue::::set(instance1_value); + + let query = pallet2::GetValueViewFunction::::new(); + test_dispatch_view_function(&query, instance_value); + + let query_instance1 = pallet2::GetValueViewFunction::::new(); + test_dispatch_view_function(&query_instance1, instance1_value); + }); +} + +#[test] +fn metadata_ir_definitions() { + new_test_ext().execute_with(|| { + let metadata_ir = Runtime::metadata_ir(); + let pallet1 = metadata_ir + .view_functions + .groups + .iter() + .find(|pallet| pallet.name == "ViewFunctionsExample") + .unwrap(); + + fn view_fn_id(preifx_hash: [u8; 16], view_fn_signature: &str) -> [u8; 32] { + let mut id = [0u8; 32]; + id[..16].copy_from_slice(&preifx_hash); + id[16..].copy_from_slice(&twox_128(view_fn_signature.as_bytes())); + id + } + + let get_value_id = view_fn_id( + ::name_hash(), + "get_value() -> Option", + ); + + let get_value_with_arg_id = view_fn_id( + ::name_hash(), + "get_value_with_arg(u32) -> Option", + ); + + pretty_assertions::assert_eq!( + pallet1.view_functions, + vec![ + ViewFunctionMetadataIR { + name: "get_value", + id: get_value_id, + args: vec![], + output: meta_type::>(), + docs: vec![" Query value no args."], + }, + ViewFunctionMetadataIR { + name: "get_value_with_arg", + id: get_value_with_arg_id, + args: vec![ViewFunctionArgMetadataIR { name: "key", ty: meta_type::() },], + output: meta_type::>(), + docs: vec![" Query value with args."], + }, + ] + ); + }); +} + +#[test] +fn metadata_encoded_to_custom_value() { + new_test_ext().execute_with(|| { + let metadata = sp_metadata_ir::into_latest(Runtime::metadata_ir()); + // metadata is currently experimental so lives as a custom value. 
+ let frame_metadata::RuntimeMetadata::V15(v15) = metadata.1 else { + panic!("Expected metadata v15") + }; + let custom_value = v15 + .custom + .map + .get("view_functions_experimental") + .expect("Expected custom value"); + let view_function_groups: Vec> = + Decode::decode(&mut &custom_value.value[..]).unwrap(); + assert_eq!(view_function_groups.len(), 4); + }); +} + +fn test_dispatch_view_function(query: &Q, expected: V) +where + Q: ViewFunction + Encode, + V: Decode + Eq + PartialEq + std::fmt::Debug, +{ + let input = query.encode(); + let output = Runtime::execute_view_function(Q::id(), input).unwrap(); + let query_result = V::decode(&mut &output[..]).unwrap(); + + assert_eq!(expected, query_result,); +} diff --git a/substrate/frame/multisig/src/weights.rs b/substrate/frame/multisig/src/weights.rs index 1c91734e6188c..0f8167a07a1c8 100644 --- a/substrate/frame/multisig/src/weights.rs +++ b/substrate/frame/multisig/src/weights.rs @@ -18,17 +18,18 @@ //! Autogenerated weights for `pallet_multisig` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2025-01-23, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2025-01-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `25968fd2c26d`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` +//! HOSTNAME: `fff8f38555b9`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: `1024` // Executed Command: -// target/production/substrate-node +// frame-omni-bencher +// v1 // benchmark // pallet // --extrinsic=* -// --chain=dev +// --runtime=target/production/wbuild/kitchensink-runtime/kitchensink_runtime.wasm // --pallet=pallet_multisig // --header=/__w/polkadot-sdk/polkadot-sdk/substrate/HEADER-APACHE2 // --output=/__w/polkadot-sdk/polkadot-sdk/substrate/frame/multisig/src/weights.rs @@ -36,10 +37,12 @@ // --steps=50 // --repeat=20 // --heap-pages=4096 -// --template=substrate/.maintain/frame-weight-template.hbs +// --template=substrate/.maintain/frame-umbrella-weight-template.hbs // --no-storage-info // --no-min-squares // --no-median-slopes +// --genesis-builder-policy=none +// --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic,pallet_nomination_pools,pallet_remark,pallet_transaction_storage #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -69,12 +72,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `z` is `[0, 10000]`. fn as_multi_threshold_1(z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `145` + // Measured: `0` // Estimated: `3997` - // Minimum execution time: 28_800_000 picoseconds. - Weight::from_parts(30_130_161, 3997) - // Standard Error: 18 - .saturating_add(Weight::from_parts(551, 0).saturating_mul(z.into())) + // Minimum execution time: 18_665_000 picoseconds. + Weight::from_parts(19_157_181, 3997) + // Standard Error: 6 + .saturating_add(Weight::from_parts(590, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: `Multisig::Multisigs` (r:1 w:1) @@ -83,14 +86,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `z` is `[0, 10000]`. 
fn as_multi_create(s: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `334 + s * (2 ±0)` + // Measured: `229 + s * (2 ±0)` // Estimated: `6811` - // Minimum execution time: 51_467_000 picoseconds. - Weight::from_parts(38_610_296, 6811) - // Standard Error: 1_796 - .saturating_add(Weight::from_parts(161_251, 0).saturating_mul(s.into())) - // Standard Error: 17 - .saturating_add(Weight::from_parts(2_068, 0).saturating_mul(z.into())) + // Minimum execution time: 42_388_000 picoseconds. + Weight::from_parts(29_499_967, 6811) + // Standard Error: 1_563 + .saturating_add(Weight::from_parts(145_538, 0).saturating_mul(s.into())) + // Standard Error: 15 + .saturating_add(Weight::from_parts(2_016, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -100,14 +103,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `z` is `[0, 10000]`. fn as_multi_approve(s: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `353` + // Measured: `185` // Estimated: `6811` - // Minimum execution time: 36_208_000 picoseconds. - Weight::from_parts(24_694_507, 6811) - // Standard Error: 1_430 - .saturating_add(Weight::from_parts(134_263, 0).saturating_mul(s.into())) - // Standard Error: 14 - .saturating_add(Weight::from_parts(2_021, 0).saturating_mul(z.into())) + // Minimum execution time: 27_231_000 picoseconds. + Weight::from_parts(16_755_689, 6811) + // Standard Error: 866 + .saturating_add(Weight::from_parts(119_094, 0).saturating_mul(s.into())) + // Standard Error: 8 + .saturating_add(Weight::from_parts(1_927, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -123,14 +126,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `z` is `[0, 10000]`. 
fn as_multi_complete(s: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `604 + s * (33 ±0)` + // Measured: `288 + s * (33 ±0)` // Estimated: `6811` - // Minimum execution time: 65_217_000 picoseconds. - Weight::from_parts(48_235_573, 6811) - // Standard Error: 2_841 - .saturating_add(Weight::from_parts(205_077, 0).saturating_mul(s.into())) - // Standard Error: 27 - .saturating_add(Weight::from_parts(2_298, 0).saturating_mul(z.into())) + // Minimum execution time: 50_448_000 picoseconds. + Weight::from_parts(34_504_261, 6811) + // Standard Error: 2_070 + .saturating_add(Weight::from_parts(189_586, 0).saturating_mul(s.into())) + // Standard Error: 20 + .saturating_add(Weight::from_parts(2_116, 0).saturating_mul(z.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -139,12 +142,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[2, 100]`. fn approve_as_multi_create(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `334 + s * (2 ±0)` + // Measured: `233 + s * (2 ±0)` // Estimated: `6811` - // Minimum execution time: 35_727_000 picoseconds. - Weight::from_parts(37_329_524, 6811) - // Standard Error: 1_814 - .saturating_add(Weight::from_parts(157_471, 0).saturating_mul(s.into())) + // Minimum execution time: 26_020_000 picoseconds. + Weight::from_parts(28_229_601, 6811) + // Standard Error: 1_282 + .saturating_add(Weight::from_parts(133_221, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -153,12 +156,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[2, 100]`. fn approve_as_multi_approve(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `353` + // Measured: `185` // Estimated: `6811` - // Minimum execution time: 21_623_000 picoseconds. 
- Weight::from_parts(22_601_251, 6811) - // Standard Error: 963 - .saturating_add(Weight::from_parts(139_320, 0).saturating_mul(s.into())) + // Minimum execution time: 13_660_000 picoseconds. + Weight::from_parts(14_317_629, 6811) + // Standard Error: 1_188 + .saturating_add(Weight::from_parts(125_599, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -167,12 +170,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[2, 100]`. fn cancel_as_multi(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `525 + s * (1 ±0)` + // Measured: `357 + s * (1 ±0)` // Estimated: `6811` - // Minimum execution time: 36_801_000 picoseconds. - Weight::from_parts(37_578_412, 6811) - // Standard Error: 1_580 - .saturating_add(Weight::from_parts(159_580, 0).saturating_mul(s.into())) + // Minimum execution time: 27_827_000 picoseconds. + Weight::from_parts(28_980_511, 6811) + // Standard Error: 822 + .saturating_add(Weight::from_parts(130_315, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -187,12 +190,12 @@ impl WeightInfo for () { /// The range of component `z` is `[0, 10000]`. fn as_multi_threshold_1(z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `145` + // Measured: `0` // Estimated: `3997` - // Minimum execution time: 28_800_000 picoseconds. - Weight::from_parts(30_130_161, 3997) - // Standard Error: 18 - .saturating_add(Weight::from_parts(551, 0).saturating_mul(z.into())) + // Minimum execution time: 18_665_000 picoseconds. + Weight::from_parts(19_157_181, 3997) + // Standard Error: 6 + .saturating_add(Weight::from_parts(590, 0).saturating_mul(z.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: `Multisig::Multisigs` (r:1 w:1) @@ -201,14 +204,14 @@ impl WeightInfo for () { /// The range of component `z` is `[0, 10000]`. 
fn as_multi_create(s: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `334 + s * (2 ±0)` + // Measured: `229 + s * (2 ±0)` // Estimated: `6811` - // Minimum execution time: 51_467_000 picoseconds. - Weight::from_parts(38_610_296, 6811) - // Standard Error: 1_796 - .saturating_add(Weight::from_parts(161_251, 0).saturating_mul(s.into())) - // Standard Error: 17 - .saturating_add(Weight::from_parts(2_068, 0).saturating_mul(z.into())) + // Minimum execution time: 42_388_000 picoseconds. + Weight::from_parts(29_499_967, 6811) + // Standard Error: 1_563 + .saturating_add(Weight::from_parts(145_538, 0).saturating_mul(s.into())) + // Standard Error: 15 + .saturating_add(Weight::from_parts(2_016, 0).saturating_mul(z.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -218,14 +221,14 @@ impl WeightInfo for () { /// The range of component `z` is `[0, 10000]`. fn as_multi_approve(s: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `353` + // Measured: `185` // Estimated: `6811` - // Minimum execution time: 36_208_000 picoseconds. - Weight::from_parts(24_694_507, 6811) - // Standard Error: 1_430 - .saturating_add(Weight::from_parts(134_263, 0).saturating_mul(s.into())) - // Standard Error: 14 - .saturating_add(Weight::from_parts(2_021, 0).saturating_mul(z.into())) + // Minimum execution time: 27_231_000 picoseconds. + Weight::from_parts(16_755_689, 6811) + // Standard Error: 866 + .saturating_add(Weight::from_parts(119_094, 0).saturating_mul(s.into())) + // Standard Error: 8 + .saturating_add(Weight::from_parts(1_927, 0).saturating_mul(z.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -241,14 +244,14 @@ impl WeightInfo for () { /// The range of component `z` is `[0, 10000]`. 
fn as_multi_complete(s: u32, z: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `604 + s * (33 ±0)` + // Measured: `288 + s * (33 ±0)` // Estimated: `6811` - // Minimum execution time: 65_217_000 picoseconds. - Weight::from_parts(48_235_573, 6811) - // Standard Error: 2_841 - .saturating_add(Weight::from_parts(205_077, 0).saturating_mul(s.into())) - // Standard Error: 27 - .saturating_add(Weight::from_parts(2_298, 0).saturating_mul(z.into())) + // Minimum execution time: 50_448_000 picoseconds. + Weight::from_parts(34_504_261, 6811) + // Standard Error: 2_070 + .saturating_add(Weight::from_parts(189_586, 0).saturating_mul(s.into())) + // Standard Error: 20 + .saturating_add(Weight::from_parts(2_116, 0).saturating_mul(z.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -257,12 +260,12 @@ impl WeightInfo for () { /// The range of component `s` is `[2, 100]`. fn approve_as_multi_create(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `334 + s * (2 ±0)` + // Measured: `233 + s * (2 ±0)` // Estimated: `6811` - // Minimum execution time: 35_727_000 picoseconds. - Weight::from_parts(37_329_524, 6811) - // Standard Error: 1_814 - .saturating_add(Weight::from_parts(157_471, 0).saturating_mul(s.into())) + // Minimum execution time: 26_020_000 picoseconds. + Weight::from_parts(28_229_601, 6811) + // Standard Error: 1_282 + .saturating_add(Weight::from_parts(133_221, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -271,12 +274,12 @@ impl WeightInfo for () { /// The range of component `s` is `[2, 100]`. fn approve_as_multi_approve(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `353` + // Measured: `185` // Estimated: `6811` - // Minimum execution time: 21_623_000 picoseconds. 
- Weight::from_parts(22_601_251, 6811) - // Standard Error: 963 - .saturating_add(Weight::from_parts(139_320, 0).saturating_mul(s.into())) + // Minimum execution time: 13_660_000 picoseconds. + Weight::from_parts(14_317_629, 6811) + // Standard Error: 1_188 + .saturating_add(Weight::from_parts(125_599, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -285,12 +288,12 @@ impl WeightInfo for () { /// The range of component `s` is `[2, 100]`. fn cancel_as_multi(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `525 + s * (1 ±0)` + // Measured: `357 + s * (1 ±0)` // Estimated: `6811` - // Minimum execution time: 36_801_000 picoseconds. - Weight::from_parts(37_578_412, 6811) - // Standard Error: 1_580 - .saturating_add(Weight::from_parts(159_580, 0).saturating_mul(s.into())) + // Minimum execution time: 27_827_000 picoseconds. + Weight::from_parts(28_980_511, 6811) + // Standard Error: 822 + .saturating_add(Weight::from_parts(130_315, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } diff --git a/substrate/frame/revive/Cargo.toml b/substrate/frame/revive/Cargo.toml index 0959cc50638ba..4faa9205378fe 100644 --- a/substrate/frame/revive/Cargo.toml +++ b/substrate/frame/revive/Cargo.toml @@ -44,6 +44,9 @@ pallet-revive-uapi = { workspace = true, features = ["scale"] } pallet-transaction-payment = { workspace = true } sp-api = { workspace = true } sp-arithmetic = { workspace = true } +sp-consensus-aura = { workspace = true, optional = true } +sp-consensus-babe = { workspace = true, optional = true } +sp-consensus-slots = { workspace = true, optional = true } sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { workspace = true } @@ -96,6 +99,9 @@ std = [ "serde_json/std", "sp-api/std", "sp-arithmetic/std", + "sp-consensus-aura/std", + "sp-consensus-babe/std", + 
"sp-consensus-slots/std", "sp-core/std", "sp-io/std", "sp-keystore/std", @@ -114,6 +120,9 @@ runtime-benchmarks = [ "pallet-timestamp/runtime-benchmarks", "pallet-transaction-payment/runtime-benchmarks", "pallet-utility/runtime-benchmarks", + "sp-consensus-aura", + "sp-consensus-babe", + "sp-consensus-slots", "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm/runtime-benchmarks", diff --git a/substrate/frame/revive/fixtures/contracts/block_author.rs b/substrate/frame/revive/fixtures/contracts/block_author.rs new file mode 100644 index 0000000000000..59886a19cc619 --- /dev/null +++ b/substrate/frame/revive/fixtures/contracts/block_author.rs @@ -0,0 +1,37 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#![no_std] +#![no_main] + +use common::input; +use uapi::{HostFn, HostFnImpl as api}; + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn deploy() {} + +#[no_mangle] +#[polkavm_derive::polkavm_export] +pub extern "C" fn call() { + input!(expected: &[u8; 20],); + + let mut received = [0; 20]; + api::block_author(&mut received); + + assert_eq!(expected, &received); +} diff --git a/substrate/frame/revive/fixtures/contracts/create_transient_storage_and_call.rs b/substrate/frame/revive/fixtures/contracts/create_transient_storage_and_call.rs index 660db84028dbd..0244967a05565 100644 --- a/substrate/frame/revive/fixtures/contracts/create_transient_storage_and_call.rs +++ b/substrate/frame/revive/fixtures/contracts/create_transient_storage_and_call.rs @@ -22,7 +22,7 @@ use common::input; use uapi::{HostFn, HostFnImpl as api, StorageFlags}; -static BUFFER: [u8; 448] = [0u8; 448]; +static BUFFER: [u8; 416] = [0u8; 416]; #[no_mangle] #[polkavm_derive::polkavm_export] diff --git a/substrate/frame/revive/src/benchmarking/mod.rs b/substrate/frame/revive/src/benchmarking/mod.rs index d3d648cd2125d..624fa35570d84 100644 --- a/substrate/frame/revive/src/benchmarking/mod.rs +++ b/substrate/frame/revive/src/benchmarking/mod.rs @@ -40,7 +40,16 @@ use frame_support::{ }; use frame_system::RawOrigin; use pallet_revive_uapi::{pack_hi_lo, CallFlags, ReturnErrorCode, StorageFlags}; -use sp_runtime::traits::{Bounded, Hash}; +use sp_consensus_aura::AURA_ENGINE_ID; +use sp_consensus_babe::{ + digests::{PreDigest, PrimaryPreDigest}, + BABE_ENGINE_ID, +}; +use sp_consensus_slots::Slot; +use sp_runtime::{ + generic::{Digest, DigestItem}, + traits::{Bounded, Hash}, +}; /// How many runs we do per API benchmark. 
/// @@ -886,6 +895,59 @@ mod benchmarks { assert_eq!(U256::from_little_endian(&memory[..]), runtime.ext().block_number()); } + #[benchmark(pov_mode = Measured)] + fn seal_block_author() { + build_runtime!(runtime, memory: [[123u8; 20], ]); + + let mut digest = Digest::default(); + + // The pre-runtime digest log is unbounded; usually around 3 items but it can vary. + // To get safe benchmark results despite that, populate it with a bunch of random logs to + // ensure iteration over many items (we just overestimate the cost of the API). + for i in 0..16 { + digest.push(DigestItem::PreRuntime([i, i, i, i], vec![i; 128])); + digest.push(DigestItem::Consensus([i, i, i, i], vec![i; 128])); + digest.push(DigestItem::Seal([i, i, i, i], vec![i; 128])); + digest.push(DigestItem::Other(vec![i; 128])); + } + + // The content of the pre-runtime digest log depends on the configured consensus. + // However, mismatching logs are simply ignored. Thus we construct fixtures which will + // let the API to return a value in both BABE and AURA consensus. 
+ + // Construct a `Digest` log fixture returning some value in BABE + let primary_pre_digest = vec![0; ::max_encoded_len()]; + let pre_digest = + PreDigest::Primary(PrimaryPreDigest::decode(&mut &primary_pre_digest[..]).unwrap()); + digest.push(DigestItem::PreRuntime(BABE_ENGINE_ID, pre_digest.encode())); + digest.push(DigestItem::Seal(BABE_ENGINE_ID, pre_digest.encode())); + + // Construct a `Digest` log fixture returning some value in AURA + let slot = Slot::default(); + digest.push(DigestItem::PreRuntime(AURA_ENGINE_ID, slot.encode())); + digest.push(DigestItem::Seal(AURA_ENGINE_ID, slot.encode())); + + frame_system::Pallet::::initialize( + &BlockNumberFor::::from(1u32), + &Default::default(), + &digest, + ); + + let result; + #[block] + { + result = runtime.bench_block_author(memory.as_mut_slice(), 0); + } + assert_ok!(result); + + let block_author = runtime + .ext() + .block_author() + .map(|account| T::AddressMapper::to_address(&account)) + .unwrap_or(H160::zero()); + assert_eq!(&memory[..], block_author.as_bytes()); + } + #[benchmark(pov_mode = Measured)] fn seal_block_hash() { let mut memory = vec![0u8; 64]; diff --git a/substrate/frame/revive/src/exec.rs b/substrate/frame/revive/src/exec.rs index 14ab917c0d4f9..c3adba45403ee 100644 --- a/substrate/frame/revive/src/exec.rs +++ b/substrate/frame/revive/src/exec.rs @@ -37,7 +37,7 @@ use frame_support::{ traits::{ fungible::{Inspect, Mutate}, tokens::{Fortitude, Preservation}, - Contains, OriginTrait, Time, + Contains, FindAuthor, OriginTrait, Time, }, weights::Weight, Blake2_128Concat, BoundedVec, StorageHasher, @@ -366,6 +366,9 @@ pub trait Ext: sealing::Sealed { /// `block_number` isn't within the range of the previous 256 blocks. fn block_hash(&self, block_number: U256) -> Option; + /// Returns the author of the current block. + fn block_author(&self) -> Option>; + /// Returns the maximum allowed size of a storage item. 
fn max_value_size(&self) -> u32; @@ -1718,6 +1721,13 @@ where self.block_hash(block_number) } + fn block_author(&self) -> Option> { + let digest = >::digest(); + let pre_runtime_digests = digest.logs.iter().filter_map(|d| d.as_pre_runtime()); + + T::FindAuthor::find_author(pre_runtime_digests) + } + fn max_value_size(&self) -> u32 { limits::PAYLOAD_BYTES } diff --git a/substrate/frame/revive/src/lib.rs b/substrate/frame/revive/src/lib.rs index 7f4565a9f0884..74b4b12cce184 100644 --- a/substrate/frame/revive/src/lib.rs +++ b/substrate/frame/revive/src/lib.rs @@ -114,7 +114,7 @@ const LOG_TARGET: &str = "runtime::revive"; #[frame_support::pallet] pub mod pallet { use super::*; - use frame_support::pallet_prelude::*; + use frame_support::{pallet_prelude::*, traits::FindAuthor}; use frame_system::pallet_prelude::*; use sp_core::U256; use sp_runtime::Perbill; @@ -189,6 +189,9 @@ pub mod pallet { #[pallet::no_default_bounds] type ChainExtension: chain_extension::ChainExtension + Default; + /// Find the author of the current block. + type FindAuthor: FindAuthor; + /// The amount of balance a caller has to pay for each byte of storage. /// /// # Note @@ -362,6 +365,7 @@ pub mod pallet { type ChainId = ConstU64<0>; type NativeToEthRatio = ConstU32<1>; type EthGasEncoder = (); + type FindAuthor = (); } } diff --git a/substrate/frame/revive/src/limits.rs b/substrate/frame/revive/src/limits.rs index f101abf0ea7e9..61932575a4c10 100644 --- a/substrate/frame/revive/src/limits.rs +++ b/substrate/frame/revive/src/limits.rs @@ -47,7 +47,7 @@ pub const NUM_EVENT_TOPICS: u32 = 4; pub const DELEGATE_DEPENDENCIES: u32 = 32; /// Maximum size of events (including topics) and storage values. -pub const PAYLOAD_BYTES: u32 = 448; +pub const PAYLOAD_BYTES: u32 = 416; /// The maximum size of the transient storage in bytes. 
/// diff --git a/substrate/frame/revive/src/tests.rs b/substrate/frame/revive/src/tests.rs index db4b4da2b05e3..50b14c2a9987f 100644 --- a/substrate/frame/revive/src/tests.rs +++ b/substrate/frame/revive/src/tests.rs @@ -52,7 +52,7 @@ use frame_support::{ traits::{ fungible::{BalancedHold, Inspect, Mutate, MutateHold}, tokens::Preservation, - ConstU32, ConstU64, Contains, OnIdle, OnInitialize, StorageVersion, + ConstU32, ConstU64, Contains, FindAuthor, OnIdle, OnInitialize, StorageVersion, }, weights::{constants::WEIGHT_REF_TIME_PER_SECOND, FixedFee, IdentityFee, Weight, WeightMeter}, }; @@ -506,6 +506,15 @@ parameter_types! { pub static UnstableInterface: bool = true; } +impl FindAuthor<::AccountId> for Test { + fn find_author<'a, I>(_digests: I) -> Option<::AccountId> + where + I: 'a + IntoIterator, + { + Some(EVE) + } +} + #[derive_impl(crate::config_preludes::TestDefaultConfig)] impl Config for Test { type Time = Timestamp; @@ -521,6 +530,7 @@ impl Config for Test { type InstantiateOrigin = EnsureAccount; type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent; type ChainId = ChainId; + type FindAuthor = Test; } impl TryFrom for crate::Call { @@ -3080,7 +3090,7 @@ fn deposit_limit_in_nested_calls() { // Require more than the sender's balance. // Limit the sub call to little balance so it should fail in there let ret = builder::bare_call(addr_caller) - .data((448, &addr_callee, U256::from(1u64)).encode()) + .data((416, &addr_callee, U256::from(1u64)).encode()) .build_and_unwrap_result(); assert_return_code!(ret, RuntimeReturnCode::OutOfResources); @@ -3595,6 +3605,21 @@ fn block_hash_works() { }); } +#[test] +fn block_author_works() { + let (code, _) = compile_module("block_author").unwrap(); + + ExtBuilder::default().existential_deposit(1).build().execute_with(|| { + let _ = ::Currency::set_balance(&ALICE, 1_000_000); + + let Contract { addr, .. 
} = + builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_contract(); + + // The fixture asserts the input to match the find_author API method output. + assert_ok!(builder::call(addr).data(EVE_ADDR.encode()).build()); + }); +} + #[test] fn root_cannot_upload_code() { let (wasm, _) = compile_module("dummy").unwrap(); @@ -4573,6 +4598,7 @@ fn tracing_works_for_transfers() { } #[test] +#[ignore = "does not collect the gas_used properly"] fn tracing_works() { use crate::evm::*; use CallType::*; diff --git a/substrate/frame/revive/src/wasm/runtime.rs b/substrate/frame/revive/src/wasm/runtime.rs index d02c75247a4fe..5312ad2db1b0e 100644 --- a/substrate/frame/revive/src/wasm/runtime.rs +++ b/substrate/frame/revive/src/wasm/runtime.rs @@ -327,6 +327,8 @@ pub enum RuntimeCosts { BlockNumber, /// Weight of calling `seal_block_hash`. BlockHash, + /// Weight of calling `seal_block_author`. + BlockAuthor, /// Weight of calling `seal_gas_price`. GasPrice, /// Weight of calling `seal_base_fee`. @@ -483,6 +485,7 @@ impl Token for RuntimeCosts { MinimumBalance => T::WeightInfo::seal_minimum_balance(), BlockNumber => T::WeightInfo::seal_block_number(), BlockHash => T::WeightInfo::seal_block_hash(), + BlockAuthor => T::WeightInfo::seal_block_author(), GasPrice => T::WeightInfo::seal_gas_price(), BaseFee => T::WeightInfo::seal_base_fee(), Now => T::WeightInfo::seal_now(), @@ -1689,6 +1692,25 @@ pub mod env { )?) } + /// Stores the current block author into the supplied buffer. + /// See [`pallet_revive_uapi::HostFn::block_author`]. + #[stable] + fn block_author(&mut self, memory: &mut M, out_ptr: u32) -> Result<(), TrapReason> { + self.charge_gas(RuntimeCosts::BlockAuthor)?; + let block_author = self + .ext + .block_author() + .map(|account| ::AddressMapper::to_address(&account)) + .unwrap_or(H160::zero()); + Ok(self.write_fixed_sandbox_output( + memory, + out_ptr, + &block_author.as_bytes(), + false, + already_charged, + )?) 
+ } + /// Computes the KECCAK 256-bit hash on the given input buffer. /// See [`pallet_revive_uapi::HostFn::hash_keccak_256`]. #[stable] diff --git a/substrate/frame/revive/src/weights.rs b/substrate/frame/revive/src/weights.rs index 086b64c5dde40..6fee4995c186b 100644 --- a/substrate/frame/revive/src/weights.rs +++ b/substrate/frame/revive/src/weights.rs @@ -18,17 +18,18 @@ //! Autogenerated weights for `pallet_revive` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2025-01-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2025-01-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `cc3478f23e9a`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` +//! HOSTNAME: `eacb3695a76e`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: `1024` // Executed Command: -// target/production/substrate-node +// frame-omni-bencher +// v1 // benchmark // pallet // --extrinsic=* -// --chain=dev +// --runtime=target/production/wbuild/kitchensink-runtime/kitchensink_runtime.wasm // --pallet=pallet_revive // --header=/__w/polkadot-sdk/polkadot-sdk/substrate/HEADER-APACHE2 // --output=/__w/polkadot-sdk/polkadot-sdk/substrate/frame/revive/src/weights.rs @@ -40,6 +41,8 @@ // --no-storage-info // --no-min-squares // --no-median-slopes +// --genesis-builder-policy=none +// --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic,pallet_nomination_pools,pallet_remark,pallet_transaction_storage #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -88,6 +91,7 @@ pub trait WeightInfo { fn seal_gas_price() -> Weight; fn seal_base_fee() -> Weight; fn seal_block_number() -> Weight; + fn seal_block_author() -> Weight; fn seal_block_hash() -> Weight; fn seal_now() -> Weight; fn seal_weight_to_fee() -> Weight; @@ 
-139,10 +143,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Revive::DeletionQueueCounter` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) fn on_process_deletion_queue_batch() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `1594` - // Minimum execution time: 2_796_000 picoseconds. - Weight::from_parts(2_958_000, 1594) + // Measured: `0` + // Estimated: `1485` + // Minimum execution time: 690_000 picoseconds. + Weight::from_parts(743_000, 1485) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -150,12 +154,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `k` is `[0, 1024]`. fn on_initialize_per_trie_key(k: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `425 + k * (69 ±0)` - // Estimated: `415 + k * (70 ±0)` - // Minimum execution time: 16_135_000 picoseconds. - Weight::from_parts(3_227_098, 415) - // Standard Error: 1_106 - .saturating_add(Weight::from_parts(1_175_210, 0).saturating_mul(k.into())) + // Measured: `230 + k * (69 ±0)` + // Estimated: `222 + k * (70 ±0)` + // Minimum execution time: 10_913_000 picoseconds. + Weight::from_parts(11_048_000, 222) + // Standard Error: 972 + .saturating_add(Weight::from_parts(1_172_318, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -175,12 +179,14 @@ impl WeightInfo for SubstrateWeight { /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) /// The range of component `c` is `[0, 262144]`. - fn call_with_code_per_byte(_c: u32, ) -> Weight { + fn call_with_code_per_byte(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1502` - // Estimated: `7442` - // Minimum execution time: 89_144_000 picoseconds. 
- Weight::from_parts(93_719_381, 7442) + // Measured: `1195` + // Estimated: `7135` + // Minimum execution time: 83_080_000 picoseconds. + Weight::from_parts(89_270_264, 7135) + // Standard Error: 0 + .saturating_add(Weight::from_parts(3, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -200,16 +206,14 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) /// The range of component `c` is `[0, 262144]`. /// The range of component `i` is `[0, 262144]`. - fn instantiate_with_code(c: u32, i: u32, ) -> Weight { + fn instantiate_with_code(_c: u32, i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `401` - // Estimated: `6349` - // Minimum execution time: 185_726_000 picoseconds. - Weight::from_parts(165_030_228, 6349) - // Standard Error: 10 - .saturating_add(Weight::from_parts(10, 0).saturating_mul(c.into())) - // Standard Error: 10 - .saturating_add(Weight::from_parts(4_453, 0).saturating_mul(i.into())) + // Measured: `93` + // Estimated: `6033` + // Minimum execution time: 171_761_000 picoseconds. + Weight::from_parts(158_031_807, 6033) + // Standard Error: 11 + .saturating_add(Weight::from_parts(4_536, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -230,12 +234,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `i` is `[0, 262144]`. fn instantiate(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1294` - // Estimated: `4739` - // Minimum execution time: 154_669_000 picoseconds. - Weight::from_parts(138_463_785, 4739) + // Measured: `987` + // Estimated: `4452` + // Minimum execution time: 143_210_000 picoseconds. 
+ Weight::from_parts(121_908_111, 4452) // Standard Error: 15 - .saturating_add(Weight::from_parts(4_389, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(4_467, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -253,10 +257,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) fn call() -> Weight { // Proof Size summary in bytes: - // Measured: `1502` - // Estimated: `7442` - // Minimum execution time: 137_822_000 picoseconds. - Weight::from_parts(146_004_000, 7442) + // Measured: `1195` + // Estimated: `7135` + // Minimum execution time: 136_689_000 picoseconds. + Weight::from_parts(145_358_000, 7135) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -267,12 +271,14 @@ impl WeightInfo for SubstrateWeight { /// Storage: `Revive::PristineCode` (r:0 w:1) /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) /// The range of component `c` is `[0, 262144]`. - fn upload_code(_c: u32, ) -> Weight { + fn upload_code(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `164` - // Estimated: `3629` - // Minimum execution time: 53_476_000 picoseconds. - Weight::from_parts(55_795_699, 3629) + // Measured: `0` + // Estimated: `3465` + // Minimum execution time: 43_351_000 picoseconds. 
+ Weight::from_parts(44_896_319, 3465) + // Standard Error: 0 + .saturating_add(Weight::from_parts(1, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -284,10 +290,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) fn remove_code() -> Weight { // Proof Size summary in bytes: - // Measured: `322` - // Estimated: `3787` - // Minimum execution time: 41_955_000 picoseconds. - Weight::from_parts(43_749_000, 3787) + // Measured: `181` + // Estimated: `3646` + // Minimum execution time: 36_034_000 picoseconds. + Weight::from_parts(36_595_000, 3646) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -297,10 +303,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) fn set_code() -> Weight { // Proof Size summary in bytes: - // Measured: `529` - // Estimated: `6469` - // Minimum execution time: 22_763_000 picoseconds. - Weight::from_parts(23_219_000, 6469) + // Measured: `425` + // Estimated: `6365` + // Minimum execution time: 19_484_000 picoseconds. + Weight::from_parts(20_104_000, 6365) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -310,10 +316,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(409), added: 2884, mode: `Measured`) fn map_account() -> Weight { // Proof Size summary in bytes: - // Measured: `164` - // Estimated: `3629` - // Minimum execution time: 45_478_000 picoseconds. - Weight::from_parts(46_658_000, 3629) + // Measured: `0` + // Estimated: `3465` + // Minimum execution time: 37_066_000 picoseconds. 
+ Weight::from_parts(37_646_000, 3465) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -323,10 +329,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) fn unmap_account() -> Weight { // Proof Size summary in bytes: - // Measured: `93` - // Estimated: `3558` - // Minimum execution time: 33_359_000 picoseconds. - Weight::from_parts(34_196_000, 3558) + // Measured: `56` + // Estimated: `3521` + // Minimum execution time: 31_604_000 picoseconds. + Weight::from_parts(32_557_000, 3521) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -336,10 +342,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `Measured`) fn dispatch_as_fallback_account() -> Weight { // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `3610` - // Minimum execution time: 13_663_000 picoseconds. - Weight::from_parts(14_278_000, 3610) + // Measured: `0` + // Estimated: `3465` + // Minimum execution time: 6_070_000 picoseconds. + Weight::from_parts(6_246_000, 3465) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// The range of component `r` is `[0, 1600]`. @@ -347,61 +353,61 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_966_000 picoseconds. - Weight::from_parts(7_708_050, 0) - // Standard Error: 238 - .saturating_add(Weight::from_parts(167_115, 0).saturating_mul(r.into())) + // Minimum execution time: 6_471_000 picoseconds. 
+ Weight::from_parts(7_724_355, 0) + // Standard Error: 245 + .saturating_add(Weight::from_parts(165_331, 0).saturating_mul(r.into())) } fn seal_caller() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 332_000 picoseconds. - Weight::from_parts(378_000, 0) + // Minimum execution time: 239_000 picoseconds. + Weight::from_parts(278_000, 0) } fn seal_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 303_000 picoseconds. - Weight::from_parts(329_000, 0) + // Minimum execution time: 234_000 picoseconds. + Weight::from_parts(264_000, 0) } /// Storage: `Revive::ContractInfoOf` (r:1 w:0) /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) fn seal_is_contract() -> Weight { // Proof Size summary in bytes: - // Measured: `306` - // Estimated: `3771` - // Minimum execution time: 10_014_000 picoseconds. - Weight::from_parts(10_549_000, 3771) + // Measured: `202` + // Estimated: `3667` + // Minimum execution time: 6_508_000 picoseconds. + Weight::from_parts(6_715_000, 3667) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Revive::AddressSuffix` (r:1 w:0) /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) fn seal_to_account_id() -> Weight { // Proof Size summary in bytes: - // Measured: `248` - // Estimated: `3713` - // Minimum execution time: 9_771_000 picoseconds. - Weight::from_parts(10_092_000, 3713) + // Measured: `144` + // Estimated: `3609` + // Minimum execution time: 6_190_000 picoseconds. 
+ Weight::from_parts(6_413_000, 3609) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Revive::ContractInfoOf` (r:1 w:0) /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) fn seal_code_hash() -> Weight { // Proof Size summary in bytes: - // Measured: `403` - // Estimated: `3868` - // Minimum execution time: 11_260_000 picoseconds. - Weight::from_parts(11_626_000, 3868) + // Measured: `299` + // Estimated: `3764` + // Minimum execution time: 7_547_000 picoseconds. + Weight::from_parts(7_742_000, 3764) .saturating_add(T::DbWeight::get().reads(1_u64)) } fn seal_own_code_hash() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 307_000 picoseconds. - Weight::from_parts(328_000, 0) + // Minimum execution time: 251_000 picoseconds. + Weight::from_parts(274_000, 0) } /// Storage: `Revive::ContractInfoOf` (r:1 w:0) /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) @@ -409,53 +415,53 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) fn seal_code_size() -> Weight { // Proof Size summary in bytes: - // Measured: `473` - // Estimated: `3938` - // Minimum execution time: 14_675_000 picoseconds. - Weight::from_parts(15_168_000, 3938) + // Measured: `369` + // Estimated: `3834` + // Minimum execution time: 10_825_000 picoseconds. + Weight::from_parts(11_185_000, 3834) .saturating_add(T::DbWeight::get().reads(2_u64)) } fn seal_caller_is_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 332_000 picoseconds. - Weight::from_parts(357_000, 0) + // Minimum execution time: 325_000 picoseconds. 
+ Weight::from_parts(352_000, 0) } fn seal_caller_is_root() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 298_000 picoseconds. - Weight::from_parts(332_000, 0) + // Minimum execution time: 245_000 picoseconds. + Weight::from_parts(282_000, 0) } fn seal_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 313_000 picoseconds. - Weight::from_parts(336_000, 0) + // Minimum execution time: 251_000 picoseconds. + Weight::from_parts(274_000, 0) } fn seal_weight_left() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 663_000 picoseconds. - Weight::from_parts(730_000, 0) + // Minimum execution time: 599_000 picoseconds. + Weight::from_parts(675_000, 0) } fn seal_ref_time_left() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 292_000 picoseconds. - Weight::from_parts(344_000, 0) + // Minimum execution time: 245_000 picoseconds. + Weight::from_parts(263_000, 0) } fn seal_balance() -> Weight { // Proof Size summary in bytes: // Measured: `102` // Estimated: `0` - // Minimum execution time: 4_604_000 picoseconds. - Weight::from_parts(4_875_000, 0) + // Minimum execution time: 4_613_000 picoseconds. + Weight::from_parts(4_768_000, 0) } /// Storage: `Revive::AddressSuffix` (r:1 w:0) /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) @@ -463,10 +469,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) fn seal_balance_of() -> Weight { // Proof Size summary in bytes: - // Measured: `264` - // Estimated: `3729` - // Minimum execution time: 12_252_000 picoseconds. 
- Weight::from_parts(12_641_000, 3729) + // Measured: `160` + // Estimated: `3625` + // Minimum execution time: 8_513_000 picoseconds. + Weight::from_parts(8_765_000, 3625) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: `Revive::ImmutableDataOf` (r:1 w:0) @@ -474,12 +480,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[1, 4096]`. fn seal_get_immutable_data(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `238 + n * (1 ±0)` - // Estimated: `3703 + n * (1 ±0)` - // Minimum execution time: 6_005_000 picoseconds. - Weight::from_parts(9_550_692, 3703) - // Standard Error: 18 - .saturating_add(Weight::from_parts(710, 0).saturating_mul(n.into())) + // Measured: `134 + n * (1 ±0)` + // Estimated: `3599 + n * (1 ±0)` + // Minimum execution time: 4_870_000 picoseconds. + Weight::from_parts(6_309_018, 3599) + // Standard Error: 7 + .saturating_add(Weight::from_parts(645, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -490,128 +496,138 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_981_000 picoseconds. - Weight::from_parts(2_297_488, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(528, 0).saturating_mul(n.into())) + // Minimum execution time: 1_754_000 picoseconds. + Weight::from_parts(1_939_099, 0) + // Standard Error: 1 + .saturating_add(Weight::from_parts(581, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().writes(1_u64)) } fn seal_value_transferred() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 279_000 picoseconds. - Weight::from_parts(309_000, 0) + // Minimum execution time: 243_000 picoseconds. 
+ Weight::from_parts(292_000, 0) } fn seal_minimum_balance() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 289_000 picoseconds. - Weight::from_parts(315_000, 0) + // Minimum execution time: 254_000 picoseconds. + Weight::from_parts(284_000, 0) } fn seal_return_data_size() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 253_000 picoseconds. - Weight::from_parts(310_000, 0) + // Minimum execution time: 242_000 picoseconds. + Weight::from_parts(257_000, 0) } fn seal_call_data_size() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 291_000 picoseconds. - Weight::from_parts(338_000, 0) + // Minimum execution time: 241_000 picoseconds. + Weight::from_parts(261_000, 0) } fn seal_gas_limit() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 266_000 picoseconds. - Weight::from_parts(331_000, 0) + // Minimum execution time: 265_000 picoseconds. + Weight::from_parts(290_000, 0) } fn seal_gas_price() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 250_000 picoseconds. - Weight::from_parts(314_000, 0) + // Minimum execution time: 225_000 picoseconds. + Weight::from_parts(249_000, 0) } fn seal_base_fee() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 266_000 picoseconds. - Weight::from_parts(341_000, 0) + // Minimum execution time: 246_000 picoseconds. + Weight::from_parts(266_000, 0) } fn seal_block_number() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 281_000 picoseconds. - Weight::from_parts(314_000, 0) + // Minimum execution time: 247_000 picoseconds. 
+ Weight::from_parts(267_000, 0) + } + /// Storage: `Session::Validators` (r:1 w:0) + /// Proof: `Session::Validators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn seal_block_author() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `1485` + // Minimum execution time: 13_503_000 picoseconds. + Weight::from_parts(13_907_000, 1485) + .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `System::BlockHash` (r:1 w:0) /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) fn seal_block_hash() -> Weight { // Proof Size summary in bytes: - // Measured: `30` - // Estimated: `3495` - // Minimum execution time: 3_557_000 picoseconds. - Weight::from_parts(3_816_000, 3495) + // Measured: `0` + // Estimated: `3465` + // Minimum execution time: 2_251_000 picoseconds. + Weight::from_parts(2_370_000, 3465) .saturating_add(T::DbWeight::get().reads(1_u64)) } fn seal_now() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 280_000 picoseconds. - Weight::from_parts(316_000, 0) + // Minimum execution time: 237_000 picoseconds. + Weight::from_parts(264_000, 0) } fn seal_weight_to_fee() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_413_000 picoseconds. - Weight::from_parts(1_477_000, 0) + // Minimum execution time: 1_238_000 picoseconds. + Weight::from_parts(1_311_000, 0) } /// The range of component `n` is `[0, 262140]`. fn seal_copy_to_contract(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 383_000 picoseconds. - Weight::from_parts(602_481, 0) + // Minimum execution time: 380_000 picoseconds. 
+ Weight::from_parts(524_789, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(201, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(237, 0).saturating_mul(n.into())) } fn seal_call_data_load() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 327_000 picoseconds. - Weight::from_parts(365_000, 0) + // Minimum execution time: 248_000 picoseconds. + Weight::from_parts(267_000, 0) } /// The range of component `n` is `[0, 262144]`. fn seal_call_data_copy(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 334_000 picoseconds. - Weight::from_parts(205_756, 0) + // Minimum execution time: 230_000 picoseconds. + Weight::from_parts(207_234, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(116, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(150, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 262140]`. fn seal_return(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 278_000 picoseconds. - Weight::from_parts(611_031, 0) + // Minimum execution time: 267_000 picoseconds. + Weight::from_parts(357_669, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(202, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(238, 0).saturating_mul(n.into())) } /// Storage: `Revive::AddressSuffix` (r:1 w:0) /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) @@ -626,12 +642,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 32]`. fn seal_terminate(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `324 + n * (88 ±0)` - // Estimated: `3791 + n * (2563 ±0)` - // Minimum execution time: 18_544_000 picoseconds. 
- Weight::from_parts(18_412_253, 3791) - // Standard Error: 12_785 - .saturating_add(Weight::from_parts(4_214_449, 0).saturating_mul(n.into())) + // Measured: `218 + n * (88 ±0)` + // Estimated: `3684 + n * (2563 ±0)` + // Minimum execution time: 14_314_000 picoseconds. + Weight::from_parts(15_353_516, 3684) + // Standard Error: 10_720 + .saturating_add(Weight::from_parts(4_159_489, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) @@ -644,41 +660,41 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_156_000 picoseconds. - Weight::from_parts(4_120_442, 0) - // Standard Error: 3_278 - .saturating_add(Weight::from_parts(212_768, 0).saturating_mul(t.into())) - // Standard Error: 33 - .saturating_add(Weight::from_parts(1_199, 0).saturating_mul(n.into())) + // Minimum execution time: 3_968_000 picoseconds. + Weight::from_parts(3_902_423, 0) + // Standard Error: 2_379 + .saturating_add(Weight::from_parts(199_019, 0).saturating_mul(t.into())) + // Standard Error: 24 + .saturating_add(Weight::from_parts(945, 0).saturating_mul(n.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) fn get_storage_empty() -> Weight { // Proof Size summary in bytes: - // Measured: `680` - // Estimated: `680` - // Minimum execution time: 11_065_000 picoseconds. - Weight::from_parts(11_573_000, 680) + // Measured: `584` + // Estimated: `584` + // Minimum execution time: 5_980_000 picoseconds. 
+ Weight::from_parts(6_250_000, 584) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) fn get_storage_full() -> Weight { // Proof Size summary in bytes: - // Measured: `10690` - // Estimated: `10690` - // Minimum execution time: 42_728_000 picoseconds. - Weight::from_parts(43_764_000, 10690) + // Measured: `10594` + // Estimated: `10594` + // Minimum execution time: 39_415_000 picoseconds. + Weight::from_parts(40_109_000, 10594) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) fn set_storage_empty() -> Weight { // Proof Size summary in bytes: - // Measured: `680` - // Estimated: `680` - // Minimum execution time: 12_376_000 picoseconds. - Weight::from_parts(12_658_000, 680) + // Measured: `584` + // Estimated: `584` + // Minimum execution time: 6_844_000 picoseconds. + Weight::from_parts(7_017_000, 584) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -686,10 +702,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) fn set_storage_full() -> Weight { // Proof Size summary in bytes: - // Measured: `10690` - // Estimated: `10690` - // Minimum execution time: 44_344_000 picoseconds. - Weight::from_parts(45_753_000, 10690) + // Measured: `10594` + // Estimated: `10594` + // Minimum execution time: 39_496_000 picoseconds. + Weight::from_parts(41_428_000, 10594) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -699,14 +715,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `o` is `[0, 448]`. 
fn seal_set_storage(n: u32, o: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `248 + o * (1 ±0)` - // Estimated: `247 + o * (1 ±0)` - // Minimum execution time: 9_333_000 picoseconds. - Weight::from_parts(12_118_514, 247) - // Standard Error: 187 - .saturating_add(Weight::from_parts(1_212, 0).saturating_mul(n.into())) - // Standard Error: 187 - .saturating_add(Weight::from_parts(3_114, 0).saturating_mul(o.into())) + // Measured: `152 + o * (1 ±0)` + // Estimated: `151 + o * (1 ±0)` + // Minimum execution time: 6_400_000 picoseconds. + Weight::from_parts(7_358_548, 151) + // Standard Error: 67 + .saturating_add(Weight::from_parts(659, 0).saturating_mul(n.into())) + // Standard Error: 67 + .saturating_add(Weight::from_parts(1_273, 0).saturating_mul(o.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(o.into())) @@ -716,12 +732,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 448]`. fn seal_clear_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `248 + n * (1 ±0)` - // Estimated: `247 + n * (1 ±0)` - // Minimum execution time: 8_800_000 picoseconds. - Weight::from_parts(12_126_263, 247) - // Standard Error: 310 - .saturating_add(Weight::from_parts(4_181, 0).saturating_mul(n.into())) + // Measured: `152 + n * (1 ±0)` + // Estimated: `151 + n * (1 ±0)` + // Minimum execution time: 6_090_000 picoseconds. + Weight::from_parts(7_308_548, 151) + // Standard Error: 113 + .saturating_add(Weight::from_parts(1_456, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -731,12 +747,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 448]`. 
fn seal_get_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `248 + n * (1 ±0)` - // Estimated: `247 + n * (1 ±0)` - // Minimum execution time: 8_612_000 picoseconds. - Weight::from_parts(11_888_491, 247) - // Standard Error: 322 - .saturating_add(Weight::from_parts(4_319, 0).saturating_mul(n.into())) + // Measured: `152 + n * (1 ±0)` + // Estimated: `151 + n * (1 ±0)` + // Minimum execution time: 5_649_000 picoseconds. + Weight::from_parts(7_096_122, 151) + // Standard Error: 120 + .saturating_add(Weight::from_parts(2_127, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -745,12 +761,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 448]`. fn seal_contains_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `248 + n * (1 ±0)` - // Estimated: `247 + n * (1 ±0)` - // Minimum execution time: 8_112_000 picoseconds. - Weight::from_parts(11_160_688, 247) - // Standard Error: 297 - .saturating_add(Weight::from_parts(4_056, 0).saturating_mul(n.into())) + // Measured: `152 + n * (1 ±0)` + // Estimated: `151 + n * (1 ±0)` + // Minimum execution time: 5_261_000 picoseconds. + Weight::from_parts(6_552_943, 151) + // Standard Error: 117 + .saturating_add(Weight::from_parts(1_585, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -759,12 +775,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 448]`. fn seal_take_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `248 + n * (1 ±0)` - // Estimated: `247 + n * (1 ±0)` - // Minimum execution time: 9_419_000 picoseconds. 
- Weight::from_parts(12_683_269, 247) - // Standard Error: 298 - .saturating_add(Weight::from_parts(4_848, 0).saturating_mul(n.into())) + // Measured: `152 + n * (1 ±0)` + // Estimated: `151 + n * (1 ±0)` + // Minimum execution time: 6_374_000 picoseconds. + Weight::from_parts(7_739_700, 151) + // Standard Error: 122 + .saturating_add(Weight::from_parts(2_264, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -773,36 +789,36 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_535_000 picoseconds. - Weight::from_parts(1_637_000, 0) + // Minimum execution time: 1_371_000 picoseconds. + Weight::from_parts(1_446_000, 0) } fn set_transient_storage_full() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_891_000 picoseconds. - Weight::from_parts(1_970_000, 0) + // Minimum execution time: 1_663_000 picoseconds. + Weight::from_parts(1_786_000, 0) } fn get_transient_storage_empty() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_442_000 picoseconds. - Weight::from_parts(1_595_000, 0) + // Minimum execution time: 1_352_000 picoseconds. + Weight::from_parts(1_425_000, 0) } fn get_transient_storage_full() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_690_000 picoseconds. - Weight::from_parts(1_781_000, 0) + // Minimum execution time: 1_499_000 picoseconds. + Weight::from_parts(1_569_000, 0) } fn rollback_transient_storage() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_364_000 picoseconds. - Weight::from_parts(1_408_000, 0) + // Minimum execution time: 1_038_000 picoseconds. 
+ Weight::from_parts(1_091_000, 0) } /// The range of component `n` is `[0, 448]`. /// The range of component `o` is `[0, 448]`. @@ -810,50 +826,50 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_392_000 picoseconds. - Weight::from_parts(2_559_622, 0) - // Standard Error: 18 - .saturating_add(Weight::from_parts(194, 0).saturating_mul(n.into())) - // Standard Error: 18 - .saturating_add(Weight::from_parts(319, 0).saturating_mul(o.into())) + // Minimum execution time: 2_108_000 picoseconds. + Weight::from_parts(2_300_363, 0) + // Standard Error: 13 + .saturating_add(Weight::from_parts(242, 0).saturating_mul(n.into())) + // Standard Error: 13 + .saturating_add(Weight::from_parts(374, 0).saturating_mul(o.into())) } /// The range of component `n` is `[0, 448]`. fn seal_clear_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_099_000 picoseconds. - Weight::from_parts(2_442_655, 0) - // Standard Error: 19 - .saturating_add(Weight::from_parts(361, 0).saturating_mul(n.into())) + // Minimum execution time: 1_822_000 picoseconds. + Weight::from_parts(2_150_092, 0) + // Standard Error: 16 + .saturating_add(Weight::from_parts(394, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 448]`. fn seal_get_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_936_000 picoseconds. - Weight::from_parts(2_160_919, 0) - // Standard Error: 20 - .saturating_add(Weight::from_parts(385, 0).saturating_mul(n.into())) + // Minimum execution time: 1_675_000 picoseconds. + Weight::from_parts(1_873_341, 0) + // Standard Error: 12 + .saturating_add(Weight::from_parts(273, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 448]`. 
fn seal_contains_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_809_000 picoseconds. - Weight::from_parts(1_997_103, 0) - // Standard Error: 16 - .saturating_add(Weight::from_parts(156, 0).saturating_mul(n.into())) + // Minimum execution time: 1_555_000 picoseconds. + Weight::from_parts(1_690_236, 0) + // Standard Error: 10 + .saturating_add(Weight::from_parts(185, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 448]`. fn seal_take_transient_storage(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_513_000 picoseconds. - Weight::from_parts(2_799_538, 0) + // Minimum execution time: 2_278_000 picoseconds. + Weight::from_parts(2_522_598, 0) } /// Storage: `Revive::AddressSuffix` (r:1 w:0) /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) @@ -869,18 +885,18 @@ impl WeightInfo for SubstrateWeight { /// The range of component `i` is `[0, 262144]`. fn seal_call(t: u32, i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1294 + t * (205 ±0)` - // Estimated: `4759 + t * (2482 ±0)` - // Minimum execution time: 36_919_000 picoseconds. - Weight::from_parts(37_978_283, 4759) - // Standard Error: 54_576 - .saturating_add(Weight::from_parts(5_559_261, 0).saturating_mul(t.into())) + // Measured: `1164 + t * (206 ±0)` + // Estimated: `4629 + t * (2417 ±0)` + // Minimum execution time: 30_631_000 picoseconds. 
+ Weight::from_parts(31_328_855, 4629) + // Standard Error: 36_031 + .saturating_add(Weight::from_parts(5_665_922, 0).saturating_mul(t.into())) // Standard Error: 0 .saturating_add(Weight::from_parts(2, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 2482).saturating_mul(t.into())) + .saturating_add(Weight::from_parts(0, 2417).saturating_mul(t.into())) } /// Storage: `Revive::ContractInfoOf` (r:1 w:0) /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) @@ -890,10 +906,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) fn seal_delegate_call() -> Weight { // Proof Size summary in bytes: - // Measured: `1237` - // Estimated: `4702` - // Minimum execution time: 31_267_000 picoseconds. - Weight::from_parts(32_495_000, 4702) + // Measured: `1109` + // Estimated: `4574` + // Minimum execution time: 25_423_000 picoseconds. + Weight::from_parts(25_957_000, 4574) .saturating_add(T::DbWeight::get().reads(3_u64)) } /// Storage: `Revive::CodeInfoOf` (r:1 w:1) @@ -907,12 +923,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `i` is `[0, 262144]`. fn seal_instantiate(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1272` - // Estimated: `4724` - // Minimum execution time: 119_000_000 picoseconds. - Weight::from_parts(110_163_800, 4724) - // Standard Error: 11 - .saturating_add(Weight::from_parts(4_063, 0).saturating_mul(i.into())) + // Measured: `1093` + // Estimated: `4571` + // Minimum execution time: 108_874_000 picoseconds. 
+ Weight::from_parts(98_900_023, 4571) + // Standard Error: 10 + .saturating_add(Weight::from_parts(4_183, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -921,73 +937,73 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 725_000 picoseconds. - Weight::from_parts(4_441_443, 0) + // Minimum execution time: 627_000 picoseconds. + Weight::from_parts(3_385_445, 0) // Standard Error: 3 - .saturating_add(Weight::from_parts(1_384, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_419, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 262144]`. fn seal_hash_keccak_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_057_000 picoseconds. - Weight::from_parts(5_659_277, 0) + // Minimum execution time: 1_035_000 picoseconds. + Weight::from_parts(3_723_700, 0) // Standard Error: 3 - .saturating_add(Weight::from_parts(3_588, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(3_637, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 262144]`. fn seal_hash_blake2_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 691_000 picoseconds. - Weight::from_parts(3_368_834, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(1_507, 0).saturating_mul(n.into())) + // Minimum execution time: 626_000 picoseconds. + Weight::from_parts(2_822_237, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(1_552, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 262144]`. fn seal_hash_blake2_128(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 619_000 picoseconds. 
- Weight::from_parts(2_422_606, 0) + // Minimum execution time: 554_000 picoseconds. + Weight::from_parts(3_287_817, 0) // Standard Error: 3 - .saturating_add(Weight::from_parts(1_528, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_542, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 261889]`. fn seal_sr25519_verify(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 46_148_000 picoseconds. - Weight::from_parts(35_311_479, 0) - // Standard Error: 10 - .saturating_add(Weight::from_parts(5_452, 0).saturating_mul(n.into())) + // Minimum execution time: 42_532_000 picoseconds. + Weight::from_parts(27_976_517, 0) + // Standard Error: 11 + .saturating_add(Weight::from_parts(5_453, 0).saturating_mul(n.into())) } fn seal_ecdsa_recover() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 49_475_000 picoseconds. - Weight::from_parts(50_488_000, 0) + // Minimum execution time: 45_970_000 picoseconds. + Weight::from_parts(47_747_000, 0) } fn seal_ecdsa_to_eth_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 12_516_000 picoseconds. - Weight::from_parts(12_637_000, 0) + // Minimum execution time: 12_550_000 picoseconds. + Weight::from_parts(12_706_000, 0) } /// Storage: `Revive::CodeInfoOf` (r:1 w:1) /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) fn seal_set_code_hash() -> Weight { // Proof Size summary in bytes: - // Measured: `300` - // Estimated: `3765` - // Minimum execution time: 13_735_000 picoseconds. - Weight::from_parts(14_450_000, 3765) + // Measured: `196` + // Estimated: `3661` + // Minimum execution time: 10_229_000 picoseconds. 
+ Weight::from_parts(10_530_000, 3661) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -995,10 +1011,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) fn lock_delegate_dependency() -> Weight { // Proof Size summary in bytes: - // Measured: `337` - // Estimated: `3802` - // Minimum execution time: 13_488_000 picoseconds. - Weight::from_parts(14_161_000, 3802) + // Measured: `234` + // Estimated: `3699` + // Minimum execution time: 9_743_000 picoseconds. + Weight::from_parts(10_180_000, 3699) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -1006,10 +1022,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `MaxEncodedLen`) fn unlock_delegate_dependency() -> Weight { // Proof Size summary in bytes: - // Measured: `337` + // Measured: `234` // Estimated: `3561` - // Minimum execution time: 12_686_000 picoseconds. - Weight::from_parts(13_180_000, 3561) + // Minimum execution time: 8_717_000 picoseconds. + Weight::from_parts(9_129_000, 3561) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -1018,10 +1034,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_475_000 picoseconds. - Weight::from_parts(10_353_864, 0) - // Standard Error: 99 - .saturating_add(Weight::from_parts(73_636, 0).saturating_mul(r.into())) + // Minimum execution time: 8_332_000 picoseconds. 
+ Weight::from_parts(9_985_610, 0) + // Standard Error: 187 + .saturating_add(Weight::from_parts(73_915, 0).saturating_mul(r.into())) } } @@ -1031,10 +1047,10 @@ impl WeightInfo for () { /// Proof: `Revive::DeletionQueueCounter` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) fn on_process_deletion_queue_batch() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `1594` - // Minimum execution time: 2_796_000 picoseconds. - Weight::from_parts(2_958_000, 1594) + // Measured: `0` + // Estimated: `1485` + // Minimum execution time: 690_000 picoseconds. + Weight::from_parts(743_000, 1485) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -1042,12 +1058,12 @@ impl WeightInfo for () { /// The range of component `k` is `[0, 1024]`. fn on_initialize_per_trie_key(k: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `425 + k * (69 ±0)` - // Estimated: `415 + k * (70 ±0)` - // Minimum execution time: 16_135_000 picoseconds. - Weight::from_parts(3_227_098, 415) - // Standard Error: 1_106 - .saturating_add(Weight::from_parts(1_175_210, 0).saturating_mul(k.into())) + // Measured: `230 + k * (69 ±0)` + // Estimated: `222 + k * (70 ±0)` + // Minimum execution time: 10_913_000 picoseconds. + Weight::from_parts(11_048_000, 222) + // Standard Error: 972 + .saturating_add(Weight::from_parts(1_172_318, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -1067,12 +1083,14 @@ impl WeightInfo for () { /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) /// The range of component `c` is `[0, 262144]`. 
- fn call_with_code_per_byte(_c: u32, ) -> Weight { + fn call_with_code_per_byte(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1502` - // Estimated: `7442` - // Minimum execution time: 89_144_000 picoseconds. - Weight::from_parts(93_719_381, 7442) + // Measured: `1195` + // Estimated: `7135` + // Minimum execution time: 83_080_000 picoseconds. + Weight::from_parts(89_270_264, 7135) + // Standard Error: 0 + .saturating_add(Weight::from_parts(3, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1092,16 +1110,14 @@ impl WeightInfo for () { /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) /// The range of component `c` is `[0, 262144]`. /// The range of component `i` is `[0, 262144]`. - fn instantiate_with_code(c: u32, i: u32, ) -> Weight { + fn instantiate_with_code(_c: u32, i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `401` - // Estimated: `6349` - // Minimum execution time: 185_726_000 picoseconds. - Weight::from_parts(165_030_228, 6349) - // Standard Error: 10 - .saturating_add(Weight::from_parts(10, 0).saturating_mul(c.into())) - // Standard Error: 10 - .saturating_add(Weight::from_parts(4_453, 0).saturating_mul(i.into())) + // Measured: `93` + // Estimated: `6033` + // Minimum execution time: 171_761_000 picoseconds. + Weight::from_parts(158_031_807, 6033) + // Standard Error: 11 + .saturating_add(Weight::from_parts(4_536, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -1122,12 +1138,12 @@ impl WeightInfo for () { /// The range of component `i` is `[0, 262144]`. fn instantiate(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1294` - // Estimated: `4739` - // Minimum execution time: 154_669_000 picoseconds. 
- Weight::from_parts(138_463_785, 4739) + // Measured: `987` + // Estimated: `4452` + // Minimum execution time: 143_210_000 picoseconds. + Weight::from_parts(121_908_111, 4452) // Standard Error: 15 - .saturating_add(Weight::from_parts(4_389, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(4_467, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -1145,10 +1161,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) fn call() -> Weight { // Proof Size summary in bytes: - // Measured: `1502` - // Estimated: `7442` - // Minimum execution time: 137_822_000 picoseconds. - Weight::from_parts(146_004_000, 7442) + // Measured: `1195` + // Estimated: `7135` + // Minimum execution time: 136_689_000 picoseconds. + Weight::from_parts(145_358_000, 7135) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1159,12 +1175,14 @@ impl WeightInfo for () { /// Storage: `Revive::PristineCode` (r:0 w:1) /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) /// The range of component `c` is `[0, 262144]`. - fn upload_code(_c: u32, ) -> Weight { + fn upload_code(c: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `164` - // Estimated: `3629` - // Minimum execution time: 53_476_000 picoseconds. - Weight::from_parts(55_795_699, 3629) + // Measured: `0` + // Estimated: `3465` + // Minimum execution time: 43_351_000 picoseconds. 
+ Weight::from_parts(44_896_319, 3465) + // Standard Error: 0 + .saturating_add(Weight::from_parts(1, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1176,10 +1194,10 @@ impl WeightInfo for () { /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) fn remove_code() -> Weight { // Proof Size summary in bytes: - // Measured: `322` - // Estimated: `3787` - // Minimum execution time: 41_955_000 picoseconds. - Weight::from_parts(43_749_000, 3787) + // Measured: `181` + // Estimated: `3646` + // Minimum execution time: 36_034_000 picoseconds. + Weight::from_parts(36_595_000, 3646) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1189,10 +1207,10 @@ impl WeightInfo for () { /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) fn set_code() -> Weight { // Proof Size summary in bytes: - // Measured: `529` - // Estimated: `6469` - // Minimum execution time: 22_763_000 picoseconds. - Weight::from_parts(23_219_000, 6469) + // Measured: `425` + // Estimated: `6365` + // Minimum execution time: 19_484_000 picoseconds. + Weight::from_parts(20_104_000, 6365) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1202,10 +1220,10 @@ impl WeightInfo for () { /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(409), added: 2884, mode: `Measured`) fn map_account() -> Weight { // Proof Size summary in bytes: - // Measured: `164` - // Estimated: `3629` - // Minimum execution time: 45_478_000 picoseconds. - Weight::from_parts(46_658_000, 3629) + // Measured: `0` + // Estimated: `3465` + // Minimum execution time: 37_066_000 picoseconds. 
+ Weight::from_parts(37_646_000, 3465) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1215,10 +1233,10 @@ impl WeightInfo for () { /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) fn unmap_account() -> Weight { // Proof Size summary in bytes: - // Measured: `93` - // Estimated: `3558` - // Minimum execution time: 33_359_000 picoseconds. - Weight::from_parts(34_196_000, 3558) + // Measured: `56` + // Estimated: `3521` + // Minimum execution time: 31_604_000 picoseconds. + Weight::from_parts(32_557_000, 3521) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1228,10 +1246,10 @@ impl WeightInfo for () { /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `Measured`) fn dispatch_as_fallback_account() -> Weight { // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `3610` - // Minimum execution time: 13_663_000 picoseconds. - Weight::from_parts(14_278_000, 3610) + // Measured: `0` + // Estimated: `3465` + // Minimum execution time: 6_070_000 picoseconds. + Weight::from_parts(6_246_000, 3465) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// The range of component `r` is `[0, 1600]`. @@ -1239,61 +1257,61 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_966_000 picoseconds. - Weight::from_parts(7_708_050, 0) - // Standard Error: 238 - .saturating_add(Weight::from_parts(167_115, 0).saturating_mul(r.into())) + // Minimum execution time: 6_471_000 picoseconds. + Weight::from_parts(7_724_355, 0) + // Standard Error: 245 + .saturating_add(Weight::from_parts(165_331, 0).saturating_mul(r.into())) } fn seal_caller() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 332_000 picoseconds. 
- Weight::from_parts(378_000, 0) + // Minimum execution time: 239_000 picoseconds. + Weight::from_parts(278_000, 0) } fn seal_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 303_000 picoseconds. - Weight::from_parts(329_000, 0) + // Minimum execution time: 234_000 picoseconds. + Weight::from_parts(264_000, 0) } /// Storage: `Revive::ContractInfoOf` (r:1 w:0) /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) fn seal_is_contract() -> Weight { // Proof Size summary in bytes: - // Measured: `306` - // Estimated: `3771` - // Minimum execution time: 10_014_000 picoseconds. - Weight::from_parts(10_549_000, 3771) + // Measured: `202` + // Estimated: `3667` + // Minimum execution time: 6_508_000 picoseconds. + Weight::from_parts(6_715_000, 3667) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Revive::AddressSuffix` (r:1 w:0) /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) fn seal_to_account_id() -> Weight { // Proof Size summary in bytes: - // Measured: `248` - // Estimated: `3713` - // Minimum execution time: 9_771_000 picoseconds. - Weight::from_parts(10_092_000, 3713) + // Measured: `144` + // Estimated: `3609` + // Minimum execution time: 6_190_000 picoseconds. + Weight::from_parts(6_413_000, 3609) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Revive::ContractInfoOf` (r:1 w:0) /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) fn seal_code_hash() -> Weight { // Proof Size summary in bytes: - // Measured: `403` - // Estimated: `3868` - // Minimum execution time: 11_260_000 picoseconds. - Weight::from_parts(11_626_000, 3868) + // Measured: `299` + // Estimated: `3764` + // Minimum execution time: 7_547_000 picoseconds. 
+ Weight::from_parts(7_742_000, 3764) .saturating_add(RocksDbWeight::get().reads(1_u64)) } fn seal_own_code_hash() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 307_000 picoseconds. - Weight::from_parts(328_000, 0) + // Minimum execution time: 251_000 picoseconds. + Weight::from_parts(274_000, 0) } /// Storage: `Revive::ContractInfoOf` (r:1 w:0) /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) @@ -1301,53 +1319,53 @@ impl WeightInfo for () { /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) fn seal_code_size() -> Weight { // Proof Size summary in bytes: - // Measured: `473` - // Estimated: `3938` - // Minimum execution time: 14_675_000 picoseconds. - Weight::from_parts(15_168_000, 3938) + // Measured: `369` + // Estimated: `3834` + // Minimum execution time: 10_825_000 picoseconds. + Weight::from_parts(11_185_000, 3834) .saturating_add(RocksDbWeight::get().reads(2_u64)) } fn seal_caller_is_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 332_000 picoseconds. - Weight::from_parts(357_000, 0) + // Minimum execution time: 325_000 picoseconds. + Weight::from_parts(352_000, 0) } fn seal_caller_is_root() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 298_000 picoseconds. - Weight::from_parts(332_000, 0) + // Minimum execution time: 245_000 picoseconds. + Weight::from_parts(282_000, 0) } fn seal_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 313_000 picoseconds. - Weight::from_parts(336_000, 0) + // Minimum execution time: 251_000 picoseconds. 
+ Weight::from_parts(274_000, 0) } fn seal_weight_left() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 663_000 picoseconds. - Weight::from_parts(730_000, 0) + // Minimum execution time: 599_000 picoseconds. + Weight::from_parts(675_000, 0) } fn seal_ref_time_left() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 292_000 picoseconds. - Weight::from_parts(344_000, 0) + // Minimum execution time: 245_000 picoseconds. + Weight::from_parts(263_000, 0) } fn seal_balance() -> Weight { // Proof Size summary in bytes: // Measured: `102` // Estimated: `0` - // Minimum execution time: 4_604_000 picoseconds. - Weight::from_parts(4_875_000, 0) + // Minimum execution time: 4_613_000 picoseconds. + Weight::from_parts(4_768_000, 0) } /// Storage: `Revive::AddressSuffix` (r:1 w:0) /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) @@ -1355,10 +1373,10 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `Measured`) fn seal_balance_of() -> Weight { // Proof Size summary in bytes: - // Measured: `264` - // Estimated: `3729` - // Minimum execution time: 12_252_000 picoseconds. - Weight::from_parts(12_641_000, 3729) + // Measured: `160` + // Estimated: `3625` + // Minimum execution time: 8_513_000 picoseconds. + Weight::from_parts(8_765_000, 3625) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: `Revive::ImmutableDataOf` (r:1 w:0) @@ -1366,12 +1384,12 @@ impl WeightInfo for () { /// The range of component `n` is `[1, 4096]`. fn seal_get_immutable_data(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `238 + n * (1 ±0)` - // Estimated: `3703 + n * (1 ±0)` - // Minimum execution time: 6_005_000 picoseconds. 
- Weight::from_parts(9_550_692, 3703) - // Standard Error: 18 - .saturating_add(Weight::from_parts(710, 0).saturating_mul(n.into())) + // Measured: `134 + n * (1 ±0)` + // Estimated: `3599 + n * (1 ±0)` + // Minimum execution time: 4_870_000 picoseconds. + Weight::from_parts(6_309_018, 3599) + // Standard Error: 7 + .saturating_add(Weight::from_parts(645, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1382,128 +1400,138 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_981_000 picoseconds. - Weight::from_parts(2_297_488, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(528, 0).saturating_mul(n.into())) + // Minimum execution time: 1_754_000 picoseconds. + Weight::from_parts(1_939_099, 0) + // Standard Error: 1 + .saturating_add(Weight::from_parts(581, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().writes(1_u64)) } fn seal_value_transferred() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 279_000 picoseconds. - Weight::from_parts(309_000, 0) + // Minimum execution time: 243_000 picoseconds. + Weight::from_parts(292_000, 0) } fn seal_minimum_balance() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 289_000 picoseconds. - Weight::from_parts(315_000, 0) + // Minimum execution time: 254_000 picoseconds. + Weight::from_parts(284_000, 0) } fn seal_return_data_size() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 253_000 picoseconds. - Weight::from_parts(310_000, 0) + // Minimum execution time: 242_000 picoseconds. 
+ Weight::from_parts(257_000, 0) } fn seal_call_data_size() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 291_000 picoseconds. - Weight::from_parts(338_000, 0) + // Minimum execution time: 241_000 picoseconds. + Weight::from_parts(261_000, 0) } fn seal_gas_limit() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 266_000 picoseconds. - Weight::from_parts(331_000, 0) + // Minimum execution time: 265_000 picoseconds. + Weight::from_parts(290_000, 0) } fn seal_gas_price() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 250_000 picoseconds. - Weight::from_parts(314_000, 0) + // Minimum execution time: 225_000 picoseconds. + Weight::from_parts(249_000, 0) } fn seal_base_fee() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 266_000 picoseconds. - Weight::from_parts(341_000, 0) + // Minimum execution time: 246_000 picoseconds. + Weight::from_parts(266_000, 0) } fn seal_block_number() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 281_000 picoseconds. - Weight::from_parts(314_000, 0) + // Minimum execution time: 247_000 picoseconds. + Weight::from_parts(267_000, 0) + } + /// Storage: `Session::Validators` (r:1 w:0) + /// Proof: `Session::Validators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn seal_block_author() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `1485` + // Minimum execution time: 13_503_000 picoseconds. 
+ Weight::from_parts(13_907_000, 1485) + .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `System::BlockHash` (r:1 w:0) /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) fn seal_block_hash() -> Weight { // Proof Size summary in bytes: - // Measured: `30` - // Estimated: `3495` - // Minimum execution time: 3_557_000 picoseconds. - Weight::from_parts(3_816_000, 3495) + // Measured: `0` + // Estimated: `3465` + // Minimum execution time: 2_251_000 picoseconds. + Weight::from_parts(2_370_000, 3465) .saturating_add(RocksDbWeight::get().reads(1_u64)) } fn seal_now() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 280_000 picoseconds. - Weight::from_parts(316_000, 0) + // Minimum execution time: 237_000 picoseconds. + Weight::from_parts(264_000, 0) } fn seal_weight_to_fee() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_413_000 picoseconds. - Weight::from_parts(1_477_000, 0) + // Minimum execution time: 1_238_000 picoseconds. + Weight::from_parts(1_311_000, 0) } /// The range of component `n` is `[0, 262140]`. fn seal_copy_to_contract(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 383_000 picoseconds. - Weight::from_parts(602_481, 0) + // Minimum execution time: 380_000 picoseconds. + Weight::from_parts(524_789, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(201, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(237, 0).saturating_mul(n.into())) } fn seal_call_data_load() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 327_000 picoseconds. - Weight::from_parts(365_000, 0) + // Minimum execution time: 248_000 picoseconds. + Weight::from_parts(267_000, 0) } /// The range of component `n` is `[0, 262144]`. 
fn seal_call_data_copy(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 334_000 picoseconds. - Weight::from_parts(205_756, 0) + // Minimum execution time: 230_000 picoseconds. + Weight::from_parts(207_234, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(116, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(150, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 262140]`. fn seal_return(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 278_000 picoseconds. - Weight::from_parts(611_031, 0) + // Minimum execution time: 267_000 picoseconds. + Weight::from_parts(357_669, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(202, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(238, 0).saturating_mul(n.into())) } /// Storage: `Revive::AddressSuffix` (r:1 w:0) /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) @@ -1518,12 +1546,12 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 32]`. fn seal_terminate(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `324 + n * (88 ±0)` - // Estimated: `3791 + n * (2563 ±0)` - // Minimum execution time: 18_544_000 picoseconds. - Weight::from_parts(18_412_253, 3791) - // Standard Error: 12_785 - .saturating_add(Weight::from_parts(4_214_449, 0).saturating_mul(n.into())) + // Measured: `218 + n * (88 ±0)` + // Estimated: `3684 + n * (2563 ±0)` + // Minimum execution time: 14_314_000 picoseconds. 
+ Weight::from_parts(15_353_516, 3684) + // Standard Error: 10_720 + .saturating_add(Weight::from_parts(4_159_489, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) @@ -1536,41 +1564,41 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_156_000 picoseconds. - Weight::from_parts(4_120_442, 0) - // Standard Error: 3_278 - .saturating_add(Weight::from_parts(212_768, 0).saturating_mul(t.into())) - // Standard Error: 33 - .saturating_add(Weight::from_parts(1_199, 0).saturating_mul(n.into())) + // Minimum execution time: 3_968_000 picoseconds. + Weight::from_parts(3_902_423, 0) + // Standard Error: 2_379 + .saturating_add(Weight::from_parts(199_019, 0).saturating_mul(t.into())) + // Standard Error: 24 + .saturating_add(Weight::from_parts(945, 0).saturating_mul(n.into())) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) fn get_storage_empty() -> Weight { // Proof Size summary in bytes: - // Measured: `680` - // Estimated: `680` - // Minimum execution time: 11_065_000 picoseconds. - Weight::from_parts(11_573_000, 680) + // Measured: `584` + // Estimated: `584` + // Minimum execution time: 5_980_000 picoseconds. + Weight::from_parts(6_250_000, 584) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) fn get_storage_full() -> Weight { // Proof Size summary in bytes: - // Measured: `10690` - // Estimated: `10690` - // Minimum execution time: 42_728_000 picoseconds. - Weight::from_parts(43_764_000, 10690) + // Measured: `10594` + // Estimated: `10594` + // Minimum execution time: 39_415_000 picoseconds. 
+ Weight::from_parts(40_109_000, 10594) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) fn set_storage_empty() -> Weight { // Proof Size summary in bytes: - // Measured: `680` - // Estimated: `680` - // Minimum execution time: 12_376_000 picoseconds. - Weight::from_parts(12_658_000, 680) + // Measured: `584` + // Estimated: `584` + // Minimum execution time: 6_844_000 picoseconds. + Weight::from_parts(7_017_000, 584) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1578,10 +1606,10 @@ impl WeightInfo for () { /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) fn set_storage_full() -> Weight { // Proof Size summary in bytes: - // Measured: `10690` - // Estimated: `10690` - // Minimum execution time: 44_344_000 picoseconds. - Weight::from_parts(45_753_000, 10690) + // Measured: `10594` + // Estimated: `10594` + // Minimum execution time: 39_496_000 picoseconds. + Weight::from_parts(41_428_000, 10594) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1591,14 +1619,14 @@ impl WeightInfo for () { /// The range of component `o` is `[0, 448]`. fn seal_set_storage(n: u32, o: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `248 + o * (1 ±0)` - // Estimated: `247 + o * (1 ±0)` - // Minimum execution time: 9_333_000 picoseconds. - Weight::from_parts(12_118_514, 247) - // Standard Error: 187 - .saturating_add(Weight::from_parts(1_212, 0).saturating_mul(n.into())) - // Standard Error: 187 - .saturating_add(Weight::from_parts(3_114, 0).saturating_mul(o.into())) + // Measured: `152 + o * (1 ±0)` + // Estimated: `151 + o * (1 ±0)` + // Minimum execution time: 6_400_000 picoseconds. 
+ Weight::from_parts(7_358_548, 151) + // Standard Error: 67 + .saturating_add(Weight::from_parts(659, 0).saturating_mul(n.into())) + // Standard Error: 67 + .saturating_add(Weight::from_parts(1_273, 0).saturating_mul(o.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(o.into())) @@ -1608,12 +1636,12 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 448]`. fn seal_clear_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `248 + n * (1 ±0)` - // Estimated: `247 + n * (1 ±0)` - // Minimum execution time: 8_800_000 picoseconds. - Weight::from_parts(12_126_263, 247) - // Standard Error: 310 - .saturating_add(Weight::from_parts(4_181, 0).saturating_mul(n.into())) + // Measured: `152 + n * (1 ±0)` + // Estimated: `151 + n * (1 ±0)` + // Minimum execution time: 6_090_000 picoseconds. + Weight::from_parts(7_308_548, 151) + // Standard Error: 113 + .saturating_add(Weight::from_parts(1_456, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -1623,12 +1651,12 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 448]`. fn seal_get_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `248 + n * (1 ±0)` - // Estimated: `247 + n * (1 ±0)` - // Minimum execution time: 8_612_000 picoseconds. - Weight::from_parts(11_888_491, 247) - // Standard Error: 322 - .saturating_add(Weight::from_parts(4_319, 0).saturating_mul(n.into())) + // Measured: `152 + n * (1 ±0)` + // Estimated: `151 + n * (1 ±0)` + // Minimum execution time: 5_649_000 picoseconds. 
+ Weight::from_parts(7_096_122, 151) + // Standard Error: 120 + .saturating_add(Weight::from_parts(2_127, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1637,12 +1665,12 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 448]`. fn seal_contains_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `248 + n * (1 ±0)` - // Estimated: `247 + n * (1 ±0)` - // Minimum execution time: 8_112_000 picoseconds. - Weight::from_parts(11_160_688, 247) - // Standard Error: 297 - .saturating_add(Weight::from_parts(4_056, 0).saturating_mul(n.into())) + // Measured: `152 + n * (1 ±0)` + // Estimated: `151 + n * (1 ±0)` + // Minimum execution time: 5_261_000 picoseconds. + Weight::from_parts(6_552_943, 151) + // Standard Error: 117 + .saturating_add(Weight::from_parts(1_585, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1651,12 +1679,12 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 448]`. fn seal_take_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `248 + n * (1 ±0)` - // Estimated: `247 + n * (1 ±0)` - // Minimum execution time: 9_419_000 picoseconds. - Weight::from_parts(12_683_269, 247) - // Standard Error: 298 - .saturating_add(Weight::from_parts(4_848, 0).saturating_mul(n.into())) + // Measured: `152 + n * (1 ±0)` + // Estimated: `151 + n * (1 ±0)` + // Minimum execution time: 6_374_000 picoseconds. 
+ Weight::from_parts(7_739_700, 151) + // Standard Error: 122 + .saturating_add(Weight::from_parts(2_264, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -1665,36 +1693,36 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_535_000 picoseconds. - Weight::from_parts(1_637_000, 0) + // Minimum execution time: 1_371_000 picoseconds. + Weight::from_parts(1_446_000, 0) } fn set_transient_storage_full() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_891_000 picoseconds. - Weight::from_parts(1_970_000, 0) + // Minimum execution time: 1_663_000 picoseconds. + Weight::from_parts(1_786_000, 0) } fn get_transient_storage_empty() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_442_000 picoseconds. - Weight::from_parts(1_595_000, 0) + // Minimum execution time: 1_352_000 picoseconds. + Weight::from_parts(1_425_000, 0) } fn get_transient_storage_full() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_690_000 picoseconds. - Weight::from_parts(1_781_000, 0) + // Minimum execution time: 1_499_000 picoseconds. + Weight::from_parts(1_569_000, 0) } fn rollback_transient_storage() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_364_000 picoseconds. - Weight::from_parts(1_408_000, 0) + // Minimum execution time: 1_038_000 picoseconds. + Weight::from_parts(1_091_000, 0) } /// The range of component `n` is `[0, 448]`. /// The range of component `o` is `[0, 448]`. 
@@ -1702,50 +1730,50 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_392_000 picoseconds. - Weight::from_parts(2_559_622, 0) - // Standard Error: 18 - .saturating_add(Weight::from_parts(194, 0).saturating_mul(n.into())) - // Standard Error: 18 - .saturating_add(Weight::from_parts(319, 0).saturating_mul(o.into())) + // Minimum execution time: 2_108_000 picoseconds. + Weight::from_parts(2_300_363, 0) + // Standard Error: 13 + .saturating_add(Weight::from_parts(242, 0).saturating_mul(n.into())) + // Standard Error: 13 + .saturating_add(Weight::from_parts(374, 0).saturating_mul(o.into())) } /// The range of component `n` is `[0, 448]`. fn seal_clear_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_099_000 picoseconds. - Weight::from_parts(2_442_655, 0) - // Standard Error: 19 - .saturating_add(Weight::from_parts(361, 0).saturating_mul(n.into())) + // Minimum execution time: 1_822_000 picoseconds. + Weight::from_parts(2_150_092, 0) + // Standard Error: 16 + .saturating_add(Weight::from_parts(394, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 448]`. fn seal_get_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_936_000 picoseconds. - Weight::from_parts(2_160_919, 0) - // Standard Error: 20 - .saturating_add(Weight::from_parts(385, 0).saturating_mul(n.into())) + // Minimum execution time: 1_675_000 picoseconds. + Weight::from_parts(1_873_341, 0) + // Standard Error: 12 + .saturating_add(Weight::from_parts(273, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 448]`. fn seal_contains_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_809_000 picoseconds. 
- Weight::from_parts(1_997_103, 0) - // Standard Error: 16 - .saturating_add(Weight::from_parts(156, 0).saturating_mul(n.into())) + // Minimum execution time: 1_555_000 picoseconds. + Weight::from_parts(1_690_236, 0) + // Standard Error: 10 + .saturating_add(Weight::from_parts(185, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 448]`. fn seal_take_transient_storage(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_513_000 picoseconds. - Weight::from_parts(2_799_538, 0) + // Minimum execution time: 2_278_000 picoseconds. + Weight::from_parts(2_522_598, 0) } /// Storage: `Revive::AddressSuffix` (r:1 w:0) /// Proof: `Revive::AddressSuffix` (`max_values`: None, `max_size`: Some(32), added: 2507, mode: `Measured`) @@ -1761,18 +1789,18 @@ impl WeightInfo for () { /// The range of component `i` is `[0, 262144]`. fn seal_call(t: u32, i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1294 + t * (205 ±0)` - // Estimated: `4759 + t * (2482 ±0)` - // Minimum execution time: 36_919_000 picoseconds. - Weight::from_parts(37_978_283, 4759) - // Standard Error: 54_576 - .saturating_add(Weight::from_parts(5_559_261, 0).saturating_mul(t.into())) + // Measured: `1164 + t * (206 ±0)` + // Estimated: `4629 + t * (2417 ±0)` + // Minimum execution time: 30_631_000 picoseconds. 
+ Weight::from_parts(31_328_855, 4629) + // Standard Error: 36_031 + .saturating_add(Weight::from_parts(5_665_922, 0).saturating_mul(t.into())) // Standard Error: 0 .saturating_add(Weight::from_parts(2, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(RocksDbWeight::get().writes(1_u64)) - .saturating_add(Weight::from_parts(0, 2482).saturating_mul(t.into())) + .saturating_add(Weight::from_parts(0, 2417).saturating_mul(t.into())) } /// Storage: `Revive::ContractInfoOf` (r:1 w:0) /// Proof: `Revive::ContractInfoOf` (`max_values`: None, `max_size`: Some(1779), added: 4254, mode: `Measured`) @@ -1782,10 +1810,10 @@ impl WeightInfo for () { /// Proof: `Revive::PristineCode` (`max_values`: None, `max_size`: Some(262180), added: 264655, mode: `Measured`) fn seal_delegate_call() -> Weight { // Proof Size summary in bytes: - // Measured: `1237` - // Estimated: `4702` - // Minimum execution time: 31_267_000 picoseconds. - Weight::from_parts(32_495_000, 4702) + // Measured: `1109` + // Estimated: `4574` + // Minimum execution time: 25_423_000 picoseconds. + Weight::from_parts(25_957_000, 4574) .saturating_add(RocksDbWeight::get().reads(3_u64)) } /// Storage: `Revive::CodeInfoOf` (r:1 w:1) @@ -1799,12 +1827,12 @@ impl WeightInfo for () { /// The range of component `i` is `[0, 262144]`. fn seal_instantiate(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1272` - // Estimated: `4724` - // Minimum execution time: 119_000_000 picoseconds. - Weight::from_parts(110_163_800, 4724) - // Standard Error: 11 - .saturating_add(Weight::from_parts(4_063, 0).saturating_mul(i.into())) + // Measured: `1093` + // Estimated: `4571` + // Minimum execution time: 108_874_000 picoseconds. 
+ Weight::from_parts(98_900_023, 4571) + // Standard Error: 10 + .saturating_add(Weight::from_parts(4_183, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1813,73 +1841,73 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 725_000 picoseconds. - Weight::from_parts(4_441_443, 0) + // Minimum execution time: 627_000 picoseconds. + Weight::from_parts(3_385_445, 0) // Standard Error: 3 - .saturating_add(Weight::from_parts(1_384, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_419, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 262144]`. fn seal_hash_keccak_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_057_000 picoseconds. - Weight::from_parts(5_659_277, 0) + // Minimum execution time: 1_035_000 picoseconds. + Weight::from_parts(3_723_700, 0) // Standard Error: 3 - .saturating_add(Weight::from_parts(3_588, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(3_637, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 262144]`. fn seal_hash_blake2_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 691_000 picoseconds. - Weight::from_parts(3_368_834, 0) - // Standard Error: 3 - .saturating_add(Weight::from_parts(1_507, 0).saturating_mul(n.into())) + // Minimum execution time: 626_000 picoseconds. + Weight::from_parts(2_822_237, 0) + // Standard Error: 2 + .saturating_add(Weight::from_parts(1_552, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 262144]`. fn seal_hash_blake2_128(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 619_000 picoseconds. 
- Weight::from_parts(2_422_606, 0) + // Minimum execution time: 554_000 picoseconds. + Weight::from_parts(3_287_817, 0) // Standard Error: 3 - .saturating_add(Weight::from_parts(1_528, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_542, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 261889]`. fn seal_sr25519_verify(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 46_148_000 picoseconds. - Weight::from_parts(35_311_479, 0) - // Standard Error: 10 - .saturating_add(Weight::from_parts(5_452, 0).saturating_mul(n.into())) + // Minimum execution time: 42_532_000 picoseconds. + Weight::from_parts(27_976_517, 0) + // Standard Error: 11 + .saturating_add(Weight::from_parts(5_453, 0).saturating_mul(n.into())) } fn seal_ecdsa_recover() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 49_475_000 picoseconds. - Weight::from_parts(50_488_000, 0) + // Minimum execution time: 45_970_000 picoseconds. + Weight::from_parts(47_747_000, 0) } fn seal_ecdsa_to_eth_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 12_516_000 picoseconds. - Weight::from_parts(12_637_000, 0) + // Minimum execution time: 12_550_000 picoseconds. + Weight::from_parts(12_706_000, 0) } /// Storage: `Revive::CodeInfoOf` (r:1 w:1) /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) fn seal_set_code_hash() -> Weight { // Proof Size summary in bytes: - // Measured: `300` - // Estimated: `3765` - // Minimum execution time: 13_735_000 picoseconds. - Weight::from_parts(14_450_000, 3765) + // Measured: `196` + // Estimated: `3661` + // Minimum execution time: 10_229_000 picoseconds. 
+ Weight::from_parts(10_530_000, 3661) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1887,10 +1915,10 @@ impl WeightInfo for () { /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `Measured`) fn lock_delegate_dependency() -> Weight { // Proof Size summary in bytes: - // Measured: `337` - // Estimated: `3802` - // Minimum execution time: 13_488_000 picoseconds. - Weight::from_parts(14_161_000, 3802) + // Measured: `234` + // Estimated: `3699` + // Minimum execution time: 9_743_000 picoseconds. + Weight::from_parts(10_180_000, 3699) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1898,10 +1926,10 @@ impl WeightInfo for () { /// Proof: `Revive::CodeInfoOf` (`max_values`: None, `max_size`: Some(96), added: 2571, mode: `MaxEncodedLen`) fn unlock_delegate_dependency() -> Weight { // Proof Size summary in bytes: - // Measured: `337` + // Measured: `234` // Estimated: `3561` - // Minimum execution time: 12_686_000 picoseconds. - Weight::from_parts(13_180_000, 3561) + // Minimum execution time: 8_717_000 picoseconds. + Weight::from_parts(9_129_000, 3561) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1910,9 +1938,9 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_475_000 picoseconds. - Weight::from_parts(10_353_864, 0) - // Standard Error: 99 - .saturating_add(Weight::from_parts(73_636, 0).saturating_mul(r.into())) + // Minimum execution time: 8_332_000 picoseconds. 
+ Weight::from_parts(9_985_610, 0) + // Standard Error: 187 + .saturating_add(Weight::from_parts(73_915, 0).saturating_mul(r.into())) } } diff --git a/substrate/frame/revive/uapi/src/host.rs b/substrate/frame/revive/uapi/src/host.rs index 2d7c73d26192e..08806f11c9c50 100644 --- a/substrate/frame/revive/uapi/src/host.rs +++ b/substrate/frame/revive/uapi/src/host.rs @@ -397,6 +397,13 @@ pub trait HostFn: private::Sealed { /// Returns the amount of ref_time left. fn ref_time_left() -> u64; + /// Stores the current block author of into the supplied buffer. + /// + /// # Parameters + /// + /// - `output`: A reference to the output data buffer to write the block author. + fn block_author(output: &mut [u8; 20]); + /// Stores the current block number of the current contract into the supplied buffer. /// /// # Parameters diff --git a/substrate/frame/revive/uapi/src/host/riscv64.rs b/substrate/frame/revive/uapi/src/host/riscv64.rs index 8179ea890189b..620c2a9e1f5c9 100644 --- a/substrate/frame/revive/uapi/src/host/riscv64.rs +++ b/substrate/frame/revive/uapi/src/host/riscv64.rs @@ -120,6 +120,7 @@ mod sys { pub fn call_data_size() -> u64; pub fn block_number(out_ptr: *mut u8); pub fn block_hash(block_number_ptr: *const u8, out_ptr: *mut u8); + pub fn block_author(out_ptr: *mut u8); pub fn hash_sha2_256(input_ptr: *const u8, input_len: u32, out_ptr: *mut u8); pub fn hash_keccak_256(input_ptr: *const u8, input_len: u32, out_ptr: *mut u8); pub fn hash_blake2_256(input_ptr: *const u8, input_len: u32, out_ptr: *mut u8); @@ -394,6 +395,10 @@ impl HostFn for HostFnImpl { unsafe { sys::block_number(output.as_mut_ptr()) } } + fn block_author(output: &mut [u8; 20]) { + unsafe { sys::block_author(output.as_mut_ptr()) } + } + fn weight_to_fee(ref_time_limit: u64, proof_size_limit: u64, output: &mut [u8; 32]) { unsafe { sys::weight_to_fee(ref_time_limit, proof_size_limit, output.as_mut_ptr()) }; } diff --git a/substrate/frame/support/procedural/examples/proc_main/main.rs 
b/substrate/frame/support/procedural/examples/proc_main/main.rs index 4bdfc76dd92f0..946bd5ff03ed2 100644 --- a/substrate/frame/support/procedural/examples/proc_main/main.rs +++ b/substrate/frame/support/procedural/examples/proc_main/main.rs @@ -234,7 +234,8 @@ mod runtime { RuntimeHoldReason, RuntimeSlashReason, RuntimeLockId, - RuntimeTask + RuntimeTask, + RuntimeViewFunction )] pub struct Runtime; diff --git a/substrate/frame/support/procedural/examples/proc_main/runtime.rs b/substrate/frame/support/procedural/examples/proc_main/runtime.rs index 109ca4f6dc488..8de560555895b 100644 --- a/substrate/frame/support/procedural/examples/proc_main/runtime.rs +++ b/substrate/frame/support/procedural/examples/proc_main/runtime.rs @@ -99,7 +99,8 @@ mod runtime { RuntimeHoldReason, RuntimeSlashReason, RuntimeLockId, - RuntimeTask + RuntimeTask, + RuntimeViewFunction )] pub struct Runtime; diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/call.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/call.rs index f055e8ce28e90..411d74ecbb3d2 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/call.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/call.rs @@ -18,7 +18,6 @@ use crate::construct_runtime::Pallet; use proc_macro2::TokenStream; use quote::quote; -use std::str::FromStr; use syn::Ident; pub fn expand_outer_dispatch( @@ -40,15 +39,7 @@ pub fn expand_outer_dispatch( let name = &pallet_declaration.name; let path = &pallet_declaration.path; let index = pallet_declaration.index; - let attr = - pallet_declaration.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { - let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) - .expect("was successfully parsed before; qed"); - quote! { - #acc - #attr - } - }); + let attr = pallet_declaration.get_attributes(); variant_defs.extend(quote! 
{ #attr diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/config.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/config.rs index dbbe6ba6e6c32..7a51ba6ecf1da 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/config.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/config.rs @@ -19,7 +19,6 @@ use crate::construct_runtime::Pallet; use inflector::Inflector; use proc_macro2::TokenStream; use quote::{format_ident, quote, ToTokens}; -use std::str::FromStr; use syn::Ident; pub fn expand_outer_config( @@ -41,14 +40,7 @@ pub fn expand_outer_config( let field_name = &Ident::new(&pallet_name.to_string().to_snake_case(), decl.name.span()); let part_is_generic = !pallet_entry.generics.params.is_empty(); - let attr = &decl.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { - let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) - .expect("was successfully parsed before; qed"); - quote! 
{ - #acc - #attr - } - }); + let attr = &decl.get_attributes(); types.extend(expand_config_types(attr, runtime, decl, &config, part_is_generic)); fields.extend(quote!(#attr pub #field_name: #config,)); diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs index e34c6ac5016a9..e25492802c329 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs @@ -18,7 +18,6 @@ use crate::construct_runtime::Pallet; use proc_macro2::TokenStream; use quote::quote; -use std::str::FromStr; use syn::Ident; pub fn expand_outer_inherent( @@ -36,14 +35,7 @@ pub fn expand_outer_inherent( if pallet_decl.exists_part("Inherent") { let name = &pallet_decl.name; let path = &pallet_decl.path; - let attr = pallet_decl.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { - let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) - .expect("was successfully parsed before; qed"); - quote! 
{ - #acc - #attr - } - }); + let attr = pallet_decl.get_attributes(); pallet_names.push(name); pallet_attrs.push(attr); diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs index 0b3bd51688651..d246c00628640 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs @@ -18,7 +18,6 @@ use crate::construct_runtime::{parse::PalletPath, Pallet}; use proc_macro2::TokenStream; use quote::quote; -use std::str::FromStr; use syn::Ident; pub fn expand_runtime_metadata( @@ -51,14 +50,7 @@ pub fn expand_runtime_metadata( let errors = expand_pallet_metadata_errors(runtime, decl); let associated_types = expand_pallet_metadata_associated_types(runtime, decl); let docs = expand_pallet_metadata_docs(runtime, decl); - let attr = decl.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { - let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) - .expect("was successfully parsed before; qed"); - quote! { - #acc - #attr - } - }); + let attr = decl.get_attributes(); let deprecation_info = expand_pallet_metadata_deprecation(runtime, decl); quote! { #attr @@ -78,6 +70,20 @@ pub fn expand_runtime_metadata( }) .collect::>(); + let view_functions = pallet_declarations.iter().map(|decl| { + let name = &decl.name; + let path = &decl.path; + let instance = decl.instance.as_ref().into_iter(); + let attr = decl.get_attributes(); + + quote! { + #attr + #path::Pallet::<#runtime #(, #path::#instance)*>::pallet_view_functions_metadata( + ::core::stringify!(#name) + ) + } + }); + quote! 
{ impl #runtime { fn metadata_ir() -> #scrate::__private::metadata_ir::MetadataIR { @@ -149,6 +155,10 @@ pub fn expand_runtime_metadata( >(), event_enum_ty: #scrate::__private::scale_info::meta_type::(), error_enum_ty: #scrate::__private::scale_info::meta_type::(), + }, + view_functions: #scrate::__private::metadata_ir::RuntimeViewFunctionsIR { + ty: #scrate::__private::scale_info::meta_type::(), + groups: #scrate::__private::sp_std::vec![ #(#view_functions),* ], } } } diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/mod.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/mod.rs index 88f9a3c6e33fd..823aa69dbdf2b 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/mod.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/mod.rs @@ -28,6 +28,7 @@ mod outer_enums; mod slash_reason; mod task; mod unsigned; +mod view_function; pub use call::expand_outer_dispatch; pub use config::expand_outer_config; @@ -41,3 +42,4 @@ pub use outer_enums::{expand_outer_enum, OuterEnumType}; pub use slash_reason::expand_outer_slash_reason; pub use task::expand_outer_task; pub use unsigned::expand_outer_validate_unsigned; +pub use view_function::expand_outer_query; diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs index 1c4ab436ad92a..4742e68e2e2a8 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs @@ -18,7 +18,6 @@ use crate::construct_runtime::{Pallet, SYSTEM_PALLET_NAME}; use proc_macro2::TokenStream; use quote::quote; -use std::str::FromStr; use syn::{Generics, Ident}; pub fn expand_outer_origin( @@ -335,14 +334,7 @@ fn expand_origin_caller_variant( let part_is_generic = !generics.params.is_empty(); let variant_name = &pallet.name; let path = &pallet.path; - let attr = 
pallet.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { - let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) - .expect("was successfully parsed before; qed"); - quote! { - #acc - #attr - } - }); + let attr = pallet.get_attributes(); match instance { Some(inst) if part_is_generic => quote! { @@ -387,14 +379,7 @@ fn expand_origin_pallet_conversions( }; let doc_string = get_intra_doc_string(" Convert to runtime origin using", &path.module_name()); - let attr = pallet.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { - let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) - .expect("was successfully parsed before; qed"); - quote! { - #acc - #attr - } - }); + let attr = pallet.get_attributes(); quote! { #attr diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/outer_enums.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/outer_enums.rs index 80b242ccbe493..80d3a5af26627 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/outer_enums.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/outer_enums.rs @@ -18,7 +18,6 @@ use crate::construct_runtime::Pallet; use proc_macro2::{Span, TokenStream}; use quote::{quote, ToTokens}; -use std::str::FromStr; use syn::{Generics, Ident}; /// Represents the types supported for creating an outer enum. @@ -185,14 +184,7 @@ fn expand_enum_variant( let path = &pallet.path; let variant_name = &pallet.name; let part_is_generic = !generics.params.is_empty(); - let attr = pallet.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { - let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) - .expect("was successfully parsed before; qed"); - quote! { - #acc - #attr - } - }); + let attr = pallet.get_attributes(); match instance { Some(inst) if part_is_generic => quote! 
{ @@ -224,14 +216,7 @@ fn expand_enum_conversion( enum_name_ident: &Ident, ) -> TokenStream { let variant_name = &pallet.name; - let attr = pallet.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { - let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) - .expect("was successfully parsed before; qed"); - quote! { - #acc - #attr - } - }); + let attr = pallet.get_attributes(); quote! { #attr diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/task.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/task.rs index 1302f86455f2c..b9b8efb8c0063 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/task.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/task.rs @@ -16,7 +16,6 @@ // limitations under the License use crate::construct_runtime::Pallet; -use core::str::FromStr; use proc_macro2::{Ident, TokenStream as TokenStream2}; use quote::quote; @@ -42,14 +41,7 @@ pub fn expand_outer_task( let instance = decl.instance.as_ref().map(|instance| quote!(, #path::#instance)); let task_type = quote!(#path::Task<#runtime_name #instance>); - let attr = decl.cfg_pattern.iter().fold(TokenStream2::new(), |acc, pattern| { - let attr = TokenStream2::from_str(&format!("#[cfg({})]", pattern.original())) - .expect("was successfully parsed before; qed"); - quote! { - #acc - #attr - } - }); + let attr = decl.get_attributes(); from_impls.push(quote! 
{ #attr diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/unsigned.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/unsigned.rs index 33aadba0d1f1c..737a39ea681e0 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/unsigned.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/unsigned.rs @@ -18,7 +18,6 @@ use crate::construct_runtime::Pallet; use proc_macro2::TokenStream; use quote::quote; -use std::str::FromStr; use syn::Ident; pub fn expand_outer_validate_unsigned( @@ -34,14 +33,7 @@ pub fn expand_outer_validate_unsigned( if pallet_decl.exists_part("ValidateUnsigned") { let name = &pallet_decl.name; let path = &pallet_decl.path; - let attr = pallet_decl.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { - let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) - .expect("was successfully parsed before; qed"); - quote! { - #acc - #attr - } - }); + let attr = pallet_decl.get_attributes(); pallet_names.push(name); pallet_attrs.push(attr); diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/view_function.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/view_function.rs new file mode 100644 index 0000000000000..094dcca4a5b52 --- /dev/null +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/view_function.rs @@ -0,0 +1,78 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License + +use crate::construct_runtime::Pallet; +use proc_macro2::{Ident, Span, TokenStream as TokenStream2}; + +/// Expands implementation of runtime level `DispatchViewFunction`. +pub fn expand_outer_query( + runtime_name: &Ident, + pallet_decls: &[Pallet], + scrate: &TokenStream2, +) -> TokenStream2 { + let runtime_view_function = syn::Ident::new("RuntimeViewFunction", Span::call_site()); + + let prefix_conditionals = pallet_decls.iter().map(|pallet| { + let pallet_name = &pallet.name; + let attr = pallet.get_attributes(); + quote::quote! { + #attr + if id.prefix == <#pallet_name as #scrate::view_functions::ViewFunctionIdPrefix>::prefix() { + return <#pallet_name as #scrate::view_functions::DispatchViewFunction>::dispatch_view_function(id, input, output) + } + } + }); + + quote::quote! { + /// Runtime query type. + #[derive( + Clone, PartialEq, Eq, + #scrate::__private::codec::Encode, + #scrate::__private::codec::Decode, + #scrate::__private::scale_info::TypeInfo, + #scrate::__private::RuntimeDebug, + )] + pub enum #runtime_view_function {} + + const _: () = { + impl #scrate::view_functions::DispatchViewFunction for #runtime_view_function { + fn dispatch_view_function( + id: & #scrate::view_functions::ViewFunctionId, + input: &mut &[u8], + output: &mut O + ) -> Result<(), #scrate::view_functions::ViewFunctionDispatchError> + { + #( #prefix_conditionals )* + Err(#scrate::view_functions::ViewFunctionDispatchError::NotFound(id.clone())) + } + } + + impl #runtime_name { + /// Convenience function for query execution from the runtime API. 
+ pub fn execute_view_function( + id: #scrate::view_functions::ViewFunctionId, + input: #scrate::__private::Vec<::core::primitive::u8>, + ) -> Result<#scrate::__private::Vec<::core::primitive::u8>, #scrate::view_functions::ViewFunctionDispatchError> + { + let mut output = #scrate::__private::vec![]; + <#runtime_view_function as #scrate::view_functions::DispatchViewFunction>::dispatch_view_function(&id, &mut &input[..], &mut output)?; + Ok(output) + } + } + }; + } +} diff --git a/substrate/frame/support/procedural/src/construct_runtime/mod.rs b/substrate/frame/support/procedural/src/construct_runtime/mod.rs index 087faf37252de..c6018e048f2f8 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/mod.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/mod.rs @@ -400,6 +400,7 @@ fn construct_runtime_final_expansion( let dispatch = expand::expand_outer_dispatch(&name, system_pallet, &pallets, &scrate); let tasks = expand::expand_outer_task(&name, &pallets, &scrate); + let query = expand::expand_outer_query(&name, &pallets, &scrate); let metadata = expand::expand_runtime_metadata( &name, &pallets, @@ -492,6 +493,8 @@ fn construct_runtime_final_expansion( #tasks + #query + #metadata #outer_config @@ -650,16 +653,7 @@ pub(crate) fn decl_pallet_runtime_setup( .collect::>(); let pallet_attrs = pallet_declarations .iter() - .map(|pallet| { - pallet.cfg_pattern.iter().fold(TokenStream2::new(), |acc, pattern| { - let attr = TokenStream2::from_str(&format!("#[cfg({})]", pattern.original())) - .expect("was successfully parsed before; qed"); - quote! 
{ - #acc - #attr - } - }) - }) + .map(|pallet| pallet.get_attributes()) .collect::>(); quote!( diff --git a/substrate/frame/support/procedural/src/construct_runtime/parse.rs b/substrate/frame/support/procedural/src/construct_runtime/parse.rs index 729a803a302ed..2df08123821a3 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/parse.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/parse.rs @@ -15,6 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +use core::str::FromStr; use frame_support_procedural_tools::syn_ext as ext; use proc_macro2::{Span, TokenStream}; use quote::ToTokens; @@ -609,6 +610,18 @@ impl Pallet { pub fn exists_part(&self, name: &str) -> bool { self.find_part(name).is_some() } + + // Get runtime attributes for the pallet, mostly used for macros + pub fn get_attributes(&self) -> TokenStream { + self.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { + let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) + .expect("was successfully parsed before; qed"); + quote::quote! { + #acc + #attr + } + }) + } } /// Result of a conversion of a declaration of pallets. 
diff --git a/substrate/frame/support/procedural/src/lib.rs b/substrate/frame/support/procedural/src/lib.rs index c2f546d92048a..26703a2438ef9 100644 --- a/substrate/frame/support/procedural/src/lib.rs +++ b/substrate/frame/support/procedural/src/lib.rs @@ -817,6 +817,7 @@ pub fn inject_runtime_type(_: TokenStream, tokens: TokenStream) -> TokenStream { if item.ident != "RuntimeCall" && item.ident != "RuntimeEvent" && item.ident != "RuntimeTask" && + item.ident != "RuntimeViewFunction" && item.ident != "RuntimeOrigin" && item.ident != "RuntimeHoldReason" && item.ident != "RuntimeFreezeReason" && @@ -826,7 +827,7 @@ pub fn inject_runtime_type(_: TokenStream, tokens: TokenStream) -> TokenStream { return syn::Error::new_spanned( item, "`#[inject_runtime_type]` can only be attached to `RuntimeCall`, `RuntimeEvent`, \ - `RuntimeTask`, `RuntimeOrigin`, `RuntimeParameters` or `PalletInfo`", + `RuntimeTask`, `RuntimeViewFunction`, `RuntimeOrigin`, `RuntimeParameters` or `PalletInfo`", ) .to_compile_error() .into(); diff --git a/substrate/frame/support/procedural/src/pallet/expand/mod.rs b/substrate/frame/support/procedural/src/pallet/expand/mod.rs index 3f9b50f79c0cc..439ec55e269d4 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/mod.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/mod.rs @@ -35,6 +35,7 @@ mod tasks; mod tt_default_parts; mod type_value; mod validate_unsigned; +mod view_functions; mod warnings; use crate::pallet::Def; @@ -66,6 +67,7 @@ pub fn expand(mut def: Def) -> proc_macro2::TokenStream { let error = error::expand_error(&mut def); let event = event::expand_event(&mut def); let storages = storage::expand_storages(&mut def); + let view_functions = view_functions::expand_view_functions(&def); let inherents = inherent::expand_inherents(&mut def); let instances = instances::expand_instances(&mut def); let hooks = hooks::expand_hooks(&mut def); @@ -108,6 +110,7 @@ storage item. 
Otherwise, all storage items are listed among [*Type Definitions*] #error #event #storages + #view_functions #inherents #instances #hooks diff --git a/substrate/frame/support/procedural/src/pallet/expand/view_functions.rs b/substrate/frame/support/procedural/src/pallet/expand/view_functions.rs new file mode 100644 index 0000000000000..587e74a2ac182 --- /dev/null +++ b/substrate/frame/support/procedural/src/pallet/expand/view_functions.rs @@ -0,0 +1,263 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::pallet::{parse::view_functions::ViewFunctionDef, Def}; +use proc_macro2::{Span, TokenStream}; +use syn::spanned::Spanned; + +pub fn expand_view_functions(def: &Def) -> TokenStream { + let (span, where_clause, view_fns, docs) = match def.view_functions.as_ref() { + Some(view_fns) => ( + view_fns.attr_span, + view_fns.where_clause.clone(), + view_fns.view_functions.clone(), + view_fns.docs.clone(), + ), + None => (def.item.span(), def.config.where_clause.clone(), Vec::new(), Vec::new()), + }; + + let view_function_prefix_impl = + expand_view_function_prefix_impl(def, span, where_clause.as_ref()); + + let view_fn_impls = view_fns + .iter() + .map(|view_fn| expand_view_function(def, span, where_clause.as_ref(), view_fn)); + let impl_dispatch_view_function = + impl_dispatch_view_function(def, span, where_clause.as_ref(), &view_fns); + let impl_view_function_metadata = + impl_view_function_metadata(def, span, where_clause.as_ref(), &view_fns, &docs); + + quote::quote! { + #view_function_prefix_impl + #( #view_fn_impls )* + #impl_dispatch_view_function + #impl_view_function_metadata + } +} + +fn expand_view_function_prefix_impl( + def: &Def, + span: Span, + where_clause: Option<&syn::WhereClause>, +) -> TokenStream { + let pallet_ident = &def.pallet_struct.pallet; + let frame_support = &def.frame_support; + let frame_system = &def.frame_system; + let type_impl_gen = &def.type_impl_generics(span); + let type_use_gen = &def.type_use_generics(span); + + quote::quote! { + impl<#type_impl_gen> #frame_support::view_functions::ViewFunctionIdPrefix for #pallet_ident<#type_use_gen> #where_clause { + fn prefix() -> [::core::primitive::u8; 16usize] { + < + ::PalletInfo + as #frame_support::traits::PalletInfo + >::name_hash::>() + .expect("No name_hash found for the pallet in the runtime! 
This usually means that the pallet wasn't added to `construct_runtime!`.") + } + } + } +} + +fn expand_view_function( + def: &Def, + span: Span, + where_clause: Option<&syn::WhereClause>, + view_fn: &ViewFunctionDef, +) -> TokenStream { + let frame_support = &def.frame_support; + let pallet_ident = &def.pallet_struct.pallet; + let type_impl_gen = &def.type_impl_generics(span); + let type_decl_bounded_gen = &def.type_decl_bounded_generics(span); + let type_use_gen = &def.type_use_generics(span); + let capture_docs = if cfg!(feature = "no-metadata-docs") { "never" } else { "always" }; + + let view_function_struct_ident = view_fn.view_function_struct_ident(); + let view_fn_name = &view_fn.name; + let (arg_names, arg_types) = match view_fn.args_names_types() { + Ok((arg_names, arg_types)) => (arg_names, arg_types), + Err(e) => return e.into_compile_error(), + }; + let return_type = &view_fn.return_type; + let docs = &view_fn.docs; + + let view_function_id_suffix_bytes_raw = match view_fn.view_function_id_suffix_bytes() { + Ok(view_function_id_suffix_bytes_raw) => view_function_id_suffix_bytes_raw, + Err(e) => return e.into_compile_error(), + }; + let view_function_id_suffix_bytes = view_function_id_suffix_bytes_raw + .map(|byte| syn::LitInt::new(&format!("0x{:X}_u8", byte), Span::call_site())); + + quote::quote! 
{ + #( #[doc = #docs] )* + #[allow(missing_docs)] + #[derive( + #frame_support::RuntimeDebugNoBound, + #frame_support::CloneNoBound, + #frame_support::EqNoBound, + #frame_support::PartialEqNoBound, + #frame_support::__private::codec::Encode, + #frame_support::__private::codec::Decode, + #frame_support::__private::scale_info::TypeInfo, + )] + #[codec(encode_bound())] + #[codec(decode_bound())] + #[scale_info(skip_type_params(#type_use_gen), capture_docs = #capture_docs)] + pub struct #view_function_struct_ident<#type_decl_bounded_gen> #where_clause { + #( + pub #arg_names: #arg_types, + )* + _marker: ::core::marker::PhantomData<(#type_use_gen,)>, + } + + impl<#type_impl_gen> #view_function_struct_ident<#type_use_gen> #where_clause { + /// Create a new [`#view_function_struct_ident`] instance. + pub fn new(#( #arg_names: #arg_types, )*) -> Self { + Self { + #( #arg_names, )* + _marker: ::core::default::Default::default() + } + } + } + + impl<#type_impl_gen> #frame_support::view_functions::ViewFunctionIdSuffix for #view_function_struct_ident<#type_use_gen> #where_clause { + const SUFFIX: [::core::primitive::u8; 16usize] = [ #( #view_function_id_suffix_bytes ),* ]; + } + + impl<#type_impl_gen> #frame_support::view_functions::ViewFunction for #view_function_struct_ident<#type_use_gen> #where_clause { + fn id() -> #frame_support::view_functions::ViewFunctionId { + #frame_support::view_functions::ViewFunctionId { + prefix: <#pallet_ident<#type_use_gen> as #frame_support::view_functions::ViewFunctionIdPrefix>::prefix(), + suffix: ::SUFFIX, + } + } + + type ReturnType = #return_type; + + fn invoke(self) -> Self::ReturnType { + let Self { #( #arg_names, )* _marker } = self; + #pallet_ident::<#type_use_gen> :: #view_fn_name( #( #arg_names, )* ) + } + } + } +} + +fn impl_dispatch_view_function( + def: &Def, + span: Span, + where_clause: Option<&syn::WhereClause>, + view_fns: &[ViewFunctionDef], +) -> TokenStream { + let frame_support = &def.frame_support; + let pallet_ident = 
&def.pallet_struct.pallet; + let type_impl_gen = &def.type_impl_generics(span); + let type_use_gen = &def.type_use_generics(span); + + let query_match_arms = view_fns.iter().map(|view_fn| { + let view_function_struct_ident = view_fn.view_function_struct_ident(); + quote::quote! { + <#view_function_struct_ident<#type_use_gen> as #frame_support::view_functions::ViewFunctionIdSuffix>::SUFFIX => { + <#view_function_struct_ident<#type_use_gen> as #frame_support::view_functions::ViewFunction>::execute(input, output) + } + } + }); + + quote::quote! { + impl<#type_impl_gen> #frame_support::view_functions::DispatchViewFunction + for #pallet_ident<#type_use_gen> #where_clause + { + #[deny(unreachable_patterns)] + fn dispatch_view_function( + id: & #frame_support::view_functions::ViewFunctionId, + input: &mut &[u8], + output: &mut O + ) -> Result<(), #frame_support::view_functions::ViewFunctionDispatchError> + { + match id.suffix { + #( #query_match_arms )* + _ => Err(#frame_support::view_functions::ViewFunctionDispatchError::NotFound(id.clone())), + } + } + } + } +} + +fn impl_view_function_metadata( + def: &Def, + span: Span, + where_clause: Option<&syn::WhereClause>, + view_fns: &[ViewFunctionDef], + docs: &[syn::Expr], +) -> TokenStream { + let frame_support = &def.frame_support; + let pallet_ident = &def.pallet_struct.pallet; + let type_impl_gen = &def.type_impl_generics(span); + let type_use_gen = &def.type_use_generics(span); + + let view_functions = view_fns.iter().map(|view_fn| { + let view_function_struct_ident = view_fn.view_function_struct_ident(); + let name = &view_fn.name; + let args = view_fn.args.iter().filter_map(|fn_arg| { + match fn_arg { + syn::FnArg::Receiver(_) => None, + syn::FnArg::Typed(typed) => { + let pat = &typed.pat; + let ty = &typed.ty; + Some(quote::quote! 
{ + #frame_support::__private::metadata_ir::ViewFunctionArgMetadataIR { + name: ::core::stringify!(#pat), + ty: #frame_support::__private::scale_info::meta_type::<#ty>(), + } + }) + } + } + }); + + let no_docs = vec![]; + let doc = if cfg!(feature = "no-metadata-docs") { &no_docs } else { &view_fn.docs }; + + quote::quote! { + #frame_support::__private::metadata_ir::ViewFunctionMetadataIR { + name: ::core::stringify!(#name), + id: <#view_function_struct_ident<#type_use_gen> as #frame_support::view_functions::ViewFunction>::id().into(), + args: #frame_support::__private::sp_std::vec![ #( #args ),* ], + output: #frame_support::__private::scale_info::meta_type::< + <#view_function_struct_ident<#type_use_gen> as #frame_support::view_functions::ViewFunction>::ReturnType + >(), + docs: #frame_support::__private::sp_std::vec![ #( #doc ),* ], + } + } + }); + + let no_docs = vec![]; + let doc = if cfg!(feature = "no-metadata-docs") { &no_docs } else { docs }; + + quote::quote! { + impl<#type_impl_gen> #pallet_ident<#type_use_gen> #where_clause { + #[doc(hidden)] + pub fn pallet_view_functions_metadata(name: &'static ::core::primitive::str) + -> #frame_support::__private::metadata_ir::ViewFunctionGroupIR + { + #frame_support::__private::metadata_ir::ViewFunctionGroupIR { + name, + view_functions: #frame_support::__private::sp_std::vec![ #( #view_functions ),* ], + docs: #frame_support::__private::sp_std::vec![ #( #doc ),* ], + } + } + } + } +} diff --git a/substrate/frame/support/procedural/src/pallet/parse/mod.rs b/substrate/frame/support/procedural/src/pallet/parse/mod.rs index c9a150effccbe..89875974b8b5d 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/mod.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/mod.rs @@ -36,6 +36,7 @@ pub mod storage; pub mod tasks; pub mod type_value; pub mod validate_unsigned; +pub mod view_functions; #[cfg(test)] pub mod tests; @@ -70,6 +71,7 @@ pub struct Def { pub frame_system: syn::Path, pub frame_support: 
syn::Path, pub dev_mode: bool, + pub view_functions: Option, } impl Def { @@ -103,6 +105,7 @@ impl Def { let mut storages = vec![]; let mut type_values = vec![]; let mut composites: Vec = vec![]; + let mut view_functions = None; for (index, item) in items.iter_mut().enumerate() { let pallet_attr: Option = helper::take_first_item_pallet_attr(item)?; @@ -205,6 +208,9 @@ impl Def { } composites.push(composite); }, + Some(PalletAttr::ViewFunctions(span)) => { + view_functions = Some(view_functions::ViewFunctionsImplDef::try_from(span, item)?); + } Some(attr) => { let msg = "Invalid duplicated attribute"; return Err(syn::Error::new(attr.span(), msg)) @@ -250,6 +256,7 @@ impl Def { frame_system, frame_support, dev_mode, + view_functions, }; def.check_instance_usage()?; @@ -563,6 +570,7 @@ mod keyword { syn::custom_keyword!(pallet); syn::custom_keyword!(extra_constants); syn::custom_keyword!(composite_enum); + syn::custom_keyword!(view_functions_experimental); } /// The possible values for the `#[pallet::config]` attribute. 
@@ -652,6 +660,7 @@ enum PalletAttr { TypeValue(proc_macro2::Span), ExtraConstants(proc_macro2::Span), Composite(proc_macro2::Span), + ViewFunctions(proc_macro2::Span), } impl PalletAttr { @@ -677,6 +686,7 @@ impl PalletAttr { Self::TypeValue(span) => *span, Self::ExtraConstants(span) => *span, Self::Composite(span) => *span, + Self::ViewFunctions(span) => *span, } } } @@ -778,6 +788,10 @@ impl syn::parse::Parse for PalletAttr { Ok(PalletAttr::ExtraConstants(content.parse::()?.span())) } else if lookahead.peek(keyword::composite_enum) { Ok(PalletAttr::Composite(content.parse::()?.span())) + } else if lookahead.peek(keyword::view_functions_experimental) { + Ok(PalletAttr::ViewFunctions( + content.parse::()?.span(), + )) } else { Err(lookahead.error()) } diff --git a/substrate/frame/support/procedural/src/pallet/parse/view_functions.rs b/substrate/frame/support/procedural/src/pallet/parse/view_functions.rs new file mode 100644 index 0000000000000..766bcb13da8b3 --- /dev/null +++ b/substrate/frame/support/procedural/src/pallet/parse/view_functions.rs @@ -0,0 +1,155 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governsing permissions and +// limitations under the License. + +use frame_support_procedural_tools::get_doc_literals; +use inflector::Inflector; +use syn::spanned::Spanned; + +/// Parsed representation of an impl block annotated with `pallet::view_functions_experimental`. 
+pub struct ViewFunctionsImplDef { + /// The where_clause used. + pub where_clause: Option, + /// The span of the pallet::view_functions_experimental attribute. + pub attr_span: proc_macro2::Span, + /// Docs, specified on the impl Block. + pub docs: Vec, + /// The view function definitions. + pub view_functions: Vec, +} + +impl ViewFunctionsImplDef { + pub fn try_from(attr_span: proc_macro2::Span, item: &mut syn::Item) -> syn::Result { + let syn::Item::Impl(item_impl) = item else { + return Err(syn::Error::new( + item.span(), + "Invalid pallet::view_functions_experimental, expected item impl", + )) + }; + let mut view_functions = Vec::new(); + for item in &mut item_impl.items { + if let syn::ImplItem::Fn(method) = item { + if !matches!(method.vis, syn::Visibility::Public(_)) { + let msg = "Invalid pallet::view_functions_experimental, view function must be public: \ + `pub fn`"; + + let span = match method.vis { + syn::Visibility::Inherited => method.sig.span(), + _ => method.vis.span(), + }; + + return Err(syn::Error::new(span, msg)) + } + + let view_fn_def = ViewFunctionDef::try_from(method.clone())?; + view_functions.push(view_fn_def) + } else { + return Err(syn::Error::new( + item.span(), + "Invalid pallet::view_functions_experimental, expected a function", + )) + } + } + Ok(Self { + view_functions, + attr_span, + where_clause: item_impl.generics.where_clause.clone(), + docs: get_doc_literals(&item_impl.attrs), + }) + } +} + +/// Parsed representation of a view function definition. 
+#[derive(Clone)] +pub struct ViewFunctionDef { + pub name: syn::Ident, + pub docs: Vec, + pub args: Vec, + pub return_type: syn::Type, +} + +impl TryFrom for ViewFunctionDef { + type Error = syn::Error; + fn try_from(method: syn::ImplItemFn) -> Result { + let syn::ReturnType::Type(_, type_) = method.sig.output else { + return Err(syn::Error::new(method.sig.span(), "view functions must return a value")) + }; + + Ok(Self { + name: method.sig.ident.clone(), + docs: get_doc_literals(&method.attrs), + args: method.sig.inputs.iter().cloned().collect::>(), + return_type: *type_.clone(), + }) + } +} + +impl ViewFunctionDef { + pub fn view_function_struct_ident(&self) -> syn::Ident { + syn::Ident::new( + &format!("{}ViewFunction", self.name.to_string().to_pascal_case()), + self.name.span(), + ) + } + + pub fn view_function_id_suffix_bytes(&self) -> Result<[u8; 16], syn::Error> { + let mut output = [0u8; 16]; + + // concatenate the signature string + let arg_types = self + .args_names_types()? + .1 + .iter() + .map(|ty| quote::quote!(#ty).to_string().replace(" ", "")) + .collect::>() + .join(","); + let return_type = &self.return_type; + let return_type = quote::quote!(#return_type).to_string().replace(" ", ""); + let view_fn_signature = format!( + "{view_function_name}({arg_types}) -> {return_type}", + view_function_name = &self.name, + ); + + // hash the signature string + let hash = sp_crypto_hashing::twox_128(view_fn_signature.as_bytes()); + output.copy_from_slice(&hash[..]); + Ok(output) + } + + pub fn args_names_types(&self) -> Result<(Vec, Vec), syn::Error> { + Ok(self + .args + .iter() + .map(|arg| { + let syn::FnArg::Typed(pat_type) = arg else { + return Err(syn::Error::new( + arg.span(), + "Unsupported argument in view function", + )); + }; + let syn::Pat::Ident(ident) = &*pat_type.pat else { + return Err(syn::Error::new( + pat_type.pat.span(), + "Unsupported pattern in view function argument", + )); + }; + Ok((ident.ident.clone(), *pat_type.ty.clone())) + }) + 
.collect::, syn::Error>>()? + .into_iter() + .unzip()) + } +} diff --git a/substrate/frame/support/procedural/src/runtime/expand/mod.rs b/substrate/frame/support/procedural/src/runtime/expand/mod.rs index 666bc03aa415d..005b109c0eb5f 100644 --- a/substrate/frame/support/procedural/src/runtime/expand/mod.rs +++ b/substrate/frame/support/procedural/src/runtime/expand/mod.rs @@ -182,6 +182,7 @@ fn construct_runtime_final_expansion( let mut slash_reason = None; let mut lock_id = None; let mut task = None; + let mut query = None; for runtime_type in runtime_types.iter() { match runtime_type { @@ -224,6 +225,9 @@ fn construct_runtime_final_expansion( RuntimeType::RuntimeTask(_) => { task = Some(expand::expand_outer_task(&name, &pallets, &scrate)); }, + RuntimeType::RuntimeViewFunction(_) => { + query = Some(expand::expand_outer_query(&name, &pallets, &scrate)); + }, } } @@ -301,6 +305,8 @@ fn construct_runtime_final_expansion( #task + #query + #metadata #outer_config diff --git a/substrate/frame/support/procedural/src/runtime/parse/runtime_types.rs b/substrate/frame/support/procedural/src/runtime/parse/runtime_types.rs index a4480e2a1fd32..9a385146a811e 100644 --- a/substrate/frame/support/procedural/src/runtime/parse/runtime_types.rs +++ b/substrate/frame/support/procedural/src/runtime/parse/runtime_types.rs @@ -32,6 +32,7 @@ mod keyword { custom_keyword!(RuntimeSlashReason); custom_keyword!(RuntimeLockId); custom_keyword!(RuntimeTask); + custom_keyword!(RuntimeViewFunction); } #[derive(Debug, Clone, PartialEq)] @@ -45,6 +46,7 @@ pub enum RuntimeType { RuntimeSlashReason(keyword::RuntimeSlashReason), RuntimeLockId(keyword::RuntimeLockId), RuntimeTask(keyword::RuntimeTask), + RuntimeViewFunction(keyword::RuntimeViewFunction), } impl Parse for RuntimeType { @@ -69,6 +71,8 @@ impl Parse for RuntimeType { Ok(Self::RuntimeLockId(input.parse()?)) } else if lookahead.peek(keyword::RuntimeTask) { Ok(Self::RuntimeTask(input.parse()?)) + } else if 
lookahead.peek(keyword::RuntimeViewFunction) { + Ok(Self::RuntimeViewFunction(input.parse()?)) } else { Err(lookahead.error()) } diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index a6969260e6a26..97d16e2a06d23 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -87,6 +87,7 @@ pub mod storage; #[cfg(test)] mod tests; pub mod traits; +pub mod view_functions; pub mod weights; #[doc(hidden)] pub mod unsigned { diff --git a/substrate/frame/support/src/tests/mod.rs b/substrate/frame/support/src/tests/mod.rs index 7c90a12d4167e..b10e719b9ac36 100644 --- a/substrate/frame/support/src/tests/mod.rs +++ b/substrate/frame/support/src/tests/mod.rs @@ -237,7 +237,8 @@ mod runtime { RuntimeHoldReason, RuntimeSlashReason, RuntimeLockId, - RuntimeTask + RuntimeTask, + RuntimeViewFunction )] pub struct Runtime; diff --git a/substrate/frame/support/src/view_functions.rs b/substrate/frame/support/src/view_functions.rs new file mode 100644 index 0000000000000..dd23fad94a4fd --- /dev/null +++ b/substrate/frame/support/src/view_functions.rs @@ -0,0 +1,128 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License fsor the specific language governing permissions and +// limitations under the License. + +//! Traits for querying pallet view functions. 
+ +use alloc::vec::Vec; +use codec::{Decode, DecodeAll, Encode, Output}; +use scale_info::TypeInfo; +use sp_runtime::RuntimeDebug; + +/// The unique identifier for a view function. +#[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo)] +pub struct ViewFunctionId { + /// The part of the id for dispatching view functions from the top level of the runtime. + /// + /// Specifies which view function grouping this view function belongs to. This could be a group + /// of view functions associated with a pallet, or a pallet agnostic group of view functions. + pub prefix: [u8; 16], + /// The part of the id for dispatching to a view function within a group. + pub suffix: [u8; 16], +} + +impl From for [u8; 32] { + fn from(value: ViewFunctionId) -> Self { + let mut output = [0u8; 32]; + output[..16].copy_from_slice(&value.prefix); + output[16..].copy_from_slice(&value.suffix); + output + } +} + +/// Error type for view function dispatching. +#[derive(Encode, Decode, RuntimeDebug, TypeInfo)] +pub enum ViewFunctionDispatchError { + /// View functions are not implemented for this runtime. + NotImplemented, + /// A view function with the given `ViewFunctionId` was not found + NotFound(ViewFunctionId), + /// Failed to decode the view function input. + Codec, +} + +impl From for ViewFunctionDispatchError { + fn from(_: codec::Error) -> Self { + ViewFunctionDispatchError::Codec + } +} + +/// Implemented by both pallets and the runtime. The runtime is dispatching by prefix using the +/// pallet implementation of `ViewFunctionIdPrefix` then the pallet is dispatching by suffix using +/// the methods implementation of `ViewFunctionIdSuffix`. 
+pub trait DispatchViewFunction { + fn dispatch_view_function( + id: &ViewFunctionId, + input: &mut &[u8], + output: &mut O, + ) -> Result<(), ViewFunctionDispatchError>; +} + +impl DispatchViewFunction for () { + fn dispatch_view_function( + _id: &ViewFunctionId, + _input: &mut &[u8], + _output: &mut O, + ) -> Result<(), ViewFunctionDispatchError> { + Err(ViewFunctionDispatchError::NotImplemented) + } +} + +/// Automatically implemented for each pallet by the macro [`pallet`](crate::pallet). +pub trait ViewFunctionIdPrefix { + fn prefix() -> [u8; 16]; +} + +/// Automatically implemented for each pallet view function method by the macro +/// [`pallet`](crate::pallet). +pub trait ViewFunctionIdSuffix { + const SUFFIX: [u8; 16]; +} + +/// Automatically implemented for each pallet view function method by the macro +/// [`pallet`](crate::pallet). +pub trait ViewFunction: DecodeAll { + fn id() -> ViewFunctionId; + type ReturnType: Encode; + + fn invoke(self) -> Self::ReturnType; + + fn execute( + input: &mut &[u8], + output: &mut O, + ) -> Result<(), ViewFunctionDispatchError> { + let view_function = Self::decode_all(input)?; + let result = view_function.invoke(); + Encode::encode_to(&result, output); + Ok(()) + } +} + +pub mod runtime_api { + use super::*; + + sp_api::decl_runtime_apis! { + #[api_version(1)] + /// Runtime API for executing view functions + pub trait RuntimeViewFunction { + /// Execute a view function query. 
+ fn execute_view_function( + query_id: ViewFunctionId, + input: Vec, + ) -> Result, ViewFunctionDispatchError>; + } + } +} diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr index 726b09cf54c99..faa9cb558c262 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr @@ -561,6 +561,15 @@ note: the trait `Config` must be implemented | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) +error[E0277]: the trait bound `Runtime: Config` is not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:26:3 + | +26 | System: frame_system::{Pallet, Call, Storage, Config, Event}, + | ^^^^^^ the trait `Config` is not implemented for `Runtime`, which is required by `Pallet: ViewFunctionIdPrefix` + | + = help: the trait `ViewFunctionIdPrefix` is implemented for `Pallet` + = note: required for `Pallet` to implement `ViewFunctionIdPrefix` + error[E0599]: the function or associated item `storage_metadata` exists for struct `Pallet`, but its trait bounds were not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | @@ -736,6 +745,31 @@ note: the trait `Config` must be implemented | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) +error[E0599]: the function or associated item `pallet_view_functions_metadata` exists for struct `Pallet`, but its trait bounds were not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 + | +20 | construct_runtime! 
{ + | __^ + | | _| + | || +21 | || pub struct Runtime where + | ||______________________- doesn't satisfy `Runtime: Config` +22 | | Block = Block, +23 | | NodeBlock = Block, +... | +27 | | } +28 | | } + | |__^ function or associated item cannot be called on `Pallet` due to unsatisfied trait bounds + | + = note: the following trait bounds were not satisfied: + `Runtime: Config` +note: the trait `Config` must be implemented + --> $WORKSPACE/substrate/frame/system/src/lib.rs + | + | pub trait Config: 'static + Eq + Clone { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | diff --git a/substrate/frame/support/test/tests/derive_impl_ui/inject_runtime_type_invalid.stderr b/substrate/frame/support/test/tests/derive_impl_ui/inject_runtime_type_invalid.stderr index c7159b34afb3d..aafc6b5a2c874 100644 --- a/substrate/frame/support/test/tests/derive_impl_ui/inject_runtime_type_invalid.stderr +++ b/substrate/frame/support/test/tests/derive_impl_ui/inject_runtime_type_invalid.stderr @@ -1,4 +1,4 @@ -error: `#[inject_runtime_type]` can only be attached to `RuntimeCall`, `RuntimeEvent`, `RuntimeTask`, `RuntimeOrigin`, `RuntimeParameters` or `PalletInfo` +error: `#[inject_runtime_type]` can only be attached to `RuntimeCall`, `RuntimeEvent`, `RuntimeTask`, `RuntimeViewFunction`, `RuntimeOrigin`, `RuntimeParameters` or `PalletInfo` --> tests/derive_impl_ui/inject_runtime_type_invalid.rs:32:5 | 32 | type RuntimeInfo = (); diff --git a/substrate/frame/support/test/tests/pallet.rs b/substrate/frame/support/test/tests/pallet.rs index 9df1f461bba25..e45ff64e4c26e 100644 --- a/substrate/frame/support/test/tests/pallet.rs +++ b/substrate/frame/support/test/tests/pallet.rs @@ -461,6 +461,22 @@ pub mod pallet { _myfield: u32, } + 
#[pallet::view_functions_experimental] + impl Pallet + where + T::AccountId: From + SomeAssociation1, + { + /// Query value no args. + pub fn get_value() -> Option { + Value::::get() + } + + /// Query value with args. + pub fn get_value_with_arg(key: u16) -> Option { + Map2::::get(key) + } + } + #[pallet::genesis_build] impl BuildGenesisConfig for GenesisConfig where @@ -814,7 +830,8 @@ mod runtime { RuntimeHoldReason, RuntimeSlashReason, RuntimeLockId, - RuntimeTask + RuntimeTask, + RuntimeViewFunction )] pub struct Runtime; diff --git a/substrate/frame/support/test/tests/runtime.rs b/substrate/frame/support/test/tests/runtime.rs index 5335e08837e4a..cbcdf8d27b39a 100644 --- a/substrate/frame/support/test/tests/runtime.rs +++ b/substrate/frame/support/test/tests/runtime.rs @@ -296,7 +296,8 @@ mod runtime { RuntimeHoldReason, RuntimeSlashReason, RuntimeLockId, - RuntimeTask + RuntimeTask, + RuntimeViewFunction )] pub struct Runtime; diff --git a/substrate/frame/support/test/tests/runtime_legacy_ordering.rs b/substrate/frame/support/test/tests/runtime_legacy_ordering.rs index 7b92073a82b1a..1594356ad8fe8 100644 --- a/substrate/frame/support/test/tests/runtime_legacy_ordering.rs +++ b/substrate/frame/support/test/tests/runtime_legacy_ordering.rs @@ -296,7 +296,8 @@ mod runtime { RuntimeHoldReason, RuntimeSlashReason, RuntimeLockId, - RuntimeTask + RuntimeTask, + RuntimeViewFunction )] pub struct Runtime; diff --git a/substrate/frame/support/test/tests/runtime_ui/invalid_runtime_type_derive.stderr b/substrate/frame/support/test/tests/runtime_ui/invalid_runtime_type_derive.stderr index 0b128c3dd4579..daa6721ff051d 100644 --- a/substrate/frame/support/test/tests/runtime_ui/invalid_runtime_type_derive.stderr +++ b/substrate/frame/support/test/tests/runtime_ui/invalid_runtime_type_derive.stderr @@ -1,4 +1,4 @@ -error: expected one of: `RuntimeCall`, `RuntimeEvent`, `RuntimeError`, `RuntimeOrigin`, `RuntimeFreezeReason`, `RuntimeHoldReason`, `RuntimeSlashReason`, 
`RuntimeLockId`, `RuntimeTask` +error: expected one of: `RuntimeCall`, `RuntimeEvent`, `RuntimeError`, `RuntimeOrigin`, `RuntimeFreezeReason`, `RuntimeHoldReason`, `RuntimeSlashReason`, `RuntimeLockId`, `RuntimeTask`, `RuntimeViewFunction` --> tests/runtime_ui/invalid_runtime_type_derive.rs:21:23 | 21 | #[runtime::derive(RuntimeInfo)] diff --git a/substrate/frame/support/test/tests/runtime_ui/pass/basic.rs b/substrate/frame/support/test/tests/runtime_ui/pass/basic.rs index 514f150180153..8350211335a52 100644 --- a/substrate/frame/support/test/tests/runtime_ui/pass/basic.rs +++ b/substrate/frame/support/test/tests/runtime_ui/pass/basic.rs @@ -27,7 +27,7 @@ impl frame_system::Config for Runtime { #[frame_support::runtime] mod runtime { #[runtime::runtime] - #[runtime::derive(RuntimeCall, RuntimeEvent, RuntimeOrigin, RuntimeError, RuntimeTask)] + #[runtime::derive(RuntimeCall, RuntimeEvent, RuntimeOrigin, RuntimeError, RuntimeTask, RuntimeViewFunction)] pub struct Runtime; #[runtime::pallet_index(0)] diff --git a/substrate/primitives/metadata-ir/src/lib.rs b/substrate/primitives/metadata-ir/src/lib.rs index dc01f7eaadb33..e048010a34b75 100644 --- a/substrate/primitives/metadata-ir/src/lib.rs +++ b/substrate/primitives/metadata-ir/src/lib.rs @@ -122,6 +122,7 @@ mod test { event_enum_ty: meta_type::<()>(), error_enum_ty: meta_type::<()>(), }, + view_functions: RuntimeViewFunctionsIR { ty: meta_type::<()>(), groups: vec![] }, } } diff --git a/substrate/primitives/metadata-ir/src/types.rs b/substrate/primitives/metadata-ir/src/types.rs index af217ffe16eeb..0617fc7dfb94f 100644 --- a/substrate/primitives/metadata-ir/src/types.rs +++ b/substrate/primitives/metadata-ir/src/types.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use codec::{Compact, Encode}; +use codec::{Compact, Decode, Encode}; use scale_info::{ form::{Form, MetaForm, PortableForm}, prelude::{collections::BTreeMap, vec::Vec}, @@ -41,6 +41,8 @@ pub struct MetadataIR { pub apis: Vec>, /// The outer enums types as found in the runtime. pub outer_enums: OuterEnumsIR, + /// Metadata of view function queries + pub view_functions: RuntimeViewFunctionsIR, } /// Metadata of a runtime trait. @@ -118,6 +120,89 @@ impl IntoPortable for RuntimeApiMethodParamMetadataIR { } } +/// Metadata of the top level runtime view function dispatch. +#[derive(Clone, PartialEq, Eq, Encode, Decode, Debug)] +pub struct RuntimeViewFunctionsIR { + /// The type implementing the runtime query dispatch. + pub ty: T::Type, + /// The view function groupings metadata. + pub groups: Vec>, +} + +/// Metadata of a runtime view function group. +/// +/// For example, view functions associated with a pallet would form a view function group. +#[derive(Clone, PartialEq, Eq, Encode, Decode, Debug)] +pub struct ViewFunctionGroupIR { + /// Name of the view function group. + pub name: T::String, + /// View functions belonging to the group. + pub view_functions: Vec>, + /// View function group documentation. + pub docs: Vec, +} + +impl IntoPortable for ViewFunctionGroupIR { + type Output = ViewFunctionGroupIR; + + fn into_portable(self, registry: &mut Registry) -> Self::Output { + ViewFunctionGroupIR { + name: self.name.into_portable(registry), + view_functions: registry.map_into_portable(self.view_functions), + docs: registry.map_into_portable(self.docs), + } + } +} + +/// Metadata of a runtime view function. +#[derive(Clone, PartialEq, Eq, Encode, Decode, Debug)] +pub struct ViewFunctionMetadataIR { + /// Query name. + pub name: T::String, + /// Query id. + pub id: [u8; 32], + /// Query args. + pub args: Vec>, + /// Query output. + pub output: T::Type, + /// Query documentation. 
+ pub docs: Vec, +} + +impl IntoPortable for ViewFunctionMetadataIR { + type Output = ViewFunctionMetadataIR; + + fn into_portable(self, registry: &mut Registry) -> Self::Output { + ViewFunctionMetadataIR { + name: self.name.into_portable(registry), + id: self.id, + args: registry.map_into_portable(self.args), + output: registry.register_type(&self.output), + docs: registry.map_into_portable(self.docs), + } + } +} + +/// Metadata of a runtime method argument. +#[derive(Clone, PartialEq, Eq, Encode, Decode, Debug)] +pub struct ViewFunctionArgMetadataIR { + /// Query argument name. + pub name: T::String, + /// Query argument type. + pub ty: T::Type, +} + +impl IntoPortable for ViewFunctionArgMetadataIR { + type Output = ViewFunctionArgMetadataIR; + + fn into_portable(self, registry: &mut Registry) -> Self::Output { + ViewFunctionArgMetadataIR { + name: self.name.into_portable(registry), + ty: registry.register_type(&self.ty), + } + } +} + /// The intermediate representation for a pallet metadata. #[derive(Clone, PartialEq, Eq, Encode, Debug)] pub struct PalletMetadataIR { diff --git a/substrate/primitives/metadata-ir/src/v15.rs b/substrate/primitives/metadata-ir/src/v15.rs index ed315a31e6dc9..7bc76f22b58d0 100644 --- a/substrate/primitives/metadata-ir/src/v15.rs +++ b/substrate/primitives/metadata-ir/src/v15.rs @@ -17,31 +17,39 @@ //! Convert the IR to V15 metadata. 
-use crate::OuterEnumsIR; - use super::types::{ - ExtrinsicMetadataIR, MetadataIR, PalletMetadataIR, RuntimeApiMetadataIR, + ExtrinsicMetadataIR, MetadataIR, OuterEnumsIR, PalletMetadataIR, RuntimeApiMetadataIR, RuntimeApiMethodMetadataIR, RuntimeApiMethodParamMetadataIR, TransactionExtensionMetadataIR, }; use frame_metadata::v15::{ - CustomMetadata, ExtrinsicMetadata, OuterEnums, PalletMetadata, RuntimeApiMetadata, - RuntimeApiMethodMetadata, RuntimeApiMethodParamMetadata, RuntimeMetadataV15, - SignedExtensionMetadata, + CustomMetadata, CustomValueMetadata, ExtrinsicMetadata, OuterEnums, PalletMetadata, + RuntimeApiMetadata, RuntimeApiMethodMetadata, RuntimeApiMethodParamMetadata, + RuntimeMetadataV15, SignedExtensionMetadata, }; +use scale_info::{IntoPortable, Registry}; impl From for RuntimeMetadataV15 { fn from(ir: MetadataIR) -> Self { - RuntimeMetadataV15::new( - ir.pallets.into_iter().map(Into::into).collect(), - ir.extrinsic.into(), - ir.ty, - ir.apis.into_iter().map(Into::into).collect(), - ir.outer_enums.into(), - // Substrate does not collect yet the custom metadata fields. - // This allows us to extend the V15 easily. 
- CustomMetadata { map: Default::default() }, - ) + let mut registry = Registry::new(); + let pallets = + registry.map_into_portable(ir.pallets.into_iter().map(Into::::into)); + let extrinsic = Into::::into(ir.extrinsic).into_portable(&mut registry); + let ty = registry.register_type(&ir.ty); + let apis = + registry.map_into_portable(ir.apis.into_iter().map(Into::::into)); + let outer_enums = Into::::into(ir.outer_enums).into_portable(&mut registry); + + let view_function_groups = registry.map_into_portable(ir.view_functions.groups.into_iter()); + let view_functions_custom_metadata = CustomValueMetadata { + ty: ir.view_functions.ty, + value: codec::Encode::encode(&view_function_groups), + }; + let mut custom_map = alloc::collections::BTreeMap::new(); + custom_map.insert("view_functions_experimental", view_functions_custom_metadata); + let custom = CustomMetadata { map: custom_map }.into_portable(&mut registry); + + Self { types: registry.into(), pallets, extrinsic, ty, apis, outer_enums, custom } } } diff --git a/templates/minimal/runtime/src/lib.rs b/templates/minimal/runtime/src/lib.rs index 972c7500f3993..5d549bf1a912d 100644 --- a/templates/minimal/runtime/src/lib.rs +++ b/templates/minimal/runtime/src/lib.rs @@ -138,7 +138,8 @@ mod runtime { RuntimeHoldReason, RuntimeSlashReason, RuntimeLockId, - RuntimeTask + RuntimeTask, + RuntimeViewFunction )] pub struct Runtime; diff --git a/templates/parachain/pallets/template/src/mock.rs b/templates/parachain/pallets/template/src/mock.rs index b924428d4145c..3eeb9604f0153 100644 --- a/templates/parachain/pallets/template/src/mock.rs +++ b/templates/parachain/pallets/template/src/mock.rs @@ -18,7 +18,8 @@ mod test_runtime { RuntimeHoldReason, RuntimeSlashReason, RuntimeLockId, - RuntimeTask + RuntimeTask, + RuntimeViewFunction )] pub struct Test; diff --git a/templates/parachain/runtime/src/apis.rs b/templates/parachain/runtime/src/apis.rs index 05a508ca655fb..d7da43b86af16 100644 --- 
a/templates/parachain/runtime/src/apis.rs +++ b/templates/parachain/runtime/src/apis.rs @@ -114,6 +114,12 @@ impl_runtime_apis! { } } + impl frame_support::view_functions::runtime_api::RuntimeViewFunction for Runtime { + fn execute_view_function(id: frame_support::view_functions::ViewFunctionId, input: Vec) -> Result, frame_support::view_functions::ViewFunctionDispatchError> { + Runtime::execute_view_function(id, input) + } + } + impl sp_block_builder::BlockBuilder for Runtime { fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { Executive::apply_extrinsic(extrinsic) diff --git a/templates/parachain/runtime/src/lib.rs b/templates/parachain/runtime/src/lib.rs index 0be27ecce7394..f312e9f80192f 100644 --- a/templates/parachain/runtime/src/lib.rs +++ b/templates/parachain/runtime/src/lib.rs @@ -262,7 +262,8 @@ mod runtime { RuntimeHoldReason, RuntimeSlashReason, RuntimeLockId, - RuntimeTask + RuntimeTask, + RuntimeViewFunction )] pub struct Runtime; diff --git a/templates/solochain/pallets/template/src/mock.rs b/templates/solochain/pallets/template/src/mock.rs index 1b86cd9b7709a..44085bc3bff18 100644 --- a/templates/solochain/pallets/template/src/mock.rs +++ b/templates/solochain/pallets/template/src/mock.rs @@ -18,7 +18,8 @@ mod runtime { RuntimeHoldReason, RuntimeSlashReason, RuntimeLockId, - RuntimeTask + RuntimeTask, + RuntimeViewFunction )] pub struct Test; diff --git a/templates/solochain/runtime/src/apis.rs b/templates/solochain/runtime/src/apis.rs index 06c645fa0c539..9dc588c43a2d5 100644 --- a/templates/solochain/runtime/src/apis.rs +++ b/templates/solochain/runtime/src/apis.rs @@ -75,6 +75,12 @@ impl_runtime_apis! 
{ } } + impl frame_support::view_functions::runtime_api::RuntimeViewFunction for Runtime { + fn execute_view_function(id: frame_support::view_functions::ViewFunctionId, input: Vec) -> Result, frame_support::view_functions::ViewFunctionDispatchError> { + Runtime::execute_view_function(id, input) + } + } + impl sp_block_builder::BlockBuilder for Runtime { fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { Executive::apply_extrinsic(extrinsic) diff --git a/templates/solochain/runtime/src/lib.rs b/templates/solochain/runtime/src/lib.rs index 6a2149ec8b637..f25b8413721ea 100644 --- a/templates/solochain/runtime/src/lib.rs +++ b/templates/solochain/runtime/src/lib.rs @@ -196,7 +196,8 @@ mod runtime { RuntimeHoldReason, RuntimeSlashReason, RuntimeLockId, - RuntimeTask + RuntimeTask, + RuntimeViewFunction )] pub struct Runtime;