Skip to content

Commit

Permalink
bench all weekly - and fix for pallet_multisig lib (#6789)
Browse files Browse the repository at this point in the history
Closes #6196 
Closes #7204

Example of PR: #6816

Every Sunday at 01:00 AM it will start benchmarking (with /cmd bench)
all runtimes and all pallets.
Then the total diff will be pushed to a branch and a PR opened. I assume
review-bot is going to assign the required reviewers per changed files.

I'm afraid each week will be too much to review & merge, but we can
adjust later.

Bonus: a fix for the pallet_multisig lib and
substrate/.maintain/frame-weight-template.hbs, which previously didn't
allow the new weights to compile.

---------

Signed-off-by: Oliver Tale-Yazdi <[email protected]>
Co-authored-by: command-bot <>
Co-authored-by: cmd[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: Oliver Tale-Yazdi <[email protected]>
  • Loading branch information
3 people authored Jan 24, 2025
1 parent e9393a9 commit f845a9f
Show file tree
Hide file tree
Showing 15 changed files with 949 additions and 751 deletions.
165 changes: 165 additions & 0 deletions .github/workflows/bench-all-runtimes.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,165 @@
# Weekly benchmarking workflow: re-runs `/cmd bench` for every runtime/pallet,
# collects the weight diffs as artifacts, and opens a draft PR with the result.
name: Bench all runtimes

on:
  # schedule:
  #   - cron: '0 1 * * 0' # weekly on Sunday night 01:00 UTC
  workflow_dispatch:
  # pull_request:

permissions: # allow the action to create a PR
  contents: write
  issues: write
  pull-requests: write
  actions: read

jobs:
  preflight:
    uses: ./.github/workflows/reusable-preflight.yml

  # Builds the job matrix from runtimes-matrix.json, keeping only entries
  # that declare a `package` (i.e. benchmarkable runtimes).
  runtime-matrix:
    runs-on: ubuntu-latest
    needs: [preflight]
    timeout-minutes: 30
    outputs:
      runtime: ${{ steps.runtime.outputs.runtime }}
    container:
      image: ${{ needs.preflight.outputs.IMAGE }}
    name: Extract runtimes from matrix
    steps:
      - uses: actions/checkout@v4
      - id: runtime
        run: |
          RUNTIMES=$(jq '[.[] | select(.package != null)]' .github/workflows/runtimes-matrix.json)
          RUNTIMES=$(echo $RUNTIMES | jq -c .)
          echo "runtime=$RUNTIMES"
          echo "runtime=$RUNTIMES" >> $GITHUB_OUTPUT

  run-frame-omni-bencher:
    needs: [preflight, runtime-matrix]
    runs-on: ${{ needs.preflight.outputs.RUNNER_WEIGHTS }}
    # 24 hours per runtime.
    # Max it takes 14hr for westend to recalculate, but due to limited runners,
    # sometimes it can take longer.
    timeout-minutes: 1440
    strategy:
      fail-fast: false # keep running other workflows even if one fails, to see the logs of all possible failures
      matrix:
        runtime: ${{ fromJSON(needs.runtime-matrix.outputs.runtime) }}
    container:
      image: ${{ needs.preflight.outputs.IMAGE }}
    env:
      PACKAGE_NAME: ${{ matrix.runtime.package }}
      FLAGS: ${{ matrix.runtime.bench_flags }}
      RUST_LOG: "frame_omni_bencher=info,polkadot_sdk_frame=info"
    steps:

      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          ref: master

      - name: script
        id: required
        run: |
          # Fixes "detected dubious ownership" error in the ci
          git config --global --add safe.directory $GITHUB_WORKSPACE
          git remote -v
          python3 -m pip install -r .github/scripts/generate-prdoc.requirements.txt
          python3 .github/scripts/cmd/cmd.py bench --runtime ${{ matrix.runtime.name }}
          git add .
          git status

          if [ -f /tmp/cmd/command_output.log ]; then
            CMD_OUTPUT=$(cat /tmp/cmd/command_output.log)
            # export to summary to display in the PR
            echo "$CMD_OUTPUT" >> $GITHUB_STEP_SUMMARY
            # should be multiline, otherwise it captures the first line only
            echo 'cmd_output<<EOF' >> $GITHUB_OUTPUT
            echo "$CMD_OUTPUT" >> $GITHUB_OUTPUT
            echo 'EOF' >> $GITHUB_OUTPUT
          fi

          # Create patch that includes both modifications and new files
          git add -A
          git diff --staged > diff-${{ matrix.runtime.name }}.patch -U0
          git reset

      - name: Upload diff
        uses: actions/upload-artifact@v4
        with:
          name: diff-${{ matrix.runtime.name }}
          path: diff-${{ matrix.runtime.name }}.patch

  # Applies every per-runtime patch on a fresh branch and opens a draft PR.
  apply-diff-commit:
    runs-on: ubuntu-latest
    needs: [run-frame-omni-bencher]
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          ref: master

      - name: Download all artifacts
        uses: actions/download-artifact@v4
        with:
          path: patches

      - name: Install subweight
        run: cargo install subweight

      # needs to be able to trigger CI
      - uses: actions/create-github-app-token@v1
        id: generate_token
        with:
          app-id: ${{ secrets.CMD_BOT_APP_ID }}
          private-key: ${{ secrets.CMD_BOT_APP_KEY }}

      - name: Apply diff and create PR
        env:
          GH_TOKEN: ${{ steps.generate_token.outputs.token }}
        run: |
          DATE=$(date +'%Y-%m-%d-%s')
          BRANCH="update-weights-weekly-$DATE"

          git config user.name "github-actions[bot]"
          git config user.email "41898282+github-actions[bot]@users.noreply.github.com"

          git switch -c "$BRANCH"

          for file in patches/diff-*/diff-*.patch; do
            if [ -f "$file" ] && [ -s "$file" ]; then
              echo "Applying $file"
              git apply "$file" --unidiff-zero --allow-empty || echo "Failed to apply $file"
            else
              echo "Skipping empty or non-existent patch file: $file"
            fi
          done
          rm -rf patches

          git add .
          git commit -m "Update all weights weekly for $DATE"
          git push --set-upstream origin "$BRANCH"

          PR_TITLE="Auto-update of all weights for $DATE"
          gh pr create \
            --title "$PR_TITLE" \
            --head "$BRANCH" \
            --base "master" \
            --reviewer paritytech/ci \
            --reviewer paritytech/release-engineering \
            --draft \
            --label "R0-silent" \
            --body "$PR_TITLE"

          subweight compare commits \
            --path-pattern "./**/weights/**/*.rs,./**/weights.rs" \
            --method asymptotic \
            --format markdown \
            --no-color \
            --change added changed \
            --ignore-errors \
            --threshold 2 \
            origin/master $BRANCH
Original file line number Diff line number Diff line change
Expand Up @@ -16,28 +16,28 @@

//! Autogenerated weights for `pallet_multisig`
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
//! DATE: 2025-01-23, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! WORST CASE MAP SIZE: `1000000`
//! HOSTNAME: `e20fc9f125eb`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024

// Executed Command:
// target/production/polkadot-parachain
// benchmark
// pallet
// --extrinsic=*
// --chain=asset-hub-rococo-dev
// --pallet=pallet_multisig
// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt
// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights
// --wasm-execution=compiled
// --steps=50
// --repeat=20
// --heap-pages=4096
// --no-storage-info
// --no-min-squares
// --no-median-slopes

#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(unused_parens)]
Expand All @@ -55,27 +55,27 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
// Proof Size summary in bytes:
// Measured: `0`
// Estimated: `0`
// Minimum execution time: 13_714_000 picoseconds.
Weight::from_parts(14_440_231, 0)
// Minimum execution time: 16_059_000 picoseconds.
Weight::from_parts(17_033_878, 0)
.saturating_add(Weight::from_parts(0, 0))
// Standard Error: 5
.saturating_add(Weight::from_parts(598, 0).saturating_mul(z.into()))
// Standard Error: 8
.saturating_add(Weight::from_parts(489, 0).saturating_mul(z.into()))
}
/// Storage: `Multisig::Multisigs` (r:1 w:1)
/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
/// The range of component `s` is `[2, 100]`.
/// The range of component `z` is `[0, 10000]`.
fn as_multi_create(s: u32, z: u32, ) -> Weight {
	// Proof Size summary in bytes:
	//  Measured:  `295 + s * (2 ±0)`
	//  Estimated: `6811`
	// Minimum execution time: 46_128_000 picoseconds.
	Weight::from_parts(33_704_180, 0)
		.saturating_add(Weight::from_parts(0, 6811))
		// Standard Error: 1_456
		.saturating_add(Weight::from_parts(147_148, 0).saturating_mul(s.into()))
		// Standard Error: 14
		.saturating_add(Weight::from_parts(2_037, 0).saturating_mul(z.into()))
		.saturating_add(T::DbWeight::get().reads(1))
		.saturating_add(T::DbWeight::get().writes(1))
}
Expand All @@ -85,15 +85,15 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
/// The range of component `z` is `[0, 10000]`.
fn as_multi_approve(s: u32, z: u32, ) -> Weight {
	// Proof Size summary in bytes:
	//  Measured:  `315`
	//  Estimated: `6811`
	// Minimum execution time: 32_218_000 picoseconds.
	Weight::from_parts(21_320_145, 0)
		.saturating_add(Weight::from_parts(0, 6811))
		// Standard Error: 1_922
		.saturating_add(Weight::from_parts(131_349, 0).saturating_mul(s.into()))
		// Standard Error: 18
		.saturating_add(Weight::from_parts(1_829, 0).saturating_mul(z.into()))
		.saturating_add(T::DbWeight::get().reads(1))
		.saturating_add(T::DbWeight::get().writes(1))
}
Expand All @@ -105,60 +105,63 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
/// The range of component `z` is `[0, 10000]`.
fn as_multi_complete(s: u32, z: u32, ) -> Weight {
	// Proof Size summary in bytes:
	//  Measured:  `418 + s * (33 ±0)`
	//  Estimated: `6811`
	// Minimum execution time: 53_641_000 picoseconds.
	Weight::from_parts(32_057_363, 0)
		.saturating_add(Weight::from_parts(0, 6811))
		// Standard Error: 2_897
		.saturating_add(Weight::from_parts(254_035, 0).saturating_mul(s.into()))
		// Standard Error: 28
		.saturating_add(Weight::from_parts(2_432, 0).saturating_mul(z.into()))
		.saturating_add(T::DbWeight::get().reads(2))
		.saturating_add(T::DbWeight::get().writes(2))
}
/// Storage: `Multisig::Multisigs` (r:1 w:1)
/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
/// The range of component `s` is `[2, 100]`.
/// The range of component `z` is `[0, 10000]`.
fn approve_as_multi_create(s: u32, ) -> Weight {
	// Proof Size summary in bytes:
	//  Measured:  `295 + s * (2 ±0)`
	//  Estimated: `6811`
	// Minimum execution time: 30_302_000 picoseconds.
	Weight::from_parts(33_367_363, 0)
		.saturating_add(Weight::from_parts(0, 6811))
		// Standard Error: 1_389
		.saturating_add(Weight::from_parts(150_845, 0).saturating_mul(s.into()))
		.saturating_add(T::DbWeight::get().reads(1))
		.saturating_add(T::DbWeight::get().writes(1))
}
/// Storage: `Multisig::Multisigs` (r:1 w:1)
/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
/// The range of component `s` is `[2, 100]`.
/// The range of component `z` is `[0, 10000]`.
fn approve_as_multi_approve(s: u32, ) -> Weight {
	// Proof Size summary in bytes:
	//  Measured:  `315`
	//  Estimated: `6811`
	// Minimum execution time: 17_008_000 picoseconds.
	Weight::from_parts(18_452_875, 0)
		.saturating_add(Weight::from_parts(0, 6811))
		// Standard Error: 949
		.saturating_add(Weight::from_parts(130_051, 0).saturating_mul(s.into()))
		.saturating_add(T::DbWeight::get().reads(1))
		.saturating_add(T::DbWeight::get().writes(1))
}
/// Storage: `Multisig::Multisigs` (r:1 w:1)
/// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`)
/// The range of component `s` is `[2, 100]`.
/// The range of component `z` is `[0, 10000]`.
fn cancel_as_multi(s: u32, ) -> Weight {
	// Proof Size summary in bytes:
	//  Measured:  `482 + s * (1 ±0)`
	//  Estimated: `6811`
	// Minimum execution time: 30_645_000 picoseconds.
	Weight::from_parts(33_864_517, 0)
		.saturating_add(Weight::from_parts(0, 6811))
		// Standard Error: 1_511
		.saturating_add(Weight::from_parts(138_628, 0).saturating_mul(s.into()))
		.saturating_add(T::DbWeight::get().reads(1))
		.saturating_add(T::DbWeight::get().writes(1))
}
Expand Down
Loading

0 comments on commit f845a9f

Please sign in to comment.