diff --git a/.github/actions/diffs/action.yml b/.github/actions/diffs/action.yml
index 37f2ae16d5bfb..8a97000610847 100644
--- a/.github/actions/diffs/action.yml
+++ b/.github/actions/diffs/action.yml
@@ -22,6 +22,9 @@ outputs:
   isMoveAutoFormatter:
     description: True when changes happened in MoveAutoFormatter code
     value: "${{ steps.diff.outputs.isMoveAutoFormatter }}"
+  isMoveAnalyzerTraceAdapter:
+    description: True when changes happened in Trace Adapter
+    value: "${{ steps.diff.outputs.isMoveAnalyzerTraceAdapter }}"
   isExamples:
     description: True when changes happened in examples/ directory
     value: "${{ steps.diff.outputs.isExamples }}"
@@ -79,5 +82,7 @@ runs:
             - 'sui-execution/**'
           isMoveAutoFormatter:
             - 'external-crates/move/crates/move-analyzer/prettier-plugin/**'
+          isMoveAnalyzerTraceAdapter:
+            - 'external-crates/move/crates/move-analyzer/trace-adapter/**'
           isExamples:
             - 'examples/**'
diff --git a/.github/workflows/ide-tests.yml b/.github/workflows/ide-tests.yml
new file mode 100644
index 0000000000000..ecb0e2a7435e8
--- /dev/null
+++ b/.github/workflows/ide-tests.yml
@@ -0,0 +1,91 @@
+name: IDE Tests
+
+on:
+  push:
+    branches: main
+  pull_request:
+    types: [ opened, synchronize, reopened, ready_for_review ]
+  workflow_dispatch:
+    inputs:
+      sui_repo_ref:
+        description: "Branch / commit to test"
+        type: string
+        required: false
+        default: ''
+
+jobs:
+  diff:
+    runs-on: [ubuntu-latest]
+    outputs:
+      isMoveAutoFormatter: ${{ steps.diff.outputs.isMoveAutoFormatter }}
+      isMoveAnalyzerTraceAdapter: ${{ steps.diff.outputs.isMoveAnalyzerTraceAdapter }}
+    steps:
+      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # Pin v4.1.1
+      - name: Detect Changes
+        uses: './.github/actions/diffs'
+        id: diff
+
+  move-auto-formatter-ci-test:
+    name: Move Auto-formatter Test
+    needs: diff
+    if: needs.diff.outputs.isMoveAutoFormatter == 'true'
+    runs-on: [ ubuntu-latest ]
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # Pin v4.1.1
+        with:
+          ref: ${{ github.event.inputs.sui_repo_ref || github.ref }}
+
+      - name: pnpm setup
+        uses: pnpm/action-setup@a3252b78c470c02df07e9d59298aecedc3ccdd6d # pin@v3.0.0
+        with:
+          version: 9.1.1
+
+      - name: Setup Node
+        uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # pin@v4.0.2
+        with:
+          node-version: '20'
+          cache: 'pnpm'
+
+      - name: Install dependencies
+        working-directory: ./external-crates/move/crates/move-analyzer/prettier-plugin
+        run: npm install && npm i web-tree-sitter
+
+      - name: Run npm test
+        working-directory: ./external-crates/move/crates/move-analyzer/prettier-plugin
+        shell: bash
+        run: npm run test
+
+  move-analyzer-trace-adapter-ci-test:
+    name: Trace Adapter Test
+    needs: diff
+    if: needs.diff.outputs.isMoveAnalyzerTraceAdapter == 'true'
+    runs-on: [ ubuntu-latest ]
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # Pin v4.1.1
+        with:
+          ref: ${{ github.event.inputs.sui_repo_ref || github.ref }}
+
+      - name: pnpm setup
+        uses: pnpm/action-setup@a3252b78c470c02df07e9d59298aecedc3ccdd6d # pin@v3.0.0
+        with:
+          version: 9.1.1
+
+      - name: Setup Node
+        uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # pin@v4.0.2
+        with:
+          node-version: '20'
+          cache: 'pnpm'
+
+      - name: Install dependencies
+        working-directory: ./external-crates/move/crates/move-analyzer/trace-adapter
+        shell: bash
+        run: npm install
+
+      - name: Run npm test
+        working-directory: ./external-crates/move/crates/move-analyzer/trace-adapter
+        shell: bash
+        run: npm run test
diff --git a/.vscode/extensions.json b/.vscode/extensions.json
index f29818dd52d36..79feead4aa559 100644
--- a/.vscode/extensions.json
+++ b/.vscode/extensions.json
@@ -1,6 +1,7 @@
 {
   "recommendations": [
-    "move.move-analyzer",
+    "mysten.move",
+    "damirka.move-syntax",
     "rust-lang.rust-analyzer",
     "esbenp.prettier-vscode",
     "ms-playwright.playwright",
diff --git a/Cargo.lock b/Cargo.lock
index 34011f5351fe9..bc21f489583af 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -182,7 +182,7 @@ dependencies = [
  "rand 0.8.5",
  "rcgen",
  "ring 0.17.8",
- "rustls 0.23.15",
+ "rustls 0.23.16",
  "rustls-webpki 0.102.8",
  "serde",
  "serde_json",
@@ -217,7 +217,7 @@ dependencies = [
  "prettyplease 0.2.25",
  "proc-macro2 1.0.89",
  "quote 1.0.37",
- "syn 2.0.85",
+ "syn 2.0.87",
 ]
 
 [[package]]
@@ -262,9 +262,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299"
 
 [[package]]
 name = "anstream"
-version = "0.6.15"
+version = "0.6.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526"
+checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b"
 dependencies = [
  "anstyle",
  "anstyle-parse",
@@ -277,52 +277,52 @@ dependencies = [
 
 [[package]]
 name = "anstyle"
-version = "1.0.8"
+version = "1.0.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1"
+checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9"
 
 [[package]]
 name = "anstyle-parse"
-version = "0.2.5"
+version = "0.2.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb"
+checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9"
 dependencies = [
  "utf8parse",
 ]
 
 [[package]]
 name = "anstyle-query"
-version = "1.1.1"
+version = "1.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a"
+checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c"
 dependencies = [
- "windows-sys 0.52.0",
+ "windows-sys 0.59.0",
 ]
 
 [[package]]
 name = "anstyle-wincon"
-version = "3.0.4"
+version = "3.0.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8"
+checksum = "2109dbce0e72be3ec00bed26e6a7479ca384ad226efdd66db8fa2e3a38c83125"
 dependencies = [
  "anstyle",
- "windows-sys 0.52.0",
+ "windows-sys 0.59.0",
 ]
 
 [[package]]
 name = "anyhow"
-version = "1.0.91"
+version = "1.0.93"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c042108f3ed77fd83760a5fd79b53be043192bb3b9dba91d8c574c0ada7850c8"
+checksum = "4c95c10ba0b00a02636238b814946408b1322d5ac4760326e6fb8ec956d85775"
 dependencies = [
  "backtrace",
 ]
 
 [[package]]
 name = "arbitrary"
-version = "1.3.2"
+version = "1.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7d5a26814d8dcb93b0e5a0ff3c6d80a8843bafb21b39e8e18a6f05471870e110"
+checksum = "dde20b3d026af13f561bdd0f15edf01fc734f0dafcedbaf42bba506a9517f223"
 dependencies = [
  "derive_arbitrary",
 ]
@@ -1002,7 +1002,7 @@ dependencies = [
  "proc-macro2 1.0.89",
  "quote 1.0.37",
  "syn 1.0.109",
- "synstructure",
+ "synstructure 0.12.6",
 ]
 
 [[package]]
@@ -1016,6 +1016,16 @@ dependencies = [
  "syn 1.0.109",
 ]
 
+[[package]]
+name = "assert-json-diff"
+version = "2.0.2"
+source =
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "47e4f2b81832e72834d7518d8487a0396a28cc408186a2e8854c0f98011faf12" +dependencies = [ + "serde", + "serde_json", +] + [[package]] name = "assert_cmd" version = "2.0.16" @@ -1089,8 +1099,8 @@ checksum = "30ca9a001c1e8ba5149f91a74362376cc6bc5b919d92d988668657bd570bdcec" dependencies = [ "async-task 4.7.1", "concurrent-queue", - "fastrand", - "futures-lite", + "fastrand 2.2.0", + "futures-lite 2.5.0", "slab", ] @@ -1105,7 +1115,7 @@ dependencies = [ "async-io", "async-lock 3.4.0", "blocking", - "futures-lite", + "futures-lite 2.5.0", "once_cell", ] @@ -1180,7 +1190,7 @@ dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", "strum 0.25.0", - "syn 2.0.85", + "syn 2.0.87", "thiserror", ] @@ -1210,15 +1220,15 @@ dependencies = [ [[package]] name = "async-io" -version = "2.3.4" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "444b0228950ee6501b3568d3c93bf1176a1fdbc3b758dcd9475046d30f4dc7e8" +checksum = "43a2b323ccce0a1d90b449fd71f2a06ca7faa7c54c2751f06c9bd851fc061059" dependencies = [ "async-lock 3.4.0", "cfg-if", "concurrent-queue", "futures-io", - "futures-lite", + "futures-lite 2.5.0", "parking", "polling", "rustix", @@ -1255,7 +1265,7 @@ checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -1273,7 +1283,7 @@ dependencies = [ "futures-channel", "futures-core", "futures-io", - "futures-lite", + "futures-lite 2.5.0", "gloo-timers 0.3.0", "kv-log-macro", "log", @@ -1304,7 +1314,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -1326,7 +1336,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -1399,7 +1409,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -1432,7 +1442,7 @@ dependencies = [ "aws-smithy-types", "aws-types", "bytes", - "fastrand", + "fastrand 2.2.0", "hex", "http 0.2.12", "hyper 0.14.31", @@ -1452,7 +1462,7 @@ checksum = "70a66ac8ef5fa9cf01c2d999f39d16812e90ec1467bd382cbbb74ba23ea86201" dependencies = [ "aws-smithy-async", "aws-smithy-types", - "fastrand", + "fastrand 2.2.0", "tokio", "tracing", "zeroize", @@ -1492,7 +1502,7 @@ dependencies = [ "aws-smithy-runtime-api", "aws-smithy-types", "aws-types", - "fastrand", + "fastrand 2.2.0", "http 0.2.12", "percent-encoding", "tracing", @@ -1517,7 +1527,7 @@ dependencies = [ "aws-smithy-types", "aws-types", "bytes", - "fastrand", + "fastrand 2.2.0", "http 0.2.12", "regex", "tokio-stream", @@ -1543,7 +1553,7 @@ dependencies = [ "aws-smithy-types", "aws-smithy-xml", "aws-types", - "fastrand", + "fastrand 2.2.0", "http 0.2.12", "regex", "tokio-stream", @@ -1695,7 +1705,7 @@ dependencies = [ "aws-smithy-http-tower", "aws-smithy-types", "bytes", - "fastrand", + "fastrand 2.2.0", "http 0.2.12", "http-body 0.4.6", "hyper 0.14.31", @@ -1789,7 +1799,7 @@ dependencies = [ "aws-smithy-runtime-api", "aws-smithy-types", "bytes", - "fastrand", + "fastrand 2.2.0", "http 0.2.12", "http-body 0.4.6", "once_cell", @@ -1988,7 +1998,7 @@ checksum = "57d123550fa8d071b7255cb0cc04dc302baa6c8c4a79f55701552684d8399bce" 
dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -2005,7 +2015,7 @@ dependencies = [ "hyper 1.5.0", "hyper-util", "pin-project-lite", - "rustls 0.23.15", + "rustls 0.23.16", "rustls-pemfile 2.2.0", "rustls-pki-types", "tokio", @@ -2223,9 +2233,9 @@ dependencies = [ [[package]] name = "bigdecimal" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d712318a27c7150326677b321a5fa91b55f6d9034ffd67f20319e147d40cee" +checksum = "8f850665a0385e070b64c38d2354e6c104c8479c59868d1e48a0c13ee2c7a1c1" dependencies = [ "autocfg", "libm", @@ -2242,7 +2252,7 @@ checksum = "230c5f1ca6a325a32553f8640d31ac9b49f2411e901e427570154868b46da4f7" [[package]] name = "bin-version" -version = "1.36.1" +version = "1.37.1" dependencies = [ "const-str", "git-version", @@ -2275,7 +2285,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -2501,7 +2511,7 @@ dependencies = [ "async-channel 2.3.1", "async-task 4.7.1", "futures-io", - "futures-lite", + "futures-lite 2.5.0", "piper", ] @@ -2895,9 +2905,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.31" +version = "1.1.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2e7962b54006dcfcc61cb72735f4d89bb97061dd6a7ed882ec6b8ee53714c6f" +checksum = "40545c26d092346d8a8dab71ee48e7685a7a9cba76e634790c215b41a4a7b4cf" dependencies = [ "jobserver", "libc", @@ -2929,6 +2939,12 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + [[package]] name = "chacha20" version = "0.9.1" @@ -3057,7 +3073,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -3185,9 +3201,9 @@ dependencies = [ [[package]] name = "colorchoice" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0" +checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" [[package]] name = "colored" @@ -3277,7 +3293,7 @@ dependencies = [ "quinn-proto", "rand 0.8.5", "rstest", - "rustls 0.23.15", + "rustls 0.23.16", "serde", "shared-crypto", "strum_macros 0.24.3", @@ -3294,6 +3310,7 @@ dependencies = [ "tokio-util 0.7.12", "tonic 0.12.3", "tonic-build 0.12.3", + "tonic-rustls", "tower 0.4.13", "tower-http", "tracing", @@ -3944,7 +3961,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -3968,7 +3985,7 @@ checksum = "478c02b53607e3f21c374f024c2cfc2154e554905bba478e8e09409f10ce3726" dependencies = [ "cynic-proc-macros", "ref-cast", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde", "serde_json", "static_assertions", @@ -3989,7 +4006,7 @@ dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", "strsim 0.10.0", - "syn 2.0.85", + "syn 2.0.87", "thiserror", ] @@ -4013,7 +4030,7 @@ dependencies = [ "cynic-codegen", "darling 0.20.10", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -4085,7 +4102,7 @@ dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", 
"strsim 0.11.1", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -4118,7 +4135,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core 0.20.10", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -4336,6 +4353,25 @@ dependencies = [ "walkdir", ] +[[package]] +name = "deadpool" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "421fe0f90f2ab22016f32a9881be5134fdd71c65298917084b0c7477cbc3856e" +dependencies = [ + "async-trait", + "deadpool-runtime", + "num_cpus", + "retain_mut", + "tokio", +] + +[[package]] +name = "deadpool-runtime" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "092966b41edc516079bdf31ec78a2e0588d1d0c08f78b91d8307215928642b2b" + [[package]] name = "debug-ignore" version = "1.0.5" @@ -4421,13 +4457,13 @@ dependencies = [ [[package]] name = "derive_arbitrary" -version = "1.3.2" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" +checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -4471,7 +4507,7 @@ dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", "rustc_version", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -4491,7 +4527,7 @@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -4527,9 +4563,9 @@ dependencies = [ [[package]] name = "diesel-async" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcb799bb6f8ca6a794462125d7b8983b0c86e6c93a33a9c55934a4a5de4409d3" +checksum = "4c5c6ec8d5c7b8444d19a47161797cbe361e0fb1ee40c6a8124ec915b64a4125" dependencies = [ "async-trait", "bb8", @@ -4550,7 +4586,7 @@ dependencies = [ "dsl_auto_type", "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -4570,7 +4606,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "209c735641a413bc68c4923a9d6ad4bcb3ca306b794edaa7eb0b3228a99ffb25" dependencies = [ - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -4719,7 +4755,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -4792,7 +4828,7 @@ dependencies = [ "optfield", "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -4824,7 +4860,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -5041,7 +5077,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -5060,7 +5096,7 @@ dependencies = [ "once_cell", "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -5267,7 +5303,7 @@ dependencies = [ "reqwest 0.11.27", "serde", "serde_json", - "syn 2.0.85", + "syn 2.0.87", "toml 0.8.19", "walkdir", ] @@ -5285,7 +5321,7 @@ dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", "serde_json", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -5311,7 +5347,7 @@ dependencies = [ "serde", "serde_json", "strum 0.26.3", - "syn 2.0.85", + "syn 2.0.87", 
"tempfile", "thiserror", "tiny-keccak", @@ -5547,7 +5583,7 @@ dependencies = [ [[package]] name = "fastcrypto" version = "0.1.8" -source = "git+https://github.com/MystenLabs/fastcrypto?rev=c050ffc78b93739328af5d59b05f90e0e26b1b7e#c050ffc78b93739328af5d59b05f90e0e26b1b7e" +source = "git+https://github.com/MystenLabs/fastcrypto?rev=2f502fd8570fe4e9cff36eea5bbd6fef22002898#2f502fd8570fe4e9cff36eea5bbd6fef22002898" dependencies = [ "aes", "aes-gcm", @@ -5601,7 +5637,7 @@ dependencies = [ [[package]] name = "fastcrypto-derive" version = "0.1.3" -source = "git+https://github.com/MystenLabs/fastcrypto?rev=c050ffc78b93739328af5d59b05f90e0e26b1b7e#c050ffc78b93739328af5d59b05f90e0e26b1b7e" +source = "git+https://github.com/MystenLabs/fastcrypto?rev=2f502fd8570fe4e9cff36eea5bbd6fef22002898#2f502fd8570fe4e9cff36eea5bbd6fef22002898" dependencies = [ "quote 1.0.37", "syn 1.0.109", @@ -5610,7 +5646,7 @@ dependencies = [ [[package]] name = "fastcrypto-tbls" version = "0.1.0" -source = "git+https://github.com/MystenLabs/fastcrypto?rev=c050ffc78b93739328af5d59b05f90e0e26b1b7e#c050ffc78b93739328af5d59b05f90e0e26b1b7e" +source = "git+https://github.com/MystenLabs/fastcrypto?rev=2f502fd8570fe4e9cff36eea5bbd6fef22002898#2f502fd8570fe4e9cff36eea5bbd6fef22002898" dependencies = [ "bcs", "digest 0.10.7", @@ -5629,7 +5665,7 @@ dependencies = [ [[package]] name = "fastcrypto-vdf" version = "0.1.0" -source = "git+https://github.com/MystenLabs/fastcrypto?rev=c050ffc78b93739328af5d59b05f90e0e26b1b7e#c050ffc78b93739328af5d59b05f90e0e26b1b7e" +source = "git+https://github.com/MystenLabs/fastcrypto?rev=2f502fd8570fe4e9cff36eea5bbd6fef22002898#2f502fd8570fe4e9cff36eea5bbd6fef22002898" dependencies = [ "bcs", "fastcrypto", @@ -5646,7 +5682,7 @@ dependencies = [ [[package]] name = "fastcrypto-zkp" version = "0.1.3" -source = "git+https://github.com/MystenLabs/fastcrypto?rev=c050ffc78b93739328af5d59b05f90e0e26b1b7e#c050ffc78b93739328af5d59b05f90e0e26b1b7e" +source = "git+https://github.com/MystenLabs/fastcrypto?rev=2f502fd8570fe4e9cff36eea5bbd6fef22002898#2f502fd8570fe4e9cff36eea5bbd6fef22002898" dependencies = [ "ark-bn254", "ark-ec", @@ -5667,7 +5703,7 @@ dependencies = [ "num-bigint 0.4.6", "once_cell", "regex", - "reqwest 0.12.8", + "reqwest 0.12.9", "schemars", "serde", "serde_json", @@ -5676,9 +5712,18 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.1.1" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" +dependencies = [ + "instant", +] + +[[package]] +name = "fastrand" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" +checksum = "486f806e73c5707928240ddc295403b1b93c96a02038563881c4a2fd84b81ac4" [[package]] name = "fd-lock" @@ -6002,11 +6047,26 @@ checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-lite" -version = "2.3.0" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" +dependencies = [ + "fastrand 1.9.0", + "futures-core", + "futures-io", + "memchr", + "parking", + "pin-project-lite", + "waker-fn", +] + +[[package]] +name = "futures-lite" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" +checksum = 
"cef40d21ae2c515b51041df9ed313ed21e572df340ea58a922a0aefe7e8891a1" dependencies = [ - "fastrand", + "fastrand 2.2.0", "futures-core", "futures-io", "parking", @@ -6031,7 +6091,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -6108,7 +6168,7 @@ dependencies = [ "hyper 0.14.31", "hyper-rustls 0.25.0", "log", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde", "serde_json", "thiserror", @@ -6119,6 +6179,33 @@ dependencies = [ "yup-oauth2", ] +[[package]] +name = "gcp_auth" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbf67f30198e045a039264c01fb44659ce82402d7771c50938beb41a5ac87733" +dependencies = [ + "async-trait", + "base64 0.22.1", + "bytes", + "chrono", + "home", + "http 1.1.0", + "http-body-util", + "hyper 1.5.0", + "hyper-rustls 0.27.3", + "hyper-util", + "ring 0.17.8", + "rustls-pemfile 2.2.0", + "serde", + "serde_json", + "thiserror", + "tokio", + "tracing", + "tracing-futures", + "url", +] + [[package]] name = "generic-array" version = "0.14.7" @@ -6193,7 +6280,7 @@ checksum = "53010ccb100b96a67bc32c0175f0ed1426b31b655d562898e57325f81c023ac0" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -6456,9 +6543,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.15.0" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" +checksum = "3a9bfc1af68b1726ea47d3d5109de126281def866b33970e10fbab11b5dafab3" dependencies = [ "allocator-api2", "equivalent", @@ -6671,6 +6758,27 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08a397c49fec283e3d6211adbe480be95aae5f304cfb923e9970e08956d5168a" +[[package]] +name = "http-types" +version = "2.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e9b187a72d63adbfba487f48095306ac823049cb504ee195541e91c7775f5ad" +dependencies = [ + "anyhow", + "async-channel 1.9.0", + "base64 0.13.1", + "futures-lite 1.13.0", + "http 0.2.12", + "infer", + "pin-project-lite", + "rand 0.7.3", + "serde", + "serde_json", + "serde_qs", + "serde_urlencoded", + "url", +] + [[package]] name = "httparse" version = "1.9.5" @@ -6793,7 +6901,7 @@ dependencies = [ "http 1.1.0", "hyper 1.5.0", "hyper-util", - "rustls 0.23.15", + "rustls 0.23.16", "rustls-native-certs 0.8.0", "rustls-pki-types", "tokio", @@ -6816,9 +6924,9 @@ dependencies = [ [[package]] name = "hyper-timeout" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" +checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" dependencies = [ "hyper 1.5.0", "hyper-util", @@ -6829,9 +6937,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b" +checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" dependencies = [ "bytes", "futures-channel", @@ -6882,6 +6990,124 @@ dependencies = [ "cc", ] +[[package]] +name = "icu_collections" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locid" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_locid_transform" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_locid_transform_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_locid_transform_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" + +[[package]] +name = "icu_normalizer" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "utf16_iter", + "utf8_iter", + "write16", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" + +[[package]] +name = "icu_properties" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_locid_transform", + "icu_properties_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" + +[[package]] +name = "icu_provider" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_provider_macros", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_provider_macros" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" +dependencies = [ + "proc-macro2 1.0.89", + "quote 1.0.37", + "syn 2.0.87", +] + [[package]] name = "id-arena" version = "2.2.1" @@ -6904,6 +7130,27 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "idna" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" +dependencies = [ + "icu_normalizer", + "icu_properties", +] + [[package]] name = "if_chain" version = "1.0.2" @@ -7024,7 +7271,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "equivalent", - "hashbrown 0.15.0", + "hashbrown 0.15.1", "serde", ] @@ -7041,6 +7288,12 @@ dependencies = [ "unicode-width", ] +[[package]] +name = "infer" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64e9829a50b42bb782c1df523f78d332fe371b10c661e78b7a3c34b0198e9fac" + [[package]] name = "inotify" version = "0.9.6" @@ -7089,9 +7342,9 @@ dependencies = [ [[package]] name = "insta" -version = "1.40.0" +version = "1.41.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6593a41c7a73841868772495db7dc1e8ecab43bb5c0b6da2059246c4b506ab60" +checksum = "7e9ffc4d4892617c50a928c52b2961cb5174b6fc6ebf252b2fac9d21955c48b8" dependencies = [ "console", "lazy_static", @@ -7122,9 +7375,9 @@ checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" [[package]] name = "io-extras" -version = "0.18.2" +version = "0.18.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9f046b9af244f13b3bd939f55d16830ac3a201e8a9ba9661bfcb03e2be72b9b" +checksum = "7d45fd7584f9b67ac37bc041212d06bfac0700b36456b05890d36a3b626260eb" dependencies = [ "io-lifetimes", "windows-sys 0.52.0", @@ -7677,9 +7930,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.161" +version = "0.2.162" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e9489c2807c139ffd9c1794f4af0ebe86a828db53ecdc7fea2111d0fed085d1" +checksum = "18d287de67fe55fd7e1581fe933d965a5a9477b38e949cfa9f8574ef01506398" [[package]] name = "libloading" @@ -7693,9 +7946,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.8" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" +checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" [[package]] name = "libquickjs-sys" @@ -7779,6 +8032,12 @@ version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" +[[package]] +name = "litemap" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "643cb0b8d4fcc284004d5fd0d67ccf61dfffadb7f75e1e71bc420f4688a3a704" + [[package]] name = "lock_api" version = "0.4.12" @@ -7820,7 +8079,7 @@ dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", "regex-syntax 0.8.5", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -7856,7 +8115,7 @@ version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "hashbrown 0.15.0", + "hashbrown 0.15.1", ] [[package]] @@ -8160,7 +8419,7 @@ checksum = "dcf09caffaac8068c346b6df2a7fc27a177fd20b39421a39ce0a211bde679a6c" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -8507,7 +8766,6 @@ dependencies = [ "hex", "move-binary-format", "move-core-types", - "num-bigint 0.4.6", "once_cell", "serde", "sha2 0.9.9", @@ -8748,7 +9006,7 @@ version = "0.1.0" dependencies = [ "enum-compat-util", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -9177,7 +9435,7 @@ dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", "syn 1.0.109", - "synstructure", + "synstructure 0.12.6", ] [[package]] @@ -9202,7 +9460,7 
@@ dependencies = [ "mysten-metrics", "parking_lot 0.12.3", "prometheus", - "reqwest 0.12.8", + "reqwest 0.12.9", "snap", "sui-tls", "sui-types", @@ -9235,11 +9493,13 @@ name = "mysten-network" version = "0.2.0" dependencies = [ "anemo", + "async-stream", "bcs", "bytes", "eyre", "futures", "http 1.1.0", + "hyper-rustls 0.27.3", "hyper-util", "multiaddr", "once_cell", @@ -9247,6 +9507,7 @@ dependencies = [ "serde", "snap", "tokio", + "tokio-rustls 0.26.0", "tokio-stream", "tonic 0.12.3", "tonic-health", @@ -9296,7 +9557,7 @@ version = "0.1.0" dependencies = [ "proc-macro2 1.0.89", "syn 1.0.109", - "synstructure", + "synstructure 0.12.6", ] [[package]] @@ -9445,7 +9706,7 @@ dependencies = [ "pretty_assertions", "prometheus", "rand 0.8.5", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde-reflection", "serde_yaml 0.8.26", "sui-keys", @@ -9499,7 +9760,7 @@ dependencies = [ "prometheus", "proptest", "rand 0.8.5", - "reqwest 0.12.8", + "reqwest 0.12.9", "sui-macros", "sui-protocol-config", "tap", @@ -9639,7 +9900,7 @@ dependencies = [ "narwhal-types", "prometheus", "rand 0.8.5", - "reqwest 0.12.8", + "reqwest 0.12.9", "sui-protocol-config", "tap", "telemetry-subscribers", @@ -10064,7 +10325,7 @@ dependencies = [ "proc-macro-crate 1.1.3", "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -10076,7 +10337,7 @@ dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -10145,7 +10406,7 @@ dependencies = [ "percent-encoding", "quick-xml", "rand 0.8.5", - "reqwest 0.12.8", + "reqwest 0.12.9", "ring 0.17.8", "rustls-pemfile 2.2.0", "serde", @@ -10344,7 +10605,7 @@ checksum = "fa59f025cde9c698fcb4fcb3533db4621795374065bee908215263488f2d2a1d" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -10394,7 +10655,7 @@ dependencies = [ "proc-macro-error", "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -10408,7 +10669,7 @@ dependencies = [ "proc-macro2 1.0.89", "proc-macro2-diagnostics", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -10670,7 +10931,7 @@ checksum = "f14d42b14749cc7927add34a9932b3b3cc5349a633384850baa67183061439dd" dependencies = [ "ciborium", "coset", - "idna", + "idna 0.5.0", "passkey-authenticator", "passkey-types", "public-suffix", @@ -10850,7 +11111,7 @@ dependencies = [ "pest_meta", "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -10934,7 +11195,7 @@ dependencies = [ "phf_shared 0.11.2", "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -10972,7 +11233,7 @@ checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -10994,7 +11255,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" dependencies = [ "atomic-waker", - "fastrand", + "fastrand 2.2.0", "futures-io", ] @@ -11077,9 +11338,9 @@ dependencies = [ [[package]] name = "polling" -version = "3.7.3" +version = "3.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc2790cd301dec6cd3b7a025e4815cf825724a51c98dccfe6a3e55f05ffb6511" +checksum = "a604568c3202727d1507653cb121dbd627a58684eb09a820fd746bee38b4442f" dependencies = [ "cfg-if", "concurrent-queue", @@ -11257,7 +11518,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" dependencies = [ "proc-macro2 1.0.89", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -11378,7 +11639,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", "version_check", "yansi 1.0.1", ] @@ -11415,7 +11676,7 @@ checksum = "0fcebfa99f03ae51220778316b37d24981e36322c82c24848f48c5bd0f64cbdb" dependencies = [ "enum-as-inner", "mime", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde", "time", "url", @@ -11532,7 +11793,7 @@ dependencies = [ "prost 0.13.3", "prost-types 0.13.3", "regex", - "syn 2.0.85", + "syn 2.0.87", "tempfile", ] @@ -11559,7 +11820,7 @@ dependencies = [ "itertools 0.12.1", "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -11572,7 +11833,7 @@ dependencies = [ "itertools 0.13.0", "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -11679,7 +11940,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.0.0", - "rustls 0.23.15", + "rustls 0.23.16", "socket2 0.5.7", "thiserror", "tokio", @@ -11696,7 +11957,7 @@ dependencies = [ "rand 0.8.5", "ring 0.17.8", "rustc-hash 2.0.0", - "rustls 0.23.15", + "rustls 0.23.16", "slab", "thiserror", "tinyvec", @@ -11705,10 +11966,11 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.5" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fe68c2e9e1a1234e218683dbdf9f9dfcb094113c5ac2b938dfcb9bab4c4140b" +checksum = "7d5a626c6807713b15cac82a6acaccd6043c9a5408c24baae07611fec3f243da" dependencies = [ + "cfg_aliases", "libc", "once_cell", "socket2 0.5.7", @@ -11895,7 +12157,7 @@ checksum = "a25d631e41bfb5fdcde1d4e2215f62f7f0afa3ff11e26563765bd6ea1d229aeb" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -11962,7 +12224,7 @@ checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -11980,9 +12242,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.11.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", @@ -12071,9 +12333,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.8" +version = "0.12.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f713147fbe92361e52392c73b8c9e48c04c6625bce969ef54dc901e58e042a7b" +checksum = "a77c62af46e79de0a562e1a9849205ffcb7fc1238876e9bd743357570e04046f" dependencies = [ "async-compression", "base64 0.22.1", @@ -12096,7 +12358,7 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn", - "rustls 0.23.15", + "rustls 0.23.16", "rustls-native-certs 0.8.0", "rustls-pemfile 2.2.0", "rustls-pki-types", @@ -12126,7 +12388,7 @@ dependencies = [ "anyhow", "async-trait", "http 1.1.0", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde", "thiserror", "tower-service", @@ -12146,7 +12408,7 @@ dependencies = [ "http 1.1.0", "hyper 1.5.0", "parking_lot 0.11.2", - "reqwest 0.12.8", + "reqwest 0.12.9", "reqwest-middleware", "retry-policies", "tokio", @@ -12154,6 +12416,12 @@ 
dependencies = [ "wasm-timer", ] +[[package]] +name = "retain_mut" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4389f1d5789befaf6029ebd9f7dac4af7f7e3d61b69d4f30e2ac02b57e7712b0" + [[package]] name = "retry-policies" version = "0.3.0" @@ -12600,9 +12868,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.37" +version = "0.38.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" +checksum = "375116bee2be9ed569afe2154ea6a99dfdffd257f533f187498c2a8f5feaf4ee" dependencies = [ "bitflags 2.6.0", "errno", @@ -12653,9 +12921,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.15" +version = "0.23.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fbb44d7acc4e873d613422379f69f237a1b141928c02f6bc6ccfddddc2d7993" +checksum = "eee87ff5d9b36712a58574e12e9f0ea80f915a5b0ac518d322b24a465617925e" dependencies = [ "log", "once_cell", @@ -12847,7 +13115,7 @@ dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -12881,7 +13149,7 @@ dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", "serde_derive_internals", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -13027,9 +13295,9 @@ checksum = "a3f0bf26fd526d2a95683cd0f87bf103b8539e2ca1ef48ce002d67aad59aa0b4" [[package]] name = "serde" -version = "1.0.213" +version = "1.0.214" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ea7893ff5e2466df8d720bb615088341b295f849602c6956047f8f80f0e9bc1" +checksum = "f55c3193aca71c12ad7890f1785d2b73e1b9f63a0bbc353c08ef26fe03fc56b5" dependencies = [ "serde_derive", ] @@ -13076,13 +13344,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.213" +version = "1.0.214" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e85ad2009c50b58e87caa8cd6dac16bdf511bbfb7af6c33df902396aa480fa5" +checksum = "de523f781f095e28fa605cdce0f8307e451cc0fd14e2eb4cd2e98a355b147766" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -13093,7 +13361,7 @@ checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -13119,6 +13387,17 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_qs" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7715380eec75f029a4ef7de39a9200e0a63823176b759d055b613f5a87df6a6" +dependencies = [ + "percent-encoding", + "serde", + "thiserror", +] + [[package]] name = "serde_repr" version = "0.1.19" @@ -13127,7 +13406,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -13203,7 +13482,7 @@ dependencies = [ "darling 0.20.10", "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -13215,7 +13494,7 @@ dependencies = [ "darling 0.20.10", "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -13594,7 +13873,7 @@ dependencies = [ "log", "object_store 0.10.2", "regex", - "reqwest 0.12.8", + "reqwest 0.12.9", "reqwest-middleware", "reqwest-retry", "serde", @@ -13882,7 +14161,7 @@ dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", "rustversion", - "syn 2.0.85", + 
"syn 2.0.87", ] [[package]] @@ -13895,7 +14174,7 @@ dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", "rustversion", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -13931,7 +14210,7 @@ checksum = "734676eb262c623cec13c3155096e08d1f8f29adce39ba17948b18dad1e54142" [[package]] name = "sui" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anemo", "anyhow", @@ -13944,6 +14223,7 @@ dependencies = [ "bip32 0.4.0", "camino", "clap", + "codespan-reporting", "colored", "csv", "datatest-stable", @@ -13961,9 +14241,12 @@ dependencies = [ "miette", "move-analyzer", "move-binary-format", + "move-bytecode-source-map", "move-bytecode-verifier-meter", "move-command-line-common", + "move-compiler", "move-core-types", + "move-ir-types", "move-package", "move-vm-config", "move-vm-profiler", @@ -13972,7 +14255,7 @@ dependencies = [ "prometheus", "rand 0.8.5", "regex", - "reqwest 0.12.8", + "reqwest 0.12.9", "rusoto_core", "rusoto_kms", "rustyline", @@ -14145,7 +14428,7 @@ dependencies = [ [[package]] name = "sui-analytics-indexer" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "arrow 52.2.0", @@ -14197,7 +14480,7 @@ dependencies = [ [[package]] name = "sui-analytics-indexer-derive" -version = "1.36.1" +version = "1.37.1" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", @@ -14206,7 +14489,7 @@ dependencies = [ [[package]] name = "sui-archival" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "byteorder", @@ -14266,7 +14549,7 @@ dependencies = [ "narwhal-config", "prettytable-rs", "prometheus-parse", - "reqwest 0.12.8", + "reqwest 0.12.9", "russh", "russh-keys", "serde", @@ -14331,7 +14614,7 @@ dependencies = [ [[package]] name = "sui-bridge" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "arc-swap", @@ -14356,7 +14639,7 @@ dependencies = [ "once_cell", "prometheus", "rand 0.8.5", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde", "serde_json", "serde_with 3.11.0", @@ -14381,7 +14664,7 @@ dependencies = [ [[package]] name = "sui-bridge-cli" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "clap", @@ -14389,7 +14672,7 @@ dependencies = [ "fastcrypto", "futures", "move-core-types", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde", "serde_json", "serde_with 3.11.0", @@ -14417,6 +14700,7 @@ dependencies = [ "clap", "diesel", "diesel-async", + "diesel_migrations", "ethers", "futures", "hex-literal 0.3.4", @@ -14428,9 +14712,9 @@ dependencies = [ "serde_yaml 0.8.26", "strum_macros 0.24.3", "sui-bridge", - "sui-bridge-watchdog", "sui-config", "sui-data-ingestion-core", + "sui-indexer", "sui-indexer-builder", "sui-json-rpc-types", "sui-sdk", @@ -14444,24 +14728,9 @@ dependencies = [ "tracing", ] -[[package]] -name = "sui-bridge-watchdog" -version = "0.1.0" -dependencies = [ - "anyhow", - "async-trait", - "ethers", - "futures", - "mysten-metrics", - "prometheus", - "sui-bridge", - "tokio", - "tracing", -] - [[package]] name = "sui-cluster-test" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "async-trait", @@ -14473,7 +14742,7 @@ dependencies = [ "move-core-types", "prometheus", "regex", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde_json", "shared-crypto", "sui-config", @@ -14517,7 +14786,7 @@ dependencies = [ "once_cell", "prometheus", "rand 0.8.5", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde", "serde_with 3.11.0", "serde_yaml 0.8.26", @@ -14586,7 +14855,7 @@ dependencies = [ "prometheus", "rand 0.8.5", "rayon", - "reqwest 0.12.8", + "reqwest 0.12.9", "roaring", "rstest", "scopeguard", @@ -14614,6 
+14883,7 @@ dependencies = [ "sui-storage", "sui-swarm-config", "sui-test-transaction-builder", + "sui-tls", "sui-transaction-checks", "sui-types", "tap", @@ -14654,7 +14924,7 @@ dependencies = [ [[package]] name = "sui-data-ingestion" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "async-trait", @@ -14677,6 +14947,7 @@ dependencies = [ "serde_yaml 0.8.26", "sui-archival", "sui-data-ingestion-core", + "sui-kvstore", "sui-storage", "sui-types", "telemetry-subscribers", @@ -14750,7 +15021,7 @@ dependencies = [ [[package]] name = "sui-e2e-tests" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "assert_cmd", @@ -14775,12 +15046,13 @@ dependencies = [ "passkey-client", "passkey-types", "prometheus", + "prost 0.13.3", "rand 0.8.5", + "reqwest 0.12.9", "serde", "serde_json", "shared-crypto", "sui", - "sui-bridge", "sui-config", "sui-core", "sui-framework", @@ -14868,12 +15140,13 @@ dependencies = [ [[package]] name = "sui-faucet" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "async-recursion", "async-trait", "axum 0.7.7", + "bin-version", "clap", "eyre", "futures", @@ -14906,16 +15179,36 @@ dependencies = [ ] [[package]] -name = "sui-framework" -version = "0.1.0" +name = "sui-field-count" +version = "1.37.1" dependencies = [ - "anyhow", - "bcs", - "move-binary-format", - "move-compiler", - "move-core-types", - "move-package", - "once_cell", + "sui-field-count-derive", + "sui-field-count-main", +] + +[[package]] +name = "sui-field-count-derive" +version = "1.37.1" +dependencies = [ + "quote 1.0.37", + "syn 1.0.109", +] + +[[package]] +name = "sui-field-count-main" +version = "1.37.1" + +[[package]] +name = "sui-framework" +version = "0.1.0" +dependencies = [ + "anyhow", + "bcs", + "move-binary-format", + "move-compiler", + "move-core-types", + "move-package", + "once_cell", "regex", "serde", "sui-config", @@ -14927,7 +15220,7 @@ dependencies = [ [[package]] name = "sui-framework-snapshot" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "bcs", @@ -14991,7 +15284,7 @@ dependencies = [ [[package]] name = "sui-graphql-config" -version = "1.36.1" +version = "1.37.1" dependencies = [ "quote 1.0.37", "syn 1.0.109", @@ -15011,7 +15304,7 @@ dependencies = [ [[package]] name = "sui-graphql-rpc" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "async-graphql", @@ -15051,7 +15344,7 @@ dependencies = [ "prometheus", "rand 0.8.5", "regex", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde", "serde_json", "serde_with 3.11.0", @@ -15095,7 +15388,7 @@ dependencies = [ "async-graphql", "axum 0.7.7", "hyper 1.5.0", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde_json", "sui-graphql-rpc-headers", "thiserror", @@ -15110,7 +15403,7 @@ dependencies = [ [[package]] name = "sui-indexer" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "async-trait", @@ -15167,6 +15460,8 @@ dependencies = [ "sui-sdk", "sui-snapshot", "sui-storage", + "sui-swarm-config", + "sui-synthetic-ingestion", "sui-test-transaction-builder", "sui-transaction-builder", "sui-types", @@ -15183,6 +15478,41 @@ dependencies = [ "url", ] +[[package]] +name = "sui-indexer-alt" +version = "1.37.1" +dependencies = [ + "anyhow", + "async-trait", + "axum 0.7.7", + "backoff", + "bb8", + "bcs", + "chrono", + "clap", + "diesel", + "diesel-async", + "diesel_migrations", + "futures", + "mysten-metrics", + "prometheus", + "rand 0.8.5", + "reqwest 0.12.9", + "serde", + "sui-field-count", + "sui-storage", + "sui-types", + "telemetry-subscribers", + "tempfile", + 
"thiserror", + "tokio", + "tokio-stream", + "tokio-util 0.7.12", + "tracing", + "url", + "wiremock", +] + [[package]] name = "sui-indexer-builder" version = "0.1.0" @@ -15310,7 +15640,7 @@ dependencies = [ "move-package", "prometheus", "rand 0.8.5", - "reqwest 0.12.8", + "reqwest 0.12.9", "sui-config", "sui-core", "sui-json", @@ -15382,9 +15712,31 @@ dependencies = [ "tiny-bip39", ] +[[package]] +name = "sui-kvstore" +version = "1.37.1" +dependencies = [ + "anyhow", + "async-trait", + "base64 0.21.7", + "bcs", + "gcp_auth", + "http 1.1.0", + "prometheus", + "prost 0.13.3", + "prost-types 0.13.3", + "serde", + "sui-data-ingestion-core", + "sui-types", + "telemetry-subscribers", + "tokio", + "tonic 0.12.3", + "tracing", +] + [[package]] name = "sui-light-client" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "async-trait", @@ -15396,7 +15748,7 @@ dependencies = [ "move-binary-format", "move-core-types", "object_store 0.10.2", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde", "serde_json", "serde_yaml 0.8.26", @@ -15422,7 +15774,7 @@ dependencies = [ [[package]] name = "sui-metric-checker" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "backoff", @@ -15432,7 +15784,7 @@ dependencies = [ "humantime", "once_cell", "prometheus-http-query", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde", "serde_yaml 0.9.34+deprecated", "strum_macros 0.24.3", @@ -15443,7 +15795,7 @@ dependencies = [ [[package]] name = "sui-move" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "assert_cmd", @@ -15485,7 +15837,7 @@ dependencies = [ [[package]] name = "sui-move-build" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "datatest-stable", @@ -15509,7 +15861,7 @@ dependencies = [ [[package]] name = "sui-move-lsp" -version = "1.36.1" +version = "1.37.1" dependencies = [ "bin-version", "clap", @@ -15599,6 +15951,83 @@ dependencies = [ "tracing", ] +[[package]] +name = "sui-mvr-indexer" +version = "1.37.1" +dependencies = [ + "anyhow", + "async-trait", + "axum 0.7.7", + "backoff", + "bb8", + "bcs", + "bytes", + "cached", + "chrono", + "clap", + "criterion", + "csv", + "dashmap", + "diesel", + "diesel-async", + "diesel_migrations", + "fastcrypto", + "futures", + "hex", + "indicatif", + "itertools 0.13.0", + "jsonrpsee", + "move-binary-format", + "move-bytecode-utils", + "move-core-types", + "mysten-metrics", + "ntest", + "object_store 0.10.2", + "prometheus", + "rand 0.8.5", + "rayon", + "regex", + "serde", + "serde_json", + "serde_with 3.11.0", + "simulacrum", + "strum 0.24.1", + "strum_macros 0.24.3", + "sui-archival", + "sui-config", + "sui-core", + "sui-data-ingestion-core", + "sui-json", + "sui-json-rpc", + "sui-json-rpc-api", + "sui-json-rpc-types", + "sui-keys", + "sui-move-build", + "sui-open-rpc", + "sui-package-resolver", + "sui-protocol-config", + "sui-rest-api", + "sui-sdk", + "sui-snapshot", + "sui-storage", + "sui-swarm-config", + "sui-synthetic-ingestion", + "sui-test-transaction-builder", + "sui-transaction-builder", + "sui-types", + "tap", + "telemetry-subscribers", + "tempfile", + "test-cluster", + "thiserror", + "tokio", + "tokio-stream", + "tokio-util 0.7.12", + "toml 0.7.8", + "tracing", + "url", +] + [[package]] name = "sui-network" version = "0.0.0" @@ -15641,7 +16070,7 @@ dependencies = [ [[package]] name = "sui-node" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anemo", "anemo-tower", @@ -15665,7 +16094,7 @@ dependencies = [ "narwhal-network", "parking_lot 0.12.3", "prometheus", - "reqwest 0.12.8", + "reqwest 
0.12.9", "serde", "sui-archival", "sui-config", @@ -15693,7 +16122,7 @@ dependencies = [ [[package]] name = "sui-open-rpc" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "bcs", @@ -15729,7 +16158,7 @@ dependencies = [ [[package]] name = "sui-oracle" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "bcs", @@ -15741,7 +16170,7 @@ dependencies = [ "once_cell", "prometheus", "rand 0.8.5", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde", "serde_json", "shared-crypto", @@ -15759,7 +16188,7 @@ dependencies = [ [[package]] name = "sui-package-dump" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "bcs", @@ -15767,7 +16196,7 @@ dependencies = [ "cynic-codegen", "fastcrypto", "move-core-types", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde", "serde_json", "sui-types", @@ -15776,7 +16205,7 @@ dependencies = [ [[package]] name = "sui-package-management" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "move-core-types", @@ -15821,7 +16250,7 @@ dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", "sui-enum-compat-util", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -15875,8 +16304,8 @@ dependencies = [ "prost-build 0.13.3", "protobuf", "rand 0.8.5", - "reqwest 0.12.8", - "rustls 0.23.15", + "reqwest 0.12.9", + "rustls 0.23.16", "rustls-pemfile 2.2.0", "serde", "serde_json", @@ -15959,8 +16388,10 @@ dependencies = [ "mysten-network", "openapiv3", "prometheus", + "prost 0.13.3", + "prost-build 0.13.3", "rand 0.8.5", - "reqwest 0.12.8", + "reqwest 0.12.9", "schemars", "serde", "serde_json", @@ -15977,7 +16408,7 @@ dependencies = [ [[package]] name = "sui-rosetta" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "async-trait", @@ -15996,7 +16427,7 @@ dependencies = [ "once_cell", "quick-js", "rand 0.8.5", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde", "serde_json", "shared-crypto", @@ -16022,7 +16453,7 @@ dependencies = [ [[package]] name = "sui-rpc-loadgen" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "async-trait", @@ -16051,7 +16482,7 @@ dependencies = [ [[package]] name = "sui-sdk" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "async-recursion", @@ -16067,7 +16498,7 @@ dependencies = [ "jsonrpsee", "move-core-types", "rand 0.8.5", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde", "serde_json", "serde_with 3.11.0", @@ -16107,7 +16538,7 @@ dependencies = [ [[package]] name = "sui-security-watchdog" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "arrow-array 52.2.0", @@ -16118,7 +16549,7 @@ dependencies = [ "lexical-util", "mysten-metrics", "prometheus", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde", "serde_json", "snowflake-api", @@ -16154,7 +16585,7 @@ dependencies = [ [[package]] name = "sui-single-node-benchmark" -version = "1.36.1" +version = "1.37.1" dependencies = [ "async-trait", "bcs", @@ -16217,7 +16648,7 @@ dependencies = [ [[package]] name = "sui-source-validation" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "colored", @@ -16265,7 +16696,7 @@ dependencies = [ "move-symbol-pool", "mysten-metrics", "prometheus", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde", "sui", "sui-json-rpc-types", @@ -16321,7 +16752,7 @@ dependencies = [ "percent-encoding", "pretty_assertions", "prometheus", - "reqwest 0.12.8", + "reqwest 0.12.9", "serde", "serde_json", "sui-config", @@ -16343,7 +16774,7 @@ dependencies = [ [[package]] name = "sui-surfer" -version = "1.36.1" +version = "1.37.1" dependencies = [ "async-trait", "bcs", @@ 
-16385,6 +16816,7 @@ dependencies = [ "sui-protocol-config", "sui-simulator", "sui-swarm-config", + "sui-tls", "sui-types", "tap", "telemetry-subscribers", @@ -16423,11 +16855,24 @@ dependencies = [ "tracing", ] +[[package]] +name = "sui-synthetic-ingestion" +version = "0.0.0" +dependencies = [ + "async-trait", + "simulacrum", + "sui-test-transaction-builder", + "sui-types", + "tempfile", + "tokio", + "tracing", +] + [[package]] name = "sui-telemetry" version = "0.1.0" dependencies = [ - "reqwest 0.12.8", + "reqwest 0.12.9", "serde", "sui-core", "tracing", @@ -16448,7 +16893,7 @@ dependencies = [ [[package]] name = "sui-test-validator" -version = "1.36.1" +version = "1.37.1" [[package]] name = "sui-tls" @@ -16463,8 +16908,8 @@ dependencies = [ "pkcs8 0.9.0", "rand 0.8.5", "rcgen", - "reqwest 0.12.8", - "rustls 0.23.15", + "reqwest 0.12.9", + "rustls 0.23.16", "rustls-webpki 0.102.8", "tokio", "tokio-rustls 0.26.0", @@ -16474,7 +16919,7 @@ dependencies = [ [[package]] name = "sui-tool" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anemo", "anemo-cli", @@ -16509,6 +16954,7 @@ dependencies = [ "sui-sdk", "sui-snapshot", "sui-storage", + "sui-tls", "sui-types", "telemetry-subscribers", "tempfile", @@ -16756,7 +17202,7 @@ dependencies = [ [[package]] name = "suins-indexer" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "async-trait", @@ -16776,7 +17222,7 @@ dependencies = [ "object_store 0.10.2", "prometheus", "rand 0.8.5", - "rustls 0.23.15", + "rustls 0.23.16", "serde", "serde_json", "serde_yaml 0.8.26", @@ -16796,7 +17242,7 @@ dependencies = [ [[package]] name = "suiop-cli" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "axum 0.7.7", @@ -16818,7 +17264,7 @@ dependencies = [ "prettytable-rs", "rand 0.8.5", "regex", - "reqwest 0.12.8", + "reqwest 0.12.9", "semver", "serde", "serde_json", @@ -16879,9 +17325,9 @@ dependencies = [ [[package]] name = "symbolic-common" -version = "12.12.0" +version = "12.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "366f1b4c6baf6cfefc234bbd4899535fca0b06c74443039a73f6dfb2fad88d77" +checksum = "3d4d73159efebfb389d819fd479afb2dbd57dcb3e3f4b7fcfa0e675f5a46c1cb" dependencies = [ "debugid", "memmap2", @@ -16891,9 +17337,9 @@ dependencies = [ [[package]] name = "symbolic-demangle" -version = "12.12.0" +version = "12.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aba05ba5b9962ea5617baf556293720a8b2d0a282aa14ee4bf10e22efc7da8c8" +checksum = "a767859f6549c665011970874c3f541838b4835d5aaaa493d3ee383918be9f10" dependencies = [ "cpp_demangle 0.4.4", "rustc-demangle", @@ -16924,9 +17370,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.85" +version = "2.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5023162dfcd14ef8f32034d8bcd4cc5ddc61ef7a247c024a33e24e1f24d21b56" +checksum = "25aa4ce346d03a6dcd68dd8b4010bcb74e54e62c90c573f394c46eae99aba32d" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", @@ -16960,6 +17406,17 @@ dependencies = [ "unicode-xid 0.2.6", ] +[[package]] +name = "synstructure" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +dependencies = [ + "proc-macro2 1.0.89", + "quote 1.0.37", + "syn 2.0.87", +] + [[package]] name = "sysinfo" version = "0.27.8" @@ -17060,9 +17517,9 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = 
"tar" -version = "0.4.42" +version = "0.4.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ff6c40d3aedb5e06b57c6f669ad17ab063dd1e63d977c6a88e7f4dfa4f04020" +checksum = "c65998313f8e17d0d553d28f91a0df93e4dbbbf770279c7bc21ca0f09ea1a1f6" dependencies = [ "filetime", "libc", @@ -17118,12 +17575,12 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.13.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" +checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c" dependencies = [ "cfg-if", - "fastrand", + "fastrand 2.2.0", "once_cell", "rustix", "windows-sys 0.59.0", @@ -17240,6 +17697,7 @@ dependencies = [ "sui-config", "sui-core", "sui-framework", + "sui-indexer", "sui-json-rpc", "sui-json-rpc-api", "sui-json-rpc-types", @@ -17253,7 +17711,9 @@ dependencies = [ "sui-swarm-config", "sui-test-transaction-builder", "sui-types", + "tempfile", "tokio", + "tokio-util 0.7.12", "tracing", ] @@ -17295,7 +17755,7 @@ dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", "subprocess", - "syn 2.0.85", + "syn 2.0.87", "test-fuzz-internal", "toolchain_find", ] @@ -17327,22 +17787,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.65" +version = "1.0.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d11abd9594d9b38965ef50805c5e469ca9cc6f197f883f717e0269a3057b3d5" +checksum = "02dd99dc800bbb97186339685293e1cc5d9df1f8fae2d0aecd9ff1c77efea892" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.65" +version = "1.0.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae71770322cbd277e69d762a16c444af02aa0575ac0d174f0b9562d3b37f8602" +checksum = "a7c61ec9a6f64d2793d8a45faba21efbe3ced62a886d44c36a009b2b519b4c7e" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -17436,6 +17896,16 @@ dependencies = [ "crunchy", ] +[[package]] +name = "tinystr" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +dependencies = [ + "displaydoc", + "zerovec", +] + [[package]] name = "tinytemplate" version = "1.2.1" @@ -17514,7 +17984,7 @@ checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -17524,7 +17994,7 @@ source = "git+https://github.com/mystenmark/tokio-madsim-fork.git?rev=d46208cb11 dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -17560,7 +18030,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04fb792ccd6bbcd4bba408eb8a292f70fc4a3589e5d793626f45190e6454b6ab" dependencies = [ "ring 0.17.8", - "rustls 0.23.15", + "rustls 0.23.16", "tokio", "tokio-postgres", "tokio-rustls 0.26.0", @@ -17616,7 +18086,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.15", + "rustls 0.23.16", "rustls-pki-types", "tokio", ] @@ -17867,13 +18337,15 @@ dependencies = [ "http-body 1.0.1", "http-body-util", "hyper 1.5.0", - "hyper-timeout 0.5.1", + "hyper-timeout 0.5.2", "hyper-util", "percent-encoding", "pin-project", "prost 0.13.3", + 
"rustls-pemfile 2.2.0", "socket2 0.5.7", "tokio", + "tokio-rustls 0.26.0", "tokio-stream", "tower 0.4.13", "tower-layer", @@ -17905,7 +18377,7 @@ dependencies = [ "prost-build 0.13.3", "prost-types 0.13.3", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -17921,6 +18393,33 @@ dependencies = [ "tonic 0.12.3", ] +[[package]] +name = "tonic-rustls" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "803689f99cfc6de9c3b27aa86bf98553754c72c53b715913f1c14dcd3c030f77" +dependencies = [ + "async-stream", + "bytes", + "h2 0.4.6", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", + "hyper 1.5.0", + "hyper-timeout 0.5.2", + "hyper-util", + "pin-project", + "socket2 0.5.7", + "tokio", + "tokio-rustls 0.26.0", + "tokio-stream", + "tonic 0.12.3", + "tower 0.5.1", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "toolchain_find" version = "0.3.0" @@ -17963,9 +18462,12 @@ checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" dependencies = [ "futures-core", "futures-util", + "indexmap 2.6.0", "pin-project-lite", + "slab", "sync_wrapper 0.1.2", "tokio", + "tokio-util 0.7.12", "tower-layer", "tower-service", "tracing", @@ -18046,7 +18548,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -18325,7 +18827,7 @@ dependencies = [ "quote 1.0.37", "regex", "regex-syntax 0.7.5", - "syn 2.0.85", + "syn 2.0.87", "zstd-sys", ] @@ -18337,9 +18839,9 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "typeshare" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04f17399b76c2e743d58eac0635d7686e9c00f48cd4776f00695d9882a7d3187" +checksum = "19be0f411120091e76e13e5a0186d8e2bcc3e7e244afdb70152197f1a8486ceb" dependencies = [ "chrono", "serde", @@ -18354,7 +18856,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a615d6c2764852a2e88a4f16e9ce1ea49bb776b5872956309e170d63a042a34f" dependencies = [ "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -18494,7 +18996,7 @@ dependencies = [ "flate2", "log", "once_cell", - "rustls 0.23.15", + "rustls 0.23.16", "rustls-pki-types", "url", "webpki-roots 0.26.6", @@ -18502,12 +19004,12 @@ dependencies = [ [[package]] name = "url" -version = "2.5.2" +version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" +checksum = "8d157f1b96d14500ffdc1f10ba712e780825526c03d9a49b4d0324b0d9113ada" dependencies = [ "form_urlencoded", - "idna", + "idna 1.0.3", "percent-encoding", "serde", ] @@ -18524,6 +19026,18 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" +[[package]] +name = "utf16_iter" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + [[package]] name = "utf8parse" version = "0.2.2" @@ -18636,6 +19150,12 @@ dependencies = [ "libc", ] +[[package]] +name = "waker-fn" 
+version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "317211a0dc0ceedd78fb2ca9a44aed3d7b9b26f81870d485c07122b4350673b7" + [[package]] name = "walkdir" version = "2.5.0" @@ -18721,7 +19241,7 @@ dependencies = [ "once_cell", "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", "wasm-bindgen-shared", ] @@ -18755,7 +19275,7 @@ checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -18787,9 +19307,9 @@ dependencies = [ [[package]] name = "wasm-streams" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e072d4e72f700fb3443d8fe94a39315df013eef1104903cdb0a2abd322bbecd" +checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65" dependencies = [ "futures-util", "js-sys", @@ -18926,7 +19446,7 @@ dependencies = [ "anyhow", "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", "wasmtime-component-util", "wasmtime-wit-bindgen", "wit-parser", @@ -19094,7 +19614,7 @@ checksum = "5399c175ddba4a471b9da45105dea3493059d52b2d54860eadb0df04c813948d" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -19305,7 +19825,7 @@ dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", "shellexpand 2.1.2", - "syn 2.0.85", + "syn 2.0.87", "witx", ] @@ -19317,7 +19837,7 @@ checksum = "93e43fc332703d1ec3aa86a5ce8bb49e6b95b6c617b90e726d3e70a0f70f48a5" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", "wiggle-generate", ] @@ -19593,6 +20113,28 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "wiremock" +version = "0.5.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13a3a53eaf34f390dd30d7b1b078287dd05df2aa2e21a589ccb80f5c7253c2e9" +dependencies = [ + "assert-json-diff", + "async-trait", + "base64 0.21.7", + "deadpool", + "futures", + "futures-timer", + "http-types", + "hyper 0.14.31", + "log", + "once_cell", + "regex", + "serde", + "serde_json", + "tokio", +] + [[package]] name = "wit-parser" version = "0.13.2" @@ -19622,6 +20164,18 @@ dependencies = [ "wast 35.0.2", ] +[[package]] +name = "write16" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" + +[[package]] +name = "writeable" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" + [[package]] name = "ws_stream_wasm" version = "0.7.4" @@ -19658,7 +20212,7 @@ dependencies = [ [[package]] name = "x" -version = "1.36.1" +version = "1.37.1" dependencies = [ "anyhow", "camino", @@ -19717,9 +20271,9 @@ dependencies = [ [[package]] name = "xml-rs" -version = "0.8.22" +version = "0.8.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af4e2e2f7cba5a093896c1e150fbfe177d1883e7448200efb81d40b9d339ef26" +checksum = "af310deaae937e48a26602b730250b4949e125f468f11e6990be3e5304ddd96f" [[package]] name = "xmlparser" @@ -19759,6 +20313,30 @@ dependencies = [ "time", ] +[[package]] +name = "yoke" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c5b1314b079b0930c31e3af543d8ee1757b1951ae1e1565ec704403a7240ca5" +dependencies = [ + "serde", 
+ "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" +dependencies = [ + "proc-macro2 1.0.89", + "quote 1.0.37", + "syn 2.0.87", + "synstructure 0.13.1", +] + [[package]] name = "yup-oauth2" version = "8.3.2" @@ -19804,7 +20382,28 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", +] + +[[package]] +name = "zerofrom" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91ec111ce797d0e0784a1116d0ddcdbea84322cd79e5d5ad173daeba4f93ab55" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" +dependencies = [ + "proc-macro2 1.0.89", + "quote 1.0.37", + "syn 2.0.87", + "synstructure 0.13.1", ] [[package]] @@ -19824,7 +20423,29 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2 1.0.89", "quote 1.0.37", - "syn 2.0.85", + "syn 2.0.87", +] + +[[package]] +name = "zerovec" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" +dependencies = [ + "proc-macro2 1.0.89", + "quote 1.0.37", + "syn 2.0.87", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 67cc9896cda22..ed066f8183cd4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -93,7 +93,6 @@ members = [ "crates/sui-bridge", "crates/sui-bridge-cli", "crates/sui-bridge-indexer", - "crates/sui-bridge-watchdog", "crates/sui-cluster-test", "crates/sui-config", "crates/sui-core", @@ -104,6 +103,9 @@ members = [ "crates/sui-e2e-tests", "crates/sui-enum-compat-util", "crates/sui-faucet", + "crates/sui-field-count", + "crates/sui-field-count-derive", + "crates/sui-field-count-main", "crates/sui-framework", "crates/sui-framework-snapshot", "crates/sui-framework-tests", @@ -114,6 +116,7 @@ members = [ "crates/sui-graphql-rpc-client", "crates/sui-graphql-rpc-headers", "crates/sui-indexer", + "crates/sui-indexer-alt", "crates/sui-indexer-builder", "crates/sui-json", "crates/sui-json-rpc", @@ -121,12 +124,14 @@ members = [ "crates/sui-json-rpc-tests", "crates/sui-json-rpc-types", "crates/sui-keys", + "crates/sui-kvstore", "crates/sui-light-client", "crates/sui-macros", "crates/sui-metric-checker", "crates/sui-move", "crates/sui-move-build", "crates/sui-move-lsp", + "crates/sui-mvr-indexer", "crates/sui-network", "crates/sui-node", "crates/sui-open-rpc", @@ -154,6 +159,7 @@ members = [ "crates/sui-surfer", "crates/sui-swarm", "crates/sui-swarm-config", + "crates/sui-synthetic-ingestion", "crates/sui-telemetry", "crates/sui-test-transaction-builder", "crates/sui-test-validator", @@ -203,7 +209,7 @@ members = [ [workspace.package] # This version string will be inherited by sui-core, sui-faucet, sui-node, sui-tools, sui-sdk, sui-move-build, and sui crates. 
-version = "1.36.1" +version = "1.37.1" [profile.release] # debug = 1 means line charts only, which is minimum needed for good stack traces @@ -256,6 +262,7 @@ async-graphql = "=7.0.1" async-graphql-axum = "=7.0.1" async-graphql-value = "=7.0.1" async-recursion = "1.0.4" +async-stream = "0.3.6" async-trait = "0.1.61" atomic_float = "0.1" aws-config = "0.56" @@ -301,6 +308,7 @@ camino = "1.1.1" cfg-if = "1.0.0" chrono = { version = "0.4.26", features = ["clock", "serde"] } clap = { version = "4.4", features = ["derive", "wrap_help"] } +codespan-reporting = "0.11.1" collectable = "0.0.2" colored = "2.0.0" color-eyre = "0.6.2" @@ -344,6 +352,7 @@ futures-core = "0.3.21" git-version = "0.3.5" glob = "0.3.1" governor = "0.6.0" +gcp_auth = "0.12.3" hashbrown = "0.12" hdrhistogram = "7.5.1" hex = "0.4.3" @@ -418,6 +427,7 @@ proptest = "1.1.0" proptest-derive = "0.3.0" prost = "0.13" prost-build = "0.13" +prost-types = "0.13.1" protobuf = { version = "2.28", features = ["with-bytes"] } quinn-proto = "0.11.7" quote = "1.0.23" @@ -496,12 +506,10 @@ tokio-stream = { version = "0.1.14", features = ["sync", "net"] } tokio-util = "0.7.10" toml = { version = "0.7.4", features = ["preserve_order"] } toml_edit = { version = "0.19.10" } -# NOTE: do not enable the `tls` feature on tonic. It will break custom TLS handling -# for self signed certificates. Unit tests under consensus/core and other integration -# tests will fail. tonic = { version = "0.12", features = ["transport"] } tonic-build = { version = "0.12", features = ["prost", "transport"] } tonic-health = "0.12" +tonic-rustls = "0.1.0" tower = { version = "0.4.12", features = [ "full", "util", @@ -540,6 +548,7 @@ webpki = { version = "0.102", package = "rustls-webpki", features = [ "alloc", "std", ] } +wiremock = "0.5" x509-parser = "0.14.0" zstd = "0.12.3" zeroize = "1.6.0" @@ -579,10 +588,10 @@ mamoru-sui-types = { git = "https://github.com/Mamoru-Foundation/mamoru-core", r #mamoru-sniffer = { path = "../mamoru-core/mamoru-sniffer" } #mamoru-sui-types = { path = "../mamoru-core/blockchain-types/mamoru-sui-types" } -fastcrypto = { git = "https://github.com/MystenLabs/fastcrypto", rev = "c050ffc78b93739328af5d59b05f90e0e26b1b7e" } -fastcrypto-tbls = { git = "https://github.com/MystenLabs/fastcrypto", rev = "c050ffc78b93739328af5d59b05f90e0e26b1b7e" } -fastcrypto-zkp = { git = "https://github.com/MystenLabs/fastcrypto", rev = "c050ffc78b93739328af5d59b05f90e0e26b1b7e", package = "fastcrypto-zkp" } -fastcrypto-vdf = { git = "https://github.com/MystenLabs/fastcrypto", rev = "c050ffc78b93739328af5d59b05f90e0e26b1b7e", features = [ +fastcrypto = { git = "https://github.com/MystenLabs/fastcrypto", rev = "2f502fd8570fe4e9cff36eea5bbd6fef22002898" } +fastcrypto-tbls = { git = "https://github.com/MystenLabs/fastcrypto", rev = "2f502fd8570fe4e9cff36eea5bbd6fef22002898" } +fastcrypto-zkp = { git = "https://github.com/MystenLabs/fastcrypto", rev = "2f502fd8570fe4e9cff36eea5bbd6fef22002898", package = "fastcrypto-zkp" } +fastcrypto-vdf = { git = "https://github.com/MystenLabs/fastcrypto", rev = "2f502fd8570fe4e9cff36eea5bbd6fef22002898", features = [ "experimental", ] } passkey-types = { version = "0.2.0" } @@ -621,7 +630,6 @@ sui-archival = { path = "crates/sui-archival" } sui-authority-aggregation = { path = "crates/sui-authority-aggregation" } sui-benchmark = { path = "crates/sui-benchmark" } sui-bridge = { path = "crates/sui-bridge" } -sui-bridge-watchdog = { path = "crates/sui-bridge-watchdog" } sui-cluster-test = { path = "crates/sui-cluster-test" } 
sui-config = { path = "crates/sui-config" } sui-core = { path = "crates/sui-core" } @@ -631,6 +639,9 @@ sui-data-ingestion-core = { path = "crates/sui-data-ingestion-core" } sui-e2e-tests = { path = "crates/sui-e2e-tests" } sui-enum-compat-util = { path = "crates/sui-enum-compat-util" } sui-faucet = { path = "crates/sui-faucet" } +sui-field-count = { path = "crates/sui-field-count" } +sui-field-count-main = { path = "crates/sui-field-count-main" } +sui-field-count-derive = { path = "crates/sui-field-count-derive" } sui-framework = { path = "crates/sui-framework" } sui-framework-snapshot = { path = "crates/sui-framework-snapshot" } sui-framework-tests = { path = "crates/sui-framework-tests" } @@ -646,11 +657,13 @@ sui-json-rpc = { path = "crates/sui-json-rpc" } sui-json-rpc-api = { path = "crates/sui-json-rpc-api" } sui-json-rpc-types = { path = "crates/sui-json-rpc-types" } sui-keys = { path = "crates/sui-keys" } +sui-kvstore = {path = "crates/sui-kvstore"} sui-macros = { path = "crates/sui-macros" } sui-metric-checker = { path = "crates/sui-metric-checker" } sui-move = { path = "crates/sui-move" } sui-move-build = { path = "crates/sui-move-build" } sui-move-lsp = { path = "crates/sui-move-lsp" } +sui-mvr-indexer = { path = "crates/sui-mvr-indexer" } sui-network = { path = "crates/sui-network" } sui-node = { path = "crates/sui-node" } sui-open-rpc = { path = "crates/sui-open-rpc" } @@ -674,6 +687,7 @@ sui-storage = { path = "crates/sui-storage" } sui-surfer = { path = "crates/sui-surfer" } sui-swarm = { path = "crates/sui-swarm" } sui-swarm-config = { path = "crates/sui-swarm-config" } +sui-synthetic-ingestion = { path = "crates/sui-synthetic-ingestion" } sui-telemetry = { path = "crates/sui-telemetry" } sui-test-transaction-builder = { path = "crates/sui-test-transaction-builder" } sui-test-validator = { path = "crates/sui-test-validator" } diff --git a/apps/wallet/src/background/connections/ContentScriptConnection.ts b/apps/wallet/src/background/connections/ContentScriptConnection.ts index 4eb296c0a428d..1b72a231cb2d1 100644 --- a/apps/wallet/src/background/connections/ContentScriptConnection.ts +++ b/apps/wallet/src/background/connections/ContentScriptConnection.ts @@ -25,6 +25,7 @@ import { import Permissions from '_src/background/Permissions'; import Transactions from '_src/background/Transactions'; import { FEATURES, growthbook } from '_src/shared/experimentation/features'; +import { isDisconnectApp } from '_src/shared/messaging/messages/payloads/permissions/DisconnectApp'; import { isQredoConnectPayload } from '_src/shared/messaging/messages/payloads/QredoConnect'; import { isSignMessageRequest, @@ -151,6 +152,8 @@ export class ContentScriptConnection extends Connection { throw new Error('This feature is not implemented yet.'); } await requestUserApproval(payload.args, this, msg); + } else if (isDisconnectApp(payload)) { + await Permissions.delete(this.origin); } else { throw new Error(`Unknown message, ${JSON.stringify(msg.payload)}`); } diff --git a/apps/wallet/src/dapp-interface/WalletStandardInterface.ts b/apps/wallet/src/dapp-interface/WalletStandardInterface.ts index 389dcc6dccb75..7350a5b88f151 100644 --- a/apps/wallet/src/dapp-interface/WalletStandardInterface.ts +++ b/apps/wallet/src/dapp-interface/WalletStandardInterface.ts @@ -22,6 +22,7 @@ import type { } from '_payloads/transactions'; import { API_ENV } from '_src/shared/api-env'; import type { NetworkEnvType } from '_src/shared/api-env'; +import { type DisconnectApp } from 
'_src/shared/messaging/messages/payloads/permissions/DisconnectApp'; import { isQredoConnectPayload, type QredoConnectPayload, @@ -40,6 +41,8 @@ import { SUI_TESTNET_CHAIN, type StandardConnectFeature, type StandardConnectMethod, + type StandardDisconnectFeature, + type StandardDisconnectMethod, type StandardEventsFeature, type StandardEventsListeners, type StandardEventsOnMethod, @@ -119,6 +122,7 @@ export class SuiWallet implements Wallet { get features(): StandardConnectFeature & StandardEventsFeature & + StandardDisconnectFeature & SuiFeatures & QredoConnectFeature { return { @@ -130,6 +134,10 @@ export class SuiWallet implements Wallet { version: '1.0.0', on: this.#on, }, + 'standard:disconnect': { + version: '1.0.0', + disconnect: this.#disconnect, + }, 'sui:signTransactionBlock': { version: '1.0.0', signTransactionBlock: this.#signTransactionBlock, @@ -244,6 +252,13 @@ export class SuiWallet implements Wallet { return { accounts: this.accounts }; }; + #disconnect: StandardDisconnectMethod = async () => { + this.#send({ + type: 'disconnect-app', + origin: '', // origin is auto-discovered for wallet's disconnect. + }); + }; + #signTransactionBlock: SuiSignTransactionBlockMethod = async ({ transactionBlock, account, diff --git a/apps/wallet/src/shared/analytics/ampli/index.ts b/apps/wallet/src/shared/analytics/ampli/index.ts index 2c52d691daa87..03db6920b3800 100644 --- a/apps/wallet/src/shared/analytics/ampli/index.ts +++ b/apps/wallet/src/shared/analytics/ampli/index.ts @@ -231,6 +231,18 @@ export interface ClickedSwapCoinProperties { totalBalance?: number; } +export interface ClickedTokenClaimsBannerProperties { + /** + * A generic name property that can be used across events. + */ + name: string; + /** + * The ID of an object on Sui. + */ + objectId: string; + objectType: string; +} + export interface ClickedUnstakeSuiProperties { /** * The amount of SUI staked. @@ -514,6 +526,10 @@ export interface SwappedCoinProperties { */ estimatedReturnBalance: number; fromCoinType: string; + /** + * swap provider name + */ + provider?: string; toCoinType: string; /** * The total balance of the selected coin that the user has. @@ -537,6 +553,10 @@ export interface SwappedCoinFailedProperties { */ estimatedReturnBalance: number; fromCoinType: string; + /** + * swap provider name + */ + provider?: string; toCoinType: string; /** * The total balance of the selected coin that the user has. @@ -691,6 +711,14 @@ export class ClickedSwapCoin implements BaseEvent { } } +export class ClickedTokenClaimsBanner implements BaseEvent { + event_type = 'clicked token claims banner'; + + constructor(public event_properties: ClickedTokenClaimsBannerProperties) { + this.event_properties = event_properties; + } +} + export class ClickedUnstakeSui implements BaseEvent { event_type = 'clicked unstake SUI'; @@ -1300,6 +1328,23 @@ export class Ampli { return this.track(new ClickedSwapCoin(properties), options); } + /** + * clicked token claims banner + * + * [View in Tracking Plan](https://data.amplitude.com/mystenlabs/Sui%20Wallet/events/main/latest/clicked%20token%20claims%20banner) + * + * Event has no description in tracking plan. + * + * @param properties The event's properties (e.g. name) + * @param options Amplitude event options. 
+ */ + clickedTokenClaimsBanner( + properties: ClickedTokenClaimsBannerProperties, + options?: EventOptions, + ) { + return this.track(new ClickedTokenClaimsBanner(properties), options); + } + /** * clicked unstake SUI * diff --git a/apps/wallet/src/ui/app/pages/swap/index.tsx b/apps/wallet/src/ui/app/pages/swap/index.tsx index 93396613d06ab..a6b040f4d96f9 100644 --- a/apps/wallet/src/ui/app/pages/swap/index.tsx +++ b/apps/wallet/src/ui/app/pages/swap/index.tsx @@ -222,6 +222,7 @@ export function SwapPage() { toCoinType: toCoinType || '', totalBalance: Number(amount), estimatedReturnBalance: inputAmountInUSD || 0, + provider: swapData?.provider, }); const receiptUrl = `/receipt?txdigest=${encodeURIComponent( @@ -229,6 +230,16 @@ export function SwapPage() { )}&from=transactions`; return navigate(receiptUrl); }, + onError: (error) => { + ampli.swappedCoinFailed({ + estimatedReturnBalance: Number(swapData?.formattedToAmount || 0), + fromCoinType: fromCoinType!, + toCoinType: toCoinType!, + totalBalance: Number(amount || 0), + errorMessage: error.message, + provider: swapData?.provider, + }); + }, }); const handleOnsubmit: SubmitHandler = (formData) => { diff --git a/consensus/core/Cargo.toml b/consensus/core/Cargo.toml index 2cb277834c9b2..6d4756ac4075f 100644 --- a/consensus/core/Cargo.toml +++ b/consensus/core/Cargo.toml @@ -57,6 +57,7 @@ tower.workspace = true tower-http.workspace = true tracing.workspace = true typed-store.workspace = true +tonic-rustls.workspace = true [dev-dependencies] rstest.workspace = true diff --git a/consensus/core/src/core.rs b/consensus/core/src/core.rs index ab9569243cef8..a1ff274e6722c 100644 --- a/consensus/core/src/core.rs +++ b/consensus/core/src/core.rs @@ -478,6 +478,18 @@ impl Core { // Ensure the new block and its ancestors are persisted, before broadcasting it. self.dag_state.write().flush(); + let current_proposal_duration = Duration::from_millis(verified_block.timestamp_ms()); + let previous_proposal_duration = Duration::from_millis(self.last_proposed_timestamp_ms()); + self.context + .metrics + .node_metrics + .block_proposal_interval + .observe( + current_proposal_duration + .saturating_sub(previous_proposal_duration) + .as_secs_f64(), + ); + // Update internal state. 
self.last_proposed_block = verified_block.clone(); diff --git a/consensus/core/src/metrics.rs b/consensus/core/src/metrics.rs index a9f0ad175e848..11d303f3cf65a 100644 --- a/consensus/core/src/metrics.rs +++ b/consensus/core/src/metrics.rs @@ -105,6 +105,7 @@ pub(crate) struct NodeMetrics { pub(crate) proposed_block_ancestors_depth: HistogramVec, pub(crate) highest_verified_authority_round: IntGaugeVec, pub(crate) lowest_verified_authority_round: IntGaugeVec, + pub(crate) block_proposal_interval: Histogram, pub(crate) block_proposal_leader_wait_ms: IntCounterVec, pub(crate) block_proposal_leader_wait_count: IntCounterVec, pub(crate) block_timestamp_drift_wait_ms: IntCounterVec, @@ -235,6 +236,12 @@ impl NodeMetrics { &["authority"], registry, ).unwrap(), + block_proposal_interval: register_histogram_with_registry!( + "block_proposal_interval", + "Intervals (in secs) between block proposals.", + FINE_GRAINED_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ).unwrap(), block_proposal_leader_wait_ms: register_int_counter_vec_with_registry!( "block_proposal_leader_wait_ms", "Total time in ms spent waiting for a leader when proposing blocks.", diff --git a/consensus/core/src/network/tonic_network.rs b/consensus/core/src/network/tonic_network.rs index 185f59786b163..59a29a102ff90 100644 --- a/consensus/core/src/network/tonic_network.rs +++ b/consensus/core/src/network/tonic_network.rs @@ -24,6 +24,7 @@ use mysten_network::{ Multiaddr, }; use parking_lot::RwLock; +use sui_tls::AllowPublicKeys; use tokio::{ pin, task::JoinSet, @@ -44,7 +45,6 @@ use super::{ consensus_service_client::ConsensusServiceClient, consensus_service_server::ConsensusService, }, - tonic_tls::create_rustls_client_config, BlockStream, NetworkClient, NetworkManager, NetworkService, }; use crate::{ @@ -54,7 +54,7 @@ use crate::{ error::{ConsensusError, ConsensusResult}, network::{ tonic_gen::consensus_service_server::ConsensusServiceServer, - tonic_tls::create_rustls_server_config, + tonic_tls::certificate_server_name, }, CommitIndex, Round, }; @@ -339,7 +339,7 @@ impl NetworkClient for TonicClient { // Tonic channel wrapped with layers. type Channel = mysten_network::callback::Callback< tower_http::trace::Trace< - tonic::transport::Channel, + tonic_rustls::Channel, tower_http::classify::SharedClassifier, >, MetricsCallbackMaker, @@ -381,7 +381,17 @@ impl ChannelPool { let address = format!("https://{address}"); let config = &self.context.parameters.tonic; let buffer_size = config.connection_buffer_size; - let endpoint = tonic::transport::Channel::from_shared(address.clone()) + let client_tls_config = sui_tls::create_rustls_client_config( + self.context + .committee + .authority(peer) + .network_key + .clone() + .into_inner(), + certificate_server_name(&self.context), + Some(network_keypair.private_key().into_inner()), + ); + let endpoint = tonic_rustls::Channel::from_shared(address.clone()) .unwrap() .connect_timeout(timeout) .initial_connection_window_size(Some(buffer_size as u32)) @@ -391,22 +401,14 @@ impl ChannelPool { .http2_keep_alive_interval(config.keepalive_interval) // tcp keepalive is probably unnecessary and is unsupported by msim. 
.user_agent("mysticeti") + .unwrap() + .tls_config(client_tls_config) .unwrap(); - let client_tls_config = create_rustls_client_config(&self.context, network_keypair, peer); - let https_connector = hyper_rustls::HttpsConnectorBuilder::new() - .with_tls_config(client_tls_config) - .https_only() - .enable_http2() - .build(); - let deadline = tokio::time::Instant::now() + timeout; let channel = loop { trace!("Connecting to endpoint at {address}"); - match endpoint - .connect_with_connector(https_connector.clone()) - .await - { + match endpoint.connect().await { Ok(channel) => break channel, Err(e) => { warn!("Failed to connect to endpoint at {address}: {e:?}"); @@ -735,8 +737,17 @@ impl NetworkManager for TonicManager { Arc::new(builder) }; - let tls_server_config = - create_rustls_server_config(&self.context, self.network_keypair.clone()); + let tls_server_config = sui_tls::create_rustls_server_config( + self.network_keypair.clone().private_key().into_inner(), + certificate_server_name(&self.context), + AllowPublicKeys::new( + self.context + .committee + .authorities() + .map(|(_i, a)| a.network_key.clone().into_inner()) + .collect(), + ), + ); let tls_acceptor = TlsAcceptor::from(Arc::new(tls_server_config)); // Create listener to incoming connections. diff --git a/consensus/core/src/network/tonic_tls.rs b/consensus/core/src/network/tonic_tls.rs index 13377934e3b18..6e7ff630115ec 100644 --- a/consensus/core/src/network/tonic_tls.rs +++ b/consensus/core/src/network/tonic_tls.rs @@ -2,63 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::context::Context; -use consensus_config::{AuthorityIndex, NetworkKeyPair}; -use sui_tls::AllowPublicKeys; -use tokio_rustls::rustls::{ClientConfig, ServerConfig}; -pub(crate) fn create_rustls_server_config( - context: &Context, - network_keypair: NetworkKeyPair, -) -> ServerConfig { - let allower = AllowPublicKeys::new( - context - .committee - .authorities() - .map(|(_i, a)| a.network_key.clone().into_inner()) - .collect(), - ); - let verifier = sui_tls::ClientCertVerifier::new(allower, certificate_server_name(context)); - // TODO: refactor to use key bytes - let self_signed_cert = sui_tls::SelfSignedCertificate::new( - network_keypair.private_key().into_inner(), - &certificate_server_name(context), - ); - let tls_cert = self_signed_cert.rustls_certificate(); - let tls_private_key = self_signed_cert.rustls_private_key(); - let mut tls_config = verifier - .rustls_server_config(vec![tls_cert], tls_private_key) - .unwrap_or_else(|e| panic!("Failed to create TLS server config: {:?}", e)); - tls_config.alpn_protocols = vec![b"h2".to_vec()]; - tls_config -} - -pub(crate) fn create_rustls_client_config( - context: &Context, - network_keypair: NetworkKeyPair, - target: AuthorityIndex, -) -> ClientConfig { - let target_public_key = context - .committee - .authority(target) - .network_key - .clone() - .into_inner(); - let self_signed_cert = sui_tls::SelfSignedCertificate::new( - network_keypair.private_key().into_inner(), - &certificate_server_name(context), - ); - let tls_cert = self_signed_cert.rustls_certificate(); - let tls_private_key = self_signed_cert.rustls_private_key(); - let mut tls_config = - sui_tls::ServerCertVerifier::new(target_public_key, certificate_server_name(context)) - .rustls_client_config(vec![tls_cert], tls_private_key) - .unwrap_or_else(|e| panic!("Failed to create TLS client config: {:?}", e)); - // ServerCertVerifier sets alpn for completeness, but alpn cannot be predefined when - // using HttpsConnector from hyper-rustls, as in 
TonicManager. - tls_config.alpn_protocols = vec![]; - tls_config -} - -fn certificate_server_name(context: &Context) -> String { +pub(crate) fn certificate_server_name(context: &Context) -> String { format!("consensus_epoch_{}", context.committee.epoch()) } diff --git a/consensus/core/src/threshold_clock.rs b/consensus/core/src/threshold_clock.rs index 8786383acbb92..ef8ca8b752973 100644 --- a/consensus/core/src/threshold_clock.rs +++ b/consensus/core/src/threshold_clock.rs @@ -38,7 +38,7 @@ impl ThresholdClock { (self.round > previous_round).then_some(self.round) } - pub(crate) fn add_block(&mut self, block: BlockRef) { + fn add_block(&mut self, block: BlockRef) { match block.round.cmp(&self.round) { // Blocks with round less then what we currently build are irrelevant here Ordering::Less => {} diff --git a/crates/mysten-common/src/logging.rs b/crates/mysten-common/src/logging.rs index 8ba327026953b..3cd5c9ad0a371 100644 --- a/crates/mysten-common/src/logging.rs +++ b/crates/mysten-common/src/logging.rs @@ -15,8 +15,11 @@ macro_rules! debug_fatal { if cfg!(debug_assertions) { $crate::fatal!($($arg)*); } else { - // TODO: Export invariant metric for alerting tracing::error!(debug_fatal = true, $($arg)*); + let location = concat!(file!(), ':', line!()); + if let Some(metrics) = mysten_metrics::get_metrics() { + metrics.system_invariant_violations.with_label_values(&[location]).inc(); + } } }}; } diff --git a/crates/mysten-metrics/src/lib.rs b/crates/mysten-metrics/src/lib.rs index 3fb40de20573a..8cf2310fce3e0 100644 --- a/crates/mysten-metrics/src/lib.rs +++ b/crates/mysten-metrics/src/lib.rs @@ -15,8 +15,9 @@ use std::time::Instant; use once_cell::sync::OnceCell; use prometheus::{ - register_histogram_with_registry, register_int_gauge_vec_with_registry, Histogram, IntGaugeVec, - Registry, TextEncoder, + register_histogram_with_registry, register_int_counter_vec_with_registry, + register_int_gauge_vec_with_registry, Histogram, IntCounterVec, IntGaugeVec, Registry, + TextEncoder, }; use tap::TapFallible; use tracing::{warn, Span}; @@ -69,6 +70,7 @@ pub struct Metrics { pub scope_duration_ns: IntGaugeVec, pub scope_entrance: IntGaugeVec, pub thread_stall_duration_sec: Histogram, + pub system_invariant_violations: IntCounterVec, } impl Metrics { @@ -143,6 +145,12 @@ impl Metrics { registry, ) .unwrap(), + system_invariant_violations: register_int_counter_vec_with_registry!( + "system_invariant_violations", + "Number of system invariant violations", + &["name"], + registry, + ).unwrap(), } } } diff --git a/crates/mysten-network/Cargo.toml b/crates/mysten-network/Cargo.toml index 3fb61694e170f..18426cb914806 100644 --- a/crates/mysten-network/Cargo.toml +++ b/crates/mysten-network/Cargo.toml @@ -9,6 +9,7 @@ publish = false [dependencies] anemo.workspace = true +async-stream.workspace = true bcs.workspace = true bytes.workspace = true eyre.workspace = true @@ -18,8 +19,10 @@ multiaddr.workspace = true serde.workspace = true once_cell.workspace = true snap.workspace = true +hyper-rustls.workspace = true hyper-util.workspace = true tokio = { workspace = true, features = ["sync", "rt", "macros"] } +tokio-rustls.workspace = true tokio-stream.workspace = true tonic.workspace = true tonic-health.workspace = true diff --git a/crates/mysten-network/src/client.rs b/crates/mysten-network/src/client.rs index f0c188f54f21c..8cb508c798431 100644 --- a/crates/mysten-network/src/client.rs +++ b/crates/mysten-network/src/client.rs @@ -21,53 +21,67 @@ use std::{ vec, }; use tokio::task::JoinHandle; +use 
tokio_rustls::rustls::ClientConfig; use tonic::transport::{Channel, Endpoint, Uri}; use tower::Service; use tracing::{info, trace}; -pub async fn connect(address: &Multiaddr) -> Result<Channel> { - let channel = endpoint_from_multiaddr(address)?.connect().await?; +pub async fn connect(address: &Multiaddr, tls_config: Option<ClientConfig>) -> Result<Channel> { + let channel = endpoint_from_multiaddr(address, tls_config)? + .connect() + .await?; Ok(channel) } -pub fn connect_lazy(address: &Multiaddr) -> Result<Channel> { - let channel = endpoint_from_multiaddr(address)?.connect_lazy(); +pub fn connect_lazy(address: &Multiaddr, tls_config: Option<ClientConfig>) -> Result<Channel> { + let channel = endpoint_from_multiaddr(address, tls_config)?.connect_lazy(); Ok(channel) } -pub(crate) async fn connect_with_config(address: &Multiaddr, config: &Config) -> Result<Channel> { - let channel = endpoint_from_multiaddr(address)? +pub(crate) async fn connect_with_config( + address: &Multiaddr, + tls_config: Option<ClientConfig>, + config: &Config, +) -> Result<Channel> { + let channel = endpoint_from_multiaddr(address, tls_config)? .apply_config(config) .connect() .await?; Ok(channel) } -pub(crate) fn connect_lazy_with_config(address: &Multiaddr, config: &Config) -> Result<Channel> { - let channel = endpoint_from_multiaddr(address)? +pub(crate) fn connect_lazy_with_config( + address: &Multiaddr, + tls_config: Option<ClientConfig>, + config: &Config, +) -> Result<Channel> { + let channel = endpoint_from_multiaddr(address, tls_config)? .apply_config(config) .connect_lazy(); Ok(channel) } -fn endpoint_from_multiaddr(addr: &Multiaddr) -> Result<MyEndpoint> { +fn endpoint_from_multiaddr( + addr: &Multiaddr, + tls_config: Option<ClientConfig>, +) -> Result<MyEndpoint> { let mut iter = addr.iter(); let channel = match iter.next().ok_or_else(|| eyre!("address is empty"))? { Protocol::Dns(_) => { let (dns_name, tcp_port, http_or_https) = parse_dns(addr)?; let uri = format!("{http_or_https}://{dns_name}:{tcp_port}"); - MyEndpoint::try_from_uri(uri)? + MyEndpoint::try_from_uri(uri, tls_config)? } Protocol::Ip4(_) => { let (socket_addr, http_or_https) = parse_ip4(addr)?; let uri = format!("{http_or_https}://{socket_addr}"); - MyEndpoint::try_from_uri(uri)? + MyEndpoint::try_from_uri(uri, tls_config)? } Protocol::Ip6(_) => { let (socket_addr, http_or_https) = parse_ip6(addr)?; let uri = format!("{http_or_https}://{socket_addr}"); - MyEndpoint::try_from_uri(uri)? + MyEndpoint::try_from_uri(uri, tls_config)?
} unsupported => return Err(eyre!("unsupported protocol {unsupported}")), }; @@ -77,21 +91,25 @@ fn endpoint_from_multiaddr(addr: &Multiaddr) -> Result<MyEndpoint> { struct MyEndpoint { endpoint: Endpoint, + tls_config: Option<ClientConfig>, } static DISABLE_CACHING_RESOLVER: OnceCell<bool> = OnceCell::new(); impl MyEndpoint { - fn new(endpoint: Endpoint) -> Self { - Self { endpoint } + fn new(endpoint: Endpoint, tls_config: Option<ClientConfig>) -> Self { + Self { + endpoint, + tls_config, + } } - fn try_from_uri(uri: String) -> Result<Self> { + fn try_from_uri(uri: String, tls_config: Option<ClientConfig>) -> Result<Self> { let uri: Uri = uri .parse() .with_context(|| format!("unable to create Uri from '{uri}'"))?; let endpoint = Endpoint::from(uri); - Ok(Self::new(endpoint)) + Ok(Self::new(endpoint, tls_config)) } fn apply_config(mut self, config: &Config) -> Self { @@ -107,7 +125,17 @@ impl MyEndpoint { }); if disable_caching_resolver { - self.endpoint.connect_lazy() + if let Some(tls_config) = self.tls_config { + self.endpoint.connect_with_connector_lazy( + hyper_rustls::HttpsConnectorBuilder::new() + .with_tls_config(tls_config) + .https_only() + .enable_http2() + .build(), + ) + } else { + self.endpoint.connect_lazy() + } } else { let mut http = HttpConnector::new_with_resolver(CachingResolver::new()); http.enforce_http(false); @@ -115,12 +143,33 @@ impl MyEndpoint { http.set_keepalive(None); http.set_connect_timeout(None); - self.endpoint.connect_with_connector_lazy(http) + if let Some(tls_config) = self.tls_config { + let https = hyper_rustls::HttpsConnectorBuilder::new() + .with_tls_config(tls_config) + .https_only() + .enable_http1() + .wrap_connector(http); + self.endpoint.connect_with_connector_lazy(https) + } else { + self.endpoint.connect_with_connector_lazy(http) + } } } async fn connect(self) -> Result<Channel> { - self.endpoint.connect().await.map_err(Into::into) + if let Some(tls_config) = self.tls_config { + let https_connector = hyper_rustls::HttpsConnectorBuilder::new() + .with_tls_config(tls_config) + .https_only() + .enable_http2() + .build(); + self.endpoint + .connect_with_connector(https_connector) + .await + .map_err(Into::into) + } else { + self.endpoint.connect().await.map_err(Into::into) + } } } diff --git a/crates/mysten-network/src/config.rs b/crates/mysten-network/src/config.rs index 1e59dbe75bcf8..eab88a024ec41 100644 --- a/crates/mysten-network/src/config.rs +++ b/crates/mysten-network/src/config.rs @@ -9,6 +9,7 @@ use crate::{ use eyre::Result; use serde::{Deserialize, Serialize}; use std::time::Duration; +use tokio_rustls::rustls::ClientConfig; use tonic::transport::Channel; #[derive(Debug, Default, Deserialize, Serialize)] @@ -90,11 +91,19 @@ impl Config { ServerBuilder::from_config(self, metrics_provider) } - pub async fn connect(&self, addr: &Multiaddr) -> Result<Channel> { - connect_with_config(addr, self).await + pub async fn connect( + &self, + addr: &Multiaddr, + tls_config: Option<ClientConfig>, + ) -> Result<Channel> { + connect_with_config(addr, tls_config, self).await } - pub fn connect_lazy(&self, addr: &Multiaddr) -> Result<Channel> { - connect_lazy_with_config(addr, self) + pub fn connect_lazy( + &self, + addr: &Multiaddr, + tls_config: Option<ClientConfig>, + ) -> Result<Channel> { + connect_lazy_with_config(addr, tls_config, self) } } diff --git a/crates/mysten-network/src/server.rs b/crates/mysten-network/src/server.rs index 4bac6fe61ae52..8d3986c6fd205 100644 --- a/crates/mysten-network/src/server.rs +++ b/crates/mysten-network/src/server.rs @@ -9,11 +9,15 @@ use crate::{ multiaddr::{parse_dns, parse_ip4, parse_ip6, Multiaddr, Protocol}, }; use eyre::{eyre, Result}; -use 
futures::FutureExt; +use futures::{FutureExt, Stream}; +use std::pin::Pin; +use std::sync::Arc; use std::task::{Context, Poll}; use std::{convert::Infallible, net::SocketAddr}; -use tokio::net::{TcpListener, ToSocketAddrs}; -use tokio_stream::wrappers::TcpListenerStream; +use tokio::io::{AsyncRead, AsyncWrite}; +use tokio::net::{TcpListener, TcpStream, ToSocketAddrs}; +use tokio_rustls::rustls::ServerConfig; +use tokio_rustls::{server::TlsStream, TlsAcceptor}; use tonic::codegen::http::HeaderValue; use tonic::{ body::BoxBody, @@ -35,6 +39,7 @@ use tower_http::classify::{GrpcErrorsAsFailures, SharedClassifier}; use tower_http::propagate_header::PropagateHeaderLayer; use tower_http::set_header::SetRequestHeaderLayer; use tower_http::trace::{DefaultMakeSpan, DefaultOnBodyChunk, DefaultOnEos, TraceLayer}; +use tracing::debug; pub struct ServerBuilder { router: Router>, @@ -155,46 +160,48 @@ impl ServerBuilder { self } - pub async fn bind(self, addr: &Multiaddr) -> Result { + pub async fn bind(self, addr: &Multiaddr, tls_config: Option) -> Result { let mut iter = addr.iter(); let (tx_cancellation, rx_cancellation) = tokio::sync::oneshot::channel(); let rx_cancellation = rx_cancellation.map(|_| ()); - let (local_addr, server): (Multiaddr, BoxFuture<(), tonic::transport::Error>) = - match iter.next().ok_or_else(|| eyre!("malformed addr"))? { - Protocol::Dns(_) => { - let (dns_name, tcp_port, _http_or_https) = parse_dns(addr)?; - let (local_addr, incoming) = - tcp_listener_and_update_multiaddr(addr, (dns_name.as_ref(), tcp_port)) - .await?; - let server = Box::pin( - self.router - .serve_with_incoming_shutdown(incoming, rx_cancellation), - ); - (local_addr, server) - } - Protocol::Ip4(_) => { - let (socket_addr, _http_or_https) = parse_ip4(addr)?; - let (local_addr, incoming) = - tcp_listener_and_update_multiaddr(addr, socket_addr).await?; - let server = Box::pin( - self.router - .serve_with_incoming_shutdown(incoming, rx_cancellation), - ); - (local_addr, server) - } - Protocol::Ip6(_) => { - let (socket_addr, _http_or_https) = parse_ip6(addr)?; - let (local_addr, incoming) = - tcp_listener_and_update_multiaddr(addr, socket_addr).await?; - let server = Box::pin( - self.router - .serve_with_incoming_shutdown(incoming, rx_cancellation), - ); - (local_addr, server) - } - unsupported => return Err(eyre!("unsupported protocol {unsupported}")), - }; + let (local_addr, server): (Multiaddr, BoxFuture<(), tonic::transport::Error>) = match iter + .next() + .ok_or_else(|| eyre!("malformed addr"))? 
+ { + Protocol::Dns(_) => { + let (dns_name, tcp_port, _http_or_https) = parse_dns(addr)?; + let (local_addr, incoming) = + listen_and_update_multiaddr(addr, (dns_name.to_string(), tcp_port), tls_config) + .await?; + let server = Box::pin( + self.router + .serve_with_incoming_shutdown(incoming, rx_cancellation), + ); + (local_addr, server) + } + Protocol::Ip4(_) => { + let (socket_addr, _http_or_https) = parse_ip4(addr)?; + let (local_addr, incoming) = + listen_and_update_multiaddr(addr, socket_addr, tls_config).await?; + let server = Box::pin( + self.router + .serve_with_incoming_shutdown(incoming, rx_cancellation), + ); + (local_addr, server) + } + Protocol::Ip6(_) => { + let (socket_addr, _http_or_https) = parse_ip6(addr)?; + let (local_addr, incoming) = + listen_and_update_multiaddr(addr, socket_addr, tls_config).await?; + let server = Box::pin( + self.router + .serve_with_incoming_shutdown(incoming, rx_cancellation), + ); + (local_addr, server) + } + unsupported => return Err(eyre!("unsupported protocol {unsupported}")), + }; Ok(Server { server, @@ -205,22 +212,134 @@ impl ServerBuilder { } } -async fn tcp_listener_and_update_multiaddr( +async fn listen_and_update_multiaddr( address: &Multiaddr, socket_addr: T, -) -> Result<(Multiaddr, TcpListenerStream)> { - let (local_addr, incoming) = tcp_listener(socket_addr).await?; + tls_config: Option, +) -> Result<( + Multiaddr, + impl Stream>, +)> { + let listener = TcpListener::bind(socket_addr).await?; + let local_addr = listener.local_addr()?; let local_addr = update_tcp_port_in_multiaddr(address, local_addr.port()); - Ok((local_addr, incoming)) + + let tls_acceptor = tls_config.map(|tls_config| TlsAcceptor::from(Arc::new(tls_config))); + let incoming = TcpOrTlsListener::new(listener, tls_acceptor); + let stream = async_stream::stream! { + loop { + yield incoming.accept().await; + } + }; + + Ok((local_addr, stream)) } -async fn tcp_listener(address: T) -> Result<(SocketAddr, TcpListenerStream)> { - let listener = TcpListener::bind(address).await?; - let local_addr = listener.local_addr()?; - let incoming = TcpListenerStream::new(listener); - Ok((local_addr, incoming)) +pub struct TcpOrTlsListener { + listener: TcpListener, + tls_acceptor: Option, } +impl TcpOrTlsListener { + fn new(listener: TcpListener, tls_acceptor: Option) -> Self { + Self { + listener, + tls_acceptor, + } + } + + async fn accept(&self) -> std::io::Result { + let (stream, addr) = self.listener.accept().await?; + if self.tls_acceptor.is_none() { + return Ok(TcpOrTlsStream::Tcp(stream, addr)); + } + + // Determine whether new connection is TLS. + let mut buf = [0; 1]; + stream.peek(&mut buf).await?; + if buf[0] == 0x16 { + // First byte of a TLS handshake is 0x16. 
+ debug!("accepting TLS connection from {addr:?}"); + let stream = self.tls_acceptor.as_ref().unwrap().accept(stream).await?; + Ok(TcpOrTlsStream::Tls(stream, addr)) + } else { + debug!("accepting TCP connection from {addr:?}"); + Ok(TcpOrTlsStream::Tcp(stream, addr)) + } + } +} + +pub enum TcpOrTlsStream { + Tcp(TcpStream, SocketAddr), + Tls(TlsStream, SocketAddr), +} + +impl AsyncRead for TcpOrTlsStream { + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut tokio::io::ReadBuf, + ) -> Poll> { + match self.get_mut() { + TcpOrTlsStream::Tcp(stream, _) => Pin::new(stream).poll_read(cx, buf), + TcpOrTlsStream::Tls(stream, _) => Pin::new(stream).poll_read(cx, buf), + } + } +} + +impl AsyncWrite for TcpOrTlsStream { + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + match self.get_mut() { + TcpOrTlsStream::Tcp(stream, _) => Pin::new(stream).poll_write(cx, buf), + TcpOrTlsStream::Tls(stream, _) => Pin::new(stream).poll_write(cx, buf), + } + } + + fn poll_flush( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + match self.get_mut() { + TcpOrTlsStream::Tcp(stream, _) => Pin::new(stream).poll_flush(cx), + TcpOrTlsStream::Tls(stream, _) => Pin::new(stream).poll_flush(cx), + } + } + + fn poll_shutdown( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + match self.get_mut() { + TcpOrTlsStream::Tcp(stream, _) => Pin::new(stream).poll_shutdown(cx), + TcpOrTlsStream::Tls(stream, _) => Pin::new(stream).poll_shutdown(cx), + } + } +} + +impl tonic::transport::server::Connected for TcpOrTlsStream { + type ConnectInfo = tonic::transport::server::TcpConnectInfo; + + fn connect_info(&self) -> Self::ConnectInfo { + match self { + TcpOrTlsStream::Tcp(stream, addr) => Self::ConnectInfo { + local_addr: stream.local_addr().ok(), + remote_addr: Some(*addr), + }, + TcpOrTlsStream::Tls(stream, addr) => Self::ConnectInfo { + local_addr: stream.get_ref().0.local_addr().ok(), + remote_addr: Some(*addr), + }, + } + } +} + +/// TLS server name to use for the public Sui validator interface. 
+pub const SUI_TLS_SERVER_NAME: &str = "sui"; + pub struct Server { server: BoxFuture<(), tonic::transport::Error>, cancel_handle: Option>, @@ -318,14 +437,14 @@ mod test { let mut server = config .server_builder_with_metrics(metrics.clone()) - .bind(&address) + .bind(&address, None) .await .unwrap(); let address = server.local_addr().to_owned(); let cancel_handle = server.take_cancel_handle().unwrap(); let server_handle = tokio::spawn(server.serve()); - let channel = config.connect(&address).await.unwrap(); + let channel = config.connect(&address, None).await.unwrap(); let mut client = HealthClient::new(channel); client @@ -381,14 +500,14 @@ mod test { let mut server = config .server_builder_with_metrics(metrics.clone()) - .bind(&address) + .bind(&address, None) .await .unwrap(); let address = server.local_addr().to_owned(); let cancel_handle = server.take_cancel_handle().unwrap(); let server_handle = tokio::spawn(server.serve()); - let channel = config.connect(&address).await.unwrap(); + let channel = config.connect(&address, None).await.unwrap(); let mut client = HealthClient::new(channel); // Call the healthcheck for a service that doesn't exist @@ -408,11 +527,11 @@ mod test { async fn test_multiaddr(address: Multiaddr) { let config = Config::new(); - let mut server = config.server_builder().bind(&address).await.unwrap(); + let mut server = config.server_builder().bind(&address, None).await.unwrap(); let address = server.local_addr().to_owned(); let cancel_handle = server.take_cancel_handle().unwrap(); let server_handle = tokio::spawn(server.serve()); - let channel = config.connect(&address).await.unwrap(); + let channel = config.connect(&address, None).await.unwrap(); let mut client = HealthClient::new(channel); client diff --git a/crates/mysten-util-mem/Cargo.toml b/crates/mysten-util-mem/Cargo.toml index 0c5b923205e31..4cd5f54afe08d 100644 --- a/crates/mysten-util-mem/Cargo.toml +++ b/crates/mysten-util-mem/Cargo.toml @@ -14,7 +14,7 @@ cfg-if.workspace = true hashbrown = { workspace = true, optional = true } mysten-util-mem-derive.workspace = true impl-trait-for-tuples.workspace = true -fastcrypto = { workspace = true } +fastcrypto.workspace = true fastcrypto-tbls.workspace = true indexmap.workspace = true roaring.workspace = true diff --git a/crates/simulacrum/src/lib.rs b/crates/simulacrum/src/lib.rs index 32ffab8beaca6..9e8024ac8469c 100644 --- a/crates/simulacrum/src/lib.rs +++ b/crates/simulacrum/src/lib.rs @@ -391,6 +391,12 @@ impl Simulacrum { .unwrap(); } + pub fn override_last_checkpoint_number(&mut self, number: CheckpointSequenceNumber) { + let committee = CommitteeWithKeys::new(&self.keystore, self.epoch_state.committee()); + self.checkpoint_builder + .override_last_checkpoint_number(number, &committee); + } + fn process_data_ingestion( &self, checkpoint: VerifiedCheckpoint, diff --git a/crates/sui-adapter-transactional-tests/tests/programmable/split_coins.exp b/crates/sui-adapter-transactional-tests/tests/programmable/split_coins.exp index 220540ce069e5..a14e830ed4f04 100644 --- a/crates/sui-adapter-transactional-tests/tests/programmable/split_coins.exp +++ b/crates/sui-adapter-transactional-tests/tests/programmable/split_coins.exp @@ -7,7 +7,7 @@ task 1, lines 9-22: //# publish created: object(1,0) mutated: object(0,2) -gas summary: computation_cost: 1000000, storage_cost: 5677200, storage_rebate: 0, non_refundable_storage_fee: 0 +gas summary: computation_cost: 1000000, storage_cost: 5563200, storage_rebate: 0, non_refundable_storage_fee: 0 task 2, lines 24-28: //# 
programmable --sender A --inputs 100000 @A diff --git a/crates/sui-archival/Cargo.toml b/crates/sui-archival/Cargo.toml index d96dba9edb39d..728fc4f98d343 100644 --- a/crates/sui-archival/Cargo.toml +++ b/crates/sui-archival/Cargo.toml @@ -19,7 +19,7 @@ rand.workspace = true object_store.workspace = true prometheus.workspace = true sui-config.workspace = true -sui-types = { workspace = true, features = ["test-utils"]} +sui-types = { workspace = true, features = ["test-utils"] } sui-storage.workspace = true fastcrypto = { workspace = true, features = ["copy_key"] } tokio = { workspace = true, features = ["full"] } @@ -34,7 +34,7 @@ move-core-types.workspace = true move-package.workspace = true tokio = { workspace = true, features = ["test-util"] } ed25519-consensus.workspace = true -fastcrypto = { workspace = true } +fastcrypto.workspace = true sui-swarm-config.workspace = true sui-macros.workspace = true diff --git a/crates/sui-benchmark/src/drivers/bench_driver.rs b/crates/sui-benchmark/src/drivers/bench_driver.rs index ed11e81d42390..148d79f51ae6a 100644 --- a/crates/sui-benchmark/src/drivers/bench_driver.rs +++ b/crates/sui-benchmark/src/drivers/bench_driver.rs @@ -49,6 +49,7 @@ pub struct BenchMetrics { pub benchmark_duration: IntGauge, pub num_success: IntCounterVec, pub num_error: IntCounterVec, + pub num_expected_error: IntCounterVec, pub num_submitted: IntCounterVec, pub num_in_flight: GaugeVec, pub latency_s: HistogramVec, @@ -79,6 +80,13 @@ impl BenchMetrics { registry, ) .unwrap(), + num_expected_error: register_int_counter_vec_with_registry!( + "num_expected_error", + "Total number of transaction errors that were expected", + &["workload"], + registry, + ) + .unwrap(), num_success_cmds: register_int_counter_vec_with_registry!( "num_success_cmds", "Total number of commands success", @@ -373,6 +381,7 @@ impl Driver<(BenchmarkStats, StressStats)> for BenchDriver { duration: Duration::ZERO, num_error_txes: 0, num_success_txes: 0, + num_expected_error_txes: 0, num_success_cmds: 0, total_gas_used: 0, latency_ms: HistogramWrapper { @@ -407,6 +416,7 @@ impl Driver<(BenchmarkStats, StressStats)> for BenchDriver { let mut total_cps: f32 = 0.0; let mut num_success_txes: u64 = 0; let mut num_error_txes: u64 = 0; + let mut num_expected_error_txes: u64 = 0; let mut num_success_cmds = 0; let mut latency_histogram = hdrhistogram::Histogram::::new_with_max(120_000, 3).unwrap(); @@ -426,6 +436,7 @@ impl Driver<(BenchmarkStats, StressStats)> for BenchDriver { total_cps += v.bench_stats.num_success_cmds as f32 / duration; num_success_txes += v.bench_stats.num_success_txes; num_error_txes += v.bench_stats.num_error_txes; + num_expected_error_txes += v.bench_stats.num_expected_error_txes; num_success_cmds += v.bench_stats.num_success_cmds; num_no_gas += v.num_no_gas; num_submitted += v.num_submitted; @@ -442,7 +453,24 @@ impl Driver<(BenchmarkStats, StressStats)> for BenchDriver { }; counter += 1; if counter % num_workers == 0 { - stat = format!("TPS = {}, CPS = {}, latency_ms(min/p50/p99/max) = {}/{}/{}/{}, num_success_tx = {}, num_error_tx = {}, num_success_cmds = {}, no_gas = {}, submitted = {}, in_flight = {}", total_qps, total_cps, latency_histogram.min(), latency_histogram.value_at_quantile(0.5), latency_histogram.value_at_quantile(0.99), latency_histogram.max(), num_success_txes, num_error_txes, num_success_cmds, num_no_gas, num_submitted, num_in_flight); + stat = format!( + "TPS = {}, CPS = {}, latency_ms(min/p50/p99/max) = {}/{}/{}/{}, \ + num_success_tx = {}, num_error_tx = {}, 
num_expected_error_tx = {}, \ + num_success_cmds = {}, no_gas = {}, submitted = {}, in_flight = {}", + total_qps, + total_cps, + latency_histogram.min(), + latency_histogram.value_at_quantile(0.5), + latency_histogram.value_at_quantile(0.99), + latency_histogram.max(), + num_success_txes, + num_error_txes, + num_expected_error_txes, + num_success_cmds, + num_no_gas, + num_submitted, + num_in_flight + ); if show_progress { eprintln!("{}", stat); } @@ -681,6 +709,7 @@ async fn run_bench_worker( let request_delay_micros = 1_000_000 / worker.target_qps; let mut num_success_txes = 0; let mut num_error_txes = 0; + let mut num_expected_error_txes = 0; let mut num_success_cmds = 0; let mut num_no_gas = 0; let mut num_in_flight: u64 = 0; @@ -708,6 +737,7 @@ async fn run_bench_worker( -> NextOp { match result { Ok(effects) => { + assert!(payload.get_failure_type().is_none()); let latency = start.elapsed(); let time_from_start = total_benchmark_start_time.elapsed(); @@ -767,27 +797,38 @@ async fn run_bench_worker( } Err(err) => { error!("{}", err); - if err - .downcast::() - .and_then(|err| { - if matches!( - err, - QuorumDriverError::NonRecoverableTransactionError { .. } - ) { - Err(err.into()) + match payload.get_failure_type() { + Some(_) => { + metrics_cloned + .num_expected_error + .with_label_values(&[&payload.to_string()]) + .inc(); + NextOp::Retry(Box::new((transaction, payload))) + } + None => { + if err + .downcast::() + .and_then(|err| { + if matches!( + err, + QuorumDriverError::NonRecoverableTransactionError { .. } + ) { + Err(err.into()) + } else { + Ok(()) + } + }) + .is_err() + { + NextOp::Failure } else { - Ok(()) + metrics_cloned + .num_error + .with_label_values(&[&payload.to_string(), "rpc"]) + .inc(); + NextOp::Retry(Box::new((transaction, payload))) } - }) - .is_err() - { - NextOp::Failure - } else { - metrics_cloned - .num_error - .with_label_values(&[&payload.to_string(), "rpc"]) - .inc(); - NextOp::Retry(Box::new((transaction, payload))) + } } } } @@ -841,6 +882,7 @@ async fn run_bench_worker( bench_stats: BenchmarkStats { duration:stat_start_time.elapsed(), num_error_txes, + num_expected_error_txes, num_success_txes, num_success_cmds, latency_ms:HistogramWrapper{ @@ -855,6 +897,7 @@ async fn run_bench_worker( } num_success_txes = 0; num_error_txes = 0; + num_expected_error_txes = 0; num_success_cmds = 0; num_no_gas = 0; num_submitted = 0; @@ -874,7 +917,11 @@ async fn run_bench_worker( if let Some(b) = retry_queue.pop_front() { let tx = b.0; let payload = b.1; - num_error_txes += 1; + if payload.get_failure_type().is_some() { + num_expected_error_txes += 1; + } else { + num_error_txes += 1; + } num_submitted += 1; metrics_cloned.num_submitted.with_label_values(&[&payload.to_string()]).inc(); // TODO: clone committee for each request is not ideal. 
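The worker logic above splits error accounting in two: when `payload.get_failure_type()` returns `Some(..)`, the failure was intentionally provoked, so it increments the new `num_expected_error` counter and the transaction is simply retried; otherwise the previous `num_error` / non-recoverable handling applies. Below is a minimal, self-contained sketch of that classification decision; `FailureKind` and `Outcome` are illustrative stand-ins rather than the benchmark's real types, and the non-recoverable branch is collapsed for brevity.

```rust
// Illustrative sketch only: stand-in types for the expected-vs-unexpected
// error split used by the bench driver above.
#[derive(Clone, Copy, Debug)]
enum FailureKind {
    InvalidSignature, // the payload was deliberately built to fail
}

#[derive(Debug, PartialEq)]
enum Outcome {
    CountExpectedErrorAndRetry,   // increments num_expected_error
    CountUnexpectedErrorAndRetry, // increments num_error (rpc) or fails if non-recoverable
}

fn classify_error(expected_failure: Option<FailureKind>) -> Outcome {
    match expected_failure {
        // The payload advertises an expected failure: count it separately and retry.
        Some(_) => Outcome::CountExpectedErrorAndRetry,
        // A genuine error: follow the pre-existing retry/failure path.
        None => Outcome::CountUnexpectedErrorAndRetry,
    }
}

fn main() {
    assert_eq!(
        classify_error(Some(FailureKind::InvalidSignature)),
        Outcome::CountExpectedErrorAndRetry
    );
    assert_eq!(classify_error(None), Outcome::CountUnexpectedErrorAndRetry);
}
```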
@@ -958,6 +1005,7 @@ async fn run_bench_worker( bench_stats: BenchmarkStats { duration: stat_start_time.elapsed(), num_error_txes, + num_expected_error_txes, num_success_txes, num_success_cmds, total_gas_used: worker_gas_used, diff --git a/crates/sui-benchmark/src/drivers/mod.rs b/crates/sui-benchmark/src/drivers/mod.rs index 9a245f5742907..8f29631de5d6d 100644 --- a/crates/sui-benchmark/src/drivers/mod.rs +++ b/crates/sui-benchmark/src/drivers/mod.rs @@ -124,6 +124,8 @@ pub struct BenchmarkStats { pub duration: Duration, /// Number of transactions that ended in an error pub num_error_txes: u64, + /// Number of transactions that ended in an error but were expected + pub num_expected_error_txes: u64, /// Number of transactions that were executed successfully pub num_success_txes: u64, /// Total number of commands in transactions that executed successfully @@ -137,6 +139,7 @@ impl BenchmarkStats { pub fn update(&mut self, duration: Duration, sample_stat: &BenchmarkStats) { self.duration = duration; self.num_error_txes += sample_stat.num_error_txes; + self.num_expected_error_txes += sample_stat.num_expected_error_txes; self.num_success_txes += sample_stat.num_success_txes; self.num_success_cmds += sample_stat.num_success_cmds; self.total_gas_used += sample_stat.total_gas_used; @@ -155,6 +158,7 @@ impl BenchmarkStats { "tps", "cps", "error%", + "expected error%", "latency (min)", "latency (p50)", "latency (p99)", @@ -169,6 +173,10 @@ impl BenchmarkStats { (100 * self.num_error_txes) as f32 / (self.num_error_txes + self.num_success_txes) as f32, )); + row.add_cell(Cell::new( + (100 * self.num_expected_error_txes) as f32 + / (self.num_expected_error_txes + self.num_success_txes) as f32, + )); row.add_cell(Cell::new(self.latency_ms.histogram.min())); row.add_cell(Cell::new(self.latency_ms.histogram.value_at_quantile(0.5))); row.add_cell(Cell::new(self.latency_ms.histogram.value_at_quantile(0.99))); diff --git a/crates/sui-benchmark/src/lib.rs b/crates/sui-benchmark/src/lib.rs index b440e5ffb4062..67f9d6a57db27 100644 --- a/crates/sui-benchmark/src/lib.rs +++ b/crates/sui-benchmark/src/lib.rs @@ -742,10 +742,9 @@ impl ValidatorProxy for FullNodeProxy { .await { Ok(resp) => { - let effects = ExecutionEffects::SuiTransactionBlockEffects( + return Ok(ExecutionEffects::SuiTransactionBlockEffects( resp.effects.expect("effects field should not be None"), - ); - return Ok(effects); + )); } Err(err) => { error!( diff --git a/crates/sui-benchmark/src/options.rs b/crates/sui-benchmark/src/options.rs index f924aa54f110a..4bab34fd41930 100644 --- a/crates/sui-benchmark/src/options.rs +++ b/crates/sui-benchmark/src/options.rs @@ -181,6 +181,9 @@ pub enum RunSpec { // relative weight of randomness transactions in the benchmark workload #[clap(long, num_args(1..), value_delimiter = ',', default_values_t = [0])] randomness: Vec, + // relative weight of expected failure transactions in the benchmark workload + #[clap(long, num_args(1..), value_delimiter = ',', default_values_t = [0])] + expected_failure: Vec, // --- workload-specific options --- (TODO: use subcommands or similar) // 100 for max hotness i.e all requests target @@ -210,6 +213,10 @@ pub enum RunSpec { // Default is (0-0.5) implying random load at 50% load. See `AdversarialPayloadType` enum for `adversarial_type` #[clap(long, num_args(1..), value_delimiter = ',', default_values_t = ["0-1.0".to_string()])] adversarial_cfg: Vec, + // type of expected failure transactions in the benchmark workload. 
+ // See `ExpectedFailureType` enum for `expected_failure_type` + #[clap(long, num_args(1..), value_delimiter = ',', default_values_t = [0])] + expected_failure_type: Vec, // --- generic options --- // Target qps diff --git a/crates/sui-benchmark/src/workloads/adversarial.rs b/crates/sui-benchmark/src/workloads/adversarial.rs index f4d871d79bf40..eeb82dd150aea 100644 --- a/crates/sui-benchmark/src/workloads/adversarial.rs +++ b/crates/sui-benchmark/src/workloads/adversarial.rs @@ -10,7 +10,7 @@ use crate::in_memory_wallet::move_call_pt_impl; use crate::in_memory_wallet::InMemoryWallet; use crate::system_state_observer::{SystemState, SystemStateObserver}; use crate::workloads::payload::Payload; -use crate::workloads::{Gas, GasCoinConfig}; +use crate::workloads::{workload::ExpectedFailureType, Gas, GasCoinConfig}; use crate::ProgrammableTransactionBuilder; use crate::{convert_move_call_args, BenchMoveCallArg, ExecutionEffects, ValidatorProxy}; use anyhow::anyhow; @@ -189,6 +189,10 @@ impl Payload for AdversarialTestPayload { .expect("Protocol config not in system state"), ) } + + fn get_failure_type(&self) -> Option { + None + } } impl AdversarialTestPayload { diff --git a/crates/sui-benchmark/src/workloads/batch_payment.rs b/crates/sui-benchmark/src/workloads/batch_payment.rs index 771dee09c0d88..94e58da2ae484 100644 --- a/crates/sui-benchmark/src/workloads/batch_payment.rs +++ b/crates/sui-benchmark/src/workloads/batch_payment.rs @@ -5,7 +5,7 @@ use crate::drivers::Interval; use crate::in_memory_wallet::InMemoryWallet; use crate::system_state_observer::SystemStateObserver; use crate::workloads::payload::Payload; -use crate::workloads::workload::{Workload, STORAGE_COST_PER_COIN}; +use crate::workloads::workload::{ExpectedFailureType, Workload, STORAGE_COST_PER_COIN}; use crate::workloads::workload::{WorkloadBuilder, ESTIMATED_COMPUTATION_COST}; use crate::workloads::{Gas, GasCoinConfig, WorkloadBuilderInfo, WorkloadParams}; use crate::{ExecutionEffects, ValidatorProxy}; @@ -116,6 +116,10 @@ impl Payload for BatchPaymentTestPayload { gas_budget, ) } + + fn get_failure_type(&self) -> Option { + None + } } #[derive(Debug)] diff --git a/crates/sui-benchmark/src/workloads/delegation.rs b/crates/sui-benchmark/src/workloads/delegation.rs index 8f55acf807b23..24f21a120f12c 100644 --- a/crates/sui-benchmark/src/workloads/delegation.rs +++ b/crates/sui-benchmark/src/workloads/delegation.rs @@ -4,7 +4,7 @@ use crate::drivers::Interval; use crate::system_state_observer::SystemStateObserver; use crate::workloads::payload::Payload; -use crate::workloads::workload::{Workload, WorkloadBuilder}; +use crate::workloads::workload::{ExpectedFailureType, Workload, WorkloadBuilder}; use crate::workloads::workload::{ ESTIMATED_COMPUTATION_COST, MAX_GAS_FOR_TESTING, STORAGE_COST_PER_COIN, }; @@ -80,6 +80,10 @@ impl Payload for DelegationTestPayload { ), } } + + fn get_failure_type(&self) -> Option { + None + } } #[derive(Debug)] diff --git a/crates/sui-benchmark/src/workloads/expected_failure.rs b/crates/sui-benchmark/src/workloads/expected_failure.rs new file mode 100644 index 0000000000000..ae200ccbe759b --- /dev/null +++ b/crates/sui-benchmark/src/workloads/expected_failure.rs @@ -0,0 +1,265 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use crate::drivers::Interval; +use crate::system_state_observer::SystemStateObserver; +use crate::workloads::payload::Payload; +use crate::workloads::workload::{ + ExpectedFailureType, WorkloadBuilder, ESTIMATED_COMPUTATION_COST, MAX_GAS_FOR_TESTING, + STORAGE_COST_PER_COIN, +}; +use crate::workloads::{Gas, GasCoinConfig, Workload, WorkloadBuilderInfo, WorkloadParams}; +use crate::ExecutionEffects; +use crate::ValidatorProxy; +use async_trait::async_trait; +use rand::seq::IteratorRandom; +use std::collections::HashMap; +use std::fmt; +use std::sync::Arc; +use sui_core::test_utils::make_transfer_object_transaction; +use sui_types::base_types::SuiAddress; +use sui_types::crypto::{AccountKeyPair, Ed25519SuiSignature}; +use sui_types::signature::GenericSignature; +use sui_types::{base_types::ObjectRef, crypto::get_key_pair, transaction::Transaction}; +use tracing::debug; + +#[derive(Debug, Clone)] +pub struct ExpectedFailurePayload { + failure_type: ExpectedFailureType, + transfer_object: ObjectRef, + transfer_from: SuiAddress, + transfer_to: SuiAddress, + gas: Vec, + system_state_observer: Arc, +} + +#[derive(Debug, Clone)] +pub struct ExpectedFailurePayloadCfg { + pub failure_type: ExpectedFailureType, +} + +impl Copy for ExpectedFailurePayloadCfg {} + +impl ExpectedFailurePayload { + fn create_failing_transaction(&self, mut tx: Transaction) -> Transaction { + match self.failure_type { + ExpectedFailureType::InvalidSignature => { + let signatures = tx.tx_signatures_mut_for_testing(); + signatures.pop(); + signatures.push(GenericSignature::Signature( + sui_types::crypto::Signature::Ed25519SuiSignature( + Ed25519SuiSignature::default(), + ), + )); + tx + } + ExpectedFailureType::Random => unreachable!(), + } + } +} + +impl Payload for ExpectedFailurePayload { + fn make_new_payload(&mut self, _effects: &ExecutionEffects) { + // This should never be called, as an expected failure payload + // should fail (thereby having no effects) and be retried. Note + // that since these are failures rather than Move level errors, + // no gas should be consumed, nor any objects mutated. 
+ unreachable!() + } + + fn make_transaction(&mut self) -> Transaction { + let (gas_obj, _, keypair) = self.gas.iter().find(|x| x.1 == self.transfer_from).unwrap(); + let tx = make_transfer_object_transaction( + self.transfer_object, + *gas_obj, + self.transfer_from, + keypair, + self.transfer_to, + self.system_state_observer + .state + .borrow() + .reference_gas_price, + ); + self.create_failing_transaction(tx) + } + + fn get_failure_type(&self) -> Option { + Some(self.failure_type) + } +} + +impl fmt::Display for ExpectedFailurePayload { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "ExpectedFailurePayload({:?})", self.failure_type) + } +} + +#[derive(Debug)] +pub struct ExpectedFailureWorkloadBuilder { + expected_failure_cfg: ExpectedFailurePayloadCfg, + num_transfer_accounts: u64, + num_payloads: u64, +} + +impl ExpectedFailureWorkloadBuilder { + pub fn from( + workload_weight: f32, + target_qps: u64, + num_workers: u64, + in_flight_ratio: u64, + num_transfer_accounts: u64, + expected_failure_cfg: ExpectedFailurePayloadCfg, + duration: Interval, + group: u32, + ) -> Option { + let target_qps = (workload_weight * target_qps as f32) as u64; + let num_workers = (workload_weight * num_workers as f32).ceil() as u64; + let max_ops = target_qps * in_flight_ratio; + if max_ops == 0 || num_workers == 0 { + None + } else { + let workload_params = WorkloadParams { + target_qps, + num_workers, + max_ops, + duration, + group, + }; + let workload_builder = Box::>::from(Box::new( + ExpectedFailureWorkloadBuilder { + expected_failure_cfg, + num_payloads: max_ops, + num_transfer_accounts, + }, + )); + let builder_info = WorkloadBuilderInfo { + workload_params, + workload_builder, + }; + Some(builder_info) + } + } +} + +#[async_trait] +impl WorkloadBuilder for ExpectedFailureWorkloadBuilder { + async fn generate_coin_config_for_init(&self) -> Vec { + vec![] + } + async fn generate_coin_config_for_payloads(&self) -> Vec { + let mut address_map = HashMap::new(); + // Have to include not just the coins that are going to be created and sent + // but the coin being used as gas as well. 
+ let amount = MAX_GAS_FOR_TESTING + + ESTIMATED_COMPUTATION_COST + + STORAGE_COST_PER_COIN * (self.num_transfer_accounts + 1); + // gas for payloads + let mut payload_configs = vec![]; + for _i in 0..self.num_transfer_accounts { + let (address, keypair) = get_key_pair(); + let cloned_keypair: Arc = Arc::new(keypair); + address_map.insert(address, cloned_keypair.clone()); + for _j in 0..self.num_payloads { + payload_configs.push(GasCoinConfig { + amount, + address, + keypair: cloned_keypair.clone(), + }); + } + } + + let owner = *address_map.keys().choose(&mut rand::thread_rng()).unwrap(); + + // transfer tokens + let mut gas_configs = vec![]; + for _i in 0..self.num_payloads { + let (address, keypair) = (owner, address_map.get(&owner).unwrap().clone()); + gas_configs.push(GasCoinConfig { + amount, + address, + keypair: keypair.clone(), + }); + } + + gas_configs.extend(payload_configs); + gas_configs + } + async fn build( + &self, + _init_gas: Vec, + payload_gas: Vec, + ) -> Box> { + debug!( + "Using `{:?}` expected failure workloads", + self.expected_failure_cfg.failure_type, + ); + + Box::>::from(Box::new(ExpectedFailureWorkload { + num_tokens: self.num_payloads, + payload_gas, + expected_failure_cfg: self.expected_failure_cfg, + })) + } +} + +#[derive(Debug)] +pub struct ExpectedFailureWorkload { + num_tokens: u64, + payload_gas: Vec, + expected_failure_cfg: ExpectedFailurePayloadCfg, +} + +#[async_trait] +impl Workload for ExpectedFailureWorkload { + async fn init( + &mut self, + _proxy: Arc, + _system_state_observer: Arc, + ) { + } + + async fn make_test_payloads( + &self, + _proxy: Arc, + system_state_observer: Arc, + ) -> Vec> { + let (transfer_tokens, payload_gas) = self.payload_gas.split_at(self.num_tokens as usize); + let mut gas_by_address: HashMap> = HashMap::new(); + for gas in payload_gas.iter() { + gas_by_address + .entry(gas.1) + .or_insert_with(|| Vec::with_capacity(1)) + .push(gas.clone()); + } + + let addresses: Vec = gas_by_address.keys().cloned().collect(); + let mut transfer_gas: Vec> = vec![]; + for i in 0..self.num_tokens { + let mut account_transfer_gas = vec![]; + for address in addresses.iter() { + account_transfer_gas.push(gas_by_address[address][i as usize].clone()); + } + transfer_gas.push(account_transfer_gas); + } + let refs: Vec<(Vec, Gas)> = transfer_gas + .into_iter() + .zip(transfer_tokens.iter()) + .map(|(g, t)| (g, t.clone())) + .collect(); + refs.iter() + .map(|(g, t)| { + let from = t.1; + let to = g.iter().find(|x| x.1 != from).unwrap().1; + Box::new(ExpectedFailurePayload { + failure_type: self.expected_failure_cfg.failure_type, + transfer_object: t.0, + transfer_from: from, + transfer_to: to, + gas: g.to_vec(), + system_state_observer: system_state_observer.clone(), + }) + }) + .map(|b| Box::::from(b)) + .collect() + } +} diff --git a/crates/sui-benchmark/src/workloads/mod.rs b/crates/sui-benchmark/src/workloads/mod.rs index 5dd11418f1a5f..368f2c96fe0c4 100644 --- a/crates/sui-benchmark/src/workloads/mod.rs +++ b/crates/sui-benchmark/src/workloads/mod.rs @@ -4,6 +4,7 @@ pub mod adversarial; pub mod batch_payment; pub mod delegation; +pub mod expected_failure; pub mod payload; pub mod randomness; pub mod shared_counter; diff --git a/crates/sui-benchmark/src/workloads/payload.rs b/crates/sui-benchmark/src/workloads/payload.rs index 567e7ad897e95..0e3d61e8ef2ea 100644 --- a/crates/sui-benchmark/src/workloads/payload.rs +++ b/crates/sui-benchmark/src/workloads/payload.rs @@ -1,7 +1,7 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use crate::ExecutionEffects; +use crate::{workloads::ExpectedFailureType, ExecutionEffects}; use std::fmt::Display; use sui_types::transaction::Transaction; @@ -12,4 +12,7 @@ use sui_types::transaction::Transaction; pub trait Payload: Send + Sync + std::fmt::Debug + Display { fn make_new_payload(&mut self, effects: &ExecutionEffects); fn make_transaction(&mut self) -> Transaction; + fn get_failure_type(&self) -> Option { + None // Default implementation returns None + } } diff --git a/crates/sui-benchmark/src/workloads/randomness.rs b/crates/sui-benchmark/src/workloads/randomness.rs index 6729f49f13f43..6a812b7b4d636 100644 --- a/crates/sui-benchmark/src/workloads/randomness.rs +++ b/crates/sui-benchmark/src/workloads/randomness.rs @@ -6,7 +6,7 @@ use crate::system_state_observer::SystemStateObserver; use crate::util::publish_basics_package; use crate::workloads::payload::Payload; use crate::workloads::workload::{ - Workload, WorkloadBuilder, ESTIMATED_COMPUTATION_COST, MAX_GAS_FOR_TESTING, + ExpectedFailureType, Workload, WorkloadBuilder, ESTIMATED_COMPUTATION_COST, MAX_GAS_FOR_TESTING, }; use crate::workloads::GasCoinConfig; use crate::workloads::{Gas, WorkloadBuilderInfo, WorkloadParams}; @@ -58,6 +58,9 @@ impl Payload for RandomnessTestPayload { .call_emit_random(self.package_id, self.randomness_initial_shared_version) .build_and_sign(self.gas.2.as_ref()) } + fn get_failure_type(&self) -> Option { + None + } } #[derive(Debug)] diff --git a/crates/sui-benchmark/src/workloads/shared_counter.rs b/crates/sui-benchmark/src/workloads/shared_counter.rs index d4c6036414282..5356d53b7184d 100644 --- a/crates/sui-benchmark/src/workloads/shared_counter.rs +++ b/crates/sui-benchmark/src/workloads/shared_counter.rs @@ -6,8 +6,8 @@ use crate::system_state_observer::SystemStateObserver; use crate::util::publish_basics_package; use crate::workloads::payload::Payload; use crate::workloads::workload::{ - Workload, WorkloadBuilder, ESTIMATED_COMPUTATION_COST, MAX_GAS_FOR_TESTING, - STORAGE_COST_PER_COUNTER, + ExpectedFailureType, Workload, WorkloadBuilder, ESTIMATED_COMPUTATION_COST, + MAX_GAS_FOR_TESTING, STORAGE_COST_PER_COUNTER, }; use crate::workloads::GasCoinConfig; use crate::workloads::{Gas, WorkloadBuilderInfo, WorkloadParams}; @@ -72,6 +72,9 @@ impl Payload for SharedCounterTestPayload { ) .build_and_sign(self.gas.2.as_ref()) } + fn get_failure_type(&self) -> Option { + None + } } #[derive(Debug)] diff --git a/crates/sui-benchmark/src/workloads/shared_object_deletion.rs b/crates/sui-benchmark/src/workloads/shared_object_deletion.rs index 34250f5f82d23..552cd86c85fa8 100644 --- a/crates/sui-benchmark/src/workloads/shared_object_deletion.rs +++ b/crates/sui-benchmark/src/workloads/shared_object_deletion.rs @@ -6,8 +6,8 @@ use crate::system_state_observer::SystemStateObserver; use crate::util::publish_basics_package; use crate::workloads::payload::Payload; use crate::workloads::workload::{ - Workload, WorkloadBuilder, ESTIMATED_COMPUTATION_COST, MAX_GAS_FOR_TESTING, - STORAGE_COST_PER_COUNTER, + ExpectedFailureType, Workload, WorkloadBuilder, ESTIMATED_COMPUTATION_COST, + MAX_GAS_FOR_TESTING, STORAGE_COST_PER_COUNTER, }; use crate::workloads::GasCoinConfig; use crate::workloads::{Gas, WorkloadBuilderInfo, WorkloadParams}; @@ -118,6 +118,9 @@ impl Payload for SharedCounterDeletionTestPayload { } .build_and_sign(self.gas.2.as_ref()) } + fn get_failure_type(&self) -> Option { + None + } } #[derive(Debug)] diff --git 
a/crates/sui-benchmark/src/workloads/transfer_object.rs b/crates/sui-benchmark/src/workloads/transfer_object.rs index 3bea7713ca98e..1835a5d0f17af 100644 --- a/crates/sui-benchmark/src/workloads/transfer_object.rs +++ b/crates/sui-benchmark/src/workloads/transfer_object.rs @@ -13,7 +13,8 @@ use crate::system_state_observer::SystemStateObserver; use crate::workloads::payload::Payload; use crate::workloads::workload::WorkloadBuilder; use crate::workloads::workload::{ - Workload, ESTIMATED_COMPUTATION_COST, MAX_GAS_FOR_TESTING, STORAGE_COST_PER_COIN, + ExpectedFailureType, Workload, ESTIMATED_COMPUTATION_COST, MAX_GAS_FOR_TESTING, + STORAGE_COST_PER_COIN, }; use crate::workloads::{Gas, GasCoinConfig, WorkloadBuilderInfo, WorkloadParams}; use crate::{ExecutionEffects, ValidatorProxy}; @@ -80,6 +81,9 @@ impl Payload for TransferObjectTestPayload { .reference_gas_price, ) } + fn get_failure_type(&self) -> Option { + None + } } impl std::fmt::Display for TransferObjectTestPayload { diff --git a/crates/sui-benchmark/src/workloads/workload.rs b/crates/sui-benchmark/src/workloads/workload.rs index f6175834ea05a..d4c4cb507c270 100644 --- a/crates/sui-benchmark/src/workloads/workload.rs +++ b/crates/sui-benchmark/src/workloads/workload.rs @@ -5,8 +5,14 @@ use crate::system_state_observer::SystemStateObserver; use crate::workloads::payload::Payload; use crate::workloads::{Gas, GasCoinConfig}; use crate::ValidatorProxy; +use anyhow::anyhow; use async_trait::async_trait; +use rand::distributions::{Distribution, Standard}; +use rand::Rng; +use std::str::FromStr; use std::sync::Arc; +use strum::{EnumCount, IntoEnumIterator}; +use strum_macros::{EnumCount as EnumCountMacro, EnumIter}; use sui_types::gas_coin::MIST_PER_SUI; // This is the maximum gas we will transfer from primary coin into any gas coin @@ -23,6 +29,57 @@ pub const STORAGE_COST_PER_COUNTER: u64 = 341 * 76 * 100; /// Used to estimate the budget required for each transaction. pub const ESTIMATED_COMPUTATION_COST: u64 = 1_000_000; +#[derive(Debug, EnumCountMacro, EnumIter, Clone, Copy)] +pub enum ExpectedFailureType { + Random = 0, + InvalidSignature, + // TODO: Add other failure types +} + +impl TryFrom for ExpectedFailureType { + type Error = anyhow::Error; + + fn try_from(value: u32) -> Result { + match value { + 0 => Ok(rand::random()), + _ => ExpectedFailureType::iter() + .nth(value as usize) + .ok_or_else(|| { + anyhow!( + "Invalid failure type specifier. Valid options are {} to {}", + 0, + ExpectedFailureType::COUNT + ) + }), + } + } +} + +impl FromStr for ExpectedFailureType { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + let v = u32::from_str(s).map(ExpectedFailureType::try_from); + + if let Ok(Ok(q)) = v { + return Ok(q); + } + + Err(anyhow!( + "Invalid input string. 
Valid values are 0 to {}", + ExpectedFailureType::COUNT + )) + } +} + +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> ExpectedFailureType { + // Exclude the "Random" variant + let n = rng.gen_range(1..ExpectedFailureType::COUNT); + ExpectedFailureType::iter().nth(n).unwrap() + } +} + #[async_trait] pub trait WorkloadBuilder: Send + Sync + std::fmt::Debug { async fn generate_coin_config_for_init(&self) -> Vec; diff --git a/crates/sui-benchmark/src/workloads/workload_configuration.rs b/crates/sui-benchmark/src/workloads/workload_configuration.rs index aae4924eaf469..9cbd18800ead3 100644 --- a/crates/sui-benchmark/src/workloads/workload_configuration.rs +++ b/crates/sui-benchmark/src/workloads/workload_configuration.rs @@ -9,7 +9,7 @@ use crate::workloads::batch_payment::BatchPaymentWorkloadBuilder; use crate::workloads::delegation::DelegationWorkloadBuilder; use crate::workloads::shared_counter::SharedCounterWorkloadBuilder; use crate::workloads::transfer_object::TransferObjectWorkloadBuilder; -use crate::workloads::{GroupID, WorkloadBuilderInfo, WorkloadInfo}; +use crate::workloads::{ExpectedFailureType, GroupID, WorkloadBuilderInfo, WorkloadInfo}; use anyhow::Result; use std::collections::BTreeMap; use std::str::FromStr; @@ -17,9 +17,36 @@ use std::sync::Arc; use tracing::info; use super::adversarial::{AdversarialPayloadCfg, AdversarialWorkloadBuilder}; +use super::expected_failure::{ExpectedFailurePayloadCfg, ExpectedFailureWorkloadBuilder}; use super::randomness::RandomnessWorkloadBuilder; use super::shared_object_deletion::SharedCounterDeletionWorkloadBuilder; +pub struct WorkloadWeights { + pub shared_counter: u32, + pub transfer_object: u32, + pub delegation: u32, + pub batch_payment: u32, + pub shared_deletion: u32, + pub adversarial: u32, + pub expected_failure: u32, + pub randomness: u32, +} + +pub struct WorkloadConfig { + pub group: u32, + pub num_workers: u64, + pub num_transfer_accounts: u64, + pub weights: WorkloadWeights, + pub adversarial_cfg: AdversarialPayloadCfg, + pub expected_failure_cfg: ExpectedFailurePayloadCfg, + pub batch_payment_size: u32, + pub shared_counter_hotness_factor: u32, + pub num_shared_counters: Option, + pub shared_counter_max_tip: u64, + pub target_qps: u64, + pub in_flight_ratio: u64, + pub duration: Interval, +} pub struct WorkloadConfiguration; impl WorkloadConfiguration { @@ -40,12 +67,14 @@ impl WorkloadConfiguration { delegation, batch_payment, adversarial, + expected_failure, randomness, shared_counter_hotness_factor, num_shared_counters, shared_counter_max_tip, batch_payment_size, adversarial_cfg, + expected_failure_type, target_qps, num_workers, in_flight_ratio, @@ -60,28 +89,36 @@ impl WorkloadConfiguration { // benchmark group will run in the same time for the same duration. 
for workload_group in 0..num_of_benchmark_groups { let i = workload_group as usize; - let builders = Self::create_workload_builders( - workload_group, - num_workers[i], - opts.num_transfer_accounts, - shared_counter[i], - transfer_object[i], - delegation[i], - batch_payment[i], - shared_deletion[i], - adversarial[i], - AdversarialPayloadCfg::from_str(&adversarial_cfg[i]).unwrap(), - randomness[i], - batch_payment_size[i], - shared_counter_hotness_factor[i], - num_shared_counters.as_ref().map(|n| n[i]), - shared_counter_max_tip[i], - target_qps[i], - in_flight_ratio[i], - duration[i], - system_state_observer.clone(), - ) - .await; + let config = WorkloadConfig { + group: workload_group, + num_workers: num_workers[i], + num_transfer_accounts: opts.num_transfer_accounts, + weights: WorkloadWeights { + shared_counter: shared_counter[i], + transfer_object: transfer_object[i], + delegation: delegation[i], + batch_payment: batch_payment[i], + shared_deletion: shared_deletion[i], + adversarial: adversarial[i], + expected_failure: expected_failure[i], + randomness: randomness[i], + }, + adversarial_cfg: AdversarialPayloadCfg::from_str(&adversarial_cfg[i]) + .unwrap(), + expected_failure_cfg: ExpectedFailurePayloadCfg { + failure_type: ExpectedFailureType::try_from(expected_failure_type[i]) + .unwrap(), + }, + batch_payment_size: batch_payment_size[i], + shared_counter_hotness_factor: shared_counter_hotness_factor[i], + num_shared_counters: num_shared_counters.as_ref().map(|n| n[i]), + shared_counter_max_tip: shared_counter_max_tip[i], + target_qps: target_qps[i], + in_flight_ratio: in_flight_ratio[i], + duration: duration[i], + }; + let builders = + Self::create_workload_builders(config, system_state_observer.clone()).await; workload_builders.extend(builders); } @@ -139,37 +176,35 @@ impl WorkloadConfiguration { } pub async fn create_workload_builders( - workload_group: u32, - num_workers: u64, - num_transfer_accounts: u64, - shared_counter_weight: u32, - transfer_object_weight: u32, - delegation_weight: u32, - batch_payment_weight: u32, - shared_deletion_weight: u32, - adversarial_weight: u32, - adversarial_cfg: AdversarialPayloadCfg, - randomness_weight: u32, - batch_payment_size: u32, - shared_counter_hotness_factor: u32, - num_shared_counters: Option, - shared_counter_max_tip: u64, - target_qps: u64, - in_flight_ratio: u64, - duration: Interval, + WorkloadConfig { + group, + num_workers, + num_transfer_accounts, + weights, + adversarial_cfg, + expected_failure_cfg, + batch_payment_size, + shared_counter_hotness_factor, + num_shared_counters, + shared_counter_max_tip, + target_qps, + in_flight_ratio, + duration, + }: WorkloadConfig, system_state_observer: Arc, ) -> Vec> { - let total_weight = shared_counter_weight - + shared_deletion_weight - + transfer_object_weight - + delegation_weight - + batch_payment_weight - + adversarial_weight - + randomness_weight; + let total_weight = weights.shared_counter + + weights.shared_deletion + + weights.transfer_object + + weights.delegation + + weights.batch_payment + + weights.adversarial + + weights.randomness + + weights.expected_failure; let reference_gas_price = system_state_observer.state.borrow().reference_gas_price; let mut workload_builders = vec![]; let shared_workload = SharedCounterWorkloadBuilder::from( - shared_counter_weight as f32 / total_weight as f32, + weights.shared_counter as f32 / total_weight as f32, target_qps, num_workers, in_flight_ratio, @@ -178,11 +213,11 @@ impl WorkloadConfiguration { shared_counter_max_tip, 
reference_gas_price, duration, - workload_group, + group, ); workload_builders.push(shared_workload); let shared_deletion_workload = SharedCounterDeletionWorkloadBuilder::from( - shared_deletion_weight as f32 / total_weight as f32, + weights.shared_deletion as f32 / total_weight as f32, target_qps, num_workers, in_flight_ratio, @@ -190,58 +225,69 @@ impl WorkloadConfiguration { shared_counter_max_tip, reference_gas_price, duration, - workload_group, + group, ); workload_builders.push(shared_deletion_workload); let transfer_workload = TransferObjectWorkloadBuilder::from( - transfer_object_weight as f32 / total_weight as f32, + weights.transfer_object as f32 / total_weight as f32, target_qps, num_workers, in_flight_ratio, num_transfer_accounts, duration, - workload_group, + group, ); workload_builders.push(transfer_workload); let delegation_workload = DelegationWorkloadBuilder::from( - delegation_weight as f32 / total_weight as f32, + weights.delegation as f32 / total_weight as f32, target_qps, num_workers, in_flight_ratio, duration, - workload_group, + group, ); workload_builders.push(delegation_workload); let batch_payment_workload = BatchPaymentWorkloadBuilder::from( - batch_payment_weight as f32 / total_weight as f32, + weights.batch_payment as f32 / total_weight as f32, target_qps, num_workers, in_flight_ratio, batch_payment_size, duration, - workload_group, + group, ); workload_builders.push(batch_payment_workload); let adversarial_workload = AdversarialWorkloadBuilder::from( - adversarial_weight as f32 / total_weight as f32, + weights.adversarial as f32 / total_weight as f32, target_qps, num_workers, in_flight_ratio, adversarial_cfg, duration, - workload_group, + group, ); workload_builders.push(adversarial_workload); let randomness_workload = RandomnessWorkloadBuilder::from( - randomness_weight as f32 / total_weight as f32, + weights.randomness as f32 / total_weight as f32, target_qps, num_workers, in_flight_ratio, reference_gas_price, duration, - workload_group, + group, ); workload_builders.push(randomness_workload); + let expected_failure_workload = ExpectedFailureWorkloadBuilder::from( + weights.expected_failure as f32 / total_weight as f32, + target_qps, + num_workers, + in_flight_ratio, + num_transfer_accounts, + expected_failure_cfg, + duration, + group, + ); + workload_builders.push(expected_failure_workload); workload_builders } diff --git a/crates/sui-benchmark/tests/simtest.rs b/crates/sui-benchmark/tests/simtest.rs index fcb22e160de14..3c0e9cacccc4b 100644 --- a/crates/sui-benchmark/tests/simtest.rs +++ b/crates/sui-benchmark/tests/simtest.rs @@ -5,6 +5,7 @@ mod test { use rand::{distributions::uniform::SampleRange, thread_rng, Rng}; use std::collections::HashSet; + use std::num::NonZeroUsize; use std::path::PathBuf; use std::str::FromStr; use std::sync::atomic::{AtomicBool, Ordering}; @@ -13,7 +14,11 @@ mod test { use sui_benchmark::bank::BenchmarkBank; use sui_benchmark::system_state_observer::SystemStateObserver; use sui_benchmark::workloads::adversarial::AdversarialPayloadCfg; - use sui_benchmark::workloads::workload_configuration::WorkloadConfiguration; + use sui_benchmark::workloads::expected_failure::ExpectedFailurePayloadCfg; + use sui_benchmark::workloads::workload::ExpectedFailureType; + use sui_benchmark::workloads::workload_configuration::{ + WorkloadConfig, WorkloadConfiguration, WorkloadWeights, + }; use sui_benchmark::{ drivers::{bench_driver::BenchDriver, driver::Driver, Interval}, util::get_ed25519_keypair_from_keystore, @@ -35,11 +40,13 @@ mod test { use 
sui_simulator::{configs::*, SimConfig}; use sui_storage::blob::Blob; use sui_surfer::surf_strategy::SurfStrategy; + use sui_swarm_config::network_config_builder::ConfigBuilder; use sui_types::base_types::{ConciseableName, ObjectID, SequenceNumber}; use sui_types::digests::TransactionDigest; use sui_types::full_checkpoint_content::CheckpointData; use sui_types::messages_checkpoint::VerifiedCheckpoint; use sui_types::supported_protocol_versions::SupportedProtocolVersions; + use sui_types::traffic_control::{FreqThresholdConfig, PolicyConfig, PolicyType}; use sui_types::transaction::{ DEFAULT_VALIDATOR_GAS_PRICE, TEST_ONLY_GAS_UNIT_FOR_HEAVY_COMPUTATION_STORAGE, }; @@ -409,6 +416,7 @@ mod test { } }); register_fail_point_async("consensus-delay", || delay_failpoint(10..20, 0.001)); + register_fail_point_async("write_object_entry", || delay_failpoint(10..20, 0.001)); register_fail_point_async("writeback-cache-commit", || delay_failpoint(10..20, 0.001)); @@ -462,16 +470,17 @@ mod test { let txn_count_limit; // When using transaction count as congestion control mode, the limit of transactions per object per commit. let max_deferral_rounds; let cap_factor_denominator; + let absolute_cap_factor; + let allow_overage_factor; + let separate_randomness_budget; { let mut rng = thread_rng(); mode = if rng.gen_bool(0.33) { PerObjectCongestionControlMode::TotalGasBudget + } else if rng.gen_bool(0.5) { + PerObjectCongestionControlMode::TotalTxCount } else { - if rng.gen_bool(0.5) { - PerObjectCongestionControlMode::TotalTxCount - } else { - PerObjectCongestionControlMode::TotalGasBudgetWithCap - } + PerObjectCongestionControlMode::TotalGasBudgetWithCap }; checkpoint_budget_factor = rng.gen_range(1..20); txn_count_limit = rng.gen_range(1..=10); @@ -480,26 +489,34 @@ mod test { } else { rng.gen_range(1000..10000) // Large deferral round (testing liveness) }; - + allow_overage_factor = if rng.gen_bool(0.5) { + 0 + } else { + rng.gen_range(1..100) + }; cap_factor_denominator = rng.gen_range(1..100); + absolute_cap_factor = rng.gen_range(2..50); + separate_randomness_budget = rng.gen_bool(0.5); } info!( "test_simulated_load_shared_object_congestion_control setup. 
- mode: {:?}, checkpoint_budget_factor: {:?}, - max_deferral_rounds: {:?}, - txn_count_limit: {:?}", - mode, checkpoint_budget_factor, max_deferral_rounds, txn_count_limit + mode: {mode:?}, checkpoint_budget_factor: {checkpoint_budget_factor:?}, + max_deferral_rounds: {max_deferral_rounds:?}, + txn_count_limit: {txn_count_limit:?}, allow_overage_factor: {allow_overage_factor:?}, + cap_factor_denominator: {cap_factor_denominator:?}, + absolute_cap_factor: {absolute_cap_factor:?}, + separate_randomness_budget: {separate_randomness_budget:?}", ); let _guard = ProtocolConfig::apply_overrides_for_testing(move |_, mut config| { + let total_gas_limit = checkpoint_budget_factor + * DEFAULT_VALIDATOR_GAS_PRICE + * TEST_ONLY_GAS_UNIT_FOR_HEAVY_COMPUTATION_STORAGE; config.set_per_object_congestion_control_mode_for_testing(mode); match mode { PerObjectCongestionControlMode::None => panic!("Congestion control mode cannot be None in test_simulated_load_shared_object_congestion_control"), PerObjectCongestionControlMode::TotalGasBudget => { - let total_gas_limit = checkpoint_budget_factor - * DEFAULT_VALIDATOR_GAS_PRICE - * TEST_ONLY_GAS_UNIT_FOR_HEAVY_COMPUTATION_STORAGE; config.set_max_accumulated_txn_cost_per_object_in_narwhal_commit_for_testing(total_gas_limit); config.set_max_accumulated_txn_cost_per_object_in_mysticeti_commit_for_testing(total_gas_limit); }, @@ -512,15 +529,25 @@ mod test { PerObjectCongestionControlMode::TotalGasBudgetWithCap => { - let total_gas_limit = checkpoint_budget_factor - * DEFAULT_VALIDATOR_GAS_PRICE - * TEST_ONLY_GAS_UNIT_FOR_HEAVY_COMPUTATION_STORAGE; config.set_max_accumulated_txn_cost_per_object_in_narwhal_commit_for_testing(total_gas_limit); config.set_max_accumulated_txn_cost_per_object_in_mysticeti_commit_for_testing(total_gas_limit); config.set_gas_budget_based_txn_cost_cap_factor_for_testing(total_gas_limit/cap_factor_denominator); + config.set_gas_budget_based_txn_cost_absolute_cap_commit_count_for_testing(absolute_cap_factor); }, } config.set_max_deferral_rounds_for_congestion_control_for_testing(max_deferral_rounds); + config.set_max_txn_cost_overage_per_object_in_commit_for_testing( + allow_overage_factor * total_gas_limit, + ); + if separate_randomness_budget { + config + .set_max_accumulated_randomness_txn_cost_per_object_in_mysticeti_commit_for_testing( + std::cmp::max( + 1, + config.max_accumulated_txn_cost_per_object_in_mysticeti_commit() / 10, + ), + ); + } config }); @@ -541,7 +568,64 @@ mod test { info!("Simulated load config: {:?}", simulated_load_config); } - test_simulated_load_with_test_config(test_cluster, 50, simulated_load_config).await; + test_simulated_load_with_test_config(test_cluster, 50, simulated_load_config, None, None) + .await; + } + + // Tests cluster defense against floods of failing transactions via Traffic Control + #[sim_test(config = "test_config()")] + async fn test_simulated_load_expected_failure_traffic_control() { + // TODO: can we get away with significantly increasing this? + let target_qps = get_var("SIM_STRESS_TEST_QPS", 10); + let num_workers = get_var("SIM_STRESS_TEST_WORKERS", 10); + + let expected_tps = target_qps * num_workers; + let error_policy_type = PolicyType::FreqThreshold(FreqThresholdConfig { + client_threshold: expected_tps / 2, + window_size_secs: 5, + update_interval_secs: 1, + ..Default::default() + }); + info!( + "test_simulated_load_expected_failure_traffic_control setup. 
+ Policy type: {:?}", + error_policy_type + ); + + let policy_config = PolicyConfig { + connection_blocklist_ttl_sec: 1, + error_policy_type, + dry_run: false, + ..Default::default() + }; + let network_config = ConfigBuilder::new_with_temp_dir() + .committee_size(NonZeroUsize::new(4).unwrap()) + .with_policy_config(Some(policy_config)) + .with_epoch_duration(5000) + .build(); + let test_cluster = Arc::new( + TestClusterBuilder::new() + .set_network_config(network_config) + .build() + .await, + ); + + let mut simulated_load_config = SimulatedLoadConfig::default(); + { + simulated_load_config.expected_failure_weight = 20; + simulated_load_config.expected_failure_config.failure_type = + ExpectedFailureType::try_from(0).unwrap(); + info!("Simulated load config: {:?}", simulated_load_config); + } + + test_simulated_load_with_test_config( + test_cluster, + 50, + simulated_load_config, + Some(target_qps), + Some(num_workers), + ) + .await; } // Tests cluster liveness when DKG has failed. @@ -853,6 +937,8 @@ mod test { num_shared_counters: Option, use_shared_counter_max_tip: bool, shared_counter_max_tip: u64, + expected_failure_weight: u32, + expected_failure_config: ExpectedFailurePayloadCfg, } impl Default for SimulatedLoadConfig { @@ -869,6 +955,10 @@ mod test { num_shared_counters: Some(1), use_shared_counter_max_tip: false, shared_counter_max_tip: 0, + expected_failure_weight: 0, + expected_failure_config: ExpectedFailurePayloadCfg { + failure_type: ExpectedFailureType::try_from(0).unwrap(), + }, } } } @@ -878,6 +968,8 @@ mod test { test_cluster, test_duration_secs, SimulatedLoadConfig::default(), + None, + None, ) .await; } @@ -886,6 +978,8 @@ mod test { test_cluster: Arc, test_duration_secs: u64, config: SimulatedLoadConfig, + target_qps: Option, + num_workers: Option, ) { let sender = test_cluster.get_address_0(); let keystore_path = test_cluster.swarm.dir().join(SUI_KEYSTORE_FILENAME); @@ -916,17 +1010,10 @@ mod test { // The default test parameters are somewhat conservative in order to keep the running time // of the test reasonable in CI. 
- let target_qps = get_var("SIM_STRESS_TEST_QPS", 10); - let num_workers = get_var("SIM_STRESS_TEST_WORKERS", 10); + let target_qps = target_qps.unwrap_or(get_var("SIM_STRESS_TEST_QPS", 10)); + let num_workers = num_workers.unwrap_or(get_var("SIM_STRESS_TEST_WORKERS", 10)); let in_flight_ratio = get_var("SIM_STRESS_TEST_IFR", 2); let batch_payment_size = get_var("SIM_BATCH_PAYMENT_SIZE", 15); - let shared_counter_weight = config.shared_counter_weight; - let transfer_object_weight = config.transfer_object_weight; - let num_transfer_accounts = config.num_transfer_accounts; - let delegation_weight = config.delegation_weight; - let batch_payment_weight = config.batch_payment_weight; - let shared_object_deletion_weight = config.shared_deletion_weight; - let randomness_weight = config.randomness_weight; // Run random payloads at 100% load let adversarial_cfg = AdversarialPayloadCfg::from_str("0-1.0").unwrap(); @@ -937,8 +1024,6 @@ mod test { // tests run for ever let adversarial_weight = 0; - let shared_counter_hotness_factor = config.shared_counter_hotness_factor; - let num_shared_counters = config.num_shared_counters; let shared_counter_max_tip = if config.use_shared_counter_max_tip { config.shared_counter_max_tip } else { @@ -946,25 +1031,35 @@ mod test { }; let gas_request_chunk_size = 100; - let workloads_builders = WorkloadConfiguration::create_workload_builders( - 0, + let weights = WorkloadWeights { + shared_counter: config.shared_counter_weight, + transfer_object: config.transfer_object_weight, + delegation: config.delegation_weight, + batch_payment: config.batch_payment_weight, + shared_deletion: config.shared_deletion_weight, + randomness: config.randomness_weight, + adversarial: adversarial_weight, + expected_failure: config.expected_failure_weight, + }; + + let workload_config = WorkloadConfig { + group: 0, num_workers, - num_transfer_accounts, - shared_counter_weight, - transfer_object_weight, - delegation_weight, - batch_payment_weight, - shared_object_deletion_weight, - adversarial_weight, + num_transfer_accounts: config.num_transfer_accounts, + weights, adversarial_cfg, - randomness_weight, + expected_failure_cfg: config.expected_failure_config, batch_payment_size, - shared_counter_hotness_factor, - num_shared_counters, + shared_counter_hotness_factor: config.shared_counter_hotness_factor, + num_shared_counters: config.num_shared_counters, shared_counter_max_tip, target_qps, in_flight_ratio, duration, + }; + + let workloads_builders = WorkloadConfiguration::create_workload_builders( + workload_config, system_state_observer.clone(), ) .await; diff --git a/crates/sui-bridge-cli/src/lib.rs b/crates/sui-bridge-cli/src/lib.rs index a607e0acacf03..dc40a7f2f38d1 100644 --- a/crates/sui-bridge-cli/src/lib.rs +++ b/crates/sui-bridge-cli/src/lib.rs @@ -541,6 +541,8 @@ pub enum BridgeClientCommands { ClaimOnEth { #[clap(long)] seq_num: u64, + #[clap(long, default_value_t = true, action = clap::ArgAction::Set)] + dry_run: bool, }, } @@ -576,8 +578,8 @@ impl BridgeClientCommands { ); Ok(()) } - BridgeClientCommands::ClaimOnEth { seq_num } => { - claim_on_eth(seq_num, config, sui_bridge_client) + BridgeClientCommands::ClaimOnEth { seq_num, dry_run } => { + claim_on_eth(seq_num, config, sui_bridge_client, dry_run) .await .map_err(|e| anyhow!("{:?}", e)) } @@ -681,6 +683,7 @@ async fn claim_on_eth( seq_num: u64, config: &LoadedBridgeCliConfig, sui_bridge_client: SuiBridgeClient, + dry_run: bool, ) -> BridgeResult<()> { let sui_chain_id = sui_bridge_client.get_bridge_summary().await?.chain_id; 
let parsed_message = sui_bridge_client @@ -710,8 +713,20 @@ async fn claim_on_eth( ); let message = eth_sui_bridge::Message::from(parsed_message); let tx = eth_sui_bridge.transfer_bridged_tokens_with_signatures(signatures, message); - let _eth_claim_tx_receipt = tx.send().await.unwrap().await.unwrap().unwrap(); - info!("Sui to Eth bridge transfer claimed"); + if dry_run { + let tx = tx.tx; + let resp = config.eth_signer.estimate_gas(&tx, None).await; + println!( + "Sui to Eth bridge transfer claim dry run result: {:?}", + resp + ); + } else { + let eth_claim_tx_receipt = tx.send().await.unwrap().await.unwrap().unwrap(); + println!( + "Sui to Eth bridge transfer claimed: {:?}", + eth_claim_tx_receipt + ); + } Ok(()) } diff --git a/crates/sui-bridge-cli/src/main.rs b/crates/sui-bridge-cli/src/main.rs index 25532f6bc5335..3ee9c4acf5d76 100644 --- a/crates/sui-bridge-cli/src/main.rs +++ b/crates/sui-bridge-cli/src/main.rs @@ -7,6 +7,7 @@ use ethers::types::Address as EthAddress; use fastcrypto::encoding::{Encoding, Hex}; use shared_crypto::intent::Intent; use shared_crypto::intent::IntentMessage; +use std::collections::BTreeMap; use std::collections::HashMap; use std::str::from_utf8; use std::str::FromStr; @@ -83,7 +84,7 @@ async fn main() -> anyhow::Result<()> { let config = LoadedBridgeCliConfig::load(config).await?; let metrics = Arc::new(BridgeMetrics::new_for_testing()); let sui_bridge_client = - SuiClient::::new(&config.sui_rpc_url, metrics).await?; + SuiClient::::new(&config.sui_rpc_url, metrics.clone()).await?; let (sui_key, sui_address, gas_object_ref) = config .get_sui_account_info() @@ -99,7 +100,11 @@ async fn main() -> anyhow::Result<()> { .await .expect("Failed to get bridge committee"), ); - let agg = BridgeAuthorityAggregator::new(bridge_committee); + let agg = BridgeAuthorityAggregator::new( + bridge_committee, + metrics, + Arc::new(BTreeMap::new()), + ); // Handle Sui Side if chain_id.is_sui_chain() { diff --git a/crates/sui-bridge-indexer/Cargo.toml b/crates/sui-bridge-indexer/Cargo.toml index 4e41c2d6e0100..75602df4ae818 100644 --- a/crates/sui-bridge-indexer/Cargo.toml +++ b/crates/sui-bridge-indexer/Cargo.toml @@ -36,13 +36,16 @@ backoff.workspace = true sui-config.workspace = true tempfile.workspace = true sui-indexer-builder.workspace = true -sui-bridge-watchdog.workspace = true [dev-dependencies] sui-types = { workspace = true, features = ["test-utils"] } sui-test-transaction-builder.workspace = true test-cluster.workspace = true hex-literal = "0.3.4" +sui-indexer.workspace = true +diesel_migrations = "2.2.0" +sui-indexer-builder = { workspace = true, features = ["test-utils"] } +sui-bridge = { workspace = true, features = ["test-utils"] } [[bin]] name = "bridge-indexer" diff --git a/crates/sui-bridge-indexer/README.md b/crates/sui-bridge-indexer/README.md new file mode 100644 index 0000000000000..c6ce103c0d73e --- /dev/null +++ b/crates/sui-bridge-indexer/README.md @@ -0,0 +1,41 @@ +## Overview + +Sui Bridge Indexer is a binary that scans Sui Bridge transactions on Sui and Ethereum networks, and indexes the processed data for further use. 
+ +## Get Binary + +```bash +cargo build --bin bridge-indexer --release +``` + +The pre-built Docker image for Bridge Indexer can be found in `mysten/sui-tools:{SHA}` + +## Run Binary + +``` +bridge-indexer --config-path config.yaml +``` + + +## Config + +```yaml +--- +remote_store_url: https://checkpoints.mainnet.sui.io +eth_rpc_url: {eth rpc url} +sui_rpc_url: {sui rpc url} + +concurrency: 500 +checkpoints_path: {path-for-checkpoints} + +eth_sui_bridge_contract_address: 0xda3bD1fE1973470312db04551B65f401Bc8a92fD # <-- mainnet, 0xAE68F87938439afEEDd6552B0E83D2CbC2473623 for testnet +metric_port: {port to export metrics} + +sui_bridge_genesis_checkpoint: 55455583 # <-- mainnet, 43917829 for testnet +# genesis block number for eth +eth_bridge_genesis_block: 20811249 # <-- mainnet, 5997013 for testnet + +eth_ws_url: {eth websocket url} + +``` + diff --git a/crates/sui-bridge-indexer/src/config.rs b/crates/sui-bridge-indexer/src/config.rs index 58b742642b9a8..6686da9fb2b74 100644 --- a/crates/sui-bridge-indexer/src/config.rs +++ b/crates/sui-bridge-indexer/src/config.rs @@ -25,9 +25,6 @@ pub struct IndexerConfig { pub eth_sui_bridge_contract_address: String, pub metric_port: u16, - - /// A temporary flag to disable the eth indexer to test mainnet before eth contracts are deployed. - pub disable_eth: Option, } impl sui_config::Config for IndexerConfig {} diff --git a/crates/sui-bridge-indexer/src/lib.rs b/crates/sui-bridge-indexer/src/lib.rs index b95802502eb93..8e8a906fbcb53 100644 --- a/crates/sui-bridge-indexer/src/lib.rs +++ b/crates/sui-bridge-indexer/src/lib.rs @@ -1,14 +1,35 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use std::fmt::{Display, Formatter}; -use strum_macros::Display; - -use sui_types::base_types::{SuiAddress, TransactionDigest}; - +use crate::config::IndexerConfig; +use crate::eth_bridge_indexer::{ + EthDataMapper, EthFinalizedSyncDatasource, EthSubscriptionDatasource, +}; +use crate::metrics::BridgeIndexerMetrics; use crate::models::GovernanceAction as DBGovernanceAction; use crate::models::TokenTransferData as DBTokenTransferData; use crate::models::{SuiErrorTransactions, TokenTransfer as DBTokenTransfer}; +use crate::postgres_manager::PgPool; +use crate::storage::PgBridgePersistent; +use crate::sui_bridge_indexer::SuiBridgeDataMapper; +use crate::sui_datasource::SuiCheckpointDatasource; +use ethers::providers::{Http, Provider}; +use ethers::types::Address as EthAddress; +use std::fmt::{Display, Formatter}; +use std::str::FromStr; +use std::sync::Arc; +use strum_macros::Display; +use sui_bridge::eth_client::EthClient; +use sui_bridge::metered_eth_provider::MeteredEthHttpProvier; +use sui_bridge::metrics::BridgeMetrics; +use sui_bridge::utils::get_eth_contract_addresses; +use sui_data_ingestion_core::DataIngestionMetrics; +use sui_indexer_builder::indexer_builder::{BackfillStrategy, Datasource, Indexer, IndexerBuilder}; +use sui_indexer_builder::progress::{ + OutOfOrderSaveAfterDurationPolicy, ProgressSavingPolicy, SaveAfterDurationPolicy, +}; +use sui_sdk::SuiClientBuilder; +use sui_types::base_types::{SuiAddress, TransactionDigest}; pub mod config; pub mod metrics; @@ -179,3 +200,146 @@ impl Display for BridgeDataSource { write!(f, "{str}") } } + +pub async fn create_sui_indexer( + pool: PgPool, + metrics: BridgeIndexerMetrics, + ingestion_metrics: DataIngestionMetrics, + config: &IndexerConfig, +) -> anyhow::Result< + Indexer, + anyhow::Error, +> { + let datastore_with_out_of_order_source = PgBridgePersistent::new( + pool, + 
ProgressSavingPolicy::OutOfOrderSaveAfterDuration(OutOfOrderSaveAfterDurationPolicy::new( + tokio::time::Duration::from_secs(30), + )), + ); + + let sui_client = Arc::new( + SuiClientBuilder::default() + .build(config.sui_rpc_url.clone()) + .await?, + ); + + let sui_checkpoint_datasource = SuiCheckpointDatasource::new( + config.remote_store_url.clone(), + sui_client, + config.concurrency as usize, + config + .checkpoints_path + .clone() + .map(|p| p.into()) + .unwrap_or(tempfile::tempdir()?.into_path()), + config.sui_bridge_genesis_checkpoint, + ingestion_metrics, + metrics.clone(), + ); + + Ok(IndexerBuilder::new( + "SuiBridgeIndexer", + sui_checkpoint_datasource, + SuiBridgeDataMapper { metrics }, + datastore_with_out_of_order_source, + ) + .build()) +} + +pub async fn create_eth_sync_indexer( + pool: PgPool, + metrics: BridgeIndexerMetrics, + bridge_metrics: Arc, + config: &IndexerConfig, + eth_client: Arc>, +) -> Result, anyhow::Error> { + let bridge_addresses = get_eth_bridge_contract_addresses(config).await?; + // Start the eth sync data source + let eth_sync_datasource = EthFinalizedSyncDatasource::new( + bridge_addresses, + eth_client.clone(), + config.eth_rpc_url.clone(), + metrics.clone(), + bridge_metrics.clone(), + config.eth_bridge_genesis_block, + ) + .await?; + Ok(create_eth_indexer_builder( + pool, + metrics, + eth_sync_datasource, + "EthBridgeFinalizedSyncIndexer", + ) + .await? + .with_backfill_strategy(BackfillStrategy::Partitioned { task_size: 1000 }) + .build()) +} + +pub async fn create_eth_subscription_indexer( + pool: PgPool, + metrics: BridgeIndexerMetrics, + config: &IndexerConfig, + eth_client: Arc>, +) -> Result, anyhow::Error> { + // Start the eth subscription indexer + let bridge_addresses = get_eth_bridge_contract_addresses(config).await?; + // Start the eth subscription indexer + let eth_subscription_datasource = EthSubscriptionDatasource::new( + bridge_addresses.clone(), + eth_client.clone(), + config.eth_ws_url.clone(), + metrics.clone(), + config.eth_bridge_genesis_block, + ) + .await?; + + Ok(create_eth_indexer_builder( + pool, + metrics, + eth_subscription_datasource, + "EthBridgeSubscriptionIndexer", + ) + .await? + .with_backfill_strategy(BackfillStrategy::Disabled) + .build()) +} + +async fn create_eth_indexer_builder>( + pool: PgPool, + metrics: BridgeIndexerMetrics, + datasource: D, + indexer_name: &str, +) -> Result, anyhow::Error> { + let datastore = PgBridgePersistent::new( + pool, + ProgressSavingPolicy::SaveAfterDuration(SaveAfterDurationPolicy::new( + tokio::time::Duration::from_secs(30), + )), + ); + + // Start the eth subscription indexer + Ok(IndexerBuilder::new( + indexer_name, + datasource, + EthDataMapper { metrics }, + datastore.clone(), + )) +} + +async fn get_eth_bridge_contract_addresses( + config: &IndexerConfig, +) -> Result, anyhow::Error> { + let bridge_address = EthAddress::from_str(&config.eth_sui_bridge_contract_address)?; + let provider = Arc::new( + Provider::::try_from(&config.eth_rpc_url)? 
+ .interval(std::time::Duration::from_millis(2000)), + ); + let bridge_addresses = get_eth_contract_addresses(bridge_address, &provider).await?; + Ok(vec![ + bridge_address, + bridge_addresses.0, + bridge_addresses.1, + bridge_addresses.2, + bridge_addresses.3, + ]) +} diff --git a/crates/sui-bridge-indexer/src/main.rs b/crates/sui-bridge-indexer/src/main.rs index 8ba1d128ce1d4..a42fc23b04ab7 100644 --- a/crates/sui-bridge-indexer/src/main.rs +++ b/crates/sui-bridge-indexer/src/main.rs @@ -3,7 +3,6 @@ use anyhow::Result; use clap::*; -use ethers::providers::{Http, Provider}; use ethers::types::Address as EthAddress; use prometheus::Registry; use std::collections::HashSet; @@ -15,10 +14,9 @@ use std::str::FromStr; use std::sync::Arc; use sui_bridge::eth_client::EthClient; use sui_bridge::metered_eth_provider::{new_metered_eth_provider, MeteredEthHttpProvier}; +use sui_bridge::sui_bridge_watchdog::Observable; use sui_bridge::sui_client::SuiBridgeClient; use sui_bridge::utils::get_eth_contract_addresses; -use sui_bridge_indexer::eth_bridge_indexer::EthFinalizedSyncDatasource; -use sui_bridge_indexer::eth_bridge_indexer::EthSubscriptionDatasource; use sui_config::Config; use tokio::task::JoinHandle; use tracing::info; @@ -28,24 +26,19 @@ use mysten_metrics::spawn_logged_monitored_task; use mysten_metrics::start_prometheus_server; use sui_bridge::metrics::BridgeMetrics; +use sui_bridge::sui_bridge_watchdog::{ + eth_bridge_status::EthBridgeStatus, eth_vault_balance::EthVaultBalance, + metrics::WatchdogMetrics, sui_bridge_status::SuiBridgeStatus, BridgeWatchDog, +}; use sui_bridge_indexer::config::IndexerConfig; -use sui_bridge_indexer::eth_bridge_indexer::EthDataMapper; use sui_bridge_indexer::metrics::BridgeIndexerMetrics; use sui_bridge_indexer::postgres_manager::{get_connection_pool, read_sui_progress_store}; -use sui_bridge_indexer::storage::PgBridgePersistent; -use sui_bridge_indexer::sui_bridge_indexer::SuiBridgeDataMapper; -use sui_bridge_indexer::sui_datasource::SuiCheckpointDatasource; use sui_bridge_indexer::sui_transaction_handler::handle_sui_transactions_loop; use sui_bridge_indexer::sui_transaction_queries::start_sui_tx_polling_task; -use sui_bridge_watchdog::{ - eth_bridge_status::EthBridgeStatus, eth_vault_balance::EthVaultBalance, - metrics::WatchdogMetrics, sui_bridge_status::SuiBridgeStatus, BridgeWatchDog, +use sui_bridge_indexer::{ + create_eth_subscription_indexer, create_eth_sync_indexer, create_sui_indexer, }; use sui_data_ingestion_core::DataIngestionMetrics; -use sui_indexer_builder::indexer_builder::{BackfillStrategy, IndexerBuilder}; -use sui_indexer_builder::progress::{ - OutOfOrderSaveAfterDurationPolicy, ProgressSavingPolicy, SaveAfterDurationPolicy, -}; use sui_sdk::SuiClientBuilder; #[derive(Parser, Clone, Debug)] @@ -86,18 +79,7 @@ async fn main() -> Result<()> { let bridge_metrics = Arc::new(BridgeMetrics::new(®istry)); let db_url = config.db_url.clone(); - let datastore = PgBridgePersistent::new( - get_connection_pool(db_url.clone()).await, - ProgressSavingPolicy::SaveAfterDuration(SaveAfterDurationPolicy::new( - tokio::time::Duration::from_secs(30), - )), - ); - let datastore_with_out_of_order_source = PgBridgePersistent::new( - get_connection_pool(db_url.clone()).await, - ProgressSavingPolicy::OutOfOrderSaveAfterDuration(OutOfOrderSaveAfterDurationPolicy::new( - tokio::time::Duration::from_secs(30), - )), - ); + let pool = get_connection_pool(db_url.clone()).await; let eth_client: Arc> = Arc::new( EthClient::::new( @@ -109,98 +91,30 @@ async fn main() -> 
Result<()> { ); let eth_bridge_proxy_address = EthAddress::from_str(&config.eth_sui_bridge_contract_address)?; let mut tasks = vec![]; - if Some(true) == config.disable_eth { - info!("Eth indexer is disabled"); - } else { - // Start the eth subscription indexer - let bridge_address = EthAddress::from_str(&config.eth_sui_bridge_contract_address)?; - let provider = Arc::new( - Provider::::try_from(&config.eth_rpc_url)? - .interval(std::time::Duration::from_millis(2000)), - ); - let bridge_addresses = get_eth_contract_addresses(bridge_address, &provider).await?; - let bridge_addresses: Vec = vec![ - bridge_address, - bridge_addresses.0, - bridge_addresses.1, - bridge_addresses.2, - bridge_addresses.3, - ]; - - // Start the eth subscription indexer - let eth_subscription_datasource = EthSubscriptionDatasource::new( - bridge_addresses.clone(), - eth_client.clone(), - config.eth_ws_url.clone(), - indexer_meterics.clone(), - config.eth_bridge_genesis_block, - ) - .await?; - let eth_subscription_indexer = IndexerBuilder::new( - "EthBridgeSubscriptionIndexer", - eth_subscription_datasource, - EthDataMapper { - metrics: indexer_meterics.clone(), - }, - datastore.clone(), - ) - .with_backfill_strategy(BackfillStrategy::Disabled) - .build(); - tasks.push(spawn_logged_monitored_task!( - eth_subscription_indexer.start() - )); - - // Start the eth sync data source - let eth_sync_datasource = EthFinalizedSyncDatasource::new( - bridge_addresses.clone(), - eth_client.clone(), - config.eth_rpc_url.clone(), - indexer_meterics.clone(), - bridge_metrics.clone(), - config.eth_bridge_genesis_block, - ) - .await?; - - let eth_sync_indexer = IndexerBuilder::new( - "EthBridgeFinalizedSyncIndexer", - eth_sync_datasource, - EthDataMapper { - metrics: indexer_meterics.clone(), - }, - datastore, - ) - .with_backfill_strategy(BackfillStrategy::Partitioned { task_size: 1000 }) - .build(); - tasks.push(spawn_logged_monitored_task!(eth_sync_indexer.start())); - } + // Start the eth subscription indexer + let eth_subscription_indexer = create_eth_subscription_indexer( + pool.clone(), + indexer_meterics.clone(), + &config, + eth_client.clone(), + ) + .await?; + tasks.push(spawn_logged_monitored_task!( + eth_subscription_indexer.start() + )); - let sui_client = Arc::new( - SuiClientBuilder::default() - .build(config.sui_rpc_url.clone()) - .await?, - ); - let sui_checkpoint_datasource = SuiCheckpointDatasource::new( - config.remote_store_url.clone(), - sui_client, - config.concurrency as usize, - config - .checkpoints_path - .clone() - .map(|p| p.into()) - .unwrap_or(tempfile::tempdir()?.into_path()), - config.sui_bridge_genesis_checkpoint, - ingestion_metrics.clone(), + // Start the eth sync data source + let eth_sync_indexer = create_eth_sync_indexer( + pool.clone(), indexer_meterics.clone(), - ); - let indexer = IndexerBuilder::new( - "SuiBridgeIndexer", - sui_checkpoint_datasource, - SuiBridgeDataMapper { - metrics: indexer_meterics.clone(), - }, - datastore_with_out_of_order_source, + bridge_metrics.clone(), + &config, + eth_client, ) - .build(); + .await?; + tasks.push(spawn_logged_monitored_task!(eth_sync_indexer.start())); + + let indexer = create_sui_indexer(pool, indexer_meterics, ingestion_metrics, &config).await?; tasks.push(spawn_logged_monitored_task!(indexer.start())); let sui_bridge_client = @@ -247,14 +161,12 @@ async fn start_watchdog( let sui_bridge_status = SuiBridgeStatus::new(sui_client, watchdog_metrics.sui_bridge_paused.clone()); - - BridgeWatchDog::new(vec![ - Arc::new(eth_vault_balance), - 
Arc::new(eth_bridge_status), - Arc::new(sui_bridge_status), - ]) - .run() - .await; + let observables: Vec> = vec![ + Box::new(eth_vault_balance), + Box::new(eth_bridge_status), + Box::new(sui_bridge_status), + ]; + BridgeWatchDog::new(observables).run().await; Ok(()) } diff --git a/crates/sui-bridge-indexer/src/storage.rs b/crates/sui-bridge-indexer/src/storage.rs index 5955ca8e1c37c..a279c6aa7410e 100644 --- a/crates/sui-bridge-indexer/src/storage.rs +++ b/crates/sui-bridge-indexer/src/storage.rs @@ -202,7 +202,7 @@ impl IndexerProgressStore for PgBridgePersistent { let cp: Vec = // TODO: using like could be error prone, change the progress store schema to stare the task name properly. QueryDsl::filter( - QueryDsl::filter(dsl::progress_store, columns::task_name.like(format!("{prefix} - %"))), + QueryDsl::filter(dsl::progress_store, columns::task_name.like(format!("{prefix} - %"))), columns::checkpoint.lt(columns::target_checkpoint)) .order_by(columns::target_checkpoint.desc()) .load(&mut conn) @@ -219,7 +219,7 @@ impl IndexerProgressStore for PgBridgePersistent { let cp: Option = // TODO: using like could be error prone, change the progress store schema to stare the task name properly. QueryDsl::filter(QueryDsl::filter(dsl::progress_store - .select(columns::target_checkpoint), columns::task_name.like(format!("{prefix} - %"))), + .select(columns::target_checkpoint), columns::task_name.like(format!("{prefix} - %"))), columns::target_checkpoint.ne(i64::MAX)) .order_by(columns::target_checkpoint.desc()) .first::(&mut conn) diff --git a/crates/sui-bridge-indexer/tests/indexer_tests.rs b/crates/sui-bridge-indexer/tests/indexer_tests.rs new file mode 100644 index 0000000000000..34591bca4fa75 --- /dev/null +++ b/crates/sui-bridge-indexer/tests/indexer_tests.rs @@ -0,0 +1,174 @@ +// Copyright (c) Mysten Labs, Inc. 
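Note on the two progress-saving policies used by the new `create_*` helpers in `lib.rs` above: the Sui checkpoint indexer is built on an out-of-order policy (its datasource processes checkpoints concurrently, so progress can be reported out of sequence), while both Eth indexers use the in-order variant. A minimal illustrative sketch of the two constructions, lifted from those helpers and not part of the diff itself:

```rust
// Illustrative sketch: the two ProgressSavingPolicy variants used by
// create_sui_indexer (out-of-order) and create_eth_indexer_builder (in-order),
// both flushing progress every 30 seconds.
use sui_indexer_builder::progress::{
    OutOfOrderSaveAfterDurationPolicy, ProgressSavingPolicy, SaveAfterDurationPolicy,
};

fn eth_progress_policy() -> ProgressSavingPolicy {
    // In-order saves: used by the Eth subscription and finalized-sync indexers.
    ProgressSavingPolicy::SaveAfterDuration(SaveAfterDurationPolicy::new(
        tokio::time::Duration::from_secs(30),
    ))
}

fn sui_progress_policy() -> ProgressSavingPolicy {
    // Out-of-order saves: used by the Sui checkpoint indexer, whose concurrent
    // checkpoint processing may report progress out of sequence.
    ProgressSavingPolicy::OutOfOrderSaveAfterDuration(OutOfOrderSaveAfterDurationPolicy::new(
        tokio::time::Duration::from_secs(30),
    ))
}
```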
+// SPDX-License-Identifier: Apache-2.0 + +use diesel::associations::HasTable; +use diesel::QueryDsl; +use diesel_async::RunQueryDsl; +use diesel_migrations::{embed_migrations, EmbeddedMigrations}; +use prometheus::Registry; +use std::time::Duration; +use sui_bridge::e2e_tests::test_utils::{ + initiate_bridge_eth_to_sui, BridgeTestCluster, BridgeTestClusterBuilder, +}; +use sui_bridge_indexer::config::IndexerConfig; +use sui_bridge_indexer::metrics::BridgeIndexerMetrics; +use sui_bridge_indexer::models::{GovernanceAction, TokenTransfer}; +use sui_bridge_indexer::postgres_manager::get_connection_pool; +use sui_bridge_indexer::storage::PgBridgePersistent; +use sui_bridge_indexer::{create_sui_indexer, schema}; +use sui_data_ingestion_core::DataIngestionMetrics; +use sui_indexer::database::Connection; +use sui_indexer::tempdb::TempDb; +use sui_indexer_builder::indexer_builder::IndexerProgressStore; + +const MIGRATIONS: EmbeddedMigrations = embed_migrations!("src/migrations"); + +#[tokio::test] +async fn test_indexing_transfer() { + let metrics = BridgeIndexerMetrics::new_for_testing(); + let registry = Registry::new(); + let ingestion_metrics = DataIngestionMetrics::new(®istry); + + let (config, cluster, _db) = setup_bridge_env(false).await; + + let pool = get_connection_pool(config.db_url.clone()).await; + let indexer = create_sui_indexer(pool.clone(), metrics.clone(), ingestion_metrics, &config) + .await + .unwrap(); + let storage = indexer.test_only_storage().clone(); + let indexer_name = indexer.test_only_name(); + let indexer_handle = tokio::spawn(indexer.start()); + + // wait until backfill finish + wait_for_back_fill_to_finish(&storage, &indexer_name) + .await + .unwrap(); + + let data: Vec = schema::token_transfer::dsl::token_transfer::table() + .load(&mut pool.get().await.unwrap()) + .await + .unwrap(); + + // token transfer data should be empty + assert!(data.is_empty()); + + use schema::governance_actions::columns; + let data = schema::governance_actions::dsl::governance_actions::table() + .select(( + columns::nonce, + columns::data_source, + columns::txn_digest, + columns::sender_address, + columns::timestamp_ms, + columns::action, + columns::data, + )) + .load::(&mut pool.get().await.unwrap()) + .await + .unwrap(); + + // 8 governance actions in total, token registration and approval events for ETH USDC, USDT and BTC. + assert_eq!(8, data.len()); + + // transfer eth to sui + initiate_bridge_eth_to_sui(&cluster, 1000, 0).await.unwrap(); + + let current_block_height = cluster + .sui_client() + .read_api() + .get_latest_checkpoint_sequence_number() + .await + .unwrap(); + wait_for_block(&storage, &indexer_name, current_block_height) + .await + .unwrap(); + + let data = schema::token_transfer::dsl::token_transfer::table() + .load::(&mut pool.get().await.unwrap()) + .await + .unwrap() + .iter() + .map(|t| (t.chain_id, t.nonce, t.status.clone())) + .collect::>(); + + assert_eq!(2, data.len()); + assert_eq!( + vec![ + (12, 0, "Approved".to_string()), + (12, 0, "Claimed".to_string()) + ], + data + ); + + indexer_handle.abort() +} + +async fn wait_for_block( + storage: &PgBridgePersistent, + task: &str, + block: u64, +) -> Result<(), anyhow::Error> { + while storage + .get_ongoing_tasks(task) + .await? 
+ .live_task() + .map(|t| t.start_checkpoint) + .unwrap_or_default() + < block + { + tokio::time::sleep(Duration::from_millis(100)).await; + } + Ok(()) +} + +async fn wait_for_back_fill_to_finish( + storage: &PgBridgePersistent, + task: &str, +) -> Result<(), anyhow::Error> { + // wait until tasks are set up + while storage.get_ongoing_tasks(task).await?.live_task().is_none() { + tokio::time::sleep(Duration::from_millis(100)).await; + } + // wait until all backfill tasks have completed + while !storage + .get_ongoing_tasks(task) + .await? + .backfill_tasks_ordered_desc() + .is_empty() + { + tokio::time::sleep(Duration::from_millis(1000)).await; + } + Ok(()) +} + +async fn setup_bridge_env(with_eth_env: bool) -> (IndexerConfig, BridgeTestCluster, TempDb) { + let bridge_test_cluster = BridgeTestClusterBuilder::new() + .with_eth_env(with_eth_env) + .with_bridge_cluster(true) + .with_num_validators(3) + .build() + .await; + + let db = TempDb::new().unwrap(); + + // Run database migration + let conn = Connection::dedicated(db.database().url()).await.unwrap(); + conn.run_pending_migrations(MIGRATIONS).await.unwrap(); + + let config = IndexerConfig { + remote_store_url: format!("{}/rest", bridge_test_cluster.sui_rpc_url()), + checkpoints_path: None, + sui_rpc_url: bridge_test_cluster.sui_rpc_url(), + eth_rpc_url: bridge_test_cluster.eth_rpc_url(), + // TODO: add WS support + eth_ws_url: "".to_string(), + db_url: db.database().url().to_string(), + concurrency: 10, + sui_bridge_genesis_checkpoint: 0, + eth_bridge_genesis_block: 0, + eth_sui_bridge_contract_address: bridge_test_cluster.sui_bridge_address(), + metric_port: 9001, + }; + + (config, bridge_test_cluster, db) +} diff --git a/crates/sui-bridge-watchdog/Cargo.toml b/crates/sui-bridge-watchdog/Cargo.toml deleted file mode 100644 index b6148e6bd6222..0000000000000 --- a/crates/sui-bridge-watchdog/Cargo.toml +++ /dev/null @@ -1,18 +0,0 @@ -[package] -name = "sui-bridge-watchdog" -version = "0.1.0" -authors = ["Mysten Labs "] -license = "Apache-2.0" -publish = false -edition = "2021" - -[dependencies] -sui-bridge.workspace = true -mysten-metrics.workspace = true -prometheus.workspace = true -anyhow.workspace = true -futures.workspace = true -async-trait.workspace = true -ethers = { version = "2.0" } -tracing.workspace = true -tokio = { workspace = true, features = ["full"] } diff --git a/crates/sui-bridge-watchdog/eth_bridge_status.rs b/crates/sui-bridge-watchdog/eth_bridge_status.rs new file mode 100644 index 0000000000000..cdd795f2f71f9 --- /dev/null +++ b/crates/sui-bridge-watchdog/eth_bridge_status.rs @@ -0,0 +1,58 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +//! The EthBridgeStatus observable monitors whether the Eth Bridge is paused. 
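The integration test above relies on `wait_for_back_fill_to_finish` and `wait_for_block`, which poll the progress store in an unbounded loop. As an illustrative addition (not part of this diff), such a poll can be wrapped in `tokio::time::timeout` so that a stalled indexer fails the test instead of hanging CI; the helper and its names below are assumptions, not crate APIs.

```rust
// Generic sketch: bound a polling loop with a deadline.
use std::time::Duration;

async fn wait_with_deadline<F, Fut>(deadline: Duration, poll: F) -> anyhow::Result<()>
where
    F: Fn() -> Fut,
    Fut: std::future::Future<Output = bool>,
{
    tokio::time::timeout(deadline, async {
        // Re-evaluate the condition every 100ms, mirroring the wait_for_* helpers.
        while !poll().await {
            tokio::time::sleep(Duration::from_millis(100)).await;
        }
    })
    .await
    .map_err(|_| anyhow::anyhow!("condition not reached within {deadline:?}"))
}
```

A caller could wrap the existing helpers with this, bounding the backfill wait to a few minutes before asserting on the database contents.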
+ +use crate::Observable; +use async_trait::async_trait; +use ethers::providers::Provider; +use ethers::types::Address as EthAddress; +use prometheus::IntGauge; +use std::sync::Arc; +use sui_bridge::abi::EthSuiBridge; +use sui_bridge::metered_eth_provider::MeteredEthHttpProvier; +use tokio::time::Duration; +use tracing::{error, info}; + +pub struct EthBridgeStatus { + bridge_contract: EthSuiBridge>, + metric: IntGauge, +} + +impl EthBridgeStatus { + pub fn new( + provider: Arc>, + bridge_address: EthAddress, + metric: IntGauge, + ) -> Self { + let bridge_contract = EthSuiBridge::new(bridge_address, provider.clone()); + Self { + bridge_contract, + metric, + } + } +} + +#[async_trait] +impl Observable for EthBridgeStatus { + fn name(&self) -> &str { + "EthBridgeStatus" + } + + async fn observe_and_report(&self) { + let status = self.bridge_contract.paused().call().await; + match status { + Ok(status) => { + self.metric.set(status as i64); + info!("Eth Bridge Status: {:?}", status); + } + Err(e) => { + error!("Error getting eth bridge status: {:?}", e); + } + } + } + + fn interval(&self) -> Duration { + Duration::from_secs(10) + } +} diff --git a/crates/sui-bridge-watchdog/eth_vault_balance.rs b/crates/sui-bridge-watchdog/eth_vault_balance.rs new file mode 100644 index 0000000000000..dfc359e0cb393 --- /dev/null +++ b/crates/sui-bridge-watchdog/eth_vault_balance.rs @@ -0,0 +1,75 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::Observable; +use async_trait::async_trait; +use ethers::providers::Provider; +use ethers::types::{Address as EthAddress, U256}; +use prometheus::IntGauge; +use std::sync::Arc; +use sui_bridge::abi::EthERC20; +use sui_bridge::metered_eth_provider::MeteredEthHttpProvier; +use tokio::time::Duration; +use tracing::{error, info}; + +const TEN_ZEROS: u64 = 10_u64.pow(10); + +pub struct EthVaultBalance { + coin_contract: EthERC20>, + vault_address: EthAddress, + ten_zeros: U256, + metric: IntGauge, +} + +impl EthVaultBalance { + pub fn new( + provider: Arc>, + vault_address: EthAddress, + coin_address: EthAddress, // for now this only support one coin which is WETH + metric: IntGauge, + ) -> Self { + let ten_zeros = U256::from(TEN_ZEROS); + let coin_contract = EthERC20::new(coin_address, provider); + Self { + coin_contract, + vault_address, + ten_zeros, + metric, + } + } +} + +#[async_trait] +impl Observable for EthVaultBalance { + fn name(&self) -> &str { + "EthVaultBalance" + } + + async fn observe_and_report(&self) { + match self + .coin_contract + .balance_of(self.vault_address) + .call() + .await + { + Ok(balance) => { + // Why downcasting is safe: + // 1. On Ethereum we only take the first 8 decimals into account, + // meaning the trailing 10 digits can be ignored + // 2. i64::MAX is 9_223_372_036_854_775_807, with 8 decimal places is + // 92_233_720_368. We likely won't see any balance higher than this + // in the next 12 months. + let balance = (balance / self.ten_zeros).as_u64() as i64; + self.metric.set(balance); + info!("Eth Vault Balance: {:?}", balance); + } + Err(e) => { + error!("Error getting balance from vault: {:?}", e); + } + } + } + + fn interval(&self) -> Duration { + Duration::from_secs(10) + } +} diff --git a/crates/sui-bridge-watchdog/lib.rs b/crates/sui-bridge-watchdog/lib.rs new file mode 100644 index 0000000000000..b78e436fd696a --- /dev/null +++ b/crates/sui-bridge-watchdog/lib.rs @@ -0,0 +1,62 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +//! 
The BridgeWatchDog module is responsible for monitoring the health +//! of the bridge by periodically running various observables and +//! reporting the results. + +use anyhow::Result; +use async_trait::async_trait; +use mysten_metrics::spawn_logged_monitored_task; +use std::sync::Arc; +use tokio::time::Duration; +use tokio::time::MissedTickBehavior; +use tracing::{error_span, info, Instrument}; + +pub mod eth_bridge_status; +pub mod eth_vault_balance; +pub mod metrics; +pub mod sui_bridge_status; + +pub struct BridgeWatchDog { + observables: Vec>, +} + +impl BridgeWatchDog { + pub fn new(observables: Vec>) -> Self { + Self { observables } + } + + pub async fn run(self) { + let mut handles = vec![]; + for observable in self.observables.into_iter() { + let handle = spawn_logged_monitored_task!(Self::run_observable(observable)); + handles.push(handle); + } + // Return when any task returns an error or all tasks exit. + futures::future::try_join_all(handles).await.unwrap(); + unreachable!("watch dog tasks should not exit"); + } + + async fn run_observable(observable: Arc) -> Result<()> { + let mut interval = tokio::time::interval(observable.interval()); + interval.set_missed_tick_behavior(MissedTickBehavior::Skip); + let name = observable.name(); + let span = error_span!("observable", name); + loop { + info!("Running observable {}", name); + observable + .observe_and_report() + .instrument(span.clone()) + .await; + interval.tick().await; + } + } +} + +#[async_trait] +pub trait Observable { + fn name(&self) -> &str; + async fn observe_and_report(&self); + fn interval(&self) -> Duration; +} diff --git a/crates/sui-bridge-watchdog/metrics.rs b/crates/sui-bridge-watchdog/metrics.rs new file mode 100644 index 0000000000000..c33d2e4876e3b --- /dev/null +++ b/crates/sui-bridge-watchdog/metrics.rs @@ -0,0 +1,41 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use prometheus::{register_int_gauge_with_registry, IntGauge, Registry}; + +#[derive(Clone, Debug)] +pub struct WatchdogMetrics { + pub eth_vault_balance: IntGauge, + pub eth_bridge_paused: IntGauge, + pub sui_bridge_paused: IntGauge, +} + +impl WatchdogMetrics { + pub fn new(registry: &Registry) -> Self { + Self { + eth_vault_balance: register_int_gauge_with_registry!( + "bridge_eth_vault_balance", + "Current balance of eth vault", + registry, + ) + .unwrap(), + eth_bridge_paused: register_int_gauge_with_registry!( + "bridge_eth_bridge_paused", + "Whether the eth bridge is paused", + registry, + ) + .unwrap(), + sui_bridge_paused: register_int_gauge_with_registry!( + "bridge_sui_bridge_paused", + "Whether the sui bridge is paused", + registry, + ) + .unwrap(), + } + } + + pub fn new_for_testing() -> Self { + let registry = Registry::new(); + Self::new(®istry) + } +} diff --git a/crates/sui-bridge-watchdog/sui_bridge_status.rs b/crates/sui-bridge-watchdog/sui_bridge_status.rs new file mode 100644 index 0000000000000..09e5b5adf9cb3 --- /dev/null +++ b/crates/sui-bridge-watchdog/sui_bridge_status.rs @@ -0,0 +1,48 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +//! The SuiBridgeStatus observable monitors whether the Sui Bridge is paused. 
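The new `sui_bridge_watchdog` module boils down to the `Observable` trait shown above (`name`, `observe_and_report`, `interval`) plus `BridgeWatchDog`, which spawns one monitored loop per observable. A sketch of a custom observable, assuming that trait; the `Send + Sync` bounds and the exact trait-object type expected by `BridgeWatchDog::new` are not visible in this diff, so registration is shown only as a comment.

```rust
// Sketch of a custom observable plugged into the watchdog added above.
use async_trait::async_trait;
use prometheus::IntGauge;
use sui_bridge::sui_bridge_watchdog::Observable;
use tokio::time::Duration;
use tracing::info;

pub struct Heartbeat {
    metric: IntGauge,
}

#[async_trait]
impl Observable for Heartbeat {
    fn name(&self) -> &str {
        "Heartbeat"
    }

    async fn observe_and_report(&self) {
        // A real observable would query a client here; the bundled observables
        // set an IntGauge and log the result, so this one does the same.
        self.metric.inc();
        info!("heartbeat");
    }

    fn interval(&self) -> Duration {
        Duration::from_secs(10)
    }
}

// Registration mirrors the new main.rs: boxed observables handed to the watchdog.
// BridgeWatchDog::new(vec![Box::new(Heartbeat { metric })]).run().await;
```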
+ +use crate::Observable; +use async_trait::async_trait; +use prometheus::IntGauge; +use std::sync::Arc; +use sui_bridge::sui_client::SuiBridgeClient; + +use tokio::time::Duration; +use tracing::{error, info}; + +pub struct SuiBridgeStatus { + sui_client: Arc, + metric: IntGauge, +} + +impl SuiBridgeStatus { + pub fn new(sui_client: Arc, metric: IntGauge) -> Self { + Self { sui_client, metric } + } +} + +#[async_trait] +impl Observable for SuiBridgeStatus { + fn name(&self) -> &str { + "SuiBridgeStatus" + } + + async fn observe_and_report(&self) { + let status = self.sui_client.is_bridge_paused().await; + match status { + Ok(status) => { + self.metric.set(status as i64); + info!("Sui Bridge Status: {:?}", status); + } + Err(e) => { + error!("Error getting sui bridge status: {:?}", e); + } + } + } + + fn interval(&self) -> Duration { + Duration::from_secs(2) + } +} diff --git a/crates/sui-bridge/Cargo.toml b/crates/sui-bridge/Cargo.toml index 700d4979f9b6f..8ed6d95cdeb64 100644 --- a/crates/sui-bridge/Cargo.toml +++ b/crates/sui-bridge/Cargo.toml @@ -48,12 +48,15 @@ mysten-common.workspace = true enum_dispatch.workspace = true sui-json-rpc-api.workspace = true sui-test-transaction-builder.workspace = true +hex-literal = { version = "0.3.4", optional = true } +test-cluster = { workspace = true, optional = true } [dev-dependencies] sui-types = { workspace = true, features = ["test-utils"] } sui-json-rpc-types = { workspace = true, features = ["test-utils"] } sui-config.workspace = true sui-test-transaction-builder.workspace = true -test-cluster.workspace = true -hex-literal = "0.3.4" maplit = "1.0.2" + +[features] +test-utils = ["hex-literal", "test-cluster"] diff --git a/crates/sui-bridge/src/action_executor.rs b/crates/sui-bridge/src/action_executor.rs index e8d54728e4011..8e0fabafb8191 100644 --- a/crates/sui-bridge/src/action_executor.rs +++ b/crates/sui-bridge/src/action_executor.rs @@ -1513,9 +1513,9 @@ mod tests { let committee = BridgeCommittee::new(authorities).unwrap(); - let agg = Arc::new(ArcSwap::new(Arc::new(BridgeAuthorityAggregator::new( - Arc::new(committee), - )))); + let agg = Arc::new(ArcSwap::new(Arc::new( + BridgeAuthorityAggregator::new_for_testing(Arc::new(committee)), + ))); let metrics = Arc::new(BridgeMetrics::new(®istry)); let sui_token_type_tags = sui_client.get_token_id_map().await.unwrap(); let sui_token_type_tags = Arc::new(ArcSwap::new(Arc::new(sui_token_type_tags))); diff --git a/crates/sui-bridge/src/client/bridge_authority_aggregator.rs b/crates/sui-bridge/src/client/bridge_authority_aggregator.rs index 1de1c41635a7e..c3229fc5e4dcc 100644 --- a/crates/sui-bridge/src/client/bridge_authority_aggregator.rs +++ b/crates/sui-bridge/src/client/bridge_authority_aggregator.rs @@ -7,6 +7,7 @@ use crate::client::bridge_client::BridgeClient; use crate::crypto::BridgeAuthorityPublicKeyBytes; use crate::crypto::BridgeAuthoritySignInfo; use crate::error::{BridgeError, BridgeResult}; +use crate::metrics::BridgeMetrics; use crate::types::BridgeCommitteeValiditySignInfo; use crate::types::{ BridgeAction, BridgeCommittee, CertifiedBridgeAction, VerifiedCertifiedBridgeAction, @@ -24,16 +25,23 @@ use sui_types::committee::StakeUnit; use sui_types::committee::TOTAL_VOTING_POWER; use tracing::{error, info, warn}; -const TOTAL_TIMEOUT_MS: u64 = 5000; -const PREFETCH_TIMEOUT_MS: u64 = 1500; +const TOTAL_TIMEOUT_MS: u64 = 5_000; +const PREFETCH_TIMEOUT_MS: u64 = 1_500; +const RETRY_INTERVAL_MS: u64 = 500; pub struct BridgeAuthorityAggregator { pub committee: Arc, pub clients: 
Arc>>, + pub metrics: Arc, + pub committee_keys_to_names: Arc>, } impl BridgeAuthorityAggregator { - pub fn new(committee: Arc) -> Self { + pub fn new( + committee: Arc, + metrics: Arc, + committee_keys_to_names: Arc>, + ) -> Self { let clients: BTreeMap> = committee .members() .iter() @@ -62,14 +70,30 @@ impl BridgeAuthorityAggregator { Self { committee, clients: Arc::new(clients), + metrics, + committee_keys_to_names, } } + #[cfg(test)] + pub fn new_for_testing(committee: Arc) -> Self { + Self::new( + committee, + Arc::new(BridgeMetrics::new_for_testing()), + Arc::new(BTreeMap::new()), + ) + } + pub async fn request_committee_signatures( &self, action: BridgeAction, ) -> BridgeResult { - let state = GetSigsState::new(action.approval_threshold(), self.committee.clone()); + let state = GetSigsState::new( + action.approval_threshold(), + self.committee.clone(), + self.metrics.clone(), + self.committee_keys_to_names.clone(), + ); request_sign_bridge_action_into_certification( action, self.committee.clone(), @@ -88,16 +112,25 @@ struct GetSigsState { sigs: BTreeMap, validity_threshold: StakeUnit, committee: Arc, + metrics: Arc, + committee_keys_to_names: Arc>, } impl GetSigsState { - fn new(validity_threshold: StakeUnit, committee: Arc) -> Self { + fn new( + validity_threshold: StakeUnit, + committee: Arc, + metrics: Arc, + committee_keys_to_names: Arc>, + ) -> Self { Self { committee, total_bad_stake: 0, total_ok_stake: 0, sigs: BTreeMap::new(), validity_threshold, + metrics, + committee_keys_to_names, } } @@ -119,7 +152,7 @@ impl GetSigsState { match self.sigs.entry(name.clone()) { Entry::Vacant(e) => { e.insert(signed_action.auth_sig().clone()); - self.total_ok_stake += stake; + self.add_ok_stake(stake, &name); } Entry::Occupied(_e) => { return Err(BridgeError::AuthoritySignatureDuplication(format!( @@ -156,7 +189,23 @@ impl GetSigsState { } } - fn add_bad_stake(&mut self, bad_stake: StakeUnit) { + fn add_ok_stake(&mut self, ok_stake: StakeUnit, name: &BridgeAuthorityPublicKeyBytes) { + if let Some(host_name) = self.committee_keys_to_names.get(name) { + self.metrics + .auth_agg_ok_responses + .with_label_values(&[host_name]) + .inc(); + } + self.total_ok_stake += ok_stake; + } + + fn add_bad_stake(&mut self, bad_stake: StakeUnit, name: &BridgeAuthorityPublicKeyBytes) { + if let Some(host_name) = self.committee_keys_to_names.get(name) { + self.metrics + .auth_agg_bad_responses + .with_label_values(&[host_name]) + .inc(); + } self.total_bad_stake += bad_stake; } @@ -201,8 +250,29 @@ async fn request_sign_bridge_action_into_certification( clients, preference, state, - |_name, client| { - Box::pin(async move { client.request_sign_bridge_action(action.clone()).await }) + |name, client| { + Box::pin(async move { + let start = std::time::Instant::now(); + let timeout = Duration::from_millis(TOTAL_TIMEOUT_MS); + let retry_interval = Duration::from_millis(RETRY_INTERVAL_MS); + while start.elapsed() < timeout { + match client.request_sign_bridge_action(action.clone()).await { + Ok(result) => { + return Ok(result); + } + // retryable errors + Err(BridgeError::TxNotFinalized) => { + warn!("Bridge authority {} observing transaction not yet finalized, retrying in {:?}", name.concise(), retry_interval); + tokio::time::sleep(retry_interval).await; + } + // non-retryable errors + Err(e) => { + return Err(e); + } + } + } + Err(BridgeError::TransientProviderError(format!("Bridge authority {} did not observe finalized transaction after {:?}", name.concise(), timeout))) + }) }, |mut state, name, stake, result| 
{ Box::pin(async move { @@ -223,7 +293,7 @@ async fn request_sign_bridge_action_into_certification( name.concise(), e ); - state.add_bad_stake(stake); + state.add_bad_stake(stake, &name); } } } @@ -233,7 +303,7 @@ async fn request_sign_bridge_action_into_certification( name.concise(), e ); - state.add_bad_stake(stake); + state.add_bad_stake(stake, &name); } }; @@ -245,7 +315,7 @@ async fn request_sign_bridge_action_into_certification( } }) }, - Duration::from_secs(TOTAL_TIMEOUT_MS), + Duration::from_millis(TOTAL_TIMEOUT_MS), ) .await .map_err(|state| { @@ -296,7 +366,7 @@ mod tests { } let committee = BridgeCommittee::new(authorities.clone()).unwrap(); - let agg = BridgeAuthorityAggregator::new(Arc::new(committee)); + let agg = BridgeAuthorityAggregator::new_for_testing(Arc::new(committee)); assert_eq!( agg.clients.keys().cloned().collect::>(), BTreeSet::from_iter(vec![ @@ -310,7 +380,7 @@ mod tests { // authority 2 is blocklisted authorities[2].is_blocklisted = true; let committee = BridgeCommittee::new(authorities.clone()).unwrap(); - let agg = BridgeAuthorityAggregator::new(Arc::new(committee)); + let agg = BridgeAuthorityAggregator::new_for_testing(Arc::new(committee)); assert_eq!( agg.clients.keys().cloned().collect::>(), BTreeSet::from_iter(vec![ @@ -323,7 +393,7 @@ mod tests { // authority 3 has bad url authorities[3].base_url = "".into(); let committee = BridgeCommittee::new(authorities.clone()).unwrap(); - let agg = BridgeAuthorityAggregator::new(Arc::new(committee)); + let agg = BridgeAuthorityAggregator::new_for_testing(Arc::new(committee)); assert_eq!( agg.clients.keys().cloned().collect::>(), BTreeSet::from_iter(vec![ @@ -351,7 +421,7 @@ mod tests { let committee = BridgeCommittee::new(authorities).unwrap(); - let agg = BridgeAuthorityAggregator::new(Arc::new(committee)); + let agg = BridgeAuthorityAggregator::new_for_testing(Arc::new(committee)); let sui_tx_digest = TransactionDigest::random(); let sui_tx_event_index = 0; @@ -468,7 +538,7 @@ mod tests { let authorities_clone = authorities.clone(); let committee = Arc::new(BridgeCommittee::new(authorities_clone).unwrap()); - let agg = BridgeAuthorityAggregator::new(committee.clone()); + let agg = BridgeAuthorityAggregator::new_for_testing(committee.clone()); let sui_tx_digest = TransactionDigest::random(); let sui_tx_event_index = 0; @@ -542,7 +612,13 @@ mod tests { // we should receive all signatures in time, but only aggregate 2 authorities // to achieve quorum - let state = GetSigsState::new(action.approval_threshold(), committee.clone()); + let metrics = Arc::new(BridgeMetrics::new_for_testing()); + let state = GetSigsState::new( + action.approval_threshold(), + committee.clone(), + metrics.clone(), + Arc::new(BTreeMap::new()), + ); let resp = request_sign_bridge_action_into_certification( action.clone(), agg.committee.clone(), @@ -559,7 +635,12 @@ mod tests { // we should receive all but the highest stake signatures in time, but still be able to // achieve quorum with 3 sigs - let state = GetSigsState::new(action.approval_threshold(), committee.clone()); + let state = GetSigsState::new( + action.approval_threshold(), + committee.clone(), + metrics.clone(), + Arc::new(BTreeMap::new()), + ); let resp = request_sign_bridge_action_into_certification( action.clone(), agg.committee.clone(), @@ -576,7 +657,12 @@ mod tests { assert!(!sig_keys.contains(&authorities[8].pubkey_bytes())); // we should have fallen back to arrival order given that we timeout before we reach quorum - let state = 
GetSigsState::new(action.approval_threshold(), committee.clone()); + let state = GetSigsState::new( + action.approval_threshold(), + committee.clone(), + metrics.clone(), + Arc::new(BTreeMap::new()), + ); let start = std::time::Instant::now(); let resp = request_sign_bridge_action_into_certification( action.clone(), @@ -625,7 +711,7 @@ mod tests { let committee = BridgeCommittee::new(authorities.clone()).unwrap(); - let agg = BridgeAuthorityAggregator::new(Arc::new(committee)); + let agg = BridgeAuthorityAggregator::new_for_testing(Arc::new(committee)); let sui_tx_digest = TransactionDigest::random(); let sui_tx_event_index = 0; @@ -721,40 +807,52 @@ mod tests { let committee = BridgeCommittee::new(authorities.clone()).unwrap(); let threshold = VALIDITY_THRESHOLD; - let mut state = GetSigsState::new(threshold, Arc::new(committee)); + let metrics = Arc::new(BridgeMetrics::new_for_testing()); + let mut state = GetSigsState::new( + threshold, + Arc::new(committee), + metrics.clone(), + Arc::new(BTreeMap::new()), + ); assert!(!state.is_too_many_error()); - + let dummy = authorities[0].pubkey_bytes(); // bad stake: 2500 - state.add_bad_stake(2500); + state.add_bad_stake(2500, &dummy); assert!(!state.is_too_many_error()); // bad stake ; 5000 - state.add_bad_stake(2500); + state.add_bad_stake(2500, &dummy); assert!(!state.is_too_many_error()); // bad stake : 6666 - state.add_bad_stake(1666); + state.add_bad_stake(1666, &dummy); assert!(!state.is_too_many_error()); // bad stake : 6667 - too many errors - state.add_bad_stake(1); + state.add_bad_stake(1, &dummy); assert!(state.is_too_many_error()); // Authority 0 is blocklisted, we lose 2500 stake authorities[0].is_blocklisted = true; let committee = BridgeCommittee::new(authorities.clone()).unwrap(); let threshold = VALIDITY_THRESHOLD; - let mut state = GetSigsState::new(threshold, Arc::new(committee)); + let metrics = Arc::new(BridgeMetrics::new_for_testing()); + let mut state = GetSigsState::new( + threshold, + Arc::new(committee), + metrics.clone(), + Arc::new(BTreeMap::new()), + ); assert!(!state.is_too_many_error()); // bad stake: 2500 + 2500 - state.add_bad_stake(2500); + state.add_bad_stake(2500, &dummy); assert!(!state.is_too_many_error()); // bad stake: 5000 + 2500 - too many errors - state.add_bad_stake(2500); + state.add_bad_stake(2500, &dummy); assert!(state.is_too_many_error()); // Below we test `handle_verified_signed_action` @@ -764,7 +862,12 @@ mod tests { authorities[3].is_blocklisted = true; // blocklist authority 3 let committee = BridgeCommittee::new(authorities.clone()).unwrap(); let threshold = VALIDITY_THRESHOLD; - let mut state = GetSigsState::new(threshold, Arc::new(committee.clone())); + let mut state = GetSigsState::new( + threshold, + Arc::new(committee.clone()), + metrics.clone(), + Arc::new(BTreeMap::new()), + ); let sui_tx_digest = TransactionDigest::random(); let sui_tx_event_index = 0; diff --git a/crates/sui-bridge/src/client/bridge_client.rs b/crates/sui-bridge/src/client/bridge_client.rs index 83c11e73ba995..09ceac2d6d133 100644 --- a/crates/sui-bridge/src/client/bridge_client.rs +++ b/crates/sui-bridge/src/client/bridge_client.rs @@ -207,11 +207,16 @@ impl BridgeClient { .await?; if !resp.status().is_success() { let error_status = format!("{:?}", resp.error_for_status_ref()); - return Err(BridgeError::RestAPIError(format!( - "request_sign_bridge_action failed with status {:?}: {:?}", - error_status, - resp.text().await? 
- ))); + let resp_text = resp.text().await?; + return match resp_text { + text if text.contains(&format!("{:?}", BridgeError::TxNotFinalized)) => { + Err(BridgeError::TxNotFinalized) + } + _ => Err(BridgeError::RestAPIError(format!( + "request_sign_bridge_action failed with status {:?}: {:?}", + error_status, resp_text + ))), + }; } let signed_bridge_action = resp.json().await?; verify_signed_bridge_action( diff --git a/crates/sui-bridge/src/config.rs b/crates/sui-bridge/src/config.rs index e59576417caac..12464b171c621 100644 --- a/crates/sui-bridge/src/config.rs +++ b/crates/sui-bridge/src/config.rs @@ -17,6 +17,7 @@ use ethers::types::Address as EthAddress; use futures::{future, StreamExt}; use serde::{Deserialize, Serialize}; use serde_with::serde_as; +use std::collections::BTreeMap; use std::collections::HashSet; use std::path::PathBuf; use std::str::FromStr; @@ -119,6 +120,9 @@ pub struct BridgeNodeConfig { pub metrics_key_pair: NetworkKeyPair, #[serde(skip_serializing_if = "Option::is_none")] pub metrics: Option, + + #[serde(skip_serializing_if = "Option::is_none")] + pub watchdog_config: Option, } pub fn default_ed25519_key_pair() -> NetworkKeyPair { @@ -133,6 +137,13 @@ pub struct MetricsConfig { pub push_url: String, } +#[derive(Debug, Clone, Deserialize, Serialize)] +#[serde(rename_all = "kebab-case")] +pub struct WatchdogConfig { + /// Total supplies to watch on Sui. Mapping from coin name to coin type tag + pub total_supplies: BTreeMap, +} + impl Config for BridgeNodeConfig {} impl BridgeNodeConfig { @@ -197,6 +208,7 @@ impl BridgeNodeConfig { let bridge_server_config = BridgeServerConfig { key: bridge_authority_key, metrics_port: self.metrics_port, + eth_bridge_proxy_address: eth_contracts[0], // the first contract is bridge proxy server_listen_port: self.server_listen_port, sui_client: sui_client.clone(), eth_client: eth_client.clone(), @@ -385,6 +397,7 @@ impl BridgeNodeConfig { pub struct BridgeServerConfig { pub key: BridgeAuthorityKeyPair, pub server_listen_port: u16, + pub eth_bridge_proxy_address: EthAddress, pub metrics_port: u16, pub sui_client: Arc>, pub eth_client: Arc>, diff --git a/crates/sui-bridge/src/e2e_tests/basic.rs b/crates/sui-bridge/src/e2e_tests/basic.rs index abdde652e9c13..21f11273b4e11 100644 --- a/crates/sui-bridge/src/e2e_tests/basic.rs +++ b/crates/sui-bridge/src/e2e_tests/basic.rs @@ -1,49 +1,37 @@ // Copyright (c) Mysten Labs, Inc. 
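The aggregator change above replaces the single signing request per authority with a bounded retry loop: `TxNotFinalized` (now surfaced as a typed error by `BridgeClient` from the response body) is retried every `RETRY_INTERVAL_MS` until `TOTAL_TIMEOUT_MS` elapses, any other error fails immediately, and each outcome is counted per authority via the new metrics. A generalized sketch of that pattern follows; the error type, classifier, and helper name are illustrative, not the crate's API.

```rust
// Retry only errors classified as transient, give up on the first permanent
// error, and stop once the total deadline is exceeded.
use std::time::{Duration, Instant};

async fn retry_with_deadline<T, E, F, Fut>(
    deadline: Duration,
    retry_interval: Duration,
    is_retryable: impl Fn(&E) -> bool,
    mut op: F,
) -> Result<T, E>
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = Result<T, E>>,
{
    let start = Instant::now();
    loop {
        match op().await {
            Ok(v) => return Ok(v),
            Err(e) if is_retryable(&e) && start.elapsed() < deadline => {
                tokio::time::sleep(retry_interval).await;
            }
            // Non-retryable error, or deadline exceeded: surface the last error.
            Err(e) => return Err(e),
        }
    }
}
```

One difference from the diff: on deadline expiry the aggregator returns a `TransientProviderError` naming the authority, whereas this sketch simply surfaces the last error seen.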
// SPDX-License-Identifier: Apache-2.0 -use crate::abi::{eth_sui_bridge, EthBridgeEvent, EthERC20, EthSuiBridge}; +use crate::abi::{eth_sui_bridge, EthSuiBridge}; use crate::client::bridge_authority_aggregator::BridgeAuthorityAggregator; use crate::crypto::BridgeAuthorityKeyPair; +use crate::e2e_tests::test_utils::TestClusterWrapperBuilder; use crate::e2e_tests::test_utils::{ - get_signatures, send_eth_tx_and_get_tx_receipt, BridgeTestClusterBuilder, + get_signatures, initiate_bridge_erc20_to_sui, initiate_bridge_eth_to_sui, + initiate_bridge_sui_to_eth, send_eth_tx_and_get_tx_receipt, BridgeTestClusterBuilder, }; -use crate::e2e_tests::test_utils::{BridgeTestCluster, TestClusterWrapperBuilder}; use crate::eth_transaction_builder::build_eth_transaction; use crate::events::{ SuiBridgeEvent, SuiToEthTokenBridgeV1, TokenTransferApproved, TokenTransferClaimed, }; -use crate::sui_client::SuiBridgeClient; use crate::sui_transaction_builder::build_add_tokens_on_sui_transaction; -use crate::types::{AddTokensOnEvmAction, BridgeAction, BridgeActionStatus, SuiToEthBridgeAction}; +use crate::types::{AddTokensOnEvmAction, BridgeAction}; use crate::utils::publish_and_register_coins_return_add_coins_on_sui_action; -use crate::utils::EthSigner; use crate::BRIDGE_ENABLE_PROTOCOL_VERSION; -use eth_sui_bridge::EthSuiBridgeEvents; use ethers::prelude::*; use ethers::types::Address as EthAddress; -use move_core_types::ident_str; -use std::collections::{HashMap, HashSet}; +use std::collections::HashSet; use sui_json_rpc_api::BridgeReadApiClient; use sui_types::crypto::get_key_pair; use test_cluster::TestClusterBuilder; use std::path::Path; -use anyhow::anyhow; use std::sync::Arc; -use sui_json_rpc_types::{ - SuiExecutionStatus, SuiTransactionBlockEffectsAPI, SuiTransactionBlockResponse, -}; -use sui_sdk::wallet_context::WalletContext; -use sui_sdk::SuiClient; -use sui_types::base_types::{ObjectRef, SuiAddress}; +use sui_json_rpc_types::{SuiExecutionStatus, SuiTransactionBlockEffectsAPI}; use sui_types::bridge::{ - get_bridge, BridgeChainId, BridgeTokenMetadata, BridgeTrait, BRIDGE_MODULE_NAME, TOKEN_ID_ETH, + get_bridge, BridgeChainId, BridgeTokenMetadata, BridgeTrait, TOKEN_ID_ETH, }; -use sui_types::programmable_transaction_builder::ProgrammableTransactionBuilder; -use sui_types::transaction::{ObjectArg, TransactionData}; -use sui_types::{TypeTag, BRIDGE_PACKAGE_ID, SUI_BRIDGE_OBJECT_ID}; -use tap::TapFallible; +use sui_types::SUI_BRIDGE_OBJECT_ID; use tracing::info; #[tokio::test(flavor = "multi_thread", worker_threads = 8)] @@ -190,7 +178,6 @@ async fn test_add_new_coins_on_sui_and_eth() { .with_num_validators(3) .build() .await; - let bridge_arg = bridge_test_cluster.get_mut_bridge_arg().await.unwrap(); // Register tokens on Sui @@ -239,7 +226,7 @@ async fn test_add_new_coins_on_sui_and_eth() { .await .expect("Failed to get bridge committee"), ); - let agg = BridgeAuthorityAggregator::new(bridge_committee); + let agg = BridgeAuthorityAggregator::new_for_testing(bridge_committee); let certified_sui_action = agg .request_committee_signatures(sui_action) .await @@ -427,343 +414,3 @@ async fn test_bridge_api_compatibility() { .await .unwrap(); } - -pub(crate) async fn deposit_native_eth_to_sol_contract( - signer: &EthSigner, - contract_address: EthAddress, - sui_recipient_address: SuiAddress, - sui_chain_id: BridgeChainId, - amount: u64, -) -> ContractCall { - let contract = EthSuiBridge::new(contract_address, signer.clone().into()); - let sui_recipient_address = sui_recipient_address.to_vec().into(); - let 
amount = U256::from(amount) * U256::exp10(18); // 1 ETH - contract - .bridge_eth(sui_recipient_address, sui_chain_id as u8) - .value(amount) -} - -async fn deposit_eth_to_sui_package( - sui_client: &SuiClient, - sui_address: SuiAddress, - wallet_context: &WalletContext, - target_chain: BridgeChainId, - target_address: EthAddress, - token: ObjectRef, - bridge_object_arg: ObjectArg, - sui_token_type_tags: &HashMap, -) -> Result { - let mut builder = ProgrammableTransactionBuilder::new(); - let arg_target_chain = builder.pure(target_chain as u8).unwrap(); - let arg_target_address = builder.pure(target_address.as_bytes()).unwrap(); - let arg_token = builder.obj(ObjectArg::ImmOrOwnedObject(token)).unwrap(); - let arg_bridge = builder.obj(bridge_object_arg).unwrap(); - - builder.programmable_move_call( - BRIDGE_PACKAGE_ID, - BRIDGE_MODULE_NAME.to_owned(), - ident_str!("send_token").to_owned(), - vec![sui_token_type_tags.get(&TOKEN_ID_ETH).unwrap().clone()], - vec![arg_bridge, arg_target_chain, arg_target_address, arg_token], - ); - - let pt = builder.finish(); - let gas_object_ref = wallet_context - .get_one_gas_object_owned_by_address(sui_address) - .await - .unwrap() - .unwrap(); - let tx_data = TransactionData::new_programmable( - sui_address, - vec![gas_object_ref], - pt, - 500_000_000, - sui_client - .governance_api() - .get_reference_gas_price() - .await - .unwrap(), - ); - let tx = wallet_context.sign_transaction(&tx_data); - wallet_context.execute_transaction_may_fail(tx).await -} - -pub async fn initiate_bridge_erc20_to_sui( - bridge_test_cluster: &BridgeTestCluster, - amount_u64: u64, - token_address: EthAddress, - token_id: u8, - nonce: u64, -) -> Result<(), anyhow::Error> { - let (eth_signer, eth_address) = bridge_test_cluster - .get_eth_signer_and_address() - .await - .unwrap(); - - // First, mint ERC20 tokens to the signer - let contract = EthERC20::new(token_address, eth_signer.clone().into()); - let decimal = contract.decimals().await? 
as usize; - let amount = U256::from(amount_u64) * U256::exp10(decimal); - let sui_amount = amount.as_u64(); - let mint_call = contract.mint(eth_address, amount); - let mint_tx_receipt = send_eth_tx_and_get_tx_receipt(mint_call).await; - assert_eq!(mint_tx_receipt.status.unwrap().as_u64(), 1); - - // Second, set allowance - let allowance_call = contract.approve(bridge_test_cluster.contracts().sui_bridge, amount); - let allowance_tx_receipt = send_eth_tx_and_get_tx_receipt(allowance_call).await; - assert_eq!(allowance_tx_receipt.status.unwrap().as_u64(), 1); - - // Third, deposit to bridge - let sui_recipient_address = bridge_test_cluster.sui_user_address(); - let sui_chain_id = bridge_test_cluster.sui_chain_id(); - let eth_chain_id = bridge_test_cluster.eth_chain_id(); - - info!( - "Depositing ERC20 (token id:{}, token_address: {}) to Solidity contract", - token_id, token_address - ); - let contract = EthSuiBridge::new( - bridge_test_cluster.contracts().sui_bridge, - eth_signer.clone().into(), - ); - let deposit_call = contract.bridge_erc20( - token_id, - amount, - sui_recipient_address.to_vec().into(), - sui_chain_id as u8, - ); - let tx_receipt = send_eth_tx_and_get_tx_receipt(deposit_call).await; - let eth_bridge_event = tx_receipt - .logs - .iter() - .find_map(EthBridgeEvent::try_from_log) - .unwrap(); - let EthBridgeEvent::EthSuiBridgeEvents(EthSuiBridgeEvents::TokensDepositedFilter( - eth_bridge_event, - )) = eth_bridge_event - else { - unreachable!(); - }; - // assert eth log matches - assert_eq!(eth_bridge_event.source_chain_id, eth_chain_id as u8); - assert_eq!(eth_bridge_event.nonce, nonce); - assert_eq!(eth_bridge_event.destination_chain_id, sui_chain_id as u8); - assert_eq!(eth_bridge_event.token_id, token_id); - assert_eq!(eth_bridge_event.sui_adjusted_amount, sui_amount); - assert_eq!(eth_bridge_event.sender_address, eth_address); - assert_eq!( - eth_bridge_event.recipient_address, - sui_recipient_address.to_vec() - ); - info!( - "Deposited ERC20 (token id:{}, token_address: {}) to Solidity contract", - token_id, token_address - ); - - wait_for_transfer_action_status( - bridge_test_cluster.bridge_client(), - eth_chain_id, - nonce, - BridgeActionStatus::Claimed, - ) - .await - .tap_ok(|_| { - info!( - nonce, - token_id, amount_u64, "Eth to Sui bridge transfer claimed" - ); - }) -} - -pub async fn initiate_bridge_eth_to_sui( - bridge_test_cluster: &BridgeTestCluster, - amount: u64, - nonce: u64, -) -> Result<(), anyhow::Error> { - info!("Depositing native Ether to Solidity contract, nonce: {nonce}, amount: {amount}"); - let (eth_signer, eth_address) = bridge_test_cluster - .get_eth_signer_and_address() - .await - .unwrap(); - - let sui_address = bridge_test_cluster.sui_user_address(); - let sui_chain_id = bridge_test_cluster.sui_chain_id(); - let eth_chain_id = bridge_test_cluster.eth_chain_id(); - let token_id = TOKEN_ID_ETH; - - let sui_amount = (U256::from(amount) * U256::exp10(8)).as_u64(); // DP for Ether on Sui - - let eth_tx = deposit_native_eth_to_sol_contract( - ð_signer, - bridge_test_cluster.contracts().sui_bridge, - sui_address, - sui_chain_id, - amount, - ) - .await; - let tx_receipt = send_eth_tx_and_get_tx_receipt(eth_tx).await; - let eth_bridge_event = tx_receipt - .logs - .iter() - .find_map(EthBridgeEvent::try_from_log) - .unwrap(); - let EthBridgeEvent::EthSuiBridgeEvents(EthSuiBridgeEvents::TokensDepositedFilter( - eth_bridge_event, - )) = eth_bridge_event - else { - unreachable!(); - }; - // assert eth log matches - 
assert_eq!(eth_bridge_event.source_chain_id, eth_chain_id as u8); - assert_eq!(eth_bridge_event.nonce, nonce); - assert_eq!(eth_bridge_event.destination_chain_id, sui_chain_id as u8); - assert_eq!(eth_bridge_event.token_id, token_id); - assert_eq!(eth_bridge_event.sui_adjusted_amount, sui_amount); - assert_eq!(eth_bridge_event.sender_address, eth_address); - assert_eq!(eth_bridge_event.recipient_address, sui_address.to_vec()); - info!( - "Deposited Eth to Solidity contract, block: {:?}", - tx_receipt.block_number - ); - - wait_for_transfer_action_status( - bridge_test_cluster.bridge_client(), - eth_chain_id, - nonce, - BridgeActionStatus::Claimed, - ) - .await - .tap_ok(|_| { - info!("Eth to Sui bridge transfer claimed"); - }) -} - -pub async fn initiate_bridge_sui_to_eth( - bridge_test_cluster: &BridgeTestCluster, - eth_address: EthAddress, - token: ObjectRef, - nonce: u64, - sui_amount: u64, -) -> Result { - let bridge_object_arg = bridge_test_cluster - .bridge_client() - .get_mutable_bridge_object_arg_must_succeed() - .await; - let sui_client = bridge_test_cluster.sui_client(); - let token_types = bridge_test_cluster - .bridge_client() - .get_token_id_map() - .await - .unwrap(); - let sui_address = bridge_test_cluster.sui_user_address(); - - let resp = match deposit_eth_to_sui_package( - sui_client, - sui_address, - bridge_test_cluster.wallet(), - bridge_test_cluster.eth_chain_id(), - eth_address, - token, - bridge_object_arg, - &token_types, - ) - .await - { - Ok(resp) => { - if !resp.status_ok().unwrap() { - return Err(anyhow!("Sui TX error")); - } else { - resp - } - } - Err(e) => return Err(e), - }; - - let sui_events = resp.events.unwrap().data; - let bridge_event = sui_events - .iter() - .filter_map(|e| { - let sui_bridge_event = SuiBridgeEvent::try_from_sui_event(e).unwrap()?; - sui_bridge_event.try_into_bridge_action(e.id.tx_digest, e.id.event_seq as u16) - }) - .find_map(|e| { - if let BridgeAction::SuiToEthBridgeAction(a) = e { - Some(a) - } else { - None - } - }) - .unwrap(); - info!("Deposited Eth to move package"); - assert_eq!(bridge_event.sui_bridge_event.nonce, nonce); - assert_eq!( - bridge_event.sui_bridge_event.sui_chain_id, - bridge_test_cluster.sui_chain_id() - ); - assert_eq!( - bridge_event.sui_bridge_event.eth_chain_id, - bridge_test_cluster.eth_chain_id() - ); - assert_eq!(bridge_event.sui_bridge_event.sui_address, sui_address); - assert_eq!(bridge_event.sui_bridge_event.eth_address, eth_address); - assert_eq!(bridge_event.sui_bridge_event.token_id, TOKEN_ID_ETH); - assert_eq!( - bridge_event.sui_bridge_event.amount_sui_adjusted, - sui_amount - ); - - // Wait for the bridge action to be approved - wait_for_transfer_action_status( - bridge_test_cluster.bridge_client(), - bridge_test_cluster.sui_chain_id(), - nonce, - BridgeActionStatus::Approved, - ) - .await - .unwrap(); - info!("Sui to Eth bridge transfer approved."); - - Ok(bridge_event) -} - -async fn wait_for_transfer_action_status( - sui_bridge_client: &SuiBridgeClient, - chain_id: BridgeChainId, - nonce: u64, - status: BridgeActionStatus, -) -> Result<(), anyhow::Error> { - // Wait for the bridge action to be approved - let now = std::time::Instant::now(); - info!( - "Waiting for onchain status {:?}. 
chain: {:?}, nonce: {nonce}", - status, chain_id as u8 - ); - loop { - let timer = std::time::Instant::now(); - let res = sui_bridge_client - .get_token_transfer_action_onchain_status_until_success(chain_id as u8, nonce) - .await; - info!( - "get_token_transfer_action_onchain_status_until_success took {:?}, status: {:?}", - timer.elapsed(), - res - ); - - if res == status { - info!( - "detected on chain status {:?}. chain: {:?}, nonce: {nonce}", - status, chain_id as u8 - ); - return Ok(()); - } - if now.elapsed().as_secs() > 60 { - return Err(anyhow!( - "Timeout waiting for token transfer action to be {:?}. chain_id: {chain_id:?}, nonce: {nonce}. Time elapsed: {:?}", - status, - now.elapsed(), - )); - } - tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; - } -} diff --git a/crates/sui-bridge/src/e2e_tests/complex.rs b/crates/sui-bridge/src/e2e_tests/complex.rs index d822074146ae6..a52c3649d2f1c 100644 --- a/crates/sui-bridge/src/e2e_tests/complex.rs +++ b/crates/sui-bridge/src/e2e_tests/complex.rs @@ -2,9 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 use crate::client::bridge_authority_aggregator::BridgeAuthorityAggregator; -use crate::e2e_tests::basic::initiate_bridge_eth_to_sui; -use crate::e2e_tests::basic::initiate_bridge_sui_to_eth; -use crate::e2e_tests::test_utils::BridgeTestClusterBuilder; + +use crate::e2e_tests::test_utils::{ + initiate_bridge_eth_to_sui, initiate_bridge_sui_to_eth, BridgeTestClusterBuilder, +}; use crate::sui_transaction_builder::build_sui_transaction; use crate::types::{BridgeAction, EmergencyAction}; use crate::types::{BridgeActionStatus, EmergencyActionType}; @@ -70,7 +71,7 @@ async fn test_sui_bridge_paused() { // get pause bridge signatures from committee let bridge_committee = Arc::new(bridge_client.get_bridge_committee().await.unwrap()); - let agg = BridgeAuthorityAggregator::new(bridge_committee); + let agg = BridgeAuthorityAggregator::new_for_testing(bridge_committee); let certified_action = agg .request_committee_signatures(pause_action) .await diff --git a/crates/sui-bridge/src/e2e_tests/mod.rs b/crates/sui-bridge/src/e2e_tests/mod.rs index 26ee8f143271a..9b88cebf9d5d3 100644 --- a/crates/sui-bridge/src/e2e_tests/mod.rs +++ b/crates/sui-bridge/src/e2e_tests/mod.rs @@ -1,6 +1,8 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +#[cfg(test)] mod basic; +#[cfg(test)] mod complex; pub mod test_utils; diff --git a/crates/sui-bridge/src/e2e_tests/test_utils.rs b/crates/sui-bridge/src/e2e_tests/test_utils.rs index 187f28d5eb9b0..6a95435f26011 100644 --- a/crates/sui-bridge/src/e2e_tests/test_utils.rs +++ b/crates/sui-bridge/src/e2e_tests/test_utils.rs @@ -1,8 +1,8 @@ // Copyright (c) Mysten Labs, Inc. 
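With `initiate_bridge_eth_to_sui`, `initiate_bridge_erc20_to_sui`, and `initiate_bridge_sui_to_eth` moved into `test_utils` and made `pub`, other crates can drive end-to-end transfers in their own tests, which is exactly how the new `sui-bridge-indexer` integration test works. A minimal sketch of such a downstream test, assuming a dev-dependency on `sui-bridge` (presumably with the new `test-utils` feature enabled; the feature wiring is not fully shown in this diff):

```rust
// Sketch: a downstream integration test reusing the now-public e2e helpers.
use sui_bridge::e2e_tests::test_utils::{initiate_bridge_eth_to_sui, BridgeTestClusterBuilder};

#[tokio::test]
async fn bridge_one_eth() {
    // Builder options mirror setup_bridge_env in the indexer test above.
    let cluster = BridgeTestClusterBuilder::new()
        .with_eth_env(true)
        .with_bridge_cluster(true)
        .with_num_validators(3)
        .build()
        .await;

    // Deposit native Ether with nonce 0 and wait until the transfer is
    // claimed on Sui, as the indexer test does.
    initiate_bridge_eth_to_sui(&cluster, 1000, 0).await.unwrap();
}
```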
// SPDX-License-Identifier: Apache-2.0 -use crate::abi::EthBridgeCommittee; use crate::abi::EthBridgeConfig; +use crate::abi::{EthBridgeCommittee, EthBridgeEvent, EthERC20, EthSuiBridge, EthSuiBridgeEvents}; use crate::config::default_ed25519_key_pair; use crate::crypto::BridgeAuthorityKeyPair; use crate::crypto::BridgeAuthorityPublicKeyBytes; @@ -12,10 +12,10 @@ use crate::metrics::BridgeMetrics; use crate::server::BridgeNodePublicMetadata; use crate::sui_transaction_builder::build_add_tokens_on_sui_transaction; use crate::sui_transaction_builder::build_committee_register_transaction; -use crate::types::BridgeAction; use crate::types::BridgeCommitteeValiditySignInfo; use crate::types::CertifiedBridgeAction; use crate::types::VerifiedCertifiedBridgeAction; +use crate::types::{BridgeAction, BridgeActionStatus, SuiToEthBridgeAction}; use crate::utils::get_eth_signer_client; use crate::utils::publish_and_register_coins_return_add_coins_on_sui_action; use crate::utils::wait_for_server_to_be_up; @@ -23,13 +23,13 @@ use crate::utils::EthSigner; use ethers::types::Address as EthAddress; use futures::future::join_all; use futures::Future; -use move_core_types::language_storage::StructTag; +use move_core_types::language_storage::{StructTag, TypeTag}; use prometheus::Registry; use rand::rngs::SmallRng; use rand::{Rng, SeedableRng}; use serde::{Deserialize, Serialize}; -use std::collections::BTreeMap; use std::collections::HashSet; +use std::collections::{BTreeMap, HashMap}; use std::fs::File; use std::fs::{self, DirBuilder}; use std::io::{Read, Write}; @@ -48,12 +48,12 @@ use sui_json_rpc_types::SuiTransactionBlockResponseQuery; use sui_json_rpc_types::TransactionFilter; use sui_sdk::wallet_context::WalletContext; use sui_test_transaction_builder::TestTransactionBuilder; -use sui_types::base_types::ObjectID; -use sui_types::bridge::get_bridge; +use sui_types::base_types::{ObjectID, ObjectRef}; use sui_types::bridge::get_bridge_obj_initial_shared_version; use sui_types::bridge::BridgeChainId; use sui_types::bridge::BridgeSummary; use sui_types::bridge::BridgeTrait; +use sui_types::bridge::{get_bridge, BRIDGE_MODULE_NAME}; use sui_types::bridge::{TOKEN_ID_BTC, TOKEN_ID_ETH, TOKEN_ID_USDC, TOKEN_ID_USDT}; use sui_types::committee::TOTAL_VOTING_POWER; use sui_types::crypto::get_key_pair; @@ -61,7 +61,7 @@ use sui_types::crypto::ToFromBytes; use sui_types::digests::TransactionDigest; use sui_types::object::Object; use sui_types::transaction::{ObjectArg, Transaction, TransactionData}; -use sui_types::SUI_BRIDGE_OBJECT_ID; +use sui_types::{BRIDGE_PACKAGE_ID, SUI_BRIDGE_OBJECT_ID}; use tokio::join; use tokio::task::JoinHandle; use tokio::time::Instant; @@ -73,13 +73,17 @@ use crate::config::{BridgeNodeConfig, EthConfig, SuiConfig}; use crate::node::run_bridge_node; use crate::sui_client::SuiBridgeClient; use crate::BRIDGE_ENABLE_PROTOCOL_VERSION; +use anyhow::anyhow; use ethers::prelude::*; +use move_core_types::ident_str; use std::process::Child; use sui_config::local_ip_utils::get_available_port; use sui_sdk::SuiClient; use sui_types::base_types::SuiAddress; use sui_types::crypto::EncodeDecodeBase64; use sui_types::crypto::KeypairTraits; +use sui_types::programmable_transaction_builder::ProgrammableTransactionBuilder; +use tap::TapFallible; use tempfile::tempdir; use test_cluster::TestCluster; use test_cluster::TestClusterBuilder; @@ -294,7 +298,7 @@ impl BridgeTestCluster { self.eth_chain_id } - pub(crate) fn eth_env(&self) -> &EthBridgeEnvironment { + pub fn eth_env(&self) -> &EthBridgeEnvironment { 
&self.eth_environment } @@ -716,7 +720,7 @@ impl EthBridgeEnvironment { self.contracts.as_ref().unwrap() } - pub(crate) fn get_bridge_config( + pub fn get_bridge_config( &self, ) -> EthBridgeConfig> { let provider = Arc::new( @@ -727,7 +731,7 @@ impl EthBridgeEnvironment { EthBridgeConfig::new(self.contracts().bridge_config, provider.clone()) } - pub(crate) async fn get_supported_token(&self, token_id: u8) -> (EthAddress, u8, u64) { + pub async fn get_supported_token(&self, token_id: u8) -> (EthAddress, u8, u64) { let config = self.get_bridge_config(); let token_address = config.token_address_of(token_id).call().await.unwrap(); let token_sui_decimal = config.token_sui_decimal_of(token_id).call().await.unwrap(); @@ -804,6 +808,7 @@ pub(crate) async fn start_bridge_cluster( }, metrics_key_pair: default_ed25519_key_pair(), metrics: None, + watchdog_config: None, }; // Spawn bridge node in memory handles.push( @@ -819,7 +824,7 @@ pub(crate) async fn start_bridge_cluster( handles } -pub(crate) async fn get_signatures( +pub async fn get_signatures( sui_bridge_client: &SuiBridgeClient, nonce: u64, sui_chain_id: u8, @@ -1210,3 +1215,343 @@ async fn trigger_reconfiguration_if_not_yet_and_assert_bridge_committee_initiali bridge.committee().members.contents.len() ); } + +pub async fn initiate_bridge_eth_to_sui( + bridge_test_cluster: &BridgeTestCluster, + amount: u64, + nonce: u64, +) -> Result<(), anyhow::Error> { + info!("Depositing native Ether to Solidity contract, nonce: {nonce}, amount: {amount}"); + let (eth_signer, eth_address) = bridge_test_cluster + .get_eth_signer_and_address() + .await + .unwrap(); + + let sui_address = bridge_test_cluster.sui_user_address(); + let sui_chain_id = bridge_test_cluster.sui_chain_id(); + let eth_chain_id = bridge_test_cluster.eth_chain_id(); + let token_id = TOKEN_ID_ETH; + + let sui_amount = (U256::from(amount) * U256::exp10(8)).as_u64(); // DP for Ether on Sui + + let eth_tx = deposit_native_eth_to_sol_contract( + ð_signer, + bridge_test_cluster.contracts().sui_bridge, + sui_address, + sui_chain_id, + amount, + ) + .await; + let tx_receipt = send_eth_tx_and_get_tx_receipt(eth_tx).await; + let eth_bridge_event = tx_receipt + .logs + .iter() + .find_map(EthBridgeEvent::try_from_log) + .unwrap(); + let EthBridgeEvent::EthSuiBridgeEvents(EthSuiBridgeEvents::TokensDepositedFilter( + eth_bridge_event, + )) = eth_bridge_event + else { + unreachable!(); + }; + // assert eth log matches + assert_eq!(eth_bridge_event.source_chain_id, eth_chain_id as u8); + assert_eq!(eth_bridge_event.nonce, nonce); + assert_eq!(eth_bridge_event.destination_chain_id, sui_chain_id as u8); + assert_eq!(eth_bridge_event.token_id, token_id); + assert_eq!(eth_bridge_event.sui_adjusted_amount, sui_amount); + assert_eq!(eth_bridge_event.sender_address, eth_address); + assert_eq!(eth_bridge_event.recipient_address, sui_address.to_vec()); + info!( + "Deposited Eth to Solidity contract, block: {:?}", + tx_receipt.block_number + ); + + wait_for_transfer_action_status( + bridge_test_cluster.bridge_client(), + eth_chain_id, + nonce, + BridgeActionStatus::Claimed, + ) + .await + .tap_ok(|_| { + info!("Eth to Sui bridge transfer claimed"); + }) +} + +pub async fn initiate_bridge_sui_to_eth( + bridge_test_cluster: &BridgeTestCluster, + eth_address: EthAddress, + token: ObjectRef, + nonce: u64, + sui_amount: u64, +) -> Result { + let bridge_object_arg = bridge_test_cluster + .bridge_client() + .get_mutable_bridge_object_arg_must_succeed() + .await; + let sui_client = bridge_test_cluster.sui_client(); 
+ let token_types = bridge_test_cluster + .bridge_client() + .get_token_id_map() + .await + .unwrap(); + let sui_address = bridge_test_cluster.sui_user_address(); + + let resp = match deposit_eth_to_sui_package( + sui_client, + sui_address, + bridge_test_cluster.wallet(), + bridge_test_cluster.eth_chain_id(), + eth_address, + token, + bridge_object_arg, + &token_types, + ) + .await + { + Ok(resp) => { + if !resp.status_ok().unwrap() { + return Err(anyhow!("Sui TX error")); + } else { + resp + } + } + Err(e) => return Err(e), + }; + + let sui_events = resp.events.unwrap().data; + let bridge_event = sui_events + .iter() + .filter_map(|e| { + let sui_bridge_event = SuiBridgeEvent::try_from_sui_event(e).unwrap()?; + sui_bridge_event.try_into_bridge_action(e.id.tx_digest, e.id.event_seq as u16) + }) + .find_map(|e| { + if let BridgeAction::SuiToEthBridgeAction(a) = e { + Some(a) + } else { + None + } + }) + .unwrap(); + info!("Deposited Eth to move package"); + assert_eq!(bridge_event.sui_bridge_event.nonce, nonce); + assert_eq!( + bridge_event.sui_bridge_event.sui_chain_id, + bridge_test_cluster.sui_chain_id() + ); + assert_eq!( + bridge_event.sui_bridge_event.eth_chain_id, + bridge_test_cluster.eth_chain_id() + ); + assert_eq!(bridge_event.sui_bridge_event.sui_address, sui_address); + assert_eq!(bridge_event.sui_bridge_event.eth_address, eth_address); + assert_eq!(bridge_event.sui_bridge_event.token_id, TOKEN_ID_ETH); + assert_eq!( + bridge_event.sui_bridge_event.amount_sui_adjusted, + sui_amount + ); + + // Wait for the bridge action to be approved + wait_for_transfer_action_status( + bridge_test_cluster.bridge_client(), + bridge_test_cluster.sui_chain_id(), + nonce, + BridgeActionStatus::Approved, + ) + .await + .unwrap(); + info!("Sui to Eth bridge transfer approved."); + + Ok(bridge_event) +} + +async fn wait_for_transfer_action_status( + sui_bridge_client: &SuiBridgeClient, + chain_id: BridgeChainId, + nonce: u64, + status: BridgeActionStatus, +) -> Result<(), anyhow::Error> { + // Wait for the bridge action to be approved + let now = std::time::Instant::now(); + info!( + "Waiting for onchain status {:?}. chain: {:?}, nonce: {nonce}", + status, chain_id as u8 + ); + loop { + let timer = std::time::Instant::now(); + let res = sui_bridge_client + .get_token_transfer_action_onchain_status_until_success(chain_id as u8, nonce) + .await; + info!( + "get_token_transfer_action_onchain_status_until_success took {:?}, status: {:?}", + timer.elapsed(), + res + ); + + if res == status { + info!( + "detected on chain status {:?}. chain: {:?}, nonce: {nonce}", + status, chain_id as u8 + ); + return Ok(()); + } + if now.elapsed().as_secs() > 60 { + return Err(anyhow!( + "Timeout waiting for token transfer action to be {:?}. chain_id: {chain_id:?}, nonce: {nonce}. 
Time elapsed: {:?}", + status, + now.elapsed(), + )); + } + tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; + } +} + +async fn deposit_eth_to_sui_package( + sui_client: &SuiClient, + sui_address: SuiAddress, + wallet_context: &WalletContext, + target_chain: BridgeChainId, + target_address: EthAddress, + token: ObjectRef, + bridge_object_arg: ObjectArg, + sui_token_type_tags: &HashMap, +) -> Result { + let mut builder = ProgrammableTransactionBuilder::new(); + let arg_target_chain = builder.pure(target_chain as u8).unwrap(); + let arg_target_address = builder.pure(target_address.as_bytes()).unwrap(); + let arg_token = builder.obj(ObjectArg::ImmOrOwnedObject(token)).unwrap(); + let arg_bridge = builder.obj(bridge_object_arg).unwrap(); + + builder.programmable_move_call( + BRIDGE_PACKAGE_ID, + BRIDGE_MODULE_NAME.to_owned(), + ident_str!("send_token").to_owned(), + vec![sui_token_type_tags.get(&TOKEN_ID_ETH).unwrap().clone()], + vec![arg_bridge, arg_target_chain, arg_target_address, arg_token], + ); + + let pt = builder.finish(); + let gas_object_ref = wallet_context + .get_one_gas_object_owned_by_address(sui_address) + .await + .unwrap() + .unwrap(); + let tx_data = TransactionData::new_programmable( + sui_address, + vec![gas_object_ref], + pt, + 500_000_000, + sui_client + .governance_api() + .get_reference_gas_price() + .await + .unwrap(), + ); + let tx = wallet_context.sign_transaction(&tx_data); + wallet_context.execute_transaction_may_fail(tx).await +} + +pub async fn initiate_bridge_erc20_to_sui( + bridge_test_cluster: &BridgeTestCluster, + amount_u64: u64, + token_address: EthAddress, + token_id: u8, + nonce: u64, +) -> Result<(), anyhow::Error> { + let (eth_signer, eth_address) = bridge_test_cluster + .get_eth_signer_and_address() + .await + .unwrap(); + + // First, mint ERC20 tokens to the signer + let contract = EthERC20::new(token_address, eth_signer.clone().into()); + let decimal = contract.decimals().await? 
as usize; + let amount = U256::from(amount_u64) * U256::exp10(decimal); + let sui_amount = amount.as_u64(); + let mint_call = contract.mint(eth_address, amount); + let mint_tx_receipt = send_eth_tx_and_get_tx_receipt(mint_call).await; + assert_eq!(mint_tx_receipt.status.unwrap().as_u64(), 1); + + // Second, set allowance + let allowance_call = contract.approve(bridge_test_cluster.contracts().sui_bridge, amount); + let allowance_tx_receipt = send_eth_tx_and_get_tx_receipt(allowance_call).await; + assert_eq!(allowance_tx_receipt.status.unwrap().as_u64(), 1); + + // Third, deposit to bridge + let sui_recipient_address = bridge_test_cluster.sui_user_address(); + let sui_chain_id = bridge_test_cluster.sui_chain_id(); + let eth_chain_id = bridge_test_cluster.eth_chain_id(); + + info!( + "Depositing ERC20 (token id:{}, token_address: {}) to Solidity contract", + token_id, token_address + ); + let contract = EthSuiBridge::new( + bridge_test_cluster.contracts().sui_bridge, + eth_signer.clone().into(), + ); + let deposit_call = contract.bridge_erc20( + token_id, + amount, + sui_recipient_address.to_vec().into(), + sui_chain_id as u8, + ); + let tx_receipt = send_eth_tx_and_get_tx_receipt(deposit_call).await; + let eth_bridge_event = tx_receipt + .logs + .iter() + .find_map(EthBridgeEvent::try_from_log) + .unwrap(); + let EthBridgeEvent::EthSuiBridgeEvents(EthSuiBridgeEvents::TokensDepositedFilter( + eth_bridge_event, + )) = eth_bridge_event + else { + unreachable!(); + }; + // assert eth log matches + assert_eq!(eth_bridge_event.source_chain_id, eth_chain_id as u8); + assert_eq!(eth_bridge_event.nonce, nonce); + assert_eq!(eth_bridge_event.destination_chain_id, sui_chain_id as u8); + assert_eq!(eth_bridge_event.token_id, token_id); + assert_eq!(eth_bridge_event.sui_adjusted_amount, sui_amount); + assert_eq!(eth_bridge_event.sender_address, eth_address); + assert_eq!( + eth_bridge_event.recipient_address, + sui_recipient_address.to_vec() + ); + info!( + "Deposited ERC20 (token id:{}, token_address: {}) to Solidity contract", + token_id, token_address + ); + + wait_for_transfer_action_status( + bridge_test_cluster.bridge_client(), + eth_chain_id, + nonce, + BridgeActionStatus::Claimed, + ) + .await + .tap_ok(|_| { + info!( + nonce, + token_id, amount_u64, "Eth to Sui bridge transfer claimed" + ); + }) +} + +pub(crate) async fn deposit_native_eth_to_sol_contract( + signer: &EthSigner, + contract_address: EthAddress, + sui_recipient_address: SuiAddress, + sui_chain_id: BridgeChainId, + amount: u64, +) -> ContractCall { + let contract = EthSuiBridge::new(contract_address, signer.clone().into()); + let sui_recipient_address = sui_recipient_address.to_vec().into(); + let amount = U256::from(amount) * U256::exp10(18); // 1 ETH + contract + .bridge_eth(sui_recipient_address, sui_chain_id as u8) + .value(amount) +} diff --git a/crates/sui-bridge/src/eth_client.rs b/crates/sui-bridge/src/eth_client.rs index 30f8e6d92e536..215ea5428b01b 100644 --- a/crates/sui-bridge/src/eth_client.rs +++ b/crates/sui-bridge/src/eth_client.rs @@ -36,6 +36,10 @@ impl EthClient { self_.describe().await?; Ok(self_) } + + pub fn provider(&self) -> Arc> { + Arc::new(self.provider.clone()) + } } #[cfg(test)] diff --git a/crates/sui-bridge/src/lib.rs b/crates/sui-bridge/src/lib.rs index 0a138372c50fe..80a582e994c69 100644 --- a/crates/sui-bridge/src/lib.rs +++ b/crates/sui-bridge/src/lib.rs @@ -19,24 +19,25 @@ pub mod node; pub mod orchestrator; pub mod server; pub mod storage; +pub mod sui_bridge_watchdog; pub mod sui_client; pub mod 
sui_syncer; pub mod sui_transaction_builder; pub mod types; pub mod utils; -#[cfg(test)] +#[cfg(any(feature = "test-utils", test))] pub(crate) mod eth_mock_provider; #[cfg(test)] pub(crate) mod sui_mock_client; -#[cfg(test)] +#[cfg(any(feature = "test-utils", test))] pub mod test_utils; pub const BRIDGE_ENABLE_PROTOCOL_VERSION: u64 = 45; -#[cfg(test)] +#[cfg(any(feature = "test-utils", test))] pub mod e2e_tests; #[macro_export] diff --git a/crates/sui-bridge/src/metrics.rs b/crates/sui-bridge/src/metrics.rs index 6d1fdda6e2a7c..c147787dc11c6 100644 --- a/crates/sui-bridge/src/metrics.rs +++ b/crates/sui-bridge/src/metrics.rs @@ -114,6 +114,9 @@ pub struct BridgeMetrics { pub(crate) sui_rpc_errors: IntCounterVec, pub(crate) observed_governance_actions: IntCounterVec, pub(crate) current_bridge_voting_rights: IntGaugeVec, + + pub(crate) auth_agg_ok_responses: IntCounterVec, + pub(crate) auth_agg_bad_responses: IntCounterVec, } impl BridgeMetrics { @@ -325,6 +328,20 @@ impl BridgeMetrics { registry ) .unwrap(), + auth_agg_ok_responses: register_int_counter_vec_with_registry!( + "bridge_auth_agg_ok_responses", + "Total number of ok respones from auth agg", + &["authority"], + registry, + ) + .unwrap(), + auth_agg_bad_responses: register_int_counter_vec_with_registry!( + "bridge_auth_agg_bad_responses", + "Total number of bad respones from auth agg", + &["authority"], + registry, + ) + .unwrap(), } } diff --git a/crates/sui-bridge/src/monitor.rs b/crates/sui-bridge/src/monitor.rs index af169737f81d5..af837c394acff 100644 --- a/crates/sui-bridge/src/monitor.rs +++ b/crates/sui-bridge/src/monitor.rs @@ -162,9 +162,12 @@ where Duration::from_secs(10), ) .await; - bridge_auth_agg.store(Arc::new(BridgeAuthorityAggregator::new(Arc::new( - new_committee, - )))); + let committee_names = bridge_auth_agg.load().committee_keys_to_names.clone(); + bridge_auth_agg.store(Arc::new(BridgeAuthorityAggregator::new( + Arc::new(new_committee), + bridge_metrics.clone(), + committee_names, + ))); info!("Committee updated with CommitteeMemberUrlUpdateEvent"); } @@ -180,9 +183,12 @@ where Duration::from_secs(10), ) .await; - bridge_auth_agg.store(Arc::new(BridgeAuthorityAggregator::new(Arc::new( - new_committee, - )))); + let committee_names = bridge_auth_agg.load().committee_keys_to_names.clone(); + bridge_auth_agg.store(Arc::new(BridgeAuthorityAggregator::new( + Arc::new(new_committee), + bridge_metrics.clone(), + committee_names, + ))); info!("Committee updated with BlocklistValidatorEvent"); } @@ -926,9 +932,9 @@ mod tests { bridge_metrics, ) = setup(); let old_committee = BridgeCommittee::new(authorities.clone()).unwrap(); - let agg = Arc::new(ArcSwap::new(Arc::new(BridgeAuthorityAggregator::new( - Arc::new(old_committee), - )))); + let agg = Arc::new(ArcSwap::new(Arc::new( + BridgeAuthorityAggregator::new_for_testing(Arc::new(old_committee)), + ))); let sui_token_type_tags = Arc::new(ArcSwap::from(Arc::new(HashMap::new()))); let _handle = tokio::task::spawn( BridgeMonitor::new( @@ -985,9 +991,9 @@ mod tests { bridge_metrics, ) = setup(); let old_committee = BridgeCommittee::new(authorities.clone()).unwrap(); - let agg = Arc::new(ArcSwap::new(Arc::new(BridgeAuthorityAggregator::new( - Arc::new(old_committee), - )))); + let agg = Arc::new(ArcSwap::new(Arc::new( + BridgeAuthorityAggregator::new_for_testing(Arc::new(old_committee)), + ))); let sui_token_type_tags = Arc::new(ArcSwap::from(Arc::new(HashMap::new()))); let _handle = tokio::task::spawn( BridgeMonitor::new( @@ -1045,9 +1051,9 @@ mod tests { frozen: 
!*bridge_pause_tx.borrow(), // toggle the bridge pause status }; let committee = BridgeCommittee::new(authorities.clone()).unwrap(); - let agg = Arc::new(ArcSwap::new(Arc::new(BridgeAuthorityAggregator::new( - Arc::new(committee), - )))); + let agg = Arc::new(ArcSwap::new(Arc::new( + BridgeAuthorityAggregator::new_for_testing(Arc::new(committee)), + ))); let sui_token_type_tags = Arc::new(ArcSwap::from(Arc::new(HashMap::new()))); let _handle = tokio::task::spawn( BridgeMonitor::new( @@ -1095,9 +1101,9 @@ mod tests { notional_value: 100000000, }; let committee = BridgeCommittee::new(authorities.clone()).unwrap(); - let agg = Arc::new(ArcSwap::new(Arc::new(BridgeAuthorityAggregator::new( - Arc::new(committee), - )))); + let agg = Arc::new(ArcSwap::new(Arc::new( + BridgeAuthorityAggregator::new_for_testing(Arc::new(committee)), + ))); let sui_token_type_tags = Arc::new(ArcSwap::from(Arc::new(HashMap::new()))); let sui_token_type_tags_clone = sui_token_type_tags.clone(); let _handle = tokio::task::spawn( diff --git a/crates/sui-bridge/src/node.rs b/crates/sui-bridge/src/node.rs index 97b0b2caf22e9..671f2b358f3c0 100644 --- a/crates/sui-bridge/src/node.rs +++ b/crates/sui-bridge/src/node.rs @@ -1,8 +1,20 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use crate::config::WatchdogConfig; +use crate::crypto::BridgeAuthorityPublicKeyBytes; +use crate::metered_eth_provider::MeteredEthHttpProvier; +use crate::sui_bridge_watchdog::eth_bridge_status::EthBridgeStatus; +use crate::sui_bridge_watchdog::eth_vault_balance::EthVaultBalance; +use crate::sui_bridge_watchdog::metrics::WatchdogMetrics; +use crate::sui_bridge_watchdog::sui_bridge_status::SuiBridgeStatus; +use crate::sui_bridge_watchdog::total_supplies::TotalSupplies; +use crate::sui_bridge_watchdog::{BridgeWatchDog, Observable}; +use crate::sui_client::SuiBridgeClient; use crate::types::BridgeCommittee; -use crate::utils::get_committee_voting_power_by_name; +use crate::utils::{ + get_committee_voting_power_by_name, get_eth_contract_addresses, get_validator_names_by_pub_keys, +}; use crate::{ action_executor::BridgeActionExecutor, client::bridge_authority_aggregator::BridgeAuthorityAggregator, @@ -17,8 +29,10 @@ use crate::{ sui_syncer::SuiSyncer, }; use arc_swap::ArcSwap; +use ethers::providers::Provider; use ethers::types::Address as EthAddress; use mysten_metrics::spawn_logged_monitored_task; +use std::collections::BTreeMap; use std::{ collections::HashMap, net::{IpAddr, Ipv4Addr, SocketAddr}, @@ -43,6 +57,7 @@ pub async fn run_bridge_node( ) -> anyhow::Result> { init_all_struct_tags(); let metrics = Arc::new(BridgeMetrics::new(&prometheus_registry)); + let watchdog_config = config.watchdog_config.clone(); let (server_config, client_config) = config.validate(metrics.clone()).await?; let sui_chain_identifier = server_config .sui_client @@ -71,12 +86,19 @@ pub async fn run_bridge_node( .await .expect("Failed to get committee"), ); - // Start Client - let _handles = if let Some(client_config) = client_config { - start_client_components(client_config, committee.clone(), metrics.clone()).await - } else { - Ok(vec![]) - }?; + let mut handles = vec![]; + + // Start watchdog + let eth_provider = server_config.eth_client.provider(); + let eth_bridge_proxy_address = server_config.eth_bridge_proxy_address; + let sui_client = server_config.sui_client.clone(); + handles.push(spawn_logged_monitored_task!(start_watchdog( + watchdog_config, + &prometheus_registry, + eth_provider, + eth_bridge_proxy_address, + sui_client + ))); // 
Update voting right metrics // Before reconfiguration happens we only set it once when the node starts @@ -86,7 +108,22 @@ pub async fn run_bridge_node( .governance_api() .get_latest_sui_system_state() .await?; - let committee_name_mapping = get_committee_voting_power_by_name(&committee, sui_system).await; + + // Start Client + if let Some(client_config) = client_config { + let committee_keys_to_names = + Arc::new(get_validator_names_by_pub_keys(&committee, &sui_system).await); + let client_components = start_client_components( + client_config, + committee.clone(), + committee_keys_to_names, + metrics.clone(), + ) + .await?; + handles.extend(client_components); + } + + let committee_name_mapping = get_committee_voting_power_by_name(&committee, &sui_system).await; for (name, voting_power) in committee_name_mapping.into_iter() { metrics .current_bridge_voting_rights @@ -113,10 +150,61 @@ pub async fn run_bridge_node( )) } +async fn start_watchdog( + watchdog_config: Option, + registry: &prometheus::Registry, + eth_provider: Arc>, + eth_bridge_proxy_address: EthAddress, + sui_client: Arc, +) { + let watchdog_metrics = WatchdogMetrics::new(registry); + let (_committee_address, _limiter_address, vault_address, _config_address, weth_address) = + get_eth_contract_addresses(eth_bridge_proxy_address, ð_provider) + .await + .unwrap_or_else(|e| panic!("get_eth_contract_addresses should not fail: {}", e)); + + let eth_vault_balance = EthVaultBalance::new( + eth_provider.clone(), + vault_address, + weth_address, + watchdog_metrics.eth_vault_balance.clone(), + ); + + let eth_bridge_status = EthBridgeStatus::new( + eth_provider, + eth_bridge_proxy_address, + watchdog_metrics.eth_bridge_paused.clone(), + ); + + let sui_bridge_status = SuiBridgeStatus::new( + sui_client.clone(), + watchdog_metrics.sui_bridge_paused.clone(), + ); + + let mut observables: Vec> = vec![ + Box::new(eth_vault_balance), + Box::new(eth_bridge_status), + Box::new(sui_bridge_status), + ]; + if let Some(watchdog_config) = watchdog_config { + if !watchdog_config.total_supplies.is_empty() { + let total_supplies = TotalSupplies::new( + Arc::new(sui_client.sui_client().clone()), + watchdog_config.total_supplies, + watchdog_metrics.total_supplies.clone(), + ); + observables.push(Box::new(total_supplies)); + } + } + + BridgeWatchDog::new(observables).run().await +} + // TODO: is there a way to clean up the overrides after it's stored in DB? async fn start_client_components( client_config: BridgeClientConfig, committee: Arc, + committee_keys_to_names: Arc>, metrics: Arc, ) -> anyhow::Result>> { let store: std::sync::Arc = @@ -154,6 +242,8 @@ async fn start_client_components( let bridge_auth_agg = Arc::new(ArcSwap::from(Arc::new(BridgeAuthorityAggregator::new( committee, + metrics.clone(), + committee_keys_to_names, )))); // TODO: should we use one query instead of two? 
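[Editor's aside] The watchdog started above is driven by the new optional `WatchdogConfig`; the only knob introduced in this change is `total_supplies`, a map from a display name to the fully qualified coin type whose `TreasuryCap` supply the `TotalSupplies` observable exports. A sketch of populating it in code, with the struct shape taken from this diff and the import path and coin type string treated as placeholders:

```rust
use std::collections::BTreeMap;
use sui_bridge::config::WatchdogConfig; // path assumed for an out-of-crate caller

/// Builds an example watchdog section: one entry per coin whose total supply
/// should be reported by the TotalSupplies observable.
fn example_watchdog_config() -> Option<WatchdogConfig> {
    Some(WatchdogConfig {
        total_supplies: BTreeMap::from([(
            "eth".to_string(),
            // Placeholder: a real entry carries the full bridged-ETH coin type,
            // as written by generate_bridge_node_config_and_write_to_file below.
            "0x<bridge_package>::eth::ETH".to_string(),
        )]),
    })
}
```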
let sui_token_type_tags = sui_client.get_token_id_map().await.unwrap(); @@ -488,6 +578,7 @@ mod tests { db_path: None, metrics_key_pair: default_ed25519_key_pair(), metrics: None, + watchdog_config: None, }; // Spawn bridge node in memory let _handle = run_bridge_node( @@ -554,6 +645,7 @@ mod tests { db_path: Some(db_path), metrics_key_pair: default_ed25519_key_pair(), metrics: None, + watchdog_config: None, }; // Spawn bridge node in memory let _handle = run_bridge_node( @@ -631,6 +723,7 @@ mod tests { db_path: Some(db_path), metrics_key_pair: default_ed25519_key_pair(), metrics: None, + watchdog_config: None, }; // Spawn bridge node in memory let _handle = run_bridge_node( diff --git a/crates/sui-bridge/src/server/mod.rs b/crates/sui-bridge/src/server/mod.rs index 7986f3483692b..8b68513e732be 100644 --- a/crates/sui-bridge/src/server/mod.rs +++ b/crates/sui-bridge/src/server/mod.rs @@ -33,7 +33,7 @@ use tracing::{info, instrument}; pub mod governance_verifier; pub mod handler; -#[cfg(test)] +#[cfg(any(feature = "test-utils", test))] pub(crate) mod mock_handler; pub const APPLICATION_JSON: &str = "application/json"; diff --git a/crates/sui-bridge/src/sui_bridge_watchdog/eth_bridge_status.rs b/crates/sui-bridge/src/sui_bridge_watchdog/eth_bridge_status.rs new file mode 100644 index 0000000000000..2df78d137b62c --- /dev/null +++ b/crates/sui-bridge/src/sui_bridge_watchdog/eth_bridge_status.rs @@ -0,0 +1,58 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +//! The EthBridgeStatus observable monitors whether the Eth Bridge is paused. + +use crate::abi::EthSuiBridge; +use crate::metered_eth_provider::MeteredEthHttpProvier; +use crate::sui_bridge_watchdog::Observable; +use async_trait::async_trait; +use ethers::providers::Provider; +use ethers::types::Address as EthAddress; +use prometheus::IntGauge; +use std::sync::Arc; +use tokio::time::Duration; +use tracing::{error, info}; + +pub struct EthBridgeStatus { + bridge_contract: EthSuiBridge>, + metric: IntGauge, +} + +impl EthBridgeStatus { + pub fn new( + provider: Arc>, + bridge_address: EthAddress, + metric: IntGauge, + ) -> Self { + let bridge_contract = EthSuiBridge::new(bridge_address, provider.clone()); + Self { + bridge_contract, + metric, + } + } +} + +#[async_trait] +impl Observable for EthBridgeStatus { + fn name(&self) -> &str { + "EthBridgeStatus" + } + + async fn observe_and_report(&self) { + let status = self.bridge_contract.paused().call().await; + match status { + Ok(status) => { + self.metric.set(status as i64); + info!("Eth Bridge Status: {:?}", status); + } + Err(e) => { + error!("Error getting eth bridge status: {:?}", e); + } + } + } + + fn interval(&self) -> Duration { + Duration::from_secs(10) + } +} diff --git a/crates/sui-bridge/src/sui_bridge_watchdog/eth_vault_balance.rs b/crates/sui-bridge/src/sui_bridge_watchdog/eth_vault_balance.rs new file mode 100644 index 0000000000000..b43b7538067d4 --- /dev/null +++ b/crates/sui-bridge/src/sui_bridge_watchdog/eth_vault_balance.rs @@ -0,0 +1,75 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use crate::abi::EthERC20; +use crate::metered_eth_provider::MeteredEthHttpProvier; +use crate::sui_bridge_watchdog::Observable; +use async_trait::async_trait; +use ethers::providers::Provider; +use ethers::types::{Address as EthAddress, U256}; +use prometheus::IntGauge; +use std::sync::Arc; +use tokio::time::Duration; +use tracing::{error, info}; + +const TEN_ZEROS: u64 = 10_u64.pow(10); + +pub struct EthVaultBalance { + coin_contract: EthERC20>, + vault_address: EthAddress, + ten_zeros: U256, + metric: IntGauge, +} + +impl EthVaultBalance { + pub fn new( + provider: Arc>, + vault_address: EthAddress, + coin_address: EthAddress, // for now this only support one coin which is WETH + metric: IntGauge, + ) -> Self { + let ten_zeros = U256::from(TEN_ZEROS); + let coin_contract = EthERC20::new(coin_address, provider); + Self { + coin_contract, + vault_address, + ten_zeros, + metric, + } + } +} + +#[async_trait] +impl Observable for EthVaultBalance { + fn name(&self) -> &str { + "EthVaultBalance" + } + + async fn observe_and_report(&self) { + match self + .coin_contract + .balance_of(self.vault_address) + .call() + .await + { + Ok(balance) => { + // Why downcasting is safe: + // 1. On Ethereum we only take the first 8 decimals into account, + // meaning the trailing 10 digits can be ignored + // 2. i64::MAX is 9_223_372_036_854_775_807, with 8 decimal places is + // 92_233_720_368. We likely won't see any balance higher than this + // in the next 12 months. + let balance = (balance / self.ten_zeros).as_u64() as i64; + self.metric.set(balance); + info!("Eth Vault Balance: {:?}", balance); + } + Err(e) => { + error!("Error getting balance from vault: {:?}", e); + } + } + } + + fn interval(&self) -> Duration { + Duration::from_secs(10) + } +} diff --git a/crates/sui-bridge/src/sui_bridge_watchdog/metrics.rs b/crates/sui-bridge/src/sui_bridge_watchdog/metrics.rs new file mode 100644 index 0000000000000..8fea209d7f43f --- /dev/null +++ b/crates/sui-bridge/src/sui_bridge_watchdog/metrics.rs @@ -0,0 +1,52 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use prometheus::{ + register_int_gauge_vec_with_registry, register_int_gauge_with_registry, IntGauge, IntGaugeVec, + Registry, +}; + +#[derive(Clone, Debug)] +pub struct WatchdogMetrics { + pub eth_vault_balance: IntGauge, + pub total_supplies: IntGaugeVec, + pub eth_bridge_paused: IntGauge, + pub sui_bridge_paused: IntGauge, +} + +impl WatchdogMetrics { + pub fn new(registry: &Registry) -> Self { + Self { + eth_vault_balance: register_int_gauge_with_registry!( + "bridge_eth_vault_balance", + "Current balance of eth vault", + registry, + ) + .unwrap(), + total_supplies: register_int_gauge_vec_with_registry!( + "bridge_total_supplies", + "Current total supplies of coins on Sui based on Treasury Cap", + &["token_name"], + registry, + ) + .unwrap(), + eth_bridge_paused: register_int_gauge_with_registry!( + "bridge_eth_bridge_paused", + "Whether the eth bridge is paused", + registry, + ) + .unwrap(), + sui_bridge_paused: register_int_gauge_with_registry!( + "bridge_sui_bridge_paused", + "Whether the sui bridge is paused", + registry, + ) + .unwrap(), + } + } + + pub fn new_for_testing() -> Self { + let registry = Registry::new(); + Self::new(®istry) + } +} diff --git a/crates/sui-bridge/src/sui_bridge_watchdog/mod.rs b/crates/sui-bridge/src/sui_bridge_watchdog/mod.rs new file mode 100644 index 0000000000000..63ed7af86990e --- /dev/null +++ b/crates/sui-bridge/src/sui_bridge_watchdog/mod.rs @@ -0,0 +1,62 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +//! The BridgeWatchDog module is responsible for monitoring the health +//! of the bridge by periodically running various observables and +//! reporting the results. + +use anyhow::Result; +use async_trait::async_trait; +use mysten_metrics::spawn_logged_monitored_task; +use tokio::time::Duration; +use tokio::time::MissedTickBehavior; +use tracing::{error_span, info, Instrument}; + +pub mod eth_bridge_status; +pub mod eth_vault_balance; +pub mod metrics; +pub mod sui_bridge_status; +pub mod total_supplies; + +pub struct BridgeWatchDog { + observables: Vec>, +} + +impl BridgeWatchDog { + pub fn new(observables: Vec>) -> Self { + Self { observables } + } + + pub async fn run(self) { + let mut handles = vec![]; + for observable in self.observables.into_iter() { + let handle = spawn_logged_monitored_task!(Self::run_observable(observable)); + handles.push(handle); + } + // Return when any task returns an error or all tasks exit. + futures::future::try_join_all(handles).await.unwrap(); + unreachable!("watch dog tasks should not exit"); + } + + async fn run_observable(observable: Box) -> Result<()> { + let mut interval = tokio::time::interval(observable.interval()); + interval.set_missed_tick_behavior(MissedTickBehavior::Skip); + let name = observable.name(); + let span = error_span!("observable", name); + loop { + info!("Running observable {}", name); + observable + .observe_and_report() + .instrument(span.clone()) + .await; + interval.tick().await; + } + } +} + +#[async_trait] +pub trait Observable { + fn name(&self) -> &str; + async fn observe_and_report(&self); + fn interval(&self) -> Duration; +} diff --git a/crates/sui-bridge/src/sui_bridge_watchdog/sui_bridge_status.rs b/crates/sui-bridge/src/sui_bridge_watchdog/sui_bridge_status.rs new file mode 100644 index 0000000000000..42506286c55e8 --- /dev/null +++ b/crates/sui-bridge/src/sui_bridge_watchdog/sui_bridge_status.rs @@ -0,0 +1,48 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +//! 
The SuiBridgeStatus observable monitors whether the Sui Bridge is paused. + +use crate::sui_bridge_watchdog::Observable; +use crate::sui_client::SuiBridgeClient; +use async_trait::async_trait; +use prometheus::IntGauge; +use std::sync::Arc; + +use tokio::time::Duration; +use tracing::{error, info}; + +pub struct SuiBridgeStatus { + sui_client: Arc, + metric: IntGauge, +} + +impl SuiBridgeStatus { + pub fn new(sui_client: Arc, metric: IntGauge) -> Self { + Self { sui_client, metric } + } +} + +#[async_trait] +impl Observable for SuiBridgeStatus { + fn name(&self) -> &str { + "SuiBridgeStatus" + } + + async fn observe_and_report(&self) { + let status = self.sui_client.is_bridge_paused().await; + match status { + Ok(status) => { + self.metric.set(status as i64); + info!("Sui Bridge Status: {:?}", status); + } + Err(e) => { + error!("Error getting sui bridge status: {:?}", e); + } + } + } + + fn interval(&self) -> Duration { + Duration::from_secs(10) + } +} diff --git a/crates/sui-bridge/src/sui_bridge_watchdog/total_supplies.rs b/crates/sui-bridge/src/sui_bridge_watchdog/total_supplies.rs new file mode 100644 index 0000000000000..199074a8e1a7a --- /dev/null +++ b/crates/sui-bridge/src/sui_bridge_watchdog/total_supplies.rs @@ -0,0 +1,65 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +//! The SuiBridgeStatus observable monitors whether the Sui Bridge is paused. + +use crate::sui_bridge_watchdog::Observable; +use async_trait::async_trait; +use prometheus::IntGaugeVec; +use std::{collections::BTreeMap, sync::Arc}; +use sui_sdk::SuiClient; + +use tokio::time::Duration; +use tracing::{error, info}; + +pub struct TotalSupplies { + sui_client: Arc, + coins: BTreeMap, + metric: IntGaugeVec, +} + +impl TotalSupplies { + pub fn new( + sui_client: Arc, + coins: BTreeMap, + metric: IntGaugeVec, + ) -> Self { + Self { + sui_client, + coins, + metric, + } + } +} + +#[async_trait] +impl Observable for TotalSupplies { + fn name(&self) -> &str { + "TotalSupplies" + } + + async fn observe_and_report(&self) { + for (coin_name, coin_type) in &self.coins { + let resp = self + .sui_client + .coin_read_api() + .get_total_supply(coin_type.clone()) + .await; + match resp { + Ok(supply) => { + self.metric + .with_label_values(&[coin_name]) + .set(supply.value as i64); + info!("Total supply for {coin_type}: {}", supply.value); + } + Err(e) => { + error!("Error getting total supply for coin {coin_type}: {:?}", e); + } + } + } + } + + fn interval(&self) -> Duration { + Duration::from_secs(10) + } +} diff --git a/crates/sui-bridge/src/types.rs b/crates/sui-bridge/src/types.rs index d4d69e1bf10ed..13aba4d461839 100644 --- a/crates/sui-bridge/src/types.rs +++ b/crates/sui-bridge/src/types.rs @@ -147,23 +147,6 @@ impl core::fmt::Display for BridgeCommittee { } } -impl core::fmt::Display for BridgeCommittee { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> std::fmt::Result { - for m in self.members.values() { - writeln!( - f, - "pubkey: {:?}, url: {:?}, stake: {:?}, blocklisted: {}, eth address: {:x}", - Hex::encode(m.pubkey_bytes().as_bytes()), - m.base_url, - m.voting_power, - m.is_blocklisted, - m.pubkey_bytes().to_eth_address(), - )?; - } - Ok(()) - } -} - impl CommitteeTrait for BridgeCommittee { // Note: blocklisted members are always excluded. 
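[Editor's aside] The watchdog added in `sui_bridge_watchdog` is deliberately extensible: anything that implements its `Observable` trait (a name, an async `observe_and_report`, and a polling interval) can be boxed into the vector handed to `BridgeWatchDog::new` in `start_watchdog`. A hedged sketch of a custom observable; the gauge and what it reports are invented for illustration:

```rust
use crate::sui_bridge_watchdog::Observable;
use async_trait::async_trait;
use prometheus::IntGauge;
use tokio::time::Duration;
use tracing::info;

/// Hypothetical observable that just reports a heartbeat gauge.
pub struct Heartbeat {
    metric: IntGauge,
}

#[async_trait]
impl Observable for Heartbeat {
    fn name(&self) -> &str {
        "Heartbeat"
    }

    async fn observe_and_report(&self) {
        // Real observables (EthVaultBalance, SuiBridgeStatus, ...) query a chain here.
        self.metric.set(1);
        info!("watchdog heartbeat");
    }

    fn interval(&self) -> Duration {
        Duration::from_secs(10)
    }
}

// Then, in start_watchdog: observables.push(Box::new(Heartbeat { metric }));
```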
fn shuffle_by_stake_with_rng( diff --git a/crates/sui-bridge/src/utils.rs b/crates/sui-bridge/src/utils.rs index 7990ec79e0cec..d6f7ca487e191 100644 --- a/crates/sui-bridge/src/utils.rs +++ b/crates/sui-bridge/src/utils.rs @@ -5,7 +5,7 @@ use crate::abi::{ EthBridgeCommittee, EthBridgeConfig, EthBridgeLimiter, EthBridgeVault, EthSuiBridge, }; use crate::config::{ - default_ed25519_key_pair, BridgeNodeConfig, EthConfig, MetricsConfig, SuiConfig, + default_ed25519_key_pair, BridgeNodeConfig, EthConfig, MetricsConfig, SuiConfig, WatchdogConfig, }; use crate::crypto::BridgeAuthorityKeyPair; use crate::crypto::BridgeAuthorityPublicKeyBytes; @@ -207,6 +207,13 @@ pub fn generate_bridge_node_config_and_write_to_file( push_interval_seconds: None, // use default value push_url: "metrics_proxy_url".to_string(), }), + watchdog_config: Some(WatchdogConfig { + total_supplies: BTreeMap::from_iter(vec![( + "eth".to_string(), + "0xd0e89b2af5e4910726fbcd8b8dd37bb79b29e5f83f7491bca830e94f7f226d29::eth::ETH" + .to_string(), + )]), + }), }; if run_client { config.sui.bridge_client_key_path = Some(PathBuf::from("/path/to/your/bridge_client_key")); @@ -389,7 +396,7 @@ pub async fn wait_for_server_to_be_up(server_url: String, timeout_sec: u64) -> a /// If a validator is not in the Sui committee, we will use its base URL as the name. pub async fn get_committee_voting_power_by_name( bridge_committee: &Arc, - system_state: SuiSystemStateSummary, + system_state: &SuiSystemStateSummary, ) -> BTreeMap { let mut sui_committee: BTreeMap<_, _> = system_state .active_validators @@ -409,3 +416,28 @@ pub async fn get_committee_voting_power_by_name( }) .collect() } + +/// Return a mappping from validator pub keys to their names. +/// If a validator is not in the Sui committee, we will use its base URL as the name. +pub async fn get_validator_names_by_pub_keys( + bridge_committee: &Arc, + system_state: &SuiSystemStateSummary, +) -> BTreeMap { + let mut sui_committee: BTreeMap<_, _> = system_state + .active_validators + .iter() + .map(|v| (v.sui_address, v.name.clone())) + .collect(); + bridge_committee + .members() + .iter() + .map(|(name, validator)| { + ( + name.clone(), + sui_committee + .remove(&validator.sui_address) + .unwrap_or(validator.base_url.clone()), + ) + }) + .collect() +} diff --git a/crates/sui-cluster-test/src/cluster.rs b/crates/sui-cluster-test/src/cluster.rs index decf58e81714d..166d71e0ada33 100644 --- a/crates/sui-cluster-test/src/cluster.rs +++ b/crates/sui-cluster-test/src/cluster.rs @@ -223,6 +223,7 @@ impl Cluster for LocalNewCluster { // This cluster has fullnode handle, safe to unwrap let fullnode_url = test_cluster.fullnode_handle.rpc_url.clone(); + // TODO: with TestCluster supporting indexer backed rpc as well, we can remove the indexer related logic here. 
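[Editor's aside] `get_validator_names_by_pub_keys` exists so that the aggregator's new `auth_agg_ok_responses` / `auth_agg_bad_responses` counters (see `metrics.rs` above) can be labeled with human-readable validator names, falling back to the base URL for members not in the Sui committee. A sketch of the kind of within-crate call site this enables; the exact location inside `BridgeAuthorityAggregator` is not shown in this diff and is assumed:

```rust
use std::collections::BTreeMap;

use crate::crypto::BridgeAuthorityPublicKeyBytes;
use crate::metrics::BridgeMetrics;

/// `names` is the map produced by get_validator_names_by_pub_keys and handed to
/// BridgeAuthorityAggregator::new as committee_keys_to_names.
fn record_auth_response(
    metrics: &BridgeMetrics,
    names: &BTreeMap<BridgeAuthorityPublicKeyBytes, String>,
    authority: &BridgeAuthorityPublicKeyBytes,
    ok: bool,
) {
    let label = names
        .get(authority)
        .map(String::as_str)
        .unwrap_or("unknown");
    let counter = if ok {
        &metrics.auth_agg_ok_responses
    } else {
        &metrics.auth_agg_bad_responses
    };
    counter.with_label_values(&[label]).inc();
}
```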
let mut cancellation_tokens = vec![]; let (database, indexer_url, graphql_url) = if options.with_indexer_and_graphql { let database = TempDb::new()?; @@ -237,6 +238,8 @@ impl Cluster for LocalNewCluster { None, Some(data_ingestion_path.path().to_path_buf()), None, /* cancel */ + None, /* start_checkpoint */ + None, /* end_checkpoint */ ) .await; cancellation_tokens.push(writer_token.drop_guard()); diff --git a/crates/sui-config/src/local_ip_utils.rs b/crates/sui-config/src/local_ip_utils.rs index 5e7d1298f3629..e8fb02c7f145f 100644 --- a/crates/sui-config/src/local_ip_utils.rs +++ b/crates/sui-config/src/local_ip_utils.rs @@ -122,15 +122,18 @@ pub fn new_udp_address_for_testing(host: &str) -> Multiaddr { .unwrap() } -/// Returns a new unique TCP address (SocketAddr) for localhost, by finding a new available port on localhost. -pub fn new_local_tcp_socket_for_testing() -> SocketAddr { +/// Returns a new unique TCP address in String format for localhost, by finding a new available port on localhost. +pub fn new_local_tcp_socket_for_testing_string() -> String { format!( "{}:{}", localhost_for_testing(), get_available_port(&localhost_for_testing()) ) - .parse() - .unwrap() +} + +/// Returns a new unique TCP address (SocketAddr) for localhost, by finding a new available port on localhost. +pub fn new_local_tcp_socket_for_testing() -> SocketAddr { + new_local_tcp_socket_for_testing_string().parse().unwrap() } /// Returns a new unique TCP address (Multiaddr) for localhost, by finding a new available port on localhost. diff --git a/crates/sui-config/src/node.rs b/crates/sui-config/src/node.rs index 1ab9ec678d364..41d67009c45e0 100644 --- a/crates/sui-config/src/node.rs +++ b/crates/sui-config/src/node.rs @@ -683,7 +683,10 @@ pub struct AuthorityStorePruningConfig { /// enables periodic background compaction for old SST files whose last modified time is /// older than `periodic_compaction_threshold_days` days. 
/// That ensures that all sst files eventually go through the compaction process - #[serde(skip_serializing_if = "Option::is_none")] + #[serde( + default = "default_periodic_compaction_threshold_days", + skip_serializing_if = "Option::is_none" + )] pub periodic_compaction_threshold_days: Option, /// number of epochs to keep the latest version of transactions and effects for #[serde(skip_serializing_if = "Option::is_none")] @@ -715,6 +718,10 @@ fn default_smoothing() -> bool { cfg!(not(test)) } +fn default_periodic_compaction_threshold_days() -> Option { + Some(1) +} + impl Default for AuthorityStorePruningConfig { fn default() -> Self { Self { diff --git a/crates/sui-core/Cargo.toml b/crates/sui-core/Cargo.toml index eac6a7277c61e..a5ee4ea284796 100644 --- a/crates/sui-core/Cargo.toml +++ b/crates/sui-core/Cargo.toml @@ -89,6 +89,7 @@ sui-protocol-config.workspace = true sui-transaction-checks.workspace = true sui-simulator.workspace = true sui-storage.workspace = true +sui-tls.workspace = true sui-types.workspace = true zeroize.workspace = true nonempty.workspace = true diff --git a/crates/sui-core/src/authority.rs b/crates/sui-core/src/authority.rs index c3268d9247faf..5c033919eb909 100644 --- a/crates/sui-core/src/authority.rs +++ b/crates/sui-core/src/authority.rs @@ -11,6 +11,7 @@ use crate::verify_indexes::verify_indexes; use anyhow::anyhow; use arc_swap::{ArcSwap, Guard}; use async_trait::async_trait; +use authority_per_epoch_store::CertLockGuard; use chrono::prelude::*; use fastcrypto::encoding::Base58; use fastcrypto::encoding::Encoding; @@ -54,11 +55,12 @@ use sui_types::layout_resolver::LayoutResolver; use sui_types::messages_consensus::{AuthorityCapabilitiesV1, AuthorityCapabilitiesV2}; use sui_types::object::bounded_visitor::BoundedVisitor; use sui_types::transaction_executor::SimulateTransactionResult; + use tap::{TapFallible, TapOptional}; use tokio::sync::mpsc::unbounded_channel; use tokio::sync::{mpsc, oneshot, RwLock}; use tokio::task::JoinHandle; -use tracing::{debug, error, info, instrument, warn, Instrument}; +use tracing::{debug, error, info, instrument, warn}; use self::authority_store::ExecutionLockWriteGuard; use self::authority_store_pruner::AuthorityStorePruningMetrics; @@ -67,8 +69,10 @@ use mysten_metrics::{monitored_scope, spawn_monitored_task}; use mamoru_sui_sniffer::SuiSniffer; use move_core_types::trace::CallTrace; + use crate::jsonrpc_index::IndexStore; use crate::jsonrpc_index::{CoinInfo, ObjectIndexChanges}; +use mysten_common::debug_fatal; use once_cell::sync::OnceCell; use shared_crypto::intent::{AppId, Intent, IntentMessage, IntentScope, IntentVersion}; use sui_archival::reader::ArchiveReaderBalancer; @@ -92,7 +96,7 @@ use sui_types::digests::TransactionEventsDigest; use sui_types::dynamic_field::{DynamicFieldInfo, DynamicFieldName}; use sui_types::effects::{ InputSharedObject, SignedTransactionEffects, TransactionEffects, TransactionEffectsAPI, - TransactionEvents, VerifiedCertifiedTransactionEffects, VerifiedSignedTransactionEffects, + TransactionEvents, VerifiedSignedTransactionEffects, }; use sui_types::error::{ExecutionError, UserInputError}; use sui_types::event::{Event, EventID}; @@ -126,7 +130,6 @@ use sui_types::{ committee::Committee, crypto::AuthoritySignature, error::{SuiError, SuiResult}, - fp_ensure, object::{Object, ObjectRead}, transaction::*, SUI_SYSTEM_ADDRESS, @@ -240,7 +243,6 @@ pub struct AuthorityMetrics { execute_certificate_latency_shared_object: Histogram, await_transaction_latency: Histogram, - 
execute_certificate_with_effects_latency: Histogram, internal_execution_latency: Histogram, execution_load_input_objects_latency: Histogram, prepare_certificate_latency: Histogram, @@ -304,8 +306,6 @@ pub struct AuthorityMetrics { /// bytecode verifier metrics for tracking timeouts pub bytecode_verifier_metrics: Arc, - pub authenticator_state_update_failed: IntCounter, - /// Count of zklogin signatures pub zklogin_sig_count: IntCounter, /// Count of multisig signatures @@ -460,13 +460,6 @@ impl AuthorityMetrics { registry, ) .unwrap(), - execute_certificate_with_effects_latency: register_histogram_with_registry!( - "authority_state_execute_certificate_with_effects_latency", - "Latency of executing certificates with effects, including waiting for inputs", - LATENCY_SEC_BUCKETS.to_vec(), - registry, - ) - .unwrap(), internal_execution_latency: register_histogram_with_registry!( "authority_state_internal_execution_latency", "Latency of actual certificate executions", @@ -737,12 +730,6 @@ impl AuthorityMetrics { ).unwrap(), limits_metrics: Arc::new(LimitsMetrics::new(registry)), bytecode_verifier_metrics: Arc::new(BytecodeVerifierMetrics::new(registry)), - authenticator_state_update_failed: register_int_counter_with_registry!( - "authenticator_state_update_failed", - "Number of failed authenticator state updates", - registry, - ) - .unwrap(), zklogin_sig_count: register_int_counter_with_registry!( "zklogin_sig_count", "Count of zkLogin signatures", @@ -1122,81 +1109,6 @@ impl AuthorityState { .inc(); } - /// Executes a transaction that's known to have correct effects. - /// For such transaction, we don't have to wait for consensus to set shared object - /// locks because we already know the shared object versions based on the effects. - /// This function can be called by a fullnode only. - // TODO: This function is no longer needed. Remove it and cleanup all the - // related functions. - #[instrument(level = "trace", skip_all)] - pub async fn fullnode_execute_certificate_with_effects( - &self, - transaction: &VerifiedExecutableTransaction, - // NOTE: the caller of this must promise to wait until it - // knows for sure this tx is finalized, namely, it has seen a - // CertifiedTransactionEffects or at least f+1 identifical effects - // digests matching this TransactionEffectsEnvelope, before calling - // this function, in order to prevent a byzantine validator from - // giving us incorrect effects. - effects: &VerifiedCertifiedTransactionEffects, - epoch_store: &Arc, - ) -> SuiResult { - assert!(self.is_fullnode(epoch_store)); - // NOTE: the fullnode can change epoch during local execution. It should not cause - // data inconsistency, but can be problematic for certain tests. - // The check below mitigates the issue, but it is not a fundamental solution to - // avoid race between local execution and reconfiguration. 
- if self.epoch_store.load().epoch() != epoch_store.epoch() { - return Err(SuiError::EpochEnded(epoch_store.epoch())); - } - let _metrics_guard = self - .metrics - .execute_certificate_with_effects_latency - .start_timer(); - let digest = *transaction.digest(); - debug!("execute_certificate_with_effects"); - fp_ensure!( - *effects.data().transaction_digest() == digest, - SuiError::ErrorWhileProcessingCertificate { - err: "effects/tx digest mismatch".to_string() - } - ); - - if transaction.contains_shared_object() { - epoch_store - .acquire_shared_locks_from_effects( - transaction, - effects.data(), - self.get_object_cache_reader().as_ref(), - ) - .await?; - } - - let expected_effects_digest = effects.digest(); - - self.transaction_manager - .enqueue(vec![transaction.clone()], epoch_store); - - let observed_effects = self - .get_transaction_cache_reader() - .notify_read_executed_effects(&[digest]) - .instrument(tracing::debug_span!( - "notify_read_effects_in_execute_certificate_with_effects" - )) - .await? - .pop() - .expect("notify_read_effects should return exactly 1 element"); - - let observed_effects_digest = observed_effects.digest(); - if &observed_effects_digest != expected_effects_digest { - panic!( - "Locally executed effects do not match canonical effects! expected_effects_digest={:?} observed_effects_digest={:?} expected_effects={:?} observed_effects={:?} input_objects={:?}", - expected_effects_digest, observed_effects_digest, effects.data(), observed_effects, transaction.data().transaction_data().input_objects() - ); - } - Ok(()) - } - /// Executes a certificate for its effects. #[instrument(level = "trace", skip_all)] pub async fn execute_certificate( @@ -1225,7 +1137,13 @@ impl AuthorityState { self.enqueue_certificates_for_execution(vec![certificate.clone()], epoch_store); } - self.notify_read_effects(*certificate.digest()).await + // tx could be reverted when epoch ends, so we must be careful not to return a result + // here after the epoch ends. + epoch_store + .within_alive_epoch(self.notify_read_effects(*certificate.digest())) + .await + .map_err(|_| SuiError::EpochEnded(epoch_store.epoch())) + .and_then(|r| r) } /// Awaits the effects of executing a user transaction. @@ -1234,12 +1152,20 @@ impl AuthorityState { pub async fn await_transaction_effects( &self, digest: TransactionDigest, + epoch_store: &Arc, ) -> SuiResult { let _metrics_guard = self.metrics.await_transaction_latency.start_timer(); debug!("await_transaction"); // TODO(fastpath): Add handling for transactions rejected by Mysticeti fast path. - self.notify_read_effects(digest).await + // TODO(fastpath): Can an MFP transaction be reverted after epoch ends? If so, + // same warning as above applies: We must be careful not to return a result + // here after the epoch ends. + epoch_store + .within_alive_epoch(self.notify_read_effects(digest)) + .await + .map_err(|_| SuiError::EpochEnded(epoch_store.epoch())) + .and_then(|r| r) } /// Internal logic to execute a certificate. @@ -1265,7 +1191,22 @@ impl AuthorityState { debug!("execute_certificate_internal"); let tx_digest = certificate.digest(); - let input_objects = self.read_objects_for_execution(certificate, epoch_store)?; + + // prevent concurrent executions of the same tx. + let tx_guard = epoch_store.acquire_tx_guard(certificate).await?; + + // The cert could have been processed by a concurrent attempt of the same cert, so check if + // the effects have already been written. 
+ if let Some(effects) = self + .get_transaction_cache_reader() + .get_executed_effects(tx_digest)? + { + tx_guard.release(); + return Ok((effects, None)); + } + + let input_objects = + self.read_objects_for_execution(tx_guard.as_lock_guard(), certificate, epoch_store)?; if expected_effects_digest.is_none() { // We could be re-executing a previously executed but uncommitted transaction, perhaps after @@ -1275,12 +1216,6 @@ impl AuthorityState { expected_effects_digest = epoch_store.get_signed_effects_digest(tx_digest)?; } - // This acquires a lock on the tx digest to prevent multiple concurrent executions of the - // same tx. While we don't need this for safety (tx sequencing is ultimately atomic), it is - // very common to receive the same tx multiple times simultaneously due to gossip, so we - // may as well hold the lock and save the cpu time for other requests. - let tx_guard = epoch_store.acquire_tx_guard(certificate).await?; - self.process_certificate( tx_guard, certificate, @@ -1294,6 +1229,7 @@ impl AuthorityState { pub fn read_objects_for_execution( &self, + tx_lock: &CertLockGuard, certificate: &VerifiedExecutableTransaction, epoch_store: &Arc, ) -> SuiResult { @@ -1306,6 +1242,7 @@ impl AuthorityState { self.input_loader.read_objects_for_execution( epoch_store.as_ref(), &certificate.key(), + tx_lock, input_objects, epoch_store.epoch(), ) @@ -1395,15 +1332,6 @@ impl AuthorityState { } }); - // The cert could have been processed by a concurrent attempt of the same cert, so check if - // the effects have already been written. - if let Some(effects) = self - .get_transaction_cache_reader() - .get_executed_effects(&digest)? - { - tx_guard.release(); - return Ok((effects, None)); - } let execution_guard = self .execution_lock_for_executable_transaction(certificate) .await; @@ -1503,10 +1431,8 @@ impl AuthorityState { certificate.data().transaction_data().kind() { if let Some(err) = &execution_error_opt { - error!("Authenticator state update failed: {err}"); - self.metrics.authenticator_state_update_failed.inc(); + debug_fatal!("Authenticator state update failed: {:?}", err); } - debug_assert!(execution_error_opt.is_none()); epoch_store.update_authenticator_state(auth_state); // double check that the signature verifier always matches the authenticator state @@ -3224,7 +3150,16 @@ impl AuthorityState { ); self.committee_store.insert_new_committee(&new_committee)?; + + // Wait until no transactions are being executed. let mut execution_lock = self.execution_lock_for_reconfiguration().await; + + // Terminate all epoch-specific tasks (those started with within_alive_epoch). + cur_epoch_store.epoch_terminated().await; + + // Safe to being reconfiguration now. No transactions are being executed, + // and no epoch-specific tasks are running. + // TODO: revert_uncommitted_epoch_transactions will soon be unnecessary - // clear_state_end_of_epoch() can simply drop all uncommitted transactions self.revert_uncommitted_epoch_transactions(cur_epoch_store) @@ -5119,7 +5054,7 @@ impl AuthorityState { ); fail_point_async!("change_epoch_tx_delay"); - let _tx_lock = epoch_store.acquire_tx_lock(tx_digest).await; + let tx_lock = epoch_store.acquire_tx_lock(tx_digest).await; // The tx could have been executed by state sync already - if so simply return an error. // The checkpoint builder will shortly be terminated by reconfiguration anyway. 
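[Editor's aside] Both `execute_certificate` and `await_transaction_effects` now wrap their wait in `within_alive_epoch`, so a caller can no longer observe effects after the epoch that executed (or reverted) the transaction has ended. The pattern, isolated as a small sketch inside `sui-core`; method visibilities and the exact effect type are assumptions based on the surrounding code:

```rust
use std::sync::Arc;

use sui_types::base_types::TransactionDigest;
use sui_types::effects::TransactionEffects;
use sui_types::error::{SuiError, SuiResult};

/// The outer Err(_) means the epoch terminated while waiting; the inner result is
/// the normal notify_read_effects outcome. Both collapse into a single SuiResult.
async fn effects_or_epoch_ended(
    state: &AuthorityState,
    epoch_store: &Arc<AuthorityPerEpochStore>,
    digest: TransactionDigest,
) -> SuiResult<TransactionEffects> {
    epoch_store
        .within_alive_epoch(state.notify_read_effects(digest))
        .await
        .map_err(|_| SuiError::EpochEnded(epoch_store.epoch()))
        .and_then(|r| r)
}
```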
@@ -5147,7 +5082,8 @@ impl AuthorityState { ) .await?; - let input_objects = self.read_objects_for_execution(&executable_tx, epoch_store)?; + let input_objects = + self.read_objects_for_execution(&tx_lock, &executable_tx, epoch_store)?; let (temporary_store, effects, _execution_error_opt) = self.prepare_certificate(&execution_guard, &executable_tx, input_objects, epoch_store)?; @@ -5238,7 +5174,6 @@ impl AuthorityState { cur_epoch_store.get_chain_identifier(), ); self.epoch_store.store(new_epoch_store.clone()); - cur_epoch_store.epoch_terminated().await; Ok(new_epoch_store) } diff --git a/crates/sui-core/src/authority/authority_per_epoch_store.rs b/crates/sui-core/src/authority/authority_per_epoch_store.rs index 70c1e98bbb2e1..b49ae87c33881 100644 --- a/crates/sui-core/src/authority/authority_per_epoch_store.rs +++ b/crates/sui-core/src/authority/authority_per_epoch_store.rs @@ -26,7 +26,9 @@ use sui_types::base_types::{AuthorityName, EpochId, ObjectID, SequenceNumber, Tr use sui_types::base_types::{ConciseableName, ObjectRef}; use sui_types::committee::Committee; use sui_types::committee::CommitteeTrait; -use sui_types::crypto::{AuthoritySignInfo, AuthorityStrongQuorumSignInfo, RandomnessRound}; +use sui_types::crypto::{ + AuthorityPublicKeyBytes, AuthoritySignInfo, AuthorityStrongQuorumSignInfo, RandomnessRound, +}; use sui_types::digests::{ChainIdentifier, TransactionEffectsDigest}; use sui_types::error::{SuiError, SuiResult}; use sui_types::signature::GenericSignature; @@ -48,7 +50,9 @@ use typed_store::{ use super::authority_store_tables::ENV_VAR_LOCKS_BLOCK_CACHE_SIZE; use super::epoch_start_configuration::EpochStartConfigTrait; -use super::shared_object_congestion_tracker::SharedObjectCongestionTracker; +use super::shared_object_congestion_tracker::{ + CongestionPerObjectDebt, SharedObjectCongestionTracker, +}; use super::transaction_deferral::{transaction_deferral_within_limit, DeferralKey, DeferralReason}; use crate::authority::epoch_start_configuration::{EpochFlag, EpochStartConfiguration}; use crate::authority::AuthorityMetrics; @@ -129,6 +133,16 @@ pub struct CertTxGuard(#[allow(unused)] CertLockGuard); impl CertTxGuard { pub fn release(self) {} pub fn commit_tx(self) {} + pub fn as_lock_guard(&self) -> &CertLockGuard { + &self.0 + } +} + +impl CertLockGuard { + pub fn dummy_for_tests() -> Self { + let lock = Arc::new(tokio::sync::Mutex::new(())); + Self(lock.try_lock_owned().unwrap()) + } } type JwkAggregator = GenericMultiStakeAggregator<(JwkId, JWK), true>; @@ -596,6 +610,10 @@ pub struct AuthorityEpochTables { pub(crate) randomness_highest_completed_round: DBMap, /// Holds the timestamp of the most recently generated round of randomness. pub(crate) randomness_last_round_timestamp: DBMap, + + /// Accumulated per-object debts for congestion control. 
+ pub(crate) congestion_control_object_debts: DBMap, + pub(crate) congestion_control_randomness_object_debts: DBMap, } fn signed_transactions_table_default_config() -> DBOptions { @@ -746,6 +764,62 @@ impl AuthorityEpochTables { batch.write()?; Ok(()) } + + pub fn load_initial_object_debts( + &self, + current_round: Round, + for_randomness: bool, + protocol_config: &ProtocolConfig, + transactions: &[VerifiedSequencedConsensusTransaction], + ) -> SuiResult> { + let default_per_commit_budget = protocol_config + .max_accumulated_txn_cost_per_object_in_mysticeti_commit_as_option() + .unwrap_or(0); + let (table, per_commit_budget) = if for_randomness { + ( + &self.congestion_control_randomness_object_debts, + protocol_config + .max_accumulated_randomness_txn_cost_per_object_in_mysticeti_commit_as_option() + .unwrap_or(default_per_commit_budget), + ) + } else { + ( + &self.congestion_control_object_debts, + default_per_commit_budget, + ) + }; + + let shared_input_object_ids: BTreeSet<_> = transactions + .iter() + .filter_map(|tx| { + if let SequencedConsensusTransactionKind::External(ConsensusTransaction { + kind: ConsensusTransactionKind::CertifiedTransaction(tx), + .. + }) = &tx.0.transaction + { + Some(tx.shared_input_objects().map(|obj| obj.id)) + } else { + None + } + }) + .flatten() + .collect(); + Ok(table + .multi_get(shared_input_object_ids.iter())? + .into_iter() + .zip(shared_input_object_ids) + .filter_map(|(debt, object_id)| debt.map(|debt| (debt, object_id))) + .map(move |(debt, object_id)| { + let (round, debt) = debt.into_v1(); + ( + object_id, + // Stored debts already account for the budget of the round in which + // they were accumulated. Application of budget from future rounds to + // the debt is handled here. + debt.saturating_sub(per_commit_budget * (current_round - round - 1)), + ) + })) + } } pub(crate) const MUTEX_TABLE_SIZE: usize = 1024; @@ -898,18 +972,6 @@ impl AuthorityPerEpochStore { randomness_reporter: OnceCell::new(), }); - if matches!(chain_identifier.chain(), Chain::Mainnet | Chain::Testnet) { - // If we disable randomness, and if the release in which it was disabled did not have - // the commit that added this comment, we will need to revert this commit. This is - // because the previous release will have been writing to the deprecated - // assigned_shared_object_versions table. - // - // If we disable randomness *after* this commit has been shipped to all networks, then - // we can simply remove this assert, as we will no longer switch back and forth between - // the two tables. - assert!(s.randomness_state_enabled()); - } - s.update_buffer_stake_metric(); s } @@ -1333,23 +1395,29 @@ impl AuthorityPerEpochStore { &self, key: &TransactionKey, objects: &[InputObjectKind], - ) -> BTreeSet { - let mut shared_locks = HashMap::::new(); + ) -> SuiResult> { + let shared_locks = + once_cell::unsync::OnceCell::>>::new(); objects .iter() .map(|kind| { - match kind { + Ok(match kind { InputObjectKind::SharedMoveObject { id, .. } => { - if shared_locks.is_empty() { - shared_locks = self - .get_shared_locks(key) - .expect("Read from storage should not fail!") - .into_iter() - .collect(); - } - // If we can't find the locked version, it means - // 1. either we have a bug that skips shared object version assignment - // 2. 
or we have some DB corruption + let shared_locks = shared_locks + .get_or_init(|| { + self.get_shared_locks(key) + .expect("reading shared locks should not fail") + .map(|locks| locks.into_iter().collect()) + }) + .as_ref() + // Shared version assignments could have been deleted if the tx just + // finished executing concurrently. + .ok_or(SuiError::GenericAuthorityError { + error: "no shared locks".to_string(), + })?; + + // If we found locks, but they are missing the assignment for this object, + // it indicates a serious inconsistency! let Some(version) = shared_locks.get(id) else { panic!( "Shared object locks should have been set. key: {key:?}, obj \ @@ -1366,7 +1434,7 @@ impl AuthorityPerEpochStore { id: objref.0, version: objref.1, }, - } + }) }) .collect() } @@ -1794,11 +1862,6 @@ impl AuthorityPerEpochStore { .collect::, _>>()?) } - fn get_max_accumulated_txn_cost_per_object_in_commit(&self) -> Option { - self.protocol_config() - .max_accumulated_txn_cost_per_object_in_mysticeti_commit_as_option() - } - fn should_defer( &self, cert: &VerifiedExecutableTransaction, @@ -1825,25 +1888,18 @@ impl AuthorityPerEpochStore { )); } - if let Some(max_accumulated_txn_cost_per_object_in_commit) = - self.get_max_accumulated_txn_cost_per_object_in_commit() + // Defer transaction if it uses shared objects that are congested. + if let Some((deferral_key, congested_objects)) = shared_object_congestion_tracker + .should_defer_due_to_object_congestion( + cert, + previously_deferred_tx_digests, + commit_round, + ) { - // Defer transaction if it uses shared objects that are congested. - if let Some((deferral_key, congested_objects)) = shared_object_congestion_tracker - .should_defer_due_to_object_congestion( - cert, - max_accumulated_txn_cost_per_object_in_commit, - previously_deferred_tx_digests, - commit_round, - ) - { - Some(( - deferral_key, - DeferralReason::SharedObjectCongestion(congested_objects), - )) - } else { - None - } + Some(( + deferral_key, + DeferralReason::SharedObjectCongestion(congested_objects), + )) } else { None } @@ -2665,7 +2721,7 @@ impl AuthorityPerEpochStore { } } - let mut output = ConsensusCommitOutput::new(); + let mut output = ConsensusCommitOutput::new(consensus_commit_info.round); // Load transactions deferred from previous commits. let deferred_txs: Vec<(DeferralKey, Vec)> = self @@ -2792,7 +2848,6 @@ impl AuthorityPerEpochStore { }) .collect(); - // We always order transactions using randomness last. PostConsensusTxReorder::reorder( &mut sequenced_transactions, self.protocol_config.consensus_transaction_ordering(), @@ -2801,6 +2856,27 @@ impl AuthorityPerEpochStore { &mut sequenced_randomness_transactions, self.protocol_config.consensus_transaction_ordering(), ); + + // We track transaction execution cost separately for regular transactions and transactions using randomness, since + // they will be in different PendingCheckpoints. + let tables = self.tables()?; + let shared_object_congestion_tracker = SharedObjectCongestionTracker::from_protocol_config( + &tables, + self.protocol_config(), + consensus_commit_info.round, + false, + &sequenced_transactions, + )?; + let shared_object_using_randomness_congestion_tracker = + SharedObjectCongestionTracker::from_protocol_config( + &tables, + self.protocol_config(), + consensus_commit_info.round, + true, + &sequenced_randomness_transactions, + )?; + + // We always order transactions using randomness last. 
let consensus_transactions: Vec<_> = system_transactions .into_iter() .chain(sequenced_transactions) @@ -2823,6 +2899,8 @@ impl AuthorityPerEpochStore { consensus_commit_info, &mut roots, &mut randomness_roots, + shared_object_congestion_tracker, + shared_object_using_randomness_congestion_tracker, previously_deferred_tx_digests, randomness_manager.as_deref_mut(), dkg_failed, @@ -2845,7 +2923,6 @@ impl AuthorityPerEpochStore { }; let make_checkpoint = should_accept_tx || final_round; if make_checkpoint { - // Generate pending checkpoint for regular user tx. let checkpoint_height = if self.randomness_state_enabled() { consensus_commit_info.round * 2 } else { @@ -2866,29 +2943,34 @@ impl AuthorityPerEpochStore { } } checkpoint_roots.extend(roots.into_iter()); + + if let Some(randomness_round) = randomness_round { + randomness_roots.insert(TransactionKey::RandomnessRound( + self.epoch(), + randomness_round, + )); + } + + // Determine whether to write pending checkpoint for user tx with randomness. + // - If randomness is not generated for this commit, we will skip the + // checkpoint with the associated height. Therefore checkpoint heights may + // not be contiguous. + // - Exception: if DKG fails, we always need to write out a PendingCheckpoint + // for randomness tx that are canceled. + let should_write_random_checkpoint = + randomness_round.is_some() || (dkg_failed && !randomness_roots.is_empty()); + let pending_checkpoint = PendingCheckpointV2::V2(PendingCheckpointV2Contents { roots: checkpoint_roots, details: PendingCheckpointInfo { timestamp_ms: consensus_commit_info.timestamp, - last_of_epoch: final_round && randomness_round.is_none(), + last_of_epoch: final_round && !should_write_random_checkpoint, checkpoint_height, }, }); self.write_pending_checkpoint(&mut output, &pending_checkpoint)?; - // Generate pending checkpoint for user tx with randomness. - // - If randomness is not generated for this commit, we will skip the - // checkpoint with the associated height. Therefore checkpoint heights may - // not be contiguous. - // - Exception: if DKG fails, we always need to write out a PendingCheckpoint - // for randomness tx that are canceled. 
- if let Some(randomness_round) = randomness_round { - randomness_roots.insert(TransactionKey::RandomnessRound( - self.epoch(), - randomness_round, - )); - } - if randomness_round.is_some() || (dkg_failed && !randomness_roots.is_empty()) { + if should_write_random_checkpoint { let pending_checkpoint = PendingCheckpointV2::V2(PendingCheckpointV2Contents { roots: randomness_roots.into_iter().collect(), details: PendingCheckpointInfo { @@ -3083,7 +3165,7 @@ impl AuthorityPerEpochStore { cache_reader: &dyn ObjectCacheRead, transactions: &[VerifiedExecutableTransaction], ) -> SuiResult { - let mut output = ConsensusCommitOutput::new(); + let mut output = ConsensusCommitOutput::new(0); self.process_consensus_transaction_shared_object_versions( cache_reader, transactions, @@ -3128,6 +3210,8 @@ impl AuthorityPerEpochStore { consensus_commit_info: &ConsensusCommitInfo, roots: &mut BTreeSet, randomness_roots: &mut BTreeSet, + mut shared_object_congestion_tracker: SharedObjectCongestionTracker, + mut shared_object_using_randomness_congestion_tracker: SharedObjectCongestionTracker, previously_deferred_tx_digests: HashMap, mut randomness_manager: Option<&mut RandomnessManager>, dkg_failed: bool, @@ -3140,6 +3224,8 @@ impl AuthorityPerEpochStore { bool, // true if final round Option, // consensus commit prologue root )> { + let _scope = monitored_scope("ConsensusCommitHandler::process_consensus_transactions"); + if randomness_round.is_some() { assert!(!dkg_failed); // invariant check } @@ -3152,20 +3238,6 @@ impl AuthorityPerEpochStore { let mut cancelled_txns: BTreeMap = BTreeMap::new(); - // We track transaction execution cost separately for regular transactions and transactions using randomness, since - // they will be in different checkpoints. - let mut shared_object_congestion_tracker = SharedObjectCongestionTracker::new( - self.protocol_config().per_object_congestion_control_mode(), - self.protocol_config() - .gas_budget_based_txn_cost_cap_factor_as_option(), - ); - let mut shared_object_using_randomness_congestion_tracker = - SharedObjectCongestionTracker::new( - self.protocol_config().per_object_congestion_control_mode(), - self.protocol_config() - .gas_budget_based_txn_cost_cap_factor_as_option(), - ); - fail_point_arg!( "initial_congestion_tracker", |tracker: SharedObjectCongestionTracker| { @@ -3278,6 +3350,13 @@ impl AuthorityPerEpochStore { .with_label_values(&["randomness_commit"]) .set(shared_object_using_randomness_congestion_tracker.max_cost() as i64); + output.set_congestion_control_object_debts( + shared_object_congestion_tracker.accumulated_debts(), + ); + output.set_congestion_control_randomness_object_debts( + shared_object_using_randomness_congestion_tracker.accumulated_debts(), + ); + if randomness_state_updated { if let Some(randomness_manager) = randomness_manager.as_mut() { randomness_manager @@ -3434,7 +3513,8 @@ impl AuthorityPerEpochStore { shared_object_congestion_tracker: &mut SharedObjectCongestionTracker, authority_metrics: &Arc, ) -> SuiResult { - let _scope = monitored_scope("HandleConsensusTransaction"); + let _scope = monitored_scope("ConsensusCommitHandler::process_consensus_transaction"); + let VerifiedSequencedConsensusTransaction(SequencedConsensusTransaction { certificate_author_index: _, certificate_author, @@ -3457,107 +3537,21 @@ impl AuthorityPerEpochStore { ); return Ok(ConsensusCertificateResult::Ignored); } - if self.has_sent_end_of_publish(certificate_author)? 
- && !previously_deferred_tx_digests.contains_key(certificate.digest()) - { - // This can not happen with valid authority - // With some edge cases consensus might sometimes resend previously seen certificate after EndOfPublish - // However this certificate will be filtered out before this line by `consensus_message_processed` call in `verify_consensus_transaction` - // If we see some new certificate here it means authority is byzantine and sent certificate after EndOfPublish (or we have some bug in ConsensusAdapter) - warn!("[Byzantine authority] Authority {:?} sent a new, previously unseen certificate {:?} after it sent EndOfPublish message to consensus", certificate_author.concise(), certificate.digest()); - return Ok(ConsensusCertificateResult::Ignored); - } // Safe because signatures are verified when consensus called into SuiTxValidator::validate_batch. let certificate = VerifiedCertificate::new_unchecked(*certificate.clone()); - let certificate = VerifiedExecutableTransaction::new_from_certificate(certificate); - - debug!( - ?tracking_id, - tx_digest = ?certificate.digest(), - "handle_consensus_transaction UserTransaction", - ); - - if !self - .get_reconfig_state_read_lock_guard() - .should_accept_consensus_certs() - && !previously_deferred_tx_digests.contains_key(certificate.digest()) - { - debug!("Ignoring consensus certificate for transaction {:?} because of end of epoch", - certificate.digest()); - return Ok(ConsensusCertificateResult::Ignored); - } + let transaction = VerifiedExecutableTransaction::new_from_certificate(certificate); - let deferral_info = self.should_defer( - &certificate, + self.process_consensus_user_transaction( + transaction, + certificate_author, commit_round, + tracking_id, + previously_deferred_tx_digests, dkg_failed, generating_randomness, - previously_deferred_tx_digests, shared_object_congestion_tracker, - ); - - if let Some((deferral_key, deferral_reason)) = deferral_info { - debug!( - "Deferring consensus certificate for transaction {:?} until {:?}", - certificate.digest(), - deferral_key - ); - - let deferral_result = match deferral_reason { - DeferralReason::RandomnessNotReady => { - // Always defer transaction due to randomness not ready. - ConsensusCertificateResult::Deferred(deferral_key) - } - DeferralReason::SharedObjectCongestion(congested_objects) => { - authority_metrics - .consensus_handler_congested_transactions - .inc(); - if transaction_deferral_within_limit( - &deferral_key, - self.protocol_config() - .max_deferral_rounds_for_congestion_control(), - ) { - ConsensusCertificateResult::Deferred(deferral_key) - } else { - // Cancel the transaction that has been deferred for too long. - debug!( - "Cancelling consensus certificate for transaction {:?} with deferral key {:?} due to congestion on objects {:?}", - certificate.digest(), - deferral_key, - congested_objects - ); - ConsensusCertificateResult::Cancelled(( - certificate, - CancelConsensusCertificateReason::CongestionOnObjects( - congested_objects, - ), - )) - } - } - }; - return Ok(deferral_result); - } - - if dkg_failed - && self.randomness_state_enabled() - && certificate.transaction_data().uses_randomness() - { - debug!( - "Canceling randomness-using certificate for transaction {:?} because DKG failed", - certificate.digest(), - ); - return Ok(ConsensusCertificateResult::Cancelled(( - certificate, - CancelConsensusCertificateReason::DkgFailed, - ))); - } - - // This certificate will be scheduled. Update object execution cost. 
- if certificate.contains_shared_object() { - shared_object_congestion_tracker.bump_object_execution_cost(&certificate); - } - - Ok(ConsensusCertificateResult::SuiTransaction(certificate)) + authority_metrics, + ) } SequencedConsensusTransactionKind::External(ConsensusTransaction { kind: ConsensusTransactionKind::CheckpointSignature(info), @@ -3720,11 +3714,30 @@ impl AuthorityPerEpochStore { } SequencedConsensusTransactionKind::External(ConsensusTransaction { - kind: ConsensusTransactionKind::UserTransaction(_tx), + kind: ConsensusTransactionKind::UserTransaction(tx), .. }) => { - // TODO(fastpath): implement handling of user transactions from consensus commits. - Ok(ConsensusCertificateResult::Ignored) + // Ignore consensus certified user transaction if Mysticeti fastpath is not enabled. + if !self.protocol_config().mysticeti_fastpath() { + return Ok(ConsensusCertificateResult::Ignored); + } + // Safe because transactions are certified by consensus. + let tx = VerifiedTransaction::new_unchecked(*tx.clone()); + // TODO(fastpath): accept position in consensus, after plumbing consensus round, authority index, and transaction index here. + let transaction = + VerifiedExecutableTransaction::new_from_consensus(tx, self.epoch()); + + self.process_consensus_user_transaction( + transaction, + certificate_author, + commit_round, + tracking_id, + previously_deferred_tx_digests, + dkg_failed, + generating_randomness, + shared_object_congestion_tracker, + authority_metrics, + ) } SequencedConsensusTransactionKind::System(system_transaction) => { Ok(self.process_consensus_system_transaction(system_transaction)) @@ -3749,6 +3762,122 @@ impl AuthorityPerEpochStore { ConsensusCertificateResult::SuiTransaction(system_transaction.clone()) } + fn process_consensus_user_transaction( + &self, + transaction: VerifiedExecutableTransaction, + block_author: &AuthorityPublicKeyBytes, + commit_round: Round, + tracking_id: u64, + previously_deferred_tx_digests: &HashMap, + dkg_failed: bool, + generating_randomness: bool, + shared_object_congestion_tracker: &mut SharedObjectCongestionTracker, + authority_metrics: &Arc, + ) -> SuiResult { + let _scope = monitored_scope("ConsensusCommitHandler::process_consensus_user_transaction"); + + if self.has_sent_end_of_publish(block_author)? 
+ && !previously_deferred_tx_digests.contains_key(transaction.digest()) + { + // This can not happen with valid authority + // With some edge cases consensus might sometimes resend previously seen certificate after EndOfPublish + // However this certificate will be filtered out before this line by `consensus_message_processed` call in `verify_consensus_transaction` + // If we see some new certificate here it means authority is byzantine and sent certificate after EndOfPublish (or we have some bug in ConsensusAdapter) + warn!("[Byzantine authority] Authority {:?} sent a new, previously unseen transaction {:?} after it sent EndOfPublish message to consensus", block_author.concise(), transaction.digest()); + return Ok(ConsensusCertificateResult::Ignored); + } + + debug!( + ?tracking_id, + tx_digest = ?transaction.digest(), + "handle_consensus_transaction UserTransaction", + ); + + if !self + .get_reconfig_state_read_lock_guard() + .should_accept_consensus_certs() + && !previously_deferred_tx_digests.contains_key(transaction.digest()) + { + debug!( + "Ignoring consensus transaction {:?} because of end of epoch", + transaction.digest() + ); + return Ok(ConsensusCertificateResult::Ignored); + } + + let deferral_info = self.should_defer( + &transaction, + commit_round, + dkg_failed, + generating_randomness, + previously_deferred_tx_digests, + shared_object_congestion_tracker, + ); + + if let Some((deferral_key, deferral_reason)) = deferral_info { + debug!( + "Deferring consensus certificate for transaction {:?} until {:?}", + transaction.digest(), + deferral_key + ); + + let deferral_result = match deferral_reason { + DeferralReason::RandomnessNotReady => { + // Always defer transaction due to randomness not ready. + ConsensusCertificateResult::Deferred(deferral_key) + } + DeferralReason::SharedObjectCongestion(congested_objects) => { + authority_metrics + .consensus_handler_congested_transactions + .inc(); + if transaction_deferral_within_limit( + &deferral_key, + self.protocol_config() + .max_deferral_rounds_for_congestion_control(), + ) { + ConsensusCertificateResult::Deferred(deferral_key) + } else { + // Cancel the transaction that has been deferred for too long. + debug!( + "Cancelling consensus transaction {:?} with deferral key {:?} due to congestion on objects {:?}", + transaction.digest(), + deferral_key, + congested_objects + ); + ConsensusCertificateResult::Cancelled(( + transaction, + CancelConsensusCertificateReason::CongestionOnObjects( + congested_objects, + ), + )) + } + } + }; + return Ok(deferral_result); + } + + if dkg_failed + && self.randomness_state_enabled() + && transaction.transaction_data().uses_randomness() + { + debug!( + "Canceling randomness-using transaction {:?} because DKG failed", + transaction.digest(), + ); + return Ok(ConsensusCertificateResult::Cancelled(( + transaction, + CancelConsensusCertificateReason::DkgFailed, + ))); + } + + // This certificate will be scheduled. Update object execution cost. 
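The refactored `process_consensus_user_transaction` above keeps the original decision flow for a user transaction that is not ignored for end-of-publish or end-of-epoch reasons: defer while randomness is not ready, defer congested transactions until the protocol's deferral-round limit and then cancel them, and cancel randomness-using transactions outright once DKG has failed. A self-contained sketch of that flow with simplified types; the variants only loosely mirror `ConsensusCertificateResult`:

#[derive(Debug, PartialEq)]
enum Outcome {
    Schedule,
    Defer,
    CancelCongestion,
    CancelDkgFailed,
}

fn resolve(
    randomness_not_ready: bool,
    congested: bool,
    deferral_rounds_so_far: u64,
    max_deferral_rounds: u64,
    dkg_failed: bool,
    uses_randomness: bool,
) -> Outcome {
    if randomness_not_ready {
        return Outcome::Defer; // always wait for randomness to become available
    }
    if congested {
        // Defer while within the limit, otherwise give up and cancel the transaction.
        return if deferral_rounds_so_far < max_deferral_rounds {
            Outcome::Defer
        } else {
            Outcome::CancelCongestion
        };
    }
    if dkg_failed && uses_randomness {
        return Outcome::CancelDkgFailed; // randomness will never be generated this epoch
    }
    Outcome::Schedule // schedule and bump the shared objects' execution cost
}

fn main() {
    assert_eq!(resolve(true, false, 0, 10, false, false), Outcome::Defer);
    assert_eq!(resolve(false, true, 10, 10, false, false), Outcome::CancelCongestion);
    assert_eq!(resolve(false, false, 0, 10, true, true), Outcome::CancelDkgFailed);
    assert_eq!(resolve(false, false, 0, 10, false, false), Outcome::Schedule);
}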
+ if transaction.contains_shared_object() { + shared_object_congestion_tracker.bump_object_execution_cost(&transaction); + } + + Ok(ConsensusCertificateResult::SuiTransaction(transaction)) + } + pub(crate) fn write_pending_checkpoint( &self, output: &mut ConsensusCommitOutput, @@ -4062,6 +4191,7 @@ impl AuthorityPerEpochStore { #[derive(Default)] pub(crate) struct ConsensusCommitOutput { // Consensus and reconfig state + consensus_round: Round, consensus_messages_processed: BTreeSet, end_of_publish: BTreeSet, reconfig_state: Option, @@ -4090,11 +4220,18 @@ pub(crate) struct ConsensusCommitOutput { // jwk state pending_jwks: BTreeSet<(AuthorityName, JwkId, JWK)>, active_jwks: BTreeSet<(u64, (JwkId, JWK))>, + + // congestion control state + congestion_control_object_debts: Vec<(ObjectID, u64)>, + congestion_control_randomness_object_debts: Vec<(ObjectID, u64)>, } impl ConsensusCommitOutput { - pub fn new() -> Self { - Default::default() + pub fn new(consensus_round: Round) -> Self { + Self { + consensus_round, + ..Default::default() + } } fn insert_end_of_publish(&mut self, authority: AuthorityName) { @@ -4189,6 +4326,17 @@ impl ConsensusCommitOutput { self.active_jwks.insert((round, key)); } + fn set_congestion_control_object_debts(&mut self, object_debts: Vec<(ObjectID, u64)>) { + self.congestion_control_object_debts = object_debts; + } + + fn set_congestion_control_randomness_object_debts( + &mut self, + object_debts: Vec<(ObjectID, u64)>, + ) { + self.congestion_control_randomness_object_debts = object_debts; + } + pub fn write_to_batch( self, epoch_store: &AuthorityPerEpochStore, @@ -4285,6 +4433,29 @@ impl ConsensusCommitOutput { self.active_jwks.into_iter().map(|j| (j, ())), )?; + batch.insert_batch( + &tables.congestion_control_object_debts, + self.congestion_control_object_debts + .into_iter() + .map(|(object_id, debt)| { + ( + object_id, + CongestionPerObjectDebt::new(self.consensus_round, debt), + ) + }), + )?; + batch.insert_batch( + &tables.congestion_control_randomness_object_debts, + self.congestion_control_randomness_object_debts + .into_iter() + .map(|(object_id, debt)| { + ( + object_id, + CongestionPerObjectDebt::new(self.consensus_round, debt), + ) + }), + )?; + Ok(()) } } @@ -4293,12 +4464,8 @@ impl GetSharedLocks for AuthorityPerEpochStore { fn get_shared_locks( &self, key: &TransactionKey, - ) -> Result, SuiError> { - Ok(self - .tables()? - .assigned_shared_object_versions_v2 - .get(key)? - .unwrap_or_default()) + ) -> SuiResult>> { + Ok(self.tables()?.assigned_shared_object_versions_v2.get(key)?) } } diff --git a/crates/sui-core/src/authority/shared_object_congestion_tracker.rs b/crates/sui-core/src/authority/shared_object_congestion_tracker.rs index 237d400114a43..82250fe4e37e9 100644 --- a/crates/sui-core/src/authority/shared_object_congestion_tracker.rs +++ b/crates/sui-core/src/authority/shared_object_congestion_tracker.rs @@ -1,11 +1,15 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 +use super::authority_per_epoch_store::AuthorityEpochTables; use crate::authority::transaction_deferral::DeferralKey; +use crate::consensus_handler::VerifiedSequencedConsensusTransaction; use narwhal_types::Round; +use serde::{Deserialize, Serialize}; use std::collections::HashMap; -use sui_protocol_config::PerObjectCongestionControlMode; +use sui_protocol_config::{PerObjectCongestionControlMode, ProtocolConfig}; use sui_types::base_types::{ObjectID, TransactionDigest}; +use sui_types::error::SuiResult; use sui_types::executable_transaction::VerifiedExecutableTransaction; use sui_types::transaction::{Argument, SharedInputObject, TransactionDataAPI}; @@ -25,35 +29,71 @@ use sui_types::transaction::{Argument, SharedInputObject, TransactionDataAPI}; pub struct SharedObjectCongestionTracker { object_execution_cost: HashMap, mode: PerObjectCongestionControlMode, + max_accumulated_txn_cost_per_object_in_commit: u64, gas_budget_based_txn_cost_cap_factor: Option, + gas_budget_based_txn_cost_absolute_cap: Option, + max_txn_cost_overage_per_object_in_commit: u64, } impl SharedObjectCongestionTracker { pub fn new( + initial_object_debts: impl IntoIterator, mode: PerObjectCongestionControlMode, + max_accumulated_txn_cost_per_object_in_commit: Option, gas_budget_based_txn_cost_cap_factor: Option, + gas_budget_based_txn_cost_absolute_cap_commit_count: Option, + max_txn_cost_overage_per_object_in_commit: u64, ) -> Self { + let max_accumulated_txn_cost_per_object_in_commit = + if mode == PerObjectCongestionControlMode::None { + 0 + } else { + max_accumulated_txn_cost_per_object_in_commit.expect( + "max_accumulated_txn_cost_per_object_in_commit must be set if mode is not None", + ) + }; Self { - object_execution_cost: HashMap::new(), + object_execution_cost: initial_object_debts.into_iter().collect(), mode, + max_accumulated_txn_cost_per_object_in_commit, gas_budget_based_txn_cost_cap_factor, + gas_budget_based_txn_cost_absolute_cap: + gas_budget_based_txn_cost_absolute_cap_commit_count + .map(|m| m * max_accumulated_txn_cost_per_object_in_commit), + max_txn_cost_overage_per_object_in_commit, } } - pub fn new_with_initial_value_for_test( - init_values: &[(ObjectID, u64)], - mode: PerObjectCongestionControlMode, - gas_budget_based_txn_cost_cap_factor: Option, - ) -> Self { - let mut object_execution_cost = HashMap::new(); - for (object_id, total_cost) in init_values { - object_execution_cost.insert(*object_id, *total_cost); - } - Self { - object_execution_cost, - mode, - gas_budget_based_txn_cost_cap_factor, - } + pub fn from_protocol_config( + tables: &AuthorityEpochTables, + protocol_config: &ProtocolConfig, + round: Round, + for_randomness: bool, + transactions: &[VerifiedSequencedConsensusTransaction], + ) -> SuiResult { + let max_accumulated_txn_cost_per_object_in_commit = + protocol_config.max_accumulated_txn_cost_per_object_in_mysticeti_commit_as_option(); + Ok(Self::new( + tables.load_initial_object_debts( + round, + for_randomness, + protocol_config, + transactions, + )?, + protocol_config.per_object_congestion_control_mode(), + if for_randomness { + protocol_config + .max_accumulated_randomness_txn_cost_per_object_in_mysticeti_commit_as_option() + .or(max_accumulated_txn_cost_per_object_in_commit) + } else { + max_accumulated_txn_cost_per_object_in_commit + }, + protocol_config.gas_budget_based_txn_cost_cap_factor_as_option(), + protocol_config.gas_budget_based_txn_cost_absolute_cap_commit_count_as_option(), + protocol_config + 
.max_txn_cost_overage_per_object_in_commit_as_option() + .unwrap_or(0), + )) } // Given a list of shared input objects, returns the starting cost of a transaction that operates on @@ -84,7 +124,6 @@ impl SharedObjectCongestionTracker { pub fn should_defer_due_to_object_congestion( &self, cert: &VerifiedExecutableTransaction, - max_accumulated_txn_cost_per_object_in_commit: u64, previously_deferred_tx_digests: &HashMap, commit_round: Round, ) -> Option<(DeferralKey, Vec)> { @@ -97,7 +136,18 @@ impl SharedObjectCongestionTracker { } let start_cost = self.compute_tx_start_at_cost(&shared_input_objects); - if start_cost + tx_cost <= max_accumulated_txn_cost_per_object_in_commit { + // Allow tx if it's within budget. + if start_cost + tx_cost <= self.max_accumulated_txn_cost_per_object_in_commit { + return None; + } + + // Allow over-budget tx if it's not above the overage limit. + if start_cost <= self.max_accumulated_txn_cost_per_object_in_commit + && start_cost + tx_cost + <= self + .max_accumulated_txn_cost_per_object_in_commit + .saturating_add(self.max_txn_cost_overage_per_object_in_commit) + { return None; } @@ -156,6 +206,28 @@ impl SharedObjectCongestionTracker { } } + // Returns accumulated debts for objects whose budgets have been exceeded over the course + // of the commit. Consumes the tracker object, since this should only be called once after + // all tx have been processed. + pub fn accumulated_debts(self) -> Vec<(ObjectID, u64)> { + if self.max_txn_cost_overage_per_object_in_commit == 0 { + return vec![]; // early-exit if overage is not allowed + } + + self.object_execution_cost + .into_iter() + .filter_map(|(obj_id, cost)| { + let remaining_cost = + cost.saturating_sub(self.max_accumulated_txn_cost_per_object_in_commit); + if remaining_cost > 0 { + Some((obj_id, remaining_cost)) + } else { + None + } + }) + .collect() + } + // Returns the maximum cost of all objects. pub fn max_cost(&self) -> u64 { self.object_execution_cost @@ -178,10 +250,34 @@ impl SharedObjectCongestionTracker { } } } - (number_of_move_call + number_of_move_input) as u64 + let cap = (number_of_move_call + number_of_move_input) as u64 * self .gas_budget_based_txn_cost_cap_factor - .expect("cap factor must be set if TotalGasBudgetWithCap mode is used.") + .expect("cap factor must be set if TotalGasBudgetWithCap mode is used."); + + // Apply absolute cap if configured. 
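The new admission rule in `should_defer_due_to_object_congestion` above has two tiers: a transaction is admitted if it stays within the per-commit budget, and the first transaction to cross the budget may still be admitted as long as it starts under the budget and the total stays within budget plus the configured overage. A minimal sketch of just that predicate, with a few illustrative checks (the numbers are made up):

// start_cost: accumulated cost of the touched object(s); tx_cost: this transaction's cost.
fn allow_tx(start_cost: u64, tx_cost: u64, budget: u64, overage: u64) -> bool {
    // Tier 1: fits within the per-commit budget.
    if start_cost + tx_cost <= budget {
        return true;
    }
    // Tier 2: the first transaction to cross the budget may overshoot, up to the overage.
    start_cost <= budget && start_cost + tx_cost <= budget.saturating_add(overage)
}

fn main() {
    assert!(allow_tx(0, 80, 100, 0));        // fits the budget outright
    assert!(allow_tx(90, 100, 100, 1_000));  // crosses the budget, admitted via overage
    assert!(!allow_tx(150, 10, 100, 1_000)); // object already over budget: defer
}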
+ std::cmp::min( + cap, + self.gas_budget_based_txn_cost_absolute_cap + .unwrap_or(u64::MAX), + ) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum CongestionPerObjectDebt { + V1(Round, u64), +} + +impl CongestionPerObjectDebt { + pub fn new(round: Round, debt: u64) -> Self { + Self::V1(round, debt) + } + + pub fn into_v1(self) -> (Round, u64) { + match self { + Self::V1(round, debt) => (round, debt), + } } } @@ -214,12 +310,14 @@ mod object_cost_tests { let object_id_1 = ObjectID::random(); let object_id_2 = ObjectID::random(); - let shared_object_congestion_tracker = - SharedObjectCongestionTracker::new_with_initial_value_for_test( - &[(object_id_0, 5), (object_id_1, 10)], - PerObjectCongestionControlMode::TotalGasBudget, - None, - ); + let shared_object_congestion_tracker = SharedObjectCongestionTracker::new( + [(object_id_0, 5), (object_id_1, 10)], + PerObjectCongestionControlMode::TotalGasBudget, + Some(0), // not part of this test + None, + None, + 0, + ); let shared_input_objects = construct_shared_input_objects(&[(object_id_0, false)]); assert_eq!( @@ -363,10 +461,13 @@ mod object_cost_tests { // 1 10 // object 0: | // object 1: | - SharedObjectCongestionTracker::new_with_initial_value_for_test( - &[(shared_obj_0, 10), (shared_obj_1, 1)], + SharedObjectCongestionTracker::new( + [(shared_obj_0, 10), (shared_obj_1, 1)], mode, + Some(max_accumulated_txn_cost_per_object_in_commit), + None, None, + 0, ) } PerObjectCongestionControlMode::TotalTxCount => { @@ -374,10 +475,13 @@ mod object_cost_tests { // 1 2 // object 0: | // object 1: | - SharedObjectCongestionTracker::new_with_initial_value_for_test( - &[(shared_obj_0, 2), (shared_obj_1, 1)], + SharedObjectCongestionTracker::new( + [(shared_obj_0, 2), (shared_obj_1, 1)], mode, + Some(max_accumulated_txn_cost_per_object_in_commit), + None, None, + 0, ) } PerObjectCongestionControlMode::TotalGasBudgetWithCap => { @@ -385,10 +489,13 @@ mod object_cost_tests { // 1 10 // object 0: | // object 1: | - SharedObjectCongestionTracker::new_with_initial_value_for_test( - &[(shared_obj_0, 10), (shared_obj_1, 1)], + SharedObjectCongestionTracker::new( + [(shared_obj_0, 10), (shared_obj_1, 1)], mode, + Some(max_accumulated_txn_cost_per_object_in_commit), Some(45), // Make the cap just less than the gas budget, there are 1 objects in tx. 
+ None, + 0, ) } }; @@ -397,12 +504,7 @@ mod object_cost_tests { for mutable in [true, false].iter() { let tx = build_transaction(&[(shared_obj_0, *mutable)], tx_gas_budget); if let Some((_, congested_objects)) = shared_object_congestion_tracker - .should_defer_due_to_object_congestion( - &tx, - max_accumulated_txn_cost_per_object_in_commit, - &HashMap::new(), - 0, - ) + .should_defer_due_to_object_congestion(&tx, &HashMap::new(), 0) { assert_eq!(congested_objects.len(), 1); assert_eq!(congested_objects[0], shared_obj_0); @@ -417,12 +519,7 @@ mod object_cost_tests { for mutable in [true, false].iter() { let tx = build_transaction(&[(shared_obj_1, *mutable)], tx_gas_budget); assert!(shared_object_congestion_tracker - .should_defer_due_to_object_congestion( - &tx, - max_accumulated_txn_cost_per_object_in_commit, - &HashMap::new(), - 0, - ) + .should_defer_due_to_object_congestion(&tx, &HashMap::new(), 0,) .is_none()); } @@ -434,12 +531,7 @@ mod object_cost_tests { tx_gas_budget, ); if let Some((_, congested_objects)) = shared_object_congestion_tracker - .should_defer_due_to_object_congestion( - &tx, - max_accumulated_txn_cost_per_object_in_commit, - &HashMap::new(), - 0, - ) + .should_defer_due_to_object_congestion(&tx, &HashMap::new(), 0) { assert_eq!(congested_objects.len(), 1); assert_eq!(congested_objects[0], shared_obj_0); @@ -461,9 +553,15 @@ mod object_cost_tests { ) { let shared_obj_0 = ObjectID::random(); let tx = build_transaction(&[(shared_obj_0, true)], 100); - // Make should_defer_due_to_object_congestion always defer transactions. - let max_accumulated_txn_cost_per_object_in_commit = 0; - let shared_object_congestion_tracker = SharedObjectCongestionTracker::new(mode, Some(2)); + + let shared_object_congestion_tracker = SharedObjectCongestionTracker::new( + [], + mode, + Some(0), // Make should_defer_due_to_object_congestion always defer transactions. + Some(2), + None, + 0, + ); // Insert a random pre-existing transaction. let mut previously_deferred_tx_digests = HashMap::new(); @@ -484,7 +582,6 @@ mod object_cost_tests { _, )) = shared_object_congestion_tracker.should_defer_due_to_object_congestion( &tx, - max_accumulated_txn_cost_per_object_in_commit, &previously_deferred_tx_digests, 10, ) { @@ -511,7 +608,6 @@ mod object_cost_tests { _, )) = shared_object_congestion_tracker.should_defer_due_to_object_congestion( &tx, - max_accumulated_txn_cost_per_object_in_commit, &previously_deferred_tx_digests, 10, ) { @@ -539,7 +635,6 @@ mod object_cost_tests { _, )) = shared_object_congestion_tracker.should_defer_due_to_object_congestion( &tx, - max_accumulated_txn_cost_per_object_in_commit, &previously_deferred_tx_digests, 10, ) { @@ -550,6 +645,118 @@ mod object_cost_tests { } } + #[rstest] + fn test_should_defer_allow_overage( + #[values( + PerObjectCongestionControlMode::TotalGasBudget, + PerObjectCongestionControlMode::TotalTxCount, + PerObjectCongestionControlMode::TotalGasBudgetWithCap + )] + mode: PerObjectCongestionControlMode, + ) { + telemetry_subscribers::init_for_testing(); + + // Creates two shared objects and three transactions that operate on these objects. + let shared_obj_0 = ObjectID::random(); + let shared_obj_1 = ObjectID::random(); + + let tx_gas_budget = 100; + + // Set max_accumulated_txn_cost_per_object_in_commit to only allow 1 transaction to go through + // before overage occurs. 
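In `TotalGasBudgetWithCap` mode, `get_tx_cost_cap` above now bounds the factor-based cap by an absolute cap, which `new` derives as the cap commit count times the per-commit budget, and the charged cost appears to be the smaller of the gas budget and that cap. A hedged sketch of the arithmetic; the 4_000 factor-based cap is a hypothetical value, while 200 matches `test_tx_cost_absolute_cap` further below (budget 100, cap commit count 2):

fn capped_tx_cost(
    gas_budget: u64,
    factor_based_cap: u64,     // (num move calls + num move inputs) * cap factor
    absolute_cap: Option<u64>, // cap commit count * per-commit budget, when configured
) -> u64 {
    let cap = factor_based_cap.min(absolute_cap.unwrap_or(u64::MAX));
    gas_budget.min(cap)
}

fn main() {
    // A 2_000 gas budget is charged as 200 once the absolute cap of 2 * 100 = 200 applies.
    assert_eq!(capped_tx_cost(2_000, 4_000, Some(200)), 200);
    // Without an absolute cap, only the factor-based cap limits the charge.
    assert_eq!(capped_tx_cost(100, 90, None), 90);
}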
+ let max_accumulated_txn_cost_per_object_in_commit = match mode { + PerObjectCongestionControlMode::None => unreachable!(), + PerObjectCongestionControlMode::TotalGasBudget => tx_gas_budget + 1, + PerObjectCongestionControlMode::TotalTxCount => 2, + PerObjectCongestionControlMode::TotalGasBudgetWithCap => tx_gas_budget - 1, + }; + + let shared_object_congestion_tracker = match mode { + PerObjectCongestionControlMode::None => unreachable!(), + PerObjectCongestionControlMode::TotalGasBudget => { + // Construct object execution cost as following + // 90 102 + // object 0: | + // object 1: | + SharedObjectCongestionTracker::new( + [(shared_obj_0, 102), (shared_obj_1, 90)], + mode, + Some(max_accumulated_txn_cost_per_object_in_commit), + None, + None, + max_accumulated_txn_cost_per_object_in_commit * 10, + ) + } + PerObjectCongestionControlMode::TotalTxCount => { + // Construct object execution cost as following + // 2 3 + // object 0: | + // object 1: | + SharedObjectCongestionTracker::new( + [(shared_obj_0, 3), (shared_obj_1, 2)], + mode, + Some(max_accumulated_txn_cost_per_object_in_commit), + None, + None, + max_accumulated_txn_cost_per_object_in_commit * 10, + ) + } + PerObjectCongestionControlMode::TotalGasBudgetWithCap => { + // Construct object execution cost as following + // 90 100 + // object 0: | + // object 1: | + SharedObjectCongestionTracker::new( + [(shared_obj_0, 100), (shared_obj_1, 90)], + mode, + Some(max_accumulated_txn_cost_per_object_in_commit), + Some(45), // Make the cap just less than the gas budget, there are 1 objects in tx. + None, + max_accumulated_txn_cost_per_object_in_commit * 10, + ) + } + }; + + // Read/write to object 0 should be deferred. + for mutable in [true, false].iter() { + let tx = build_transaction(&[(shared_obj_0, *mutable)], tx_gas_budget); + if let Some((_, congested_objects)) = shared_object_congestion_tracker + .should_defer_due_to_object_congestion(&tx, &HashMap::new(), 0) + { + assert_eq!(congested_objects.len(), 1); + assert_eq!(congested_objects[0], shared_obj_0); + } else { + panic!("should defer"); + } + } + + // Read/write to object 1 should go through even though the budget is exceeded. + for mutable in [true, false].iter() { + let tx = build_transaction(&[(shared_obj_1, *mutable)], tx_gas_budget); + assert!(shared_object_congestion_tracker + .should_defer_due_to_object_congestion(&tx, &HashMap::new(), 0,) + .is_none()); + } + + // Transactions touching both objects should be deferred, with object 0 as the congested object. 
+ for mutable_0 in [true, false].iter() { + for mutable_1 in [true, false].iter() { + let tx = build_transaction( + &[(shared_obj_0, *mutable_0), (shared_obj_1, *mutable_1)], + tx_gas_budget, + ); + if let Some((_, congested_objects)) = shared_object_congestion_tracker + .should_defer_due_to_object_congestion(&tx, &HashMap::new(), 0) + { + assert_eq!(congested_objects.len(), 1); + assert_eq!(congested_objects[0], shared_obj_0); + } else { + panic!("should defer"); + } + } + } + } + #[rstest] fn test_bump_object_execution_cost( #[values( @@ -565,12 +772,14 @@ mod object_cost_tests { let cap_factor = Some(1); - let mut shared_object_congestion_tracker = - SharedObjectCongestionTracker::new_with_initial_value_for_test( - &[(object_id_0, 5), (object_id_1, 10)], - mode, - cap_factor, - ); + let mut shared_object_congestion_tracker = SharedObjectCongestionTracker::new( + [(object_id_0, 5), (object_id_1, 10)], + mode, + Some(0), // not part of this test + cap_factor, + None, + 0, + ); assert_eq!(shared_object_congestion_tracker.max_cost(), 10); // Read two objects should not change the object execution cost. @@ -578,10 +787,13 @@ mod object_cost_tests { shared_object_congestion_tracker.bump_object_execution_cost(&cert); assert_eq!( shared_object_congestion_tracker, - SharedObjectCongestionTracker::new_with_initial_value_for_test( - &[(object_id_0, 5), (object_id_1, 10)], + SharedObjectCongestionTracker::new( + [(object_id_0, 5), (object_id_1, 10)], mode, + Some(0), // not part of this test cap_factor, + None, + 0, ) ); assert_eq!(shared_object_congestion_tracker.max_cost(), 10); @@ -597,10 +809,13 @@ mod object_cost_tests { }; assert_eq!( shared_object_congestion_tracker, - SharedObjectCongestionTracker::new_with_initial_value_for_test( - &[(object_id_0, expected_object_0_cost), (object_id_1, 10)], + SharedObjectCongestionTracker::new( + [(object_id_0, expected_object_0_cost), (object_id_1, 10)], mode, + Some(0), // not part of this test cap_factor, + None, + 0, ) ); assert_eq!( @@ -626,14 +841,17 @@ mod object_cost_tests { shared_object_congestion_tracker.bump_object_execution_cost(&cert); assert_eq!( shared_object_congestion_tracker, - SharedObjectCongestionTracker::new_with_initial_value_for_test( - &[ + SharedObjectCongestionTracker::new( + [ (object_id_0, expected_object_cost), (object_id_1, expected_object_cost), (object_id_2, expected_object_cost) ], mode, + Some(0), // not part of this test cap_factor, + None, + 0, ) ); assert_eq!( @@ -660,14 +878,17 @@ mod object_cost_tests { shared_object_congestion_tracker.bump_object_execution_cost(&cert); assert_eq!( shared_object_congestion_tracker, - SharedObjectCongestionTracker::new_with_initial_value_for_test( - &[ + SharedObjectCongestionTracker::new( + [ (object_id_0, expected_object_cost), (object_id_1, expected_object_cost), (object_id_2, expected_object_cost) ], mode, + Some(0), // not part of this test cap_factor, + None, + 0, ) ); assert_eq!( @@ -675,4 +896,152 @@ mod object_cost_tests { expected_object_cost ); } + + #[rstest] + fn test_accumulated_debts( + #[values( + PerObjectCongestionControlMode::TotalGasBudget, + PerObjectCongestionControlMode::TotalTxCount, + PerObjectCongestionControlMode::TotalGasBudgetWithCap + )] + mode: PerObjectCongestionControlMode, + ) { + telemetry_subscribers::init_for_testing(); + + // Creates two shared objects and three transactions that operate on these objects. 
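To verify the expectations of `test_should_defer_allow_overage` above by hand in the `TotalGasBudget` case: the budget is tx_gas_budget + 1 = 101 and the overage allowance is ten times that. Object 0 already sits at 102, past the budget, so its transactions must defer; object 1 sits at 90, under the budget, so one more 100-cost transaction slips through on the overage tier. A small check of those numbers using the same two-tier predicate sketched earlier:

fn allow_tx(start_cost: u64, tx_cost: u64, budget: u64, overage: u64) -> bool {
    start_cost + tx_cost <= budget
        || (start_cost <= budget && start_cost + tx_cost <= budget.saturating_add(overage))
}

fn main() {
    let (budget, overage, tx_cost) = (101, 1_010, 100);
    assert!(!allow_tx(102, tx_cost, budget, overage)); // object 0 starts over budget: defer
    assert!(allow_tx(90, tx_cost, budget, overage));   // object 1: 90 + 100 <= 101 + 1_010
}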
+ let shared_obj_0 = ObjectID::random(); + let shared_obj_1 = ObjectID::random(); + + let tx_gas_budget = 100; + + // Set max_accumulated_txn_cost_per_object_in_commit to only allow 1 transaction to go through + // before overage occurs. + let max_accumulated_txn_cost_per_object_in_commit = match mode { + PerObjectCongestionControlMode::None => unreachable!(), + PerObjectCongestionControlMode::TotalGasBudget + | PerObjectCongestionControlMode::TotalGasBudgetWithCap => 90, + PerObjectCongestionControlMode::TotalTxCount => 2, + }; + + let mut shared_object_congestion_tracker = match mode { + PerObjectCongestionControlMode::None => unreachable!(), + PerObjectCongestionControlMode::TotalGasBudget => { + // Starting with two objects with accumulated cost 80. + SharedObjectCongestionTracker::new( + [(shared_obj_0, 80), (shared_obj_1, 80)], + mode, + Some(max_accumulated_txn_cost_per_object_in_commit), + None, + None, + max_accumulated_txn_cost_per_object_in_commit * 10, + ) + } + PerObjectCongestionControlMode::TotalGasBudgetWithCap => { + // Starting with two objects with accumulated cost 80. + SharedObjectCongestionTracker::new( + [(shared_obj_0, 80), (shared_obj_1, 80)], + mode, + Some(max_accumulated_txn_cost_per_object_in_commit), + Some(45), + None, + max_accumulated_txn_cost_per_object_in_commit * 10, + ) + } + PerObjectCongestionControlMode::TotalTxCount => { + // Starting with two objects with accumulated tx count 2. + SharedObjectCongestionTracker::new( + [(shared_obj_0, 2), (shared_obj_1, 2)], + mode, + Some(max_accumulated_txn_cost_per_object_in_commit), + None, + None, + max_accumulated_txn_cost_per_object_in_commit * 10, + ) + } + }; + + // Simulate a tx on object 0 that exceeds the budget. + for mutable in [true, false].iter() { + let tx = build_transaction(&[(shared_obj_0, *mutable)], tx_gas_budget); + shared_object_congestion_tracker.bump_object_execution_cost(&tx); + } + + // Verify that accumulated_debts reports the debt for object 0. 
+ let accumulated_debts = shared_object_congestion_tracker.accumulated_debts(); + assert_eq!(accumulated_debts.len(), 1); + match mode { + PerObjectCongestionControlMode::None => unreachable!(), + PerObjectCongestionControlMode::TotalGasBudget => { + assert_eq!(accumulated_debts[0], (shared_obj_0, 90)); // init 80 + cost 100 - budget 90 = 90 + } + PerObjectCongestionControlMode::TotalGasBudgetWithCap => { + assert_eq!(accumulated_debts[0], (shared_obj_0, 80)); // init 80 + capped cost 90 - budget 90 = 80 + } + PerObjectCongestionControlMode::TotalTxCount => { + assert_eq!(accumulated_debts[0], (shared_obj_0, 1)); // init 2 + 1 tx - budget 2 = 1 + } + } + } + + #[test] + fn test_accumulated_debts_empty() { + let object_id_0 = ObjectID::random(); + let object_id_1 = ObjectID::random(); + let object_id_2 = ObjectID::random(); + + let shared_object_congestion_tracker = SharedObjectCongestionTracker::new( + [(object_id_0, 5), (object_id_1, 10), (object_id_2, 100)], + PerObjectCongestionControlMode::TotalGasBudget, + Some(100), + None, + None, + 0, + ); + + let accumulated_debts = shared_object_congestion_tracker.accumulated_debts(); + assert!(accumulated_debts.is_empty()); + } + + #[test] + fn test_tx_cost_absolute_cap() { + let object_id_0 = ObjectID::random(); + let object_id_1 = ObjectID::random(); + let object_id_2 = ObjectID::random(); + + let tx_gas_budget = 2000; + + let mut shared_object_congestion_tracker = SharedObjectCongestionTracker::new( + [(object_id_0, 5), (object_id_1, 10), (object_id_2, 100)], + PerObjectCongestionControlMode::TotalGasBudgetWithCap, + Some(100), + Some(1000), + Some(2), + 1000, + ); + + // Create a transaction using all three objects + let tx = build_transaction( + &[ + (object_id_0, false), + (object_id_1, false), + (object_id_2, true), + ], + tx_gas_budget, + ); + + // Verify that the transaction is allowed to execute. + // 2000 gas budget would exceed overage limit of 1000 but is capped to 200 by the absolute cap. + assert!(shared_object_congestion_tracker + .should_defer_due_to_object_congestion(&tx, &HashMap::new(), 0) + .is_none()); + + // Verify max cost after bumping is limited by the absolute cap. + shared_object_congestion_tracker.bump_object_execution_cost(&tx); + assert_eq!(300, shared_object_congestion_tracker.max_cost()); + + // Verify accumulated debts still uses the per-commit budget to decrement. 
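The bookkeeping exercised by `test_accumulated_debts` above reduces to: whatever an object accumulates beyond the per-commit budget is carried forward as debt into a later commit. A worked check of the expected values from the test comments (assumed simplification of the tracker's arithmetic):

fn carried_debt(initial_cost: u64, tx_cost: u64, budget: u64) -> u64 {
    (initial_cost + tx_cost).saturating_sub(budget)
}

fn main() {
    assert_eq!(carried_debt(80, 100, 90), 90); // TotalGasBudget: 80 + 100 - 90
    assert_eq!(carried_debt(80, 90, 90), 80);  // TotalGasBudgetWithCap: cost capped to 90 per the test comment
    assert_eq!(carried_debt(2, 1, 2), 1);      // TotalTxCount: one more tx on top of a count of 2
}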
+ let accumulated_debts = shared_object_congestion_tracker.accumulated_debts(); + assert_eq!(accumulated_debts.len(), 1); + assert_eq!(accumulated_debts[0], (object_id_2, 200)); + } } diff --git a/crates/sui-core/src/authority_client.rs b/crates/sui-core/src/authority_client.rs index 78461b0656476..9537dec300c71 100644 --- a/crates/sui-core/src/authority_client.rs +++ b/crates/sui-core/src/authority_client.rs @@ -11,6 +11,7 @@ use std::time::Duration; use sui_network::{api::ValidatorClient, tonic}; use sui_types::base_types::AuthorityName; use sui_types::committee::CommitteeWithNetworkMetadata; +use sui_types::crypto::NetworkPublicKey; use sui_types::messages_checkpoint::{ CheckpointRequest, CheckpointRequestV2, CheckpointResponse, CheckpointResponseV2, }; @@ -97,15 +98,32 @@ pub struct NetworkAuthorityClient { } impl NetworkAuthorityClient { - pub async fn connect(address: &Multiaddr) -> anyhow::Result { - let channel = mysten_network::client::connect(address) + pub async fn connect( + address: &Multiaddr, + tls_target: Option, + ) -> anyhow::Result { + let tls_config = tls_target.map(|tls_target| { + sui_tls::create_rustls_client_config( + tls_target, + sui_tls::SUI_VALIDATOR_SERVER_NAME.to_string(), + None, + ) + }); + let channel = mysten_network::client::connect(address, tls_config) .await .map_err(|err| anyhow!(err.to_string()))?; Ok(Self::new(channel)) } - pub fn connect_lazy(address: &Multiaddr) -> Self { - let client: SuiResult<_> = mysten_network::client::connect_lazy(address) + pub fn connect_lazy(address: &Multiaddr, tls_target: Option) -> Self { + let tls_config = tls_target.map(|tls_target| { + sui_tls::create_rustls_client_config( + tls_target, + sui_tls::SUI_VALIDATOR_SERVER_NAME.to_string(), + None, + ) + }); + let client: SuiResult<_> = mysten_network::client::connect_lazy(address, tls_config) .map(ValidatorClient::new) .map_err(|err| err.to_string().into()); Self { client } @@ -265,7 +283,16 @@ pub fn make_network_authority_clients_with_network_config( for (name, (_state, network_metadata)) in committee.validators() { let address = network_metadata.network_address.clone(); let address = address.rewrite_udp_to_tcp(); - let maybe_channel = network_config.connect_lazy(&address).map_err(|e| { + // TODO: Enable TLS on this interface with below config, once support is rolled out to validators. + // let tls_config = network_metadata.network_public_key.as_ref().map(|key| { + // sui_tls::create_rustls_client_config( + // key.clone(), + // sui_tls::SUI_VALIDATOR_SERVER_NAME.to_string(), + // None, + // ) + // }); + // TODO: Change below code to generate a SuiError if no valid TLS config is available. 
+ let maybe_channel = network_config.connect_lazy(&address, None).map_err(|e| { tracing::error!( address = %address, name = %name, diff --git a/crates/sui-core/src/authority_server.rs b/crates/sui-core/src/authority_server.rs index a5ece01381f9b..0e9aeb30ee1ef 100644 --- a/crates/sui-core/src/authority_server.rs +++ b/crates/sui-core/src/authority_server.rs @@ -4,7 +4,9 @@ use anyhow::Result; use async_trait::async_trait; +use fastcrypto::traits::KeyPair; use mysten_metrics::spawn_monitored_task; +use mysten_network::server::SUI_TLS_SERVER_NAME; use prometheus::{ register_histogram_with_registry, register_int_counter_vec_with_registry, register_int_counter_with_registry, Histogram, IntCounter, IntCounterVec, Registry, @@ -149,6 +151,11 @@ impl AuthorityServer { self, address: Multiaddr, ) -> Result { + let tls_config = sui_tls::create_rustls_server_config( + self.state.config.network_key_pair().copy().private(), + SUI_TLS_SERVER_NAME.to_string(), + sui_tls::AllowAll, + ); let mut server = mysten_network::config::Config::new() .server_builder() .add_service(ValidatorServer::new(ValidatorService::new_for_tests( @@ -156,7 +163,7 @@ impl AuthorityServer { self.consensus_adapter, self.metrics, ))) - .bind(&address) + .bind(&address, Some(tls_config)) .await .unwrap(); let local_addr = server.local_addr().to_owned(); @@ -828,7 +835,7 @@ impl ValidatorService { .await? } ConsensusTransactionKind::UserTransaction(tx) => { - self.state.await_transaction_effects(*tx.digest()).await? + self.state.await_transaction_effects(*tx.digest(), epoch_store).await? } _ => panic!("`handle_submit_to_consensus` received transaction that is not a CertifiedTransaction or UserTransaction"), }; diff --git a/crates/sui-core/src/checkpoints/mod.rs b/crates/sui-core/src/checkpoints/mod.rs index d9aed27ff48f3..77694b0cc9a6f 100644 --- a/crates/sui-core/src/checkpoints/mod.rs +++ b/crates/sui-core/src/checkpoints/mod.rs @@ -18,10 +18,8 @@ use crate::execution_cache::TransactionCacheRead; use crate::stake_aggregator::{InsertResult, MultiStakeAggregator}; use crate::state_accumulator::StateAccumulator; use diffy::create_patch; -use futures::future::{select, Either}; -use futures::FutureExt; use itertools::Itertools; -use mysten_metrics::{monitored_scope, spawn_monitored_task, MonitoredFutureExt}; +use mysten_metrics::{monitored_future, monitored_scope, MonitoredFutureExt}; use parking_lot::Mutex; use serde::{Deserialize, Serialize}; use sui_macros::fail_point; @@ -63,10 +61,7 @@ use sui_types::messages_consensus::ConsensusTransactionKey; use sui_types::signature::GenericSignature; use sui_types::sui_system_state::{SuiSystemState, SuiSystemStateTrait}; use sui_types::transaction::{TransactionDataAPI, TransactionKey, TransactionKind}; -use tokio::{ - sync::{watch, Notify}, - time::timeout, -}; +use tokio::{sync::Notify, task::JoinSet, time::timeout}; use tracing::{debug, error, info, instrument, warn}; use typed_store::traits::{TableSummary, TypedStoreDebug}; use typed_store::DBMapUtils; @@ -862,7 +857,6 @@ pub struct CheckpointBuilder { effects_store: Arc, accumulator: Weak, output: Box, - exit: watch::Receiver<()>, metrics: Arc, max_transactions_per_checkpoint: usize, max_checkpoint_size_bytes: usize, @@ -872,7 +866,6 @@ pub struct CheckpointAggregator { tables: Arc, epoch_store: Arc, notify: Arc, - exit: watch::Receiver<()>, current: Option, output: Box, state: Arc, @@ -900,7 +893,6 @@ impl CheckpointBuilder { effects_store: Arc, accumulator: Weak, output: Box, - exit: watch::Receiver<()>, notify_aggregator: Arc, metrics: 
Arc, max_transactions_per_checkpoint: usize, @@ -914,7 +906,6 @@ impl CheckpointBuilder { effects_store, accumulator, output, - exit, notify_aggregator, metrics, max_transactions_per_checkpoint, @@ -925,26 +916,10 @@ impl CheckpointBuilder { async fn run(mut self) { info!("Starting CheckpointBuilder"); loop { - // Check whether an exit signal has been received, if so we break the loop. - // This gives us a chance to exit, in case checkpoint making keeps failing. - match self.exit.has_changed() { - Ok(true) | Err(_) => { - break; - } - Ok(false) => (), - }; - self.maybe_build_checkpoints().await; - match select(self.exit.changed().boxed(), self.notify.notified().boxed()).await { - Either::Left(_) => { - // break loop on exit signal - break; - } - Either::Right(_) => {} - } + self.notify.notified().await; } - info!("Shutting down CheckpointBuilder"); } async fn maybe_build_checkpoints(&mut self) { @@ -1768,7 +1743,6 @@ impl CheckpointAggregator { tables: Arc, epoch_store: Arc, notify: Arc, - exit: watch::Receiver<()>, output: Box, state: Arc, metrics: Arc, @@ -1778,7 +1752,6 @@ impl CheckpointAggregator { tables, epoch_store, notify, - exit, current, output, state, @@ -1799,19 +1772,7 @@ impl CheckpointAggregator { continue; } - match select( - self.exit.changed().boxed(), - timeout(Duration::from_secs(1), self.notify.notified()).boxed(), - ) - .await - { - Either::Left(_) => { - // return on exit signal - info!("Shutting down CheckpointAggregator"); - return; - } - Either::Right(_) => {} - } + let _ = timeout(Duration::from_secs(1), self.notify.notified()).await; } } @@ -2240,14 +2201,14 @@ impl CheckpointService { metrics: Arc, max_transactions_per_checkpoint: usize, max_checkpoint_size_bytes: usize, - ) -> (Arc, watch::Sender<()> /* The exit sender */) { + ) -> (Arc, JoinSet<()> /* Handle to tasks */) { info!( "Starting checkpoint service with {max_transactions_per_checkpoint} max_transactions_per_checkpoint and {max_checkpoint_size_bytes} max_checkpoint_size_bytes" ); let notify_builder = Arc::new(Notify::new()); let notify_aggregator = Arc::new(Notify::new()); - let (exit_snd, exit_rcv) = watch::channel(()); + let mut tasks = JoinSet::new(); let builder = CheckpointBuilder::new( state.clone(), @@ -2257,27 +2218,22 @@ impl CheckpointService { effects_store, accumulator, checkpoint_output, - exit_rcv.clone(), notify_aggregator.clone(), metrics.clone(), max_transactions_per_checkpoint, max_checkpoint_size_bytes, ); - - let epoch_store_clone = epoch_store.clone(); - spawn_monitored_task!(epoch_store_clone.within_alive_epoch(builder.run())); + tasks.spawn(monitored_future!(builder.run())); let aggregator = CheckpointAggregator::new( checkpoint_store.clone(), epoch_store.clone(), notify_aggregator.clone(), - exit_rcv, certified_checkpoint_output, state.clone(), metrics.clone(), ); - - spawn_monitored_task!(aggregator.run()); + tasks.spawn(monitored_future!(aggregator.run())); let last_signature_index = epoch_store .get_last_checkpoint_signature_index() @@ -2291,7 +2247,8 @@ impl CheckpointService { last_signature_index, metrics, }); - (service, exit_snd) + + (service, tasks) } #[cfg(test)] @@ -2302,7 +2259,7 @@ impl CheckpointService { ) -> SuiResult { use crate::authority::authority_per_epoch_store::ConsensusCommitOutput; - let mut output = ConsensusCommitOutput::new(); + let mut output = ConsensusCommitOutput::new(0); epoch_store.write_pending_checkpoint(&mut output, &checkpoint)?; let mut batch = epoch_store.db_batch_for_test(); output.write_to_batch(epoch_store, &mut batch)?; @@ -2404,6 
+2361,7 @@ mod tests { use super::*; use crate::authority::test_authority_builder::TestAuthorityBuilder; use futures::future::BoxFuture; + use futures::FutureExt as _; use shared_crypto::intent::{Intent, IntentScope}; use std::collections::{BTreeMap, HashMap}; use std::ops::Deref; @@ -2535,7 +2493,7 @@ mod tests { &epoch_store, )); - let (checkpoint_service, _exit) = CheckpointService::spawn( + let (checkpoint_service, _tasks) = CheckpointService::spawn( state.clone(), checkpoint_store, epoch_store.clone(), diff --git a/crates/sui-core/src/consensus_handler.rs b/crates/sui-core/src/consensus_handler.rs index d00b8273f1d02..45e1a269a7876 100644 --- a/crates/sui-core/src/consensus_handler.rs +++ b/crates/sui-core/src/consensus_handler.rs @@ -45,9 +45,7 @@ use crate::{ }, checkpoints::{CheckpointService, CheckpointServiceNotify}, consensus_throughput_calculator::ConsensusThroughputCalculator, - consensus_types::consensus_output_api::{ - parse_block_transactions, ConsensusCommitAPI, ParsedTransaction, - }, + consensus_types::consensus_output_api::{parse_block_transactions, ConsensusCommitAPI}, execution_cache::ObjectCacheRead, scoring_decision::update_low_scoring_authorities, transaction_manager::TransactionManager, @@ -192,7 +190,7 @@ impl ConsensusHandler { impl ConsensusHandler { #[instrument(level = "debug", skip_all)] async fn handle_consensus_commit(&mut self, consensus_commit: impl ConsensusCommitAPI) { - let _scope = monitored_scope("HandleConsensusOutput"); + let _scope = monitored_scope("ConsensusCommitHandler::handle_consensus_commit"); let last_committed_round = self.last_consensus_stats.index.last_committed_round; @@ -295,7 +293,7 @@ impl ConsensusHandler { .inc(); { - let span = trace_span!("process_consensus_certs"); + let span = trace_span!("ConsensusHandler::HandleCommit::process_consensus_txns"); let _guard = span.enter(); for (authority_index, parsed_transactions) in consensus_commit.transactions() { // TODO: consider only messages within 1~3 rounds of the leader? 
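Stepping back to the checkpoints/mod.rs changes above: the explicit exit watch channel is gone, `CheckpointBuilder::run` simply awaits its notify, and `CheckpointService::spawn` returns a tokio `JoinSet` so the caller controls shutdown by dropping or aborting the set. A minimal tokio sketch of that lifecycle (assumes a tokio dependency with the rt, macros, and time features; the loops are stand-ins for the builder and aggregator):

use std::time::Duration;
use tokio::task::JoinSet;

#[tokio::main]
async fn main() {
    let mut tasks: JoinSet<()> = JoinSet::new();

    // Stand-ins for the builder / aggregator loops: they run until aborted.
    tasks.spawn(async {
        loop {
            tokio::time::sleep(Duration::from_millis(100)).await;
        }
    });
    tasks.spawn(async {
        loop {
            tokio::time::sleep(Duration::from_millis(100)).await;
        }
    });

    // The caller owns the JoinSet; dropping it (or calling abort_all / shutdown) cancels
    // the spawned loops, which replaces the old exit watch channel.
    drop(tasks);
}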
@@ -497,14 +495,8 @@ impl MysticetiConsensusHandler { tasks.spawn(monitored_future!(async move { while let Some(blocks_and_rejected_transactions) = transaction_receiver.recv().await { - let parsed_transactions = blocks_and_rejected_transactions - .into_iter() - .flat_map(|(block, rejected_transactions)| { - parse_block_transactions(&block, &rejected_transactions) - }) - .collect::>(); consensus_transaction_handler - .handle_consensus_transactions(parsed_transactions) + .handle_consensus_transactions(blocks_and_rejected_transactions) .await; } })); @@ -886,7 +878,18 @@ impl ConsensusTransactionHandler { self.enabled } - pub async fn handle_consensus_transactions(&self, parsed_transactions: Vec) { + pub async fn handle_consensus_transactions( + &self, + blocks_and_rejected_transactions: Vec<(VerifiedBlock, Vec)>, + ) { + let _scope = monitored_scope("ConsensusTransactionHandler::handle_consensus_transactions"); + + let parsed_transactions = blocks_and_rejected_transactions + .into_iter() + .flat_map(|(block, rejected_transactions)| { + parse_block_transactions(&block, &rejected_transactions) + }) + .collect::>(); let mut pending_consensus_transactions = vec![]; let executable_transactions: Vec<_> = parsed_transactions .into_iter() @@ -911,13 +914,10 @@ impl ConsensusTransactionHandler { return None; } pending_consensus_transactions.push(parsed.transaction.clone()); - let tx = VerifiedTransaction::new_from_verified(*tx.clone()); + let tx = VerifiedTransaction::new_unchecked(*tx.clone()); Some(VerifiedExecutableTransaction::new_from_consensus( tx, self.epoch_store.epoch(), - parsed.round, - parsed.authority, - parsed.transaction_index, )) } _ => None, @@ -984,7 +984,7 @@ mod tests { }, checkpoints::CheckpointServiceNoop, consensus_adapter::consensus_tests::{ - test_certificates, test_gas_objects, test_user_transaction, + test_certificates_with_gas_objects, test_user_transaction, }, post_consensus_tx_reorder::PostConsensusTxReorder, }; @@ -992,13 +992,27 @@ mod tests { #[tokio::test] pub async fn test_consensus_commit_handler() { // GIVEN - let mut objects = test_gas_objects(); - let shared_object = Object::shared_for_testing(); - objects.push(shared_object.clone()); + // 1 account keypair + let (sender, keypair) = deterministic_random_account_key(); + // 12 gas objects. + let gas_objects: Vec = (0..12) + .map(|_| Object::with_id_owner_for_testing(ObjectID::random(), sender)) + .collect(); + // 4 owned objects. + let owned_objects: Vec = (0..4) + .map(|_| Object::with_id_owner_for_testing(ObjectID::random(), sender)) + .collect(); + // 6 shared objects. + let shared_objects: Vec = (0..6) + .map(|_| Object::shared_for_testing()) + .collect::>(); + let mut all_objects = gas_objects.clone(); + all_objects.extend(owned_objects.clone()); + all_objects.extend(shared_objects.clone()); let network_config = sui_swarm_config::network_config_builder::ConfigBuilder::new_with_temp_dir() - .with_objects(objects.clone()) + .with_objects(all_objects.clone()) .build(); let state = TestAuthorityBuilder::new() @@ -1025,18 +1039,58 @@ mod tests { Arc::new(throughput_calculator), ); - // AND - // Create test transactions - let transactions = test_certificates(&state, shared_object).await; - let mut blocks = Vec::new(); + // AND create test user transactions alternating between owned and shared input. 
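`handle_consensus_transactions` above now receives the raw (block, rejected transaction indices) pairs and does the parsing itself under a monitored scope, dropping rejected transactions before execution. A simplified sketch of that flatten-and-filter step, with `String` standing in for a serialized transaction and `usize` for `TransactionIndex`:

// Flattens block transactions and drops the ones consensus rejected by index
// (the real handler also records rejected transactions in metrics).
fn accepted_transactions(blocks: Vec<(Vec<String>, Vec<usize>)>) -> Vec<String> {
    blocks
        .into_iter()
        .flat_map(|(transactions, rejected)| {
            transactions
                .into_iter()
                .enumerate()
                .filter(move |(index, _)| !rejected.contains(index))
                .map(|(_, tx)| tx)
        })
        .collect()
}

fn main() {
    let accepted =
        accepted_transactions(vec![(vec!["a".into(), "b".into(), "c".into()], vec![1])]);
    assert_eq!(accepted, vec!["a".to_string(), "c".to_string()]);
}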
+ let mut user_transactions = vec![]; + for (i, gas_object) in gas_objects[0..8].iter().enumerate() { + let input_object = if i % 2 == 0 { + owned_objects.get(i / 2).unwrap().clone() + } else { + shared_objects.get(i / 2).unwrap().clone() + }; + let transaction = test_user_transaction( + &state, + sender, + &keypair, + gas_object.clone(), + vec![input_object], + ) + .await; + user_transactions.push(transaction); + } - for (i, transaction) in transactions.iter().enumerate() { - let transaction_bytes: Vec = bcs::to_bytes( - &ConsensusTransaction::new_certificate_message(&state.name, transaction.clone()), + // AND create 4 certified transactions with remaining gas objects and 2 shared objects. + // Having more txns on the same shared object may get deferred. + let certified_transactions = [ + test_certificates_with_gas_objects( + &state, + &gas_objects[8..10], + shared_objects[4].clone(), + ) + .await, + test_certificates_with_gas_objects( + &state, + &gas_objects[10..12], + shared_objects[5].clone(), ) - .unwrap(); + .await, + ] + .concat(); - // AND create block for each transaction + // AND create block for each user and certified transaction + let mut blocks = Vec::new(); + for (i, consensus_transaction) in user_transactions + .iter() + .map(|t| { + ConsensusTransaction::new_user_transaction_message(&state.name, t.inner().clone()) + }) + .chain( + certified_transactions + .iter() + .map(|t| ConsensusTransaction::new_certificate_message(&state.name, t.clone())), + ) + .enumerate() + { + let transaction_bytes = bcs::to_bytes(&consensus_transaction).unwrap(); let block = VerifiedBlock::new_for_test( TestBlock::new(100 + i as u32, (i % consensus_committee.size()) as u32) .set_transactions(vec![Transaction::new(transaction_bytes)]) @@ -1046,7 +1100,7 @@ mod tests { blocks.push(block); } - // AND create the consensus output + // AND create the consensus commit let leader_block = blocks[0].clone(); let committed_sub_dag = CommittedSubDag::new( leader_block.reference(), @@ -1057,14 +1111,14 @@ mod tests { vec![], ); - // AND processing the consensus output once + // AND process the consensus commit once consensus_handler .handle_consensus_commit(committed_sub_dag.clone()) .await; - // AND capturing the consensus stats + // THEN check the consensus stats let num_blocks = blocks.len(); - let num_transactions = transactions.len(); + let num_transactions = user_transactions.len() + certified_transactions.len(); let last_consensus_stats_1 = consensus_handler.last_consensus_stats.clone(); assert_eq!( last_consensus_stats_1.index.transaction_index, @@ -1082,6 +1136,39 @@ mod tests { num_transactions as u64 ); + // THEN check for execution status of user transactions. + for (i, t) in user_transactions.iter().enumerate() { + let digest = t.digest(); + if let Ok(Ok(_)) = tokio::time::timeout( + std::time::Duration::from_secs(10), + state.notify_read_effects(*digest), + ) + .await + { + // Effects exist as expected. + } else { + panic!("User transaction {} {} did not execute", i, digest); + } + } + + // THEN check for execution status of certified transactions. + for (i, t) in certified_transactions.iter().enumerate() { + let digest = t.digest(); + if let Ok(Ok(_)) = tokio::time::timeout( + std::time::Duration::from_secs(10), + state.notify_read_effects(*digest), + ) + .await + { + // Effects exist as expected. + } else { + panic!("Certified transaction {} {} did not execute", i, digest); + } + } + + // THEN check for no inflight or suspended transactions. 
+ state.transaction_manager().check_empty_for_testing(); + // WHEN processing the same output multiple times // THEN the consensus stats do not update for _ in 0..2 { @@ -1178,10 +1265,10 @@ mod tests { // AND process the transactions from consensus output. transaction_handler - .handle_consensus_transactions(parse_block_transactions(&block, &rejected_transactions)) + .handle_consensus_transactions(vec![(block.clone(), rejected_transactions.clone())]) .await; - // THEN check for execution status of transactions. + // THEN check for status of transactions that should have been executed. for (i, t) in transactions.iter().enumerate() { // Do not expect shared transactions or rejected transactions to be executed. if i % 2 == 1 || rejected_transactions.contains(&(i as TransactionIndex)) { diff --git a/crates/sui-core/src/consensus_types/consensus_output_api.rs b/crates/sui-core/src/consensus_types/consensus_output_api.rs index e0c2eeebc16e4..c6d7d7c662599 100644 --- a/crates/sui-core/src/consensus_types/consensus_output_api.rs +++ b/crates/sui-core/src/consensus_types/consensus_output_api.rs @@ -6,7 +6,7 @@ use consensus_core::{BlockAPI, CommitDigest, TransactionIndex, VerifiedBlock}; use sui_protocol_config::ProtocolConfig; use sui_types::{ digests::ConsensusCommitDigest, - messages_consensus::{AuthorityIndex, ConsensusTransaction, Round}, + messages_consensus::{AuthorityIndex, ConsensusTransaction}, }; pub(crate) struct ParsedTransaction { @@ -16,12 +16,6 @@ pub(crate) struct ParsedTransaction { pub(crate) rejected: bool, // Bytes length of the serialized transaction pub(crate) serialized_len: usize, - // Consensus round of the block containing the transaction. - pub(crate) round: Round, - // Authority index of the block containing the transaction. - pub(crate) authority: AuthorityIndex, - // Transaction index in the block. 
- pub(crate) transaction_index: TransactionIndex, } pub(crate) trait ConsensusCommitAPI: Display { @@ -136,9 +130,6 @@ pub(crate) fn parse_block_transactions( transaction, rejected, serialized_len: tx.data().len(), - round, - authority, - transaction_index: index as TransactionIndex, } }) .collect() diff --git a/crates/sui-core/src/epoch/randomness.rs b/crates/sui-core/src/epoch/randomness.rs index 952ea5952d8f1..4c1ac20c73e62 100644 --- a/crates/sui-core/src/epoch/randomness.rs +++ b/crates/sui-core/src/epoch/randomness.rs @@ -896,7 +896,7 @@ mod tests { } } for i in 0..randomness_managers.len() { - let mut output = ConsensusCommitOutput::new(); + let mut output = ConsensusCommitOutput::new(0); for (j, dkg_message) in dkg_messages.iter().cloned().enumerate() { randomness_managers[i] .add_message(&epoch_stores[j].name, dkg_message) @@ -926,7 +926,7 @@ mod tests { } } for i in 0..randomness_managers.len() { - let mut output = ConsensusCommitOutput::new(); + let mut output = ConsensusCommitOutput::new(0); for (j, dkg_confirmation) in dkg_confirmations.iter().cloned().enumerate() { randomness_managers[i] .add_confirmation(&mut output, &epoch_stores[j].name, dkg_confirmation) @@ -1028,7 +1028,7 @@ mod tests { } } for i in 0..randomness_managers.len() { - let mut output = ConsensusCommitOutput::new(); + let mut output = ConsensusCommitOutput::new(0); for (j, dkg_message) in dkg_messages.iter().cloned().enumerate() { randomness_managers[i] .add_message(&epoch_stores[j].name, dkg_message) diff --git a/crates/sui-core/src/execution_cache.rs b/crates/sui-core/src/execution_cache.rs index 83cca46df4ccb..c26c57134568c 100644 --- a/crates/sui-core/src/execution_cache.rs +++ b/crates/sui-core/src/execution_cache.rs @@ -8,6 +8,7 @@ use crate::authority::epoch_start_configuration::EpochStartConfiguration; use crate::authority::AuthorityStore; use crate::state_accumulator::AccumulatorStore; use crate::transaction_outputs::TransactionOutputs; +use mysten_common::fatal; use sui_types::bridge::Bridge; use futures::{future::BoxFuture, FutureExt}; @@ -587,6 +588,13 @@ pub trait TransactionCacheRead: Send + Sync { digests: &'a [TransactionDigest], ) -> BoxFuture<'a, SuiResult>>; + /// Wait until the effects of the given transactions are available and return them. + /// WARNING: If calling this on a transaction that could be reverted, you must be + /// sure that this function cannot be called during reconfiguration. The best way to + /// do this is to wrap your future in EpochStore::within_alive_epoch. Holding an + /// ExecutionLockReadGuard would also prevent reconfig from happening while waiting, + /// but this is very dangerous, as it could prevent reconfiguration from ever + /// occurring! 
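// A minimal sketch of the guard pattern the warning above recommends. The names
// `epoch_terminated` and `wait_for_effects` are placeholders rather than the real
// `within_alive_epoch` API; the point is only that the effects wait is raced against
// an epoch-end signal so it cannot outlive reconfiguration.
async fn wait_bounded_by_epoch<F, E>(epoch_terminated: E, wait_for_effects: F) -> Option<F::Output>
where
    F: std::future::Future,
    E: std::future::Future<Output = ()>,
{
    tokio::select! {
        _ = epoch_terminated => None,                // epoch closed: stop waiting
        effects = wait_for_effects => Some(effects), // effects arrived in time
    }
}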
fn notify_read_executed_effects<'a>( &'a self, digests: &'a [TransactionDigest], @@ -597,7 +605,7 @@ pub trait TransactionCacheRead: Send + Sync { self.multi_get_effects(&digests).map(|effects| { effects .into_iter() - .map(|e| e.expect("digests must exist")) + .map(|e| e.unwrap_or_else(|| fatal!("digests must exist"))) .collect() }) } diff --git a/crates/sui-core/src/execution_cache/writeback_cache.rs b/crates/sui-core/src/execution_cache/writeback_cache.rs index 8431429b683db..5ff74be1c0182 100644 --- a/crates/sui-core/src/execution_cache/writeback_cache.rs +++ b/crates/sui-core/src/execution_cache/writeback_cache.rs @@ -463,15 +463,37 @@ impl WritebackCache { trace!(?object_id, ?version, ?object, "inserting object entry"); fail_point_async!("write_object_entry"); self.metrics.record_cache_write("object"); - self.dirty - .objects - .entry(*object_id) - .or_default() - .insert(version, object.clone()); + + // We must hold the lock for the object entry while inserting to the + // object_by_id_cache. Otherwise, a surprising bug can occur: + // + // 1. A thread executing TX1 can write object (O,1) to the dirty set and then pause. + // 2. TX2, which reads (O,1) can begin executing, because TransactionManager immediately + // schedules transactions if their inputs are available. It does not matter that TX1 + // hasn't finished executing yet. + // 3. TX2 can write (O,2) to both the dirty set and the object_by_id_cache. + // 4. The thread executing TX1 can resume and write (O,1) to the object_by_id_cache. + // + // Now, any subsequent attempt to get the latest version of O will return (O,1) instead of + // (O,2). + // + // This seems very unlikely, but it may be possible under the following circumstances: + // - While a thread is unlikely to pause for so long, moka cache uses optimistic + // lock-free algorithms that have retry loops. Possibly, under high contention, this + // code might spin for a surprisingly long time. + // - Additionally, many concurrent re-executions of the same tx could happen due to + // the tx finalizer, plus checkpoint executor, consensus, and RPCs from fullnodes. + let mut entry = self.dirty.objects.entry(*object_id).or_default(); + self.cached.object_by_id_cache.insert( *object_id, - Arc::new(Mutex::new(LatestObjectCacheEntry::Object(version, object))), + Arc::new(Mutex::new(LatestObjectCacheEntry::Object( + version, + object.clone(), + ))), ); + + entry.insert(version, object); } async fn write_marker_value( diff --git a/crates/sui-core/src/execution_driver.rs b/crates/sui-core/src/execution_driver.rs index ca433a8a4d383..e0bfa1523b736 100644 --- a/crates/sui-core/src/execution_driver.rs +++ b/crates/sui-core/src/execution_driver.rs @@ -12,6 +12,7 @@ use rand::{ Rng, SeedableRng, }; use sui_macros::fail_point_async; +use sui_protocol_config::Chain; use tokio::{ sync::{mpsc::UnboundedReceiver, oneshot, Semaphore}, time::sleep, @@ -44,6 +45,18 @@ pub async fn execution_process( let limit = Arc::new(Semaphore::new(num_cpus::get())); let mut rng = StdRng::from_rng(&mut OsRng).unwrap(); + let is_mainnet = { + let Some(state) = authority_state.upgrade() else { + info!("Authority state has shutdown. Exiting ..."); + return; + }; + + state + .get_chain_identifier() + .map(|chain_id| chain_id.chain()) + == Some(Chain::Mainnet) + }; + // Loop whenever there is a signal that a new transactions is ready to process. 
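// A toy sketch of the ordering invariant behind the writeback_cache.rs change above
// (hypothetical types, not the real WritebackCache): the per-object dirty entry stays
// locked while the latest-version cache is updated, so both writes land in one
// critical section and a slow writer of (O,1) cannot clobber a faster writer's (O,2).
use std::collections::{BTreeMap, HashMap};
use std::sync::Mutex;

struct ToyCache {
    dirty: Mutex<HashMap<u64, BTreeMap<u64, String>>>, // object id -> version -> value
    latest: Mutex<HashMap<u64, (u64, String)>>,        // object id -> latest (version, value)
}

impl ToyCache {
    fn write_object_entry(&self, id: u64, version: u64, value: String) {
        // Lock the dirty set first and keep it locked across the latest-cache update.
        let mut dirty = self.dirty.lock().unwrap();
        self.latest
            .lock()
            .unwrap()
            .insert(id, (version, value.clone()));
        dirty.entry(id).or_default().insert(version, value);
    }
}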
loop { let _scope = monitored_scope("ExecutionDriver::loop"); @@ -86,6 +99,16 @@ pub async fn execution_process( let digest = *certificate.digest(); trace!(?digest, "Pending certificate execution activated."); + if epoch_store.epoch() != certificate.epoch() { + info!( + ?digest, + cur_epoch = epoch_store.epoch(), + cert_epoch = certificate.epoch(), + "Ignoring certificate from previous epoch." + ); + continue; + } + let limit = limit.clone(); // hold semaphore permit until task completes. unwrap ok because we never close // the semaphore in this context. @@ -122,7 +145,9 @@ pub async fn execution_process( .try_execute_immediately(&certificate, expected_effects_digest, &epoch_store_clone) .await; if let Err(e) = res { - if attempts == EXECUTION_MAX_ATTEMPTS { + // Tighten this check everywhere except mainnet - if we don't see an increase in + // these crashes we will remove the retries. + if !is_mainnet || attempts == EXECUTION_MAX_ATTEMPTS { panic!("Failed to execute certified transaction {digest:?} after {attempts} attempts! error={e} certificate={certificate:?}"); } // Assume only transient failure can happen. Permanent failure is probably diff --git a/crates/sui-core/src/generate_format.rs b/crates/sui-core/src/generate_format.rs index 21d00d1a10da3..17d90318317a9 100644 --- a/crates/sui-core/src/generate_format.rs +++ b/crates/sui-core/src/generate_format.rs @@ -131,7 +131,7 @@ fn get_registry() -> Result { let sig2: GenericSignature = Signature::new_secure(&msg, &kp2).into(); let sig3: GenericSignature = Signature::new_secure(&msg, &kp3).into(); let sig4: GenericSignature = GenericSignature::from_str("BQNNMTczMTgwODkxMjU5NTI0MjE3MzYzNDIyNjM3MTc5MzI3MTk0Mzc3MTc4NDQyODI0MTAxODc5NTc5ODQ3NTE5Mzk5NDI4OTgyNTEyNTBNMTEzNzM5NjY2NDU0NjkxMjI1ODIwNzQwODIyOTU5ODUzODgyNTg4NDA2ODE2MTgyNjg1OTM5NzY2OTczMjU4OTIyODA5MTU2ODEyMDcBMQMCTDU5Mzk4NzExNDczNDg4MzQ5OTczNjE3MjAxMjIyMzg5ODAxNzcxNTIzMDMyNzQzMTEwNDcyNDk5MDU5NDIzODQ5MTU3Njg2OTA4OTVMNDUzMzU2ODI3MTEzNDc4NTI3ODczMTIzNDU3MDM2MTQ4MjY1MTk5Njc0MDc5MTg4ODI4NTg2NDk2Njg4NDAzMjcxNzA0OTgxMTcwOAJNMTA1NjQzODcyODUwNzE1NTU0Njk3NTM5OTA2NjE0MTA4NDAxMTg2MzU5MjU0NjY1OTcwMzcwMTgwNTg3NzAwNDEzNDc1MTg0NjEzNjhNMTI1OTczMjM1NDcyNzc1NzkxNDQ2OTg0OTYzNzIyNDI2MTUzNjgwODU4MDEzMTMzNDMxNTU3MzU1MTEzMzAwMDM4ODQ3Njc5NTc4NTQCATEBMANNMTU3OTE1ODk0NzI1NTY4MjYyNjMyMzE2NDQ3Mjg4NzMzMzc2MjkwMTUyNjk5ODQ2OTk0MDQwNzM2MjM2MDMzNTI1Mzc2Nzg4MTMxNzFMNDU0Nzg2NjQ5OTI0ODg4MTQ0OTY3NjE2MTE1ODAyNDc0ODA2MDQ4NTM3MzI1MDAyOTQyMzkwNDExMzAxNzQyMjUzOTAzNzE2MjUyNwExMXdpYVhOeklqb2lhSFIwY0hNNkx5OXBaQzUwZDJsMFkyZ3VkSFl2YjJGMWRHZ3lJaXcCMmV5SmhiR2NpT2lKU1V6STFOaUlzSW5SNWNDSTZJa3BYVkNJc0ltdHBaQ0k2SWpFaWZRTTIwNzk0Nzg4NTU5NjIwNjY5NTk2MjA2NDU3MDIyOTY2MTc2OTg2Njg4NzI3ODc2MTI4MjIzNjI4MTEzOTE2MzgwOTI3NTAyNzM3OTExCgAAAAAAAABhAG6Bf8BLuaIEgvF8Lx2jVoRWKKRIlaLlEJxgvqwq5nDX+rvzJxYAUFd7KeQBd9upNx+CHpmINkfgj26jcHbbqAy5xu4WMO8+cRFEpkjbBruyKE9ydM++5T/87lA8waSSAA==").unwrap(); - let sig5: GenericSignature = GenericSignature::from_str("BiVJlg3liA6MaHQ0Fw9kdmBbj+SuuaKGMseZXPO6gx2XYx0AAAAAigF7InR5cGUiOiJ3ZWJhdXRobi5nZXQiLCJjaGFsbGVuZ2UiOiJBQUFBdF9taklCMXZiVnBZTTZXVjZZX29peDZKOGFOXzlzYjhTS0ZidWtCZmlRdyIsIm9yaWdpbiI6Imh0dHA6Ly9sb2NhbGhvc3Q6NTE3MyIsImNyb3NzT3JpZ2luIjpmYWxzZX1iApjskL9Xyfopyg9Av7MSrcchSpfWqAYoJ+qfSId4gNmoQ1YNgj2alDpRIbq9kthmyGY25+k24FrW114PEoy5C+8DPRcOCTtACi3ZywtZ4UILhwV+Suh79rWtbKqDqhBQwxM=").unwrap(); + let sig5: GenericSignature = 
GenericSignature::from_str("BiVYDmenOnqS+thmz5m5SrZnWaKXZLVxgh+rri6LHXs25B0AAAAAnQF7InR5cGUiOiJ3ZWJhdXRobi5nZXQiLCAiY2hhbGxlbmdlIjoiQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQSIsIm9yaWdpbiI6Imh0dHA6Ly9sb2NhbGhvc3Q6NTE3MyIsImNyb3NzT3JpZ2luIjpmYWxzZSwgInVua25vd24iOiAidW5rbm93biJ9YgJMwqcOmZI7F/N+K5SMe4DRYCb4/cDWW68SFneSHoD2GxKKhksbpZ5rZpdrjSYABTCsFQQBpLORzTvbj4edWKd/AsEBeovrGvHR9Ku7critg6k7qvfFlPUngujXfEzXd8Eg").unwrap(); let multi_sig = MultiSig::combine( vec![sig1.clone(), sig2.clone(), sig3.clone(), sig4.clone()], diff --git a/crates/sui-core/src/post_consensus_tx_reorder.rs b/crates/sui-core/src/post_consensus_tx_reorder.rs index 9781777055b63..7f6d05fd7d61f 100644 --- a/crates/sui-core/src/post_consensus_tx_reorder.rs +++ b/crates/sui-core/src/post_consensus_tx_reorder.rs @@ -25,7 +25,7 @@ impl PostConsensusTxReorder { } fn order_by_gas_price(transactions: &mut [VerifiedSequencedConsensusTransaction]) { - let _scope = monitored_scope("HandleConsensusOutput::order_by_gas_price"); + let _scope = monitored_scope("ConsensusCommitHandler::order_by_gas_price"); transactions.sort_by_key(|txn| { // Reverse order, so that transactions with higher gas price are put to the beginning. std::cmp::Reverse({ diff --git a/crates/sui-core/src/safe_client.rs b/crates/sui-core/src/safe_client.rs index e1f01bfa46831..c0c7a53086858 100644 --- a/crates/sui-core/src/safe_client.rs +++ b/crates/sui-core/src/safe_client.rs @@ -68,10 +68,11 @@ impl SafeClientMetricsBase { registry, ) .unwrap(), + // Address label is removed to reduce high cardinality, can be added back if needed latency: register_histogram_vec_with_registry!( "safe_client_latency", - "RPC latency observed by safe client aggregator, group by address and method", - &["address", "method"], + "RPC latency observed by safe client aggregator, group by method", + &["method"], mysten_metrics::COARSE_LATENCY_SEC_BUCKETS.to_vec(), registry, ) @@ -113,16 +114,16 @@ impl SafeClientMetrics { let handle_transaction_latency = metrics_base .latency - .with_label_values(&[&validator_address, "handle_transaction"]); + .with_label_values(&["handle_transaction"]); let handle_certificate_latency = metrics_base .latency - .with_label_values(&[&validator_address, "handle_certificate"]); + .with_label_values(&["handle_certificate"]); let handle_obj_info_latency = metrics_base .latency - .with_label_values(&[&validator_address, "handle_object_info_request"]); + .with_label_values(&["handle_object_info_request"]); let handle_tx_info_latency = metrics_base .latency - .with_label_values(&[&validator_address, "handle_transaction_info_request"]); + .with_label_values(&["handle_transaction_info_request"]); Self { total_requests_handle_transaction_info_request, diff --git a/crates/sui-core/src/transaction_input_loader.rs b/crates/sui-core/src/transaction_input_loader.rs index b9f1028598c88..ee70de4829e7c 100644 --- a/crates/sui-core/src/transaction_input_loader.rs +++ b/crates/sui-core/src/transaction_input_loader.rs @@ -1,13 +1,16 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use crate::execution_cache::ObjectCacheRead; +use crate::{ + authority::authority_per_epoch_store::CertLockGuard, execution_cache::ObjectCacheRead, +}; use itertools::izip; +use mysten_common::fatal; use once_cell::unsync::OnceCell; use std::collections::HashMap; use std::sync::Arc; use sui_types::{ - base_types::{EpochId, ObjectID, ObjectRef, SequenceNumber, TransactionDigest}, + base_types::{EpochId, ObjectRef, TransactionDigest}, error::{SuiError, SuiResult, UserInputError}, storage::{GetSharedLocks, ObjectKey}, transaction::{ @@ -126,10 +129,11 @@ impl TransactionInputLoader { &self, shared_lock_store: &impl GetSharedLocks, tx_key: &TransactionKey, + _tx_lock: &CertLockGuard, // see below for why this is needed input_object_kinds: &[InputObjectKind], epoch_id: EpochId, ) -> SuiResult { - let shared_locks_cell: OnceCell> = OnceCell::new(); + let shared_locks_cell: OnceCell>> = OnceCell::new(); let mut results = vec![None; input_object_kinds.len()]; let mut object_keys = Vec::with_capacity(input_object_kinds.len()); @@ -153,17 +157,22 @@ impl TransactionInputLoader { fetches.push((i, input)); } InputObjectKind::SharedMoveObject { id, .. } => { - let shared_locks = shared_locks_cell.get_or_try_init(|| { - Ok::, SuiError>( + let shared_locks = shared_locks_cell + .get_or_init(|| { shared_lock_store - .get_shared_locks(tx_key)? - .into_iter() - .collect(), - ) - })?; - // If we can't find the locked version, it means - // 1. either we have a bug that skips shared object version assignment - // 2. or we have some DB corruption + .get_shared_locks(tx_key) + .expect("loading shared locks should not fail") + .map(|locks| locks.into_iter().collect()) + }) + .as_ref() + .unwrap_or_else(|| { + // Important to hold the _tx_lock here - otherwise it would be possible + // for a concurrent execution of the same tx to enter this point after the + // first execution has finished and the shared locks have been deleted. + fatal!("Failed to get shared locks for transaction {tx_key:?}"); + }); + + // If we find a set of locks but an object is missing, it indicates a serious inconsistency: let version = shared_locks.get(id).unwrap_or_else(|| { panic!("Shared object locks should have been set. 
key: {tx_key:?}, obj id: {id:?}") }); diff --git a/crates/sui-core/src/transaction_manager.rs b/crates/sui-core/src/transaction_manager.rs index 0ffd0c7a92b51..88290bb4caaaa 100644 --- a/crates/sui-core/src/transaction_manager.rs +++ b/crates/sui-core/src/transaction_manager.rs @@ -9,6 +9,7 @@ use std::{ }; use lru::LruCache; +use mysten_common::fatal; use mysten_metrics::monitored_scope; use parking_lot::RwLock; use sui_types::{ @@ -414,7 +415,7 @@ impl TransactionManager { .transaction_cache_read .is_tx_already_executed(&digest) .unwrap_or_else(|err| { - panic!("Failed to check if tx is already executed: {:?}", err) + fatal!("Failed to check if tx is already executed: {:?}", err) }) { self.metrics @@ -432,7 +433,7 @@ impl TransactionManager { let mut receiving_objects: HashSet = HashSet::new(); let certs: Vec<_> = certs .into_iter() - .map(|(cert, fx_digest)| { + .filter_map(|(cert, fx_digest)| { let input_object_kinds = cert .data() .intent_message() @@ -440,7 +441,24 @@ impl TransactionManager { .input_objects() .expect("input_objects() cannot fail"); let mut input_object_keys = - epoch_store.get_input_object_keys(&cert.key(), &input_object_kinds); + match epoch_store.get_input_object_keys(&cert.key(), &input_object_kinds) { + Ok(keys) => keys, + Err(e) => { + // Because we do not hold the transaction lock during enqueue, it is possible + // that the transaction was executed and the shared version assignments deleted + // since the earlier check. This is a rare race condition, and it is better to + // handle it ad-hoc here than to hold tx locks for every cert for the duration + // of this function in order to remove the race. + if self + .transaction_cache_read + .is_tx_already_executed(cert.digest()) + .expect("is_tx_already_executed cannot fail") + { + return None; + } + fatal!("Failed to get input object keys: {:?}", e); + } + }; if input_object_kinds.len() != input_object_keys.len() { error!("Duplicated input objects: {:?}", input_object_kinds); @@ -467,7 +485,7 @@ impl TransactionManager { } } - (cert, fx_digest, input_object_keys) + Some((cert, fx_digest, input_object_keys)) }) .collect(); diff --git a/crates/sui-core/src/unit_tests/authority_tests.rs b/crates/sui-core/src/unit_tests/authority_tests.rs index 70ec3e454c5de..86651b7221293 100644 --- a/crates/sui-core/src/unit_tests/authority_tests.rs +++ b/crates/sui-core/src/unit_tests/authority_tests.rs @@ -11,7 +11,6 @@ use move_binary_format::{ }; use move_core_types::identifier::IdentStr; use move_core_types::language_storage::StructTag; -use move_core_types::parser::parse_type_tag; use move_core_types::{ account_address::AccountAddress, ident_str, identifier::Identifier, language_storage::TypeTag, }; @@ -24,6 +23,7 @@ use rand::{ use serde_json::json; use std::collections::HashSet; use std::fs; +use std::str::FromStr; use std::{convert::TryInto, env}; use sui_json_rpc_types::{ @@ -1198,9 +1198,18 @@ async fn test_handle_transfer_transaction_bad_signature() { let server_handle = server.spawn_for_test().await.unwrap(); - let client = NetworkAuthorityClient::connect(server_handle.address()) - .await - .unwrap(); + let client = NetworkAuthorityClient::connect( + server_handle.address(), + Some( + authority_state + .config + .network_key_pair() + .public() + .to_owned(), + ), + ) + .await + .unwrap(); let (_unknown_address, unknown_key): (_, AccountKeyPair) = get_key_pair(); let mut bad_signature_transfer_transaction = transfer_transaction.clone().into_inner(); @@ -3674,7 +3683,7 @@ async fn 
test_dynamic_field_struct_name_parsing() { assert!(matches!(fields[0].type_, DynamicFieldType::DynamicField)); assert_eq!(json!({"name_str": "Test Name"}), fields[0].name.value); assert_eq!( - parse_type_tag("0x0::object_basics::Name").unwrap(), + TypeTag::from_str("0x0::object_basics::Name").unwrap(), fields[0].name.type_ ) } @@ -3686,7 +3695,10 @@ async fn test_dynamic_field_bytearray_name_parsing() { assert_eq!(fields.len(), 1); assert!(matches!(fields[0].type_, DynamicFieldType::DynamicField)); - assert_eq!(parse_type_tag("vector").unwrap(), fields[0].name.type_); + assert_eq!( + TypeTag::from_str("vector").unwrap(), + fields[0].name.type_ + ); assert_eq!(json!("Test Name".as_bytes()), fields[0].name.value); } @@ -3697,7 +3709,7 @@ async fn test_dynamic_field_address_name_parsing() { assert_eq!(fields.len(), 1); assert!(matches!(fields[0].type_, DynamicFieldType::DynamicField)); - assert_eq!(parse_type_tag("address").unwrap(), fields[0].name.type_); + assert_eq!(TypeTag::from_str("address").unwrap(), fields[0].name.type_); assert_eq!(json!(sender), fields[0].name.value); } @@ -3709,7 +3721,7 @@ async fn test_dynamic_object_field_struct_name_parsing() { assert!(matches!(fields[0].type_, DynamicFieldType::DynamicObject)); assert_eq!(json!({"name_str": "Test Name"}), fields[0].name.value); assert_eq!( - parse_type_tag("0x0::object_basics::Name").unwrap(), + TypeTag::from_str("0x0::object_basics::Name").unwrap(), fields[0].name.type_ ) } @@ -3721,7 +3733,10 @@ async fn test_dynamic_object_field_bytearray_name_parsing() { assert_eq!(fields.len(), 1); assert!(matches!(fields[0].type_, DynamicFieldType::DynamicObject)); - assert_eq!(parse_type_tag("vector").unwrap(), fields[0].name.type_); + assert_eq!( + TypeTag::from_str("vector").unwrap(), + fields[0].name.type_ + ); assert_eq!(json!("Test Name".as_bytes()), fields[0].name.value); } @@ -3732,7 +3747,7 @@ async fn test_dynamic_object_field_address_name_parsing() { assert_eq!(fields.len(), 1); assert!(matches!(fields[0].type_, DynamicFieldType::DynamicObject)); - assert_eq!(parse_type_tag("address").unwrap(), fields[0].name.type_); + assert_eq!(TypeTag::from_str("address").unwrap(), fields[0].name.type_); assert_eq!(json!(sender), fields[0].name.value); } @@ -4742,6 +4757,7 @@ async fn test_shared_object_transaction_ok() { .epoch_store_for_testing() .get_shared_locks(&certificate.key()) .expect("Reading shared locks should not fail") + .expect("Locks should be set") .into_iter() .find_map(|(object_id, version)| { if object_id == shared_object_id { @@ -4858,6 +4874,7 @@ async fn test_consensus_commit_prologue_generation() { .epoch_store_for_testing() .get_shared_locks(txn_key) .unwrap() + .expect("locks should be set") .iter() .filter_map(|(id, seq)| { if id == &SUI_CLOCK_OBJECT_ID { @@ -5903,6 +5920,7 @@ async fn test_consensus_handler_per_object_congestion_control( } } protocol_config.set_max_deferral_rounds_for_congestion_control_for_testing(1000); // Set to a large number so that we don't hit this limit. 
+ protocol_config.set_max_txn_cost_overage_per_object_in_commit_for_testing(0); let authority = TestAuthorityBuilder::new() .with_reference_gas_price(1000) .with_protocol_config(protocol_config) @@ -6131,6 +6149,7 @@ async fn test_consensus_handler_congestion_control_transaction_cancellation() { protocol_config .set_max_accumulated_txn_cost_per_object_in_mysticeti_commit_for_testing(100_000_000); protocol_config.set_max_deferral_rounds_for_congestion_control_for_testing(2); + protocol_config.set_max_txn_cost_overage_per_object_in_commit_for_testing(0); let authority = TestAuthorityBuilder::new() .with_reference_gas_price(1000) .with_protocol_config(protocol_config) @@ -6216,6 +6235,7 @@ async fn test_consensus_handler_congestion_control_transaction_cancellation() { .epoch_store_for_testing() .get_shared_locks(&cancelled_txn.key()) .expect("Reading shared locks should not fail") + .expect("locks should be set") .into_iter() .collect::>(); assert_eq!( @@ -6234,6 +6254,7 @@ async fn test_consensus_handler_congestion_control_transaction_cancellation() { .read_objects_for_execution( authority.epoch_store_for_testing().as_ref(), &cancelled_txn.key(), + &CertLockGuard::dummy_for_tests(), &cancelled_txn .data() .transaction_data() diff --git a/crates/sui-core/src/unit_tests/congestion_control_tests.rs b/crates/sui-core/src/unit_tests/congestion_control_tests.rs index 24a6defc2b407..53016c0b38ef9 100644 --- a/crates/sui-core/src/unit_tests/congestion_control_tests.rs +++ b/crates/sui-core/src/unit_tests/congestion_control_tests.rs @@ -297,13 +297,18 @@ async fn test_congestion_control_execution_cancellation() { // Initialize shared object queue so that any transaction touches shared_object_1 should result in congestion and cancellation. register_fail_point_arg("initial_congestion_tracker", move || { - Some( - SharedObjectCongestionTracker::new_with_initial_value_for_test( - &[(shared_object_1.0, 10)], - PerObjectCongestionControlMode::TotalGasBudget, - Some(1000), // Not used. + Some(SharedObjectCongestionTracker::new( + [(shared_object_1.0, 10)], + PerObjectCongestionControlMode::TotalGasBudget, + Some( + test_setup + .protocol_config + .max_accumulated_txn_cost_per_object_in_mysticeti_commit(), ), - ) + Some(1000), // Not used. + None, // Not used. + 0, // Disable overage. + )) }); // Runs a transaction that touches shared_object_1, shared_object_2 and a owned object. 
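// A condensed sketch of the knobs these congestion tests pin so that deferral and
// cancellation stay deterministic (values taken from the hunks above; `protocol_config`
// is assumed to be the mutable ProtocolConfig built by the test): a fixed per-object
// accumulated-cost budget per commit, a bounded number of deferral rounds, and zero
// cost overage past the budget, which disables the new overage allowance.
protocol_config
    .set_max_accumulated_txn_cost_per_object_in_mysticeti_commit_for_testing(100_000_000);
protocol_config.set_max_deferral_rounds_for_congestion_control_for_testing(2);
protocol_config.set_max_txn_cost_overage_per_object_in_commit_for_testing(0);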
diff --git a/crates/sui-core/src/unit_tests/consensus_tests.rs b/crates/sui-core/src/unit_tests/consensus_tests.rs index 46247ae04edf1..47f4c404c85b2 100644 --- a/crates/sui-core/src/unit_tests/consensus_tests.rs +++ b/crates/sui-core/src/unit_tests/consensus_tests.rs @@ -9,18 +9,13 @@ use crate::checkpoints::CheckpointServiceNoop; use crate::consensus_handler::SequencedConsensusTransaction; use fastcrypto::traits::KeyPair; use move_core_types::{account_address::AccountAddress, ident_str}; -use narwhal_types::Transactions; -use narwhal_types::TransactionsServer; -use narwhal_types::{Empty, TransactionProto}; use rand::rngs::StdRng; use rand::SeedableRng; -use sui_network::tonic; use sui_types::crypto::{deterministic_random_account_key, AccountKeyPair}; use sui_types::gas::GasCostSummary; use sui_types::messages_checkpoint::{ CheckpointContents, CheckpointSignatureMessage, CheckpointSummary, SignedCheckpointSummary, }; -use sui_types::multiaddr::Multiaddr; use sui_types::transaction::TEST_ONLY_GAS_UNIT_FOR_OBJECT_BASICS; use sui_types::utils::{make_committee_key, to_sender_signed_transaction}; use sui_types::SUI_FRAMEWORK_PACKAGE_ID; @@ -29,8 +24,6 @@ use sui_types::{ object::Object, transaction::{CallArg, CertifiedTransaction, ObjectArg, TransactionData, VerifiedTransaction}, }; -use tokio::sync::mpsc::channel; -use tokio::sync::mpsc::{Receiver, Sender}; /// Fixture: a few test gas objects. pub fn test_gas_objects() -> Vec { @@ -47,10 +40,19 @@ pub fn test_gas_objects() -> Vec { GAS_OBJECTS.with(|v| v.clone()) } -/// Fixture: a few test certificates containing a shared object. +/// Fixture: create a few test certificates containing a shared object. pub async fn test_certificates( authority: &AuthorityState, shared_object: Object, +) -> Vec { + test_certificates_with_gas_objects(authority, &test_gas_objects(), shared_object).await +} + +/// Fixture: create a few test certificates containing a shared object using specified gas objects. +pub async fn test_certificates_with_gas_objects( + authority: &AuthorityState, + gas_objects: &[Object], + shared_object: Object, ) -> Vec { let epoch_store = authority.load_epoch_store_one_call_per_task(); let (sender, keypair) = deterministic_random_account_key(); @@ -62,7 +64,7 @@ pub async fn test_certificates( initial_shared_version: shared_object.version(), mutable: true, }; - for gas_object in test_gas_objects() { + for gas_object in gas_objects { // Object digest may be different in genesis than originally generated. 
let gas_object = authority .get_object(&gas_object.id()) @@ -401,45 +403,3 @@ async fn submit_checkpoint_signature_to_consensus_adapter() { .unwrap(); waiter.await.unwrap(); } - -pub struct ConsensusMockServer { - sender: Sender, -} - -impl ConsensusMockServer { - pub fn spawn(address: Multiaddr) -> Receiver { - let (sender, receiver) = channel(1); - tokio::spawn(async move { - let config = mysten_network::config::Config::new(); - let mock = Self { sender }; - config - .server_builder() - .add_service(TransactionsServer::new(mock)) - .bind(&address) - .await - .unwrap() - .serve() - .await - }); - receiver - } -} - -#[tonic::async_trait] -impl Transactions for ConsensusMockServer { - /// Submit a Transactions - async fn submit_transaction( - &self, - request: tonic::Request, - ) -> Result, tonic::Status> { - self.sender.send(request.into_inner()).await.unwrap(); - Ok(tonic::Response::new(Empty {})) - } - /// Submit a Transactions - async fn submit_transaction_stream( - &self, - _request: tonic::Request>, - ) -> Result, tonic::Status> { - unimplemented!() - } -} diff --git a/crates/sui-core/src/unit_tests/move_package_upgrade_tests.rs b/crates/sui-core/src/unit_tests/move_package_upgrade_tests.rs index eed9dcf2af4be..4ca5c82492a5f 100644 --- a/crates/sui-core/src/unit_tests/move_package_upgrade_tests.rs +++ b/crates/sui-core/src/unit_tests/move_package_upgrade_tests.rs @@ -15,7 +15,12 @@ use sui_types::{ MOVE_STDLIB_PACKAGE_ID, SUI_FRAMEWORK_PACKAGE_ID, }; -use std::{collections::BTreeSet, path::PathBuf, str::FromStr, sync::Arc}; +use std::{ + collections::BTreeSet, + path::{Path, PathBuf}, + str::FromStr, + sync::Arc, +}; use sui_types::effects::{TransactionEffects, TransactionEffectsAPI}; use sui_types::error::{SuiError, UserInputError}; use sui_types::execution_config_utils::to_binary_config; @@ -48,11 +53,62 @@ macro_rules! move_call { } } +enum FileOverlay<'a> { + Remove(&'a str), + Add { + file_name: &'a str, + contents: &'a str, + }, +} + +fn build_upgrade_test_modules_with_overlay( + base_pkg: &str, + overlay: FileOverlay<'_>, +) -> (Vec, Vec>) { + // Root temp dirs under `move_upgrade` directory so that dependency paths remain correct. 
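// A usage sketch for this helper (hedged; it mirrors the dep-only upgrade tests added
// later in this file): overlay one extra module on the `dep_only_upgrade` package and
// build the resulting temporary package.
let (_digest, _modules) = build_upgrade_test_modules_with_overlay(
    "dep_only_upgrade",
    FileOverlay::Add {
        file_name: "new_module.move",
        contents: "module base_addr::new_module;",
    },
);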
+ let mut tmp_dir_root_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + tmp_dir_root_path.extend(["src", "unit_tests", "data", "move_upgrade"]); + + let tmp_dir = tempfile::TempDir::new_in(tmp_dir_root_path).unwrap(); + let tmp_dir_path = tmp_dir.path(); + + let mut copy_options = fs_extra::dir::CopyOptions::new(); + copy_options.copy_inside = true; + copy_options.content_only = true; + let source_dir = pkg_path_of(base_pkg); + fs_extra::dir::copy(source_dir, tmp_dir_path, ©_options).unwrap(); + + match overlay { + FileOverlay::Remove(file_name) => { + let file_path = tmp_dir_path.join(format!("sources/{}", file_name)); + std::fs::remove_file(file_path).unwrap(); + } + FileOverlay::Add { + file_name, + contents, + } => { + let new_file_path = tmp_dir_path.join(format!("sources/{}", file_name)); + std::fs::write(new_file_path, contents).unwrap(); + } + } + + build_pkg_at_path(tmp_dir_path) +} + fn build_upgrade_test_modules(test_dir: &str) -> (Vec, Vec>) { + let path = pkg_path_of(test_dir); + build_pkg_at_path(&path) +} + +fn pkg_path_of(pkg_name: &str) -> PathBuf { let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); - path.extend(["src", "unit_tests", "data", "move_upgrade", test_dir]); + path.extend(["src", "unit_tests", "data", "move_upgrade", pkg_name]); + path +} + +fn build_pkg_at_path(path: &Path) -> (Vec, Vec>) { let with_unpublished_deps = false; - let package = BuildConfig::new_for_testing().build(&path).unwrap(); + let package = BuildConfig::new_for_testing().build(path).unwrap(); ( package.get_package_digest(with_unpublished_deps).to_vec(), package.get_package_bytes(with_unpublished_deps), @@ -457,6 +513,116 @@ async fn test_upgrade_package_compatible_in_dep_only_mode() { ); } +#[tokio::test] +async fn test_upgrade_package_add_new_module_in_dep_only_mode_pre_v68() { + // Allow new modules in deps-only mode for this test. 
+ let _guard = ProtocolConfig::apply_overrides_for_testing(|_, mut config| { + config.set_disallow_new_modules_in_deps_only_packages_for_testing(false); + config + }); + + let mut runner = UpgradeStateRunner::new("move_upgrade/base").await; + let base_pkg = "dep_only_upgrade"; + assert_valid_dep_only_upgrade(&mut runner, base_pkg).await; + let (digest, modules) = build_upgrade_test_modules_with_overlay( + base_pkg, + FileOverlay::Add { + file_name: "new_module.move", + contents: "module base_addr::new_module;", + }, + ); + let effects = runner + .upgrade( + UpgradePolicy::DEP_ONLY, + digest, + modules, + vec![SUI_FRAMEWORK_PACKAGE_ID, MOVE_STDLIB_PACKAGE_ID], + ) + .await; + + assert!(effects.status().is_ok(), "{:#?}", effects.status()); +} + +#[tokio::test] +async fn test_upgrade_package_invalid_dep_only_upgrade_pre_v68() { + let _guard = ProtocolConfig::apply_overrides_for_testing(|_, mut config| { + config.set_disallow_new_modules_in_deps_only_packages_for_testing(false); + config + }); + + let mut runner = UpgradeStateRunner::new("move_upgrade/base").await; + let base_pkg = "dep_only_upgrade"; + assert_valid_dep_only_upgrade(&mut runner, base_pkg).await; + let overlays = [ + FileOverlay::Add { + file_name: "new_friend_module.move", + contents: r#" +module base_addr::new_friend_module; +public fun friend_call(): u64 { base_addr::base::friend_fun(1) } + "#, + }, + FileOverlay::Remove("friend_module.move"), + ]; + for overlay in overlays { + let (digest, modules) = build_upgrade_test_modules_with_overlay(base_pkg, overlay); + let effects = runner + .upgrade( + UpgradePolicy::DEP_ONLY, + digest, + modules, + vec![SUI_FRAMEWORK_PACKAGE_ID, MOVE_STDLIB_PACKAGE_ID], + ) + .await; + + assert_eq!( + effects.into_status().unwrap_err().0, + ExecutionFailureStatus::PackageUpgradeError { + upgrade_error: PackageUpgradeError::IncompatibleUpgrade + }, + ); + } +} + +#[tokio::test] +async fn test_invalid_dep_only_upgrades() { + let mut runner = UpgradeStateRunner::new("move_upgrade/base").await; + let base_pkg = "dep_only_upgrade"; + assert_valid_dep_only_upgrade(&mut runner, base_pkg).await; + let overlays = [ + FileOverlay::Add { + file_name: "new_module.move", + contents: "module base_addr::new_module;", + }, + FileOverlay::Add { + file_name: "new_friend_module.move", + contents: r#" +module base_addr::new_friend_module; +public fun friend_call(): u64 { base_addr::base::friend_fun(1) } + "#, + }, + FileOverlay::Remove("friend_module.move"), + ]; + + for overlay in overlays { + let (digest, modules) = build_upgrade_test_modules_with_overlay(base_pkg, overlay); + let effects = runner + .upgrade( + UpgradePolicy::DEP_ONLY, + digest, + modules, + vec![SUI_FRAMEWORK_PACKAGE_ID, MOVE_STDLIB_PACKAGE_ID], + ) + .await; + + assert_eq!( + effects.into_status().unwrap_err().0, + ExecutionFailureStatus::PackageUpgradeError { + upgrade_error: PackageUpgradeError::IncompatibleUpgrade + }, + ); + } +} + #[tokio::test] async fn test_upgrade_package_compatible_in_additive_mode() { let mut runner = UpgradeStateRunner::new("move_upgrade/base").await; @@ -572,18 +738,7 @@ async fn test_upgrade_package_additive_dep_only_mode() { #[tokio::test] async fn test_upgrade_package_dep_only_mode() { let mut runner = UpgradeStateRunner::new("move_upgrade/base").await; - - let (digest, modules) = build_upgrade_test_modules("dep_only_upgrade"); - let effects = runner - .upgrade( - UpgradePolicy::DEP_ONLY, - digest, - modules, - vec![SUI_FRAMEWORK_PACKAGE_ID, MOVE_STDLIB_PACKAGE_ID], - ) - .await; - - 
assert!(effects.status().is_ok(), "{:#?}", effects.status()); + assert_valid_dep_only_upgrade(&mut runner, "dep_only_upgrade").await; } #[tokio::test] @@ -1432,3 +1587,17 @@ async fn test_upgrade_more_than_max_packages_error() { } ); } + +async fn assert_valid_dep_only_upgrade(runner: &mut UpgradeStateRunner, package_name: &str) { + let (digest, modules) = build_upgrade_test_modules(package_name); + let effects = runner + .upgrade( + UpgradePolicy::DEP_ONLY, + digest, + modules, + vec![SUI_FRAMEWORK_PACKAGE_ID, MOVE_STDLIB_PACKAGE_ID], + ) + .await; + + assert!(effects.status().is_ok(), "{:#?}", effects.status()); +} diff --git a/crates/sui-core/src/unit_tests/server_tests.rs b/crates/sui-core/src/unit_tests/server_tests.rs index 0a2e83627b26d..fce54971bc7f3 100644 --- a/crates/sui-core/src/unit_tests/server_tests.rs +++ b/crates/sui-core/src/unit_tests/server_tests.rs @@ -19,13 +19,22 @@ async fn test_simple_request() { let authority_state = init_state_with_object_id(sender, object_id).await; // The following two fields are only needed for shared objects (not by this bench). - let server = AuthorityServer::new_for_test(authority_state); + let server = AuthorityServer::new_for_test(authority_state.clone()); let server_handle = server.spawn_for_test().await.unwrap(); - let client = NetworkAuthorityClient::connect(server_handle.address()) - .await - .unwrap(); + let client = NetworkAuthorityClient::connect( + server_handle.address(), + Some( + authority_state + .config + .network_key_pair() + .public() + .to_owned(), + ), + ) + .await + .unwrap(); let req = ObjectInfoRequest::latest_object_info_request(object_id, LayoutGenerationOption::Generate); diff --git a/crates/sui-core/src/unit_tests/transaction_tests.rs b/crates/sui-core/src/unit_tests/transaction_tests.rs index 053e494af838f..69b4eade03a6c 100644 --- a/crates/sui-core/src/unit_tests/transaction_tests.rs +++ b/crates/sui-core/src/unit_tests/transaction_tests.rs @@ -440,9 +440,18 @@ async fn do_transaction_test_impl( let server_handle = server.spawn_for_test().await.unwrap(); - let client = NetworkAuthorityClient::connect(server_handle.address()) - .await - .unwrap(); + let client = NetworkAuthorityClient::connect( + server_handle.address(), + Some( + authority_state + .config + .network_key_pair() + .public() + .to_owned(), + ), + ) + .await + .unwrap(); post_sign_mutations(&mut transfer_transaction); post_sign_mutations(&mut move_call_transaction); @@ -1033,9 +1042,18 @@ async fn setup_zklogin_network( let server_handle = server.spawn_for_test().await.unwrap(); - let client = NetworkAuthorityClient::connect(server_handle.address()) - .await - .unwrap(); + let client = NetworkAuthorityClient::connect( + server_handle.address(), + Some( + authority_state + .config + .network_key_pair() + .public() + .to_owned(), + ), + ) + .await + .unwrap(); ( object_ids, gas_object_ids, @@ -1326,9 +1344,18 @@ async fn execute_transaction_assert_err( let server_handle = server.spawn_for_test().await.unwrap(); - let client = NetworkAuthorityClient::connect(server_handle.address()) - .await - .unwrap(); + let client = NetworkAuthorityClient::connect( + server_handle.address(), + Some( + authority_state + .config + .network_key_pair() + .public() + .to_owned(), + ), + ) + .await + .unwrap(); let err = client .handle_transaction(txn.clone(), Some(make_socket_addr())) .await; @@ -1378,9 +1405,18 @@ async fn test_oversized_txn() { let server_handle = server.spawn_for_test().await.unwrap(); - let client = 
NetworkAuthorityClient::connect(server_handle.address()) - .await - .unwrap(); + let client = NetworkAuthorityClient::connect( + server_handle.address(), + Some( + authority_state + .config + .network_key_pair() + .public() + .to_owned(), + ), + ) + .await + .unwrap(); let res = client .handle_transaction(txn, Some(make_socket_addr())) @@ -1429,9 +1465,18 @@ async fn test_very_large_certificate() { let server_handle = server.spawn_for_test().await.unwrap(); - let client = NetworkAuthorityClient::connect(server_handle.address()) - .await - .unwrap(); + let client = NetworkAuthorityClient::connect( + server_handle.address(), + Some( + authority_state + .config + .network_key_pair() + .public() + .to_owned(), + ), + ) + .await + .unwrap(); let socket_addr = make_socket_addr(); let auth_sig = client @@ -1511,9 +1556,18 @@ async fn test_handle_certificate_errors() { let server_handle = server.spawn_for_test().await.unwrap(); - let client = NetworkAuthorityClient::connect(server_handle.address()) - .await - .unwrap(); + let client = NetworkAuthorityClient::connect( + server_handle.address(), + Some( + authority_state + .config + .network_key_pair() + .public() + .to_owned(), + ), + ) + .await + .unwrap(); // Test handle certificate from the wrong epoch let epoch_store = authority_state.epoch_store_for_testing(); @@ -1681,9 +1735,12 @@ async fn test_handle_soft_bundle_certificates() { let server = AuthorityServer::new_for_test_with_consensus_adapter(authority.clone(), adapter); let _metrics = server.metrics.clone(); let server_handle = server.spawn_for_test().await.unwrap(); - let client = NetworkAuthorityClient::connect(server_handle.address()) - .await - .unwrap(); + let client = NetworkAuthorityClient::connect( + server_handle.address(), + Some(authority.config.network_key_pair().public().to_owned()), + ) + .await + .unwrap(); let signed_tx_into_certificate = |transaction: Transaction| async { let epoch_store = authority.load_epoch_store_one_call_per_task(); @@ -1836,9 +1893,12 @@ async fn test_handle_soft_bundle_certificates_errors() { let authority = server.state.clone(); let _metrics = server.metrics.clone(); let server_handle = server.spawn_for_test().await.unwrap(); - let client = NetworkAuthorityClient::connect(server_handle.address()) - .await - .unwrap(); + let client = NetworkAuthorityClient::connect( + server_handle.address(), + Some(authority.config.network_key_pair().public().to_owned()), + ) + .await + .unwrap(); let signed_tx_into_certificate = |transaction: Transaction| async { let epoch_store = authority.load_epoch_store_one_call_per_task(); diff --git a/crates/sui-data-ingestion-core/src/reader.rs b/crates/sui-data-ingestion-core/src/reader.rs index 50a948b8d85d9..54f4c8540e0e3 100644 --- a/crates/sui-data-ingestion-core/src/reader.rs +++ b/crates/sui-data-ingestion-core/src/reader.rs @@ -46,22 +46,25 @@ pub struct CheckpointReader { #[derive(Clone)] pub struct ReaderOptions { - pub tick_interal_ms: u64, + pub tick_internal_ms: u64, pub timeout_secs: u64, /// number of maximum concurrent requests to the remote store. Increase it for backfills pub batch_size: usize, pub data_limit: usize, pub upper_limit: Option, + /// Whether to delete processed checkpoint files from the local directory. 
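// A construction sketch for the updated options (field names as defined in this hunk;
// a consumer that wants to keep local checkpoint files can opt out of deletion):
let options = ReaderOptions {
    gc_checkpoint_files: false, // keep processed *.chk files instead of deleting them
    ..Default::default()
};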
+ pub gc_checkpoint_files: bool, } impl Default for ReaderOptions { fn default() -> Self { Self { - tick_interal_ms: 100, + tick_internal_ms: 100, timeout_secs: 5, batch_size: 10, data_limit: 0, upper_limit: None, + gc_checkpoint_files: true, } } } @@ -76,25 +79,19 @@ impl CheckpointReader { /// Represents a single iteration of the reader. /// Reads files in a local directory, validates them, and forwards `CheckpointData` to the executor. async fn read_local_files(&self) -> Result>> { - let mut files = vec![]; - for entry in fs::read_dir(self.path.clone())? { - let entry = entry?; - let filename = entry.file_name(); - if let Some(sequence_number) = Self::checkpoint_number_from_file_path(&filename) { - if sequence_number >= self.current_checkpoint_number { - files.push((sequence_number, entry.path())); - } - } - } - files.sort(); - debug!("unprocessed local files {:?}", files); let mut checkpoints = vec![]; - for (_, filename) in files.iter().take(MAX_CHECKPOINTS_IN_PROGRESS) { - let checkpoint = Blob::from_bytes::>(&fs::read(filename)?)?; - if self.exceeds_capacity(checkpoint.checkpoint_summary.sequence_number) { + for offset in 0..MAX_CHECKPOINTS_IN_PROGRESS { + let sequence_number = self.current_checkpoint_number + offset as u64; + if self.exceeds_capacity(sequence_number) { break; } - checkpoints.push(checkpoint); + match fs::read(self.path.join(format!("{}.chk", sequence_number))) { + Ok(bytes) => checkpoints.push(Blob::from_bytes::>(&bytes)?), + Err(err) => match err.kind() { + std::io::ErrorKind::NotFound => break, + _ => Err(err)?, + }, + } } Ok(checkpoints) } @@ -294,9 +291,12 @@ impl CheckpointReader { /// Cleans the local directory by removing all processed checkpoint files. fn gc_processed_files(&mut self, watermark: CheckpointSequenceNumber) -> Result<()> { - info!("cleaning processed files, watermark is {}", watermark); self.data_limiter.gc(watermark); self.last_pruned_watermark = watermark; + if !self.options.gc_checkpoint_files { + return Ok(()); + } + info!("cleaning processed files, watermark is {}", watermark); for entry in fs::read_dir(self.path.clone())? 
{ let entry = entry?; let filename = entry.file_name(); @@ -384,7 +384,7 @@ impl CheckpointReader { Some(gc_checkpoint_number) = self.processed_receiver.recv() => { self.gc_processed_files(gc_checkpoint_number).expect("Failed to clean the directory"); } - Ok(Some(_)) | Err(_) = timeout(Duration::from_millis(self.options.tick_interal_ms), inotify_recv.recv()) => { + Ok(Some(_)) | Err(_) = timeout(Duration::from_millis(self.options.tick_internal_ms), inotify_recv.recv()) => { self.sync().await.expect("Failed to read checkpoint files"); } } diff --git a/crates/sui-data-ingestion-core/src/tests.rs b/crates/sui-data-ingestion-core/src/tests.rs index 4465b0c31c381..4963deeb3b51f 100644 --- a/crates/sui-data-ingestion-core/src/tests.rs +++ b/crates/sui-data-ingestion-core/src/tests.rs @@ -40,7 +40,7 @@ async fn run( duration: Option, ) -> Result { let options = ReaderOptions { - tick_interal_ms: 10, + tick_internal_ms: 10, batch_size: 1, ..Default::default() }; diff --git a/crates/sui-data-ingestion/Cargo.toml b/crates/sui-data-ingestion/Cargo.toml index 2450691f4cdd6..6a80d6c5c6b90 100644 --- a/crates/sui-data-ingestion/Cargo.toml +++ b/crates/sui-data-ingestion/Cargo.toml @@ -31,6 +31,7 @@ tracing.workspace = true sui-archival.workspace = true sui-storage.workspace = true sui-data-ingestion-core.workspace = true +sui-kvstore.workspace = true sui-types.workspace = true tempfile.workspace = true url.workspace = true diff --git a/crates/sui-data-ingestion/src/main.rs b/crates/sui-data-ingestion/src/main.rs index 0a03b9af29591..71e49acb6f154 100644 --- a/crates/sui-data-ingestion/src/main.rs +++ b/crates/sui-data-ingestion/src/main.rs @@ -12,6 +12,7 @@ use sui_data_ingestion::{ }; use sui_data_ingestion_core::{DataIngestionMetrics, ReaderOptions}; use sui_data_ingestion_core::{IndexerExecutor, WorkerPool}; +use sui_kvstore::{BigTableClient, KvWorker}; use tokio::signal; use tokio::sync::oneshot; @@ -21,6 +22,7 @@ enum Task { Archival(ArchivalConfig), Blob(BlobTaskConfig), KV(KVStoreTaskConfig), + BigTableKV(BigTableTaskConfig), } #[derive(Serialize, Deserialize, Clone, Debug)] @@ -40,6 +42,11 @@ struct ProgressStoreConfig { pub table_name: String, } +#[derive(Serialize, Deserialize, Clone, Debug)] +struct BigTableTaskConfig { + instance_id: String, +} + #[derive(Debug, Clone, Serialize, Deserialize)] struct IndexerConfig { path: PathBuf, @@ -146,6 +153,15 @@ async fn main() -> Result<()> { ); executor.register(worker_pool).await?; } + Task::BigTableKV(kv_config) => { + let client = BigTableClient::new_remote(kv_config.instance_id, false, None).await?; + let worker_pool = WorkerPool::new( + KvWorker { client }, + task_config.name, + task_config.concurrency, + ); + executor.register(worker_pool).await?; + } }; } let reader_options = ReaderOptions { diff --git a/crates/sui-deepbook-indexer/src/migrations/00000000000000_diesel_initial_setup/down.sql b/crates/sui-deepbook-indexer/src/migrations/00000000000000_diesel_initial_setup/down.sql index 454dcfecb7003..b86f980c0eacb 100644 --- a/crates/sui-deepbook-indexer/src/migrations/00000000000000_diesel_initial_setup/down.sql +++ b/crates/sui-deepbook-indexer/src/migrations/00000000000000_diesel_initial_setup/down.sql @@ -3,5 +3,12 @@ DROP TABLE IF EXISTS order_updates; DROP TABLE IF EXISTS order_fills; DROP TABLE IF EXISTS flashloans; DROP TABLE IF EXISTS pool_prices; +DROP TABLE IF EXISTS balances; +DROP TABLE IF EXISTS trade_params_update; +DROP TABLE IF EXISTS stakes; +DROP TABLE IF EXISTS proposals; +DROP TABLE IF EXISTS votes; +DROP TABLE IF EXISTS 
rebates; DROP TABLE IF EXISTS sui_error_transactions; DROP TABLE IF EXISTS progress_store; +DROP TABLE IF EXISTS pools; \ No newline at end of file diff --git a/crates/sui-deepbook-indexer/src/migrations/00000000000000_diesel_initial_setup/up.sql b/crates/sui-deepbook-indexer/src/migrations/00000000000000_diesel_initial_setup/up.sql index 04d9951f6e66a..20f7d6a5172ca 100644 --- a/crates/sui-deepbook-indexer/src/migrations/00000000000000_diesel_initial_setup/up.sql +++ b/crates/sui-deepbook-indexer/src/migrations/00000000000000_diesel_initial_setup/up.sql @@ -2,11 +2,12 @@ CREATE TABLE IF NOT EXISTS order_updates ( - id SERIAL PRIMARY KEY, + event_digest TEXT PRIMARY KEY, digest TEXT NOT NULL, sender TEXT NOT NULL, checkpoint BIGINT NOT NULL, timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL, + checkpoint_timestamp_ms BIGINT NOT NULL, package TEXT NOT NULL, status TEXT NOT NULL, pool_id TEXT NOT NULL, @@ -24,11 +25,12 @@ CREATE TABLE IF NOT EXISTS order_updates CREATE TABLE IF NOT EXISTS order_fills ( - id SERIAL PRIMARY KEY, + event_digest TEXT PRIMARY KEY, digest TEXT NOT NULL, sender TEXT NOT NULL, checkpoint BIGINT NOT NULL, timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL, + checkpoint_timestamp_ms BIGINT NOT NULL, package TEXT NOT NULL, pool_id TEXT NOT NULL, maker_order_id TEXT NOT NULL, @@ -50,11 +52,12 @@ CREATE TABLE IF NOT EXISTS order_fills CREATE TABLE IF NOT EXISTS flashloans ( - id SERIAL PRIMARY KEY, + event_digest TEXT PRIMARY KEY, digest TEXT NOT NULL, sender TEXT NOT NULL, checkpoint BIGINT NOT NULL, timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL, + checkpoint_timestamp_ms BIGINT NOT NULL, package TEXT NOT NULL, borrow BOOLEAN NOT NULL, pool_id TEXT NOT NULL, @@ -64,11 +67,12 @@ CREATE TABLE IF NOT EXISTS flashloans CREATE TABLE IF NOT EXISTS pool_prices ( - id SERIAL PRIMARY KEY, + event_digest TEXT PRIMARY KEY, digest TEXT NOT NULL, sender TEXT NOT NULL, checkpoint BIGINT NOT NULL, timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL, + checkpoint_timestamp_ms BIGINT NOT NULL, package TEXT NOT NULL, target_pool TEXT NOT NULL, reference_pool TEXT NOT NULL, @@ -77,11 +81,12 @@ CREATE TABLE IF NOT EXISTS pool_prices CREATE TABLE IF NOT EXISTS balances ( - id SERIAL PRIMARY KEY, + event_digest TEXT PRIMARY KEY, digest TEXT NOT NULL, sender TEXT NOT NULL, checkpoint BIGINT NOT NULL, timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL, + checkpoint_timestamp_ms BIGINT NOT NULL, package TEXT NOT NULL, balance_manager_id TEXT NOT NULL, asset TEXT NOT NULL, @@ -91,11 +96,12 @@ CREATE TABLE IF NOT EXISTS balances CREATE TABLE IF NOT EXISTS trade_params_update ( - id SERIAL PRIMARY KEY, + event_digest TEXT PRIMARY KEY, digest TEXT NOT NULL, sender TEXT NOT NULL, checkpoint BIGINT NOT NULL, timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL, + checkpoint_timestamp_ms BIGINT NOT NULL, package TEXT NOT NULL, pool_id TEXT NOT NULL, taker_fee BIGINT NOT NULL, @@ -105,11 +111,12 @@ CREATE TABLE IF NOT EXISTS trade_params_update CREATE TABLE IF NOT EXISTS stakes ( - id SERIAL PRIMARY KEY, + event_digest TEXT PRIMARY KEY, digest TEXT NOT NULL, sender TEXT NOT NULL, checkpoint BIGINT NOT NULL, timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL, + checkpoint_timestamp_ms BIGINT NOT NULL, package TEXT NOT NULL, pool_id TEXT NOT NULL, balance_manager_id TEXT NOT NULL, @@ -120,11 +127,12 @@ CREATE TABLE IF NOT EXISTS stakes CREATE TABLE IF NOT EXISTS proposals ( - id SERIAL PRIMARY KEY, + event_digest TEXT PRIMARY KEY, digest TEXT NOT NULL, sender TEXT NOT NULL, 
checkpoint BIGINT NOT NULL, timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL, + checkpoint_timestamp_ms BIGINT NOT NULL, package TEXT NOT NULL, pool_id TEXT NOT NULL, balance_manager_id TEXT NOT NULL, @@ -136,11 +144,12 @@ CREATE TABLE IF NOT EXISTS proposals CREATE TABLE IF NOT EXISTS votes ( - id SERIAL PRIMARY KEY, + event_digest TEXT PRIMARY KEY, digest TEXT NOT NULL, sender TEXT NOT NULL, checkpoint BIGINT NOT NULL, timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL, + checkpoint_timestamp_ms BIGINT NOT NULL, package TEXT NOT NULL, pool_id TEXT NOT NULL, balance_manager_id TEXT NOT NULL, @@ -152,11 +161,12 @@ CREATE TABLE IF NOT EXISTS votes CREATE TABLE IF NOT EXISTS rebates ( - id SERIAL PRIMARY KEY, + event_digest TEXT PRIMARY KEY, digest TEXT NOT NULL, sender TEXT NOT NULL, checkpoint BIGINT NOT NULL, timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL, + checkpoint_timestamp_ms BIGINT NOT NULL, package TEXT NOT NULL, pool_id TEXT NOT NULL, balance_manager_id TEXT NOT NULL, diff --git a/crates/sui-deepbook-indexer/src/models.rs b/crates/sui-deepbook-indexer/src/models.rs index d0d57671e3bce..a6e30debcef38 100644 --- a/crates/sui-deepbook-indexer/src/models.rs +++ b/crates/sui-deepbook-indexer/src/models.rs @@ -2,22 +2,24 @@ // SPDX-License-Identifier: Apache-2.0 use diesel::data_types::PgTimestamp; -use diesel::{Identifiable, Insertable, Queryable, Selectable}; +use diesel::{Identifiable, Insertable, Queryable, QueryableByName, Selectable}; use serde::Serialize; use sui_indexer_builder::{Task, LIVE_TASK_TARGET_CHECKPOINT}; use crate::schema::{ - balances, flashloans, order_fills, order_updates, pool_prices, pools, progress_store, - proposals, rebates, stakes, sui_error_transactions, trade_params_update, votes, + balances, balances_summary, flashloans, order_fills, order_updates, pool_prices, pools, + progress_store, proposals, rebates, stakes, sui_error_transactions, trade_params_update, votes, }; #[derive(Queryable, Selectable, Insertable, Identifiable, Debug)] -#[diesel(table_name = order_updates, primary_key(digest))] +#[diesel(table_name = order_updates, primary_key(event_digest))] pub struct OrderUpdate { + pub event_digest: String, pub digest: String, pub sender: String, pub checkpoint: i64, + pub checkpoint_timestamp_ms: i64, pub package: String, pub status: String, pub pool_id: String, @@ -34,11 +36,13 @@ pub struct OrderUpdate { } #[derive(Queryable, Selectable, Insertable, Identifiable, Debug)] -#[diesel(table_name = order_fills, primary_key(digest))] +#[diesel(table_name = order_fills, primary_key(event_digest))] pub struct OrderFill { + pub event_digest: String, pub digest: String, pub sender: String, pub checkpoint: i64, + pub checkpoint_timestamp_ms: i64, pub package: String, pub pool_id: String, pub maker_order_id: String, // u128 @@ -65,12 +69,22 @@ pub struct OrderFillSummary { pub base_quantity: i64, } +#[derive(QueryableByName, Debug, Serialize)] +#[diesel(table_name = balances_summary)] +pub struct BalancesSummary { + pub asset: String, + pub amount: i64, + pub deposit: bool, +} + #[derive(Queryable, Selectable, Insertable, Identifiable, Debug)] -#[diesel(table_name = flashloans, primary_key(digest))] +#[diesel(table_name = flashloans, primary_key(event_digest))] pub struct Flashloan { + pub event_digest: String, pub digest: String, pub sender: String, pub checkpoint: i64, + pub checkpoint_timestamp_ms: i64, pub package: String, pub pool_id: String, pub borrow_quantity: i64, @@ -79,11 +93,13 @@ pub struct Flashloan { } #[derive(Queryable, 
Selectable, Insertable, Identifiable, Debug)] -#[diesel(table_name = pool_prices, primary_key(digest))] +#[diesel(table_name = pool_prices, primary_key(event_digest))] pub struct PoolPrice { + pub event_digest: String, pub digest: String, pub sender: String, pub checkpoint: i64, + pub checkpoint_timestamp_ms: i64, pub package: String, pub target_pool: String, pub reference_pool: String, @@ -91,11 +107,13 @@ pub struct PoolPrice { } #[derive(Queryable, Selectable, Insertable, Identifiable, Debug)] -#[diesel(table_name = balances, primary_key(digest))] +#[diesel(table_name = balances, primary_key(event_digest))] pub struct Balances { + pub event_digest: String, pub digest: String, pub sender: String, pub checkpoint: i64, + pub checkpoint_timestamp_ms: i64, pub package: String, pub balance_manager_id: String, pub asset: String, @@ -104,12 +122,15 @@ pub struct Balances { } #[derive(Queryable, Selectable, Insertable, Identifiable, Debug)] -#[diesel(table_name = proposals, primary_key(digest))] +#[diesel(table_name = proposals, primary_key(event_digest))] pub struct Proposals { + pub event_digest: String, pub digest: String, pub sender: String, pub checkpoint: i64, + pub checkpoint_timestamp_ms: i64, pub package: String, + pub pool_id: String, pub balance_manager_id: String, pub epoch: i64, pub taker_fee: i64, @@ -118,11 +139,13 @@ pub struct Proposals { } #[derive(Queryable, Selectable, Insertable, Identifiable, Debug)] -#[diesel(table_name = rebates, primary_key(digest))] +#[diesel(table_name = rebates, primary_key(event_digest))] pub struct Rebates { + pub event_digest: String, pub digest: String, pub sender: String, pub checkpoint: i64, + pub checkpoint_timestamp_ms: i64, pub package: String, pub pool_id: String, pub balance_manager_id: String, @@ -131,11 +154,13 @@ pub struct Rebates { } #[derive(Queryable, Selectable, Insertable, Identifiable, Debug)] -#[diesel(table_name = stakes, primary_key(digest))] +#[diesel(table_name = stakes, primary_key(event_digest))] pub struct Stakes { + pub event_digest: String, pub digest: String, pub sender: String, pub checkpoint: i64, + pub checkpoint_timestamp_ms: i64, pub package: String, pub pool_id: String, pub balance_manager_id: String, @@ -145,11 +170,13 @@ pub struct Stakes { } #[derive(Queryable, Selectable, Insertable, Identifiable, Debug)] -#[diesel(table_name = trade_params_update, primary_key(digest))] +#[diesel(table_name = trade_params_update, primary_key(event_digest))] pub struct TradeParamsUpdate { + pub event_digest: String, pub digest: String, pub sender: String, pub checkpoint: i64, + pub checkpoint_timestamp_ms: i64, pub package: String, pub pool_id: String, pub taker_fee: i64, @@ -158,11 +185,13 @@ pub struct TradeParamsUpdate { } #[derive(Queryable, Selectable, Insertable, Identifiable, Debug)] -#[diesel(table_name = votes, primary_key(digest))] +#[diesel(table_name = votes, primary_key(event_digest))] pub struct Votes { + pub event_digest: String, pub digest: String, pub sender: String, pub checkpoint: i64, + pub checkpoint_timestamp_ms: i64, pub package: String, pub pool_id: String, pub balance_manager_id: String, diff --git a/crates/sui-deepbook-indexer/src/schema.rs b/crates/sui-deepbook-indexer/src/schema.rs index 61abd7987b533..849dd530c28f7 100644 --- a/crates/sui-deepbook-indexer/src/schema.rs +++ b/crates/sui-deepbook-indexer/src/schema.rs @@ -3,12 +3,13 @@ // @generated automatically by Diesel CLI. diesel::table! 
{ - balances (id) { - id -> Int4, + balances (event_digest) { + event_digest -> Text, digest -> Text, sender -> Text, checkpoint -> Int8, timestamp -> Timestamp, + checkpoint_timestamp_ms -> Int8, package -> Text, balance_manager_id -> Text, asset -> Text, @@ -18,12 +19,13 @@ diesel::table! { } diesel::table! { - flashloans (id) { - id -> Int4, + flashloans (event_digest) { + event_digest -> Text, digest -> Text, sender -> Text, checkpoint -> Int8, timestamp -> Timestamp, + checkpoint_timestamp_ms -> Int8, package -> Text, borrow -> Bool, pool_id -> Text, @@ -33,12 +35,13 @@ diesel::table! { } diesel::table! { - order_fills (id) { - id -> Int4, + order_fills (event_digest) { + event_digest -> Text, digest -> Text, sender -> Text, checkpoint -> Int8, timestamp -> Timestamp, + checkpoint_timestamp_ms -> Int8, package -> Text, pool_id -> Text, maker_order_id -> Text, @@ -60,12 +63,13 @@ diesel::table! { } diesel::table! { - order_updates (id) { - id -> Int4, + order_updates (event_digest) { + event_digest -> Text, digest -> Text, sender -> Text, checkpoint -> Int8, timestamp -> Timestamp, + checkpoint_timestamp_ms -> Int8, package -> Text, status -> Text, pool_id -> Text, @@ -83,12 +87,13 @@ diesel::table! { } diesel::table! { - pool_prices (id) { - id -> Int4, + pool_prices (event_digest) { + event_digest -> Text, digest -> Text, sender -> Text, checkpoint -> Int8, timestamp -> Timestamp, + checkpoint_timestamp_ms -> Int8, package -> Text, target_pool -> Text, reference_pool -> Text, @@ -124,12 +129,13 @@ diesel::table! { } diesel::table! { - proposals (id) { - id -> Int4, + proposals (event_digest) { + event_digest -> Text, digest -> Text, sender -> Text, checkpoint -> Int8, timestamp -> Timestamp, + checkpoint_timestamp_ms -> Int8, package -> Text, pool_id -> Text, balance_manager_id -> Text, @@ -141,12 +147,13 @@ diesel::table! { } diesel::table! { - rebates (id) { - id -> Int4, + rebates (event_digest) { + event_digest -> Text, digest -> Text, sender -> Text, checkpoint -> Int8, timestamp -> Timestamp, + checkpoint_timestamp_ms -> Int8, package -> Text, pool_id -> Text, balance_manager_id -> Text, @@ -156,12 +163,13 @@ diesel::table! { } diesel::table! { - stakes (id) { - id -> Int4, + stakes (event_digest) { + event_digest -> Text, digest -> Text, sender -> Text, checkpoint -> Int8, timestamp -> Timestamp, + checkpoint_timestamp_ms -> Int8, package -> Text, pool_id -> Text, balance_manager_id -> Text, @@ -184,12 +192,13 @@ diesel::table! { } diesel::table! { - trade_params_update (id) { - id -> Int4, + trade_params_update (event_digest) { + event_digest -> Text, digest -> Text, sender -> Text, checkpoint -> Int8, timestamp -> Timestamp, + checkpoint_timestamp_ms -> Int8, package -> Text, pool_id -> Text, taker_fee -> Int8, @@ -199,12 +208,13 @@ diesel::table! { } diesel::table! { - votes (id) { - id -> Int4, + votes (event_digest) { + event_digest -> Text, digest -> Text, sender -> Text, checkpoint -> Int8, timestamp -> Timestamp, + checkpoint_timestamp_ms -> Int8, package -> Text, pool_id -> Text, balance_manager_id -> Text, @@ -230,3 +240,11 @@ diesel::allow_tables_to_appear_in_same_query!( trade_params_update, votes, ); + +diesel::table! 
{ + balances_summary (asset) { + asset -> Text, + amount -> Int8, + deposit -> Bool, + } +} diff --git a/crates/sui-deepbook-indexer/src/server.rs b/crates/sui-deepbook-indexer/src/server.rs index 1f2a5a9985be6..0d2b1ff85092f 100644 --- a/crates/sui-deepbook-indexer/src/server.rs +++ b/crates/sui-deepbook-indexer/src/server.rs @@ -3,8 +3,8 @@ use crate::{ error::DeepBookError, - models::{OrderFillSummary, Pools}, - schema, + models::{BalancesSummary, OrderFillSummary, Pools}, + schema::{self}, sui_deepbook_indexer::PgDeepbookPersistent, }; use axum::{ @@ -18,14 +18,18 @@ use diesel::BoolExpressionMethods; use diesel::QueryDsl; use diesel::{ExpressionMethods, SelectableHelper}; use diesel_async::RunQueryDsl; -use std::net::SocketAddr; use std::time::{SystemTime, UNIX_EPOCH}; +use std::{collections::HashMap, net::SocketAddr}; use tokio::{net::TcpListener, task::JoinHandle}; pub const GET_POOLS_PATH: &str = "/get_pools"; -pub const GET_24HR_VOLUME_PATH: &str = "/get_24hr_volume/:pool_id"; +pub const GET_24HR_VOLUME_PATH: &str = "/get_24hr_volume/:pool_ids"; pub const GET_24HR_VOLUME_BY_BALANCE_MANAGER_ID: &str = "/get_24hr_volume_by_balance_manager_id/:pool_id/:balance_manager_id"; +pub const GET_HISTORICAL_VOLUME_PATH: &str = + "/get_historical_volume/:pool_ids/:start_time/:end_time"; +pub const GET_NET_DEPOSITS: &str = "/get_net_deposits/:asset_ids/:timestamp"; +pub const GET_MANAGER_BALANCE: &str = "/get_manager_balance/:manager_id"; pub fn run_server(socket_address: SocketAddr, state: PgDeepbookPersistent) -> JoinHandle<()> { tokio::spawn(async move { @@ -39,10 +43,13 @@ pub(crate) fn make_router(state: PgDeepbookPersistent) -> Router { .route("/", get(health_check)) .route(GET_POOLS_PATH, get(get_pools)) .route(GET_24HR_VOLUME_PATH, get(get_24hr_volume)) + .route(GET_HISTORICAL_VOLUME_PATH, get(get_historical_volume)) .route( GET_24HR_VOLUME_BY_BALANCE_MANAGER_ID, get(get_24hr_volume_by_balance_manager_id), ) + .route(GET_MANAGER_BALANCE, get(get_manager_balance)) + .route(GET_NET_DEPOSITS, get(get_net_deposits)) .with_state(state) } @@ -85,22 +92,61 @@ async fn get_pools( } async fn get_24hr_volume( - Path(pool_id): Path<String>, + Path(pool_ids): Path<String>, State(state): State<PgDeepbookPersistent>, -) -> Result<Json<u64>, DeepBookError> { +) -> Result<Json<HashMap<String, u64>>, DeepBookError> { let connection = &mut state.pool.get().await?; let unix_ts = SystemTime::now() .duration_since(UNIX_EPOCH) .unwrap() .as_millis() as i64; let day_ago = unix_ts - 24 * 60 * 60 * 1000; - let vols: Vec<i64> = schema::order_fills::table - .select(schema::order_fills::base_quantity) - .filter(schema::order_fills::pool_id.eq(pool_id)) + + let pool_ids_list: Vec<String> = pool_ids.split(',').map(|s| s.to_string()).collect(); + + let results: Vec<(String, i64)> = schema::order_fills::table + .select(( + schema::order_fills::pool_id, + schema::order_fills::base_quantity, + )) + .filter(schema::order_fills::pool_id.eq_any(pool_ids_list)) + .filter(schema::order_fills::onchain_timestamp.gt(day_ago)) .load(connection) .await?; - Ok(Json(vols.into_iter().map(|v| v as u64).sum())) + + let mut volume_by_pool = HashMap::new(); + for (pool_id, volume) in results { + *volume_by_pool.entry(pool_id).or_insert(0) += volume as u64; + } + + Ok(Json(volume_by_pool)) +} + +async fn get_historical_volume( + Path((pool_ids, start_time, end_time)): Path<(String, i64, i64)>, + State(state): State<PgDeepbookPersistent>, +) -> Result<Json<HashMap<String, u64>>, DeepBookError> { + let connection = &mut state.pool.get().await?; + + let pool_ids_list: Vec<String> = pool_ids.split(',').map(|s| s.to_string()).collect(); + + let results: Vec<(String, i64)> =
schema::order_fills::table + .select(( + schema::order_fills::pool_id, + schema::order_fills::base_quantity, + )) + .filter(schema::order_fills::pool_id.eq_any(pool_ids_list)) + .filter(schema::order_fills::onchain_timestamp.between(start_time, end_time)) + .load(connection) + .await?; + + // Aggregate volume by pool + let mut volume_by_pool = HashMap::new(); + for (pool_id, volume) in results { + *volume_by_pool.entry(pool_id).or_insert(0) += volume as u64; + } + + Ok(Json(volume_by_pool)) } async fn get_24hr_volume_by_balance_manager_id( @@ -142,3 +188,71 @@ async fn get_24hr_volume_by_balance_manager_id( Ok(Json(vec![maker_vol, taker_vol])) } + +async fn get_manager_balance( + Path(manager_id): Path<String>, + State(state): State<PgDeepbookPersistent>, +) -> Result<Json<HashMap<String, i64>>, DeepBookError> { + let connection = &mut state.pool.get().await?; + + // Query to get the balance for all assets for the specified manager_id + let query = format!( + "SELECT asset, SUM(CASE WHEN deposit THEN amount ELSE -amount END)::bigint AS amount, deposit FROM balances \ + WHERE balance_manager_id = '{}' GROUP BY asset, deposit", + manager_id + ); + + let results: Vec<BalancesSummary> = diesel::sql_query(query).load(connection).await?; + + // Aggregate results into a HashMap as {asset: balance} + let mut manager_balances = HashMap::new(); + for result in results { + let mut asset = result.asset; + if !asset.starts_with("0x") { + asset.insert_str(0, "0x"); + } + manager_balances.insert(asset, result.amount); + } + + Ok(Json(manager_balances)) +} + +#[debug_handler] +async fn get_net_deposits( + Path((asset_ids, timestamp)): Path<(String, String)>, + State(state): State<PgDeepbookPersistent>, +) -> Result<Json<HashMap<String, i64>>, DeepBookError> { + let connection = &mut state.pool.get().await?; + let mut query = + "SELECT asset, SUM(amount)::bigint AS amount, deposit FROM balances WHERE checkpoint_timestamp_ms < " + .to_string(); + query.push_str(&timestamp); + query.push_str("000 AND asset in ("); + for asset in asset_ids.split(",") { + if asset.starts_with("0x") { + let len = asset.len(); + query.push_str(&format!("'{}',", &asset[2..len])); + } else { + query.push_str(&format!("'{}',", asset)); + } + } + query.pop(); + query.push_str(") GROUP BY asset, deposit"); + + let results: Vec<BalancesSummary> = diesel::sql_query(query).load(connection).await?; + let mut net_deposits = HashMap::new(); + for result in results { + let mut asset = result.asset; + if !asset.starts_with("0x") { + asset.insert_str(0, "0x"); + } + let amount = result.amount; + if result.deposit { + *net_deposits.entry(asset).or_insert(0) += amount; + } else { + *net_deposits.entry(asset).or_insert(0) -= amount; + } + } + + Ok(Json(net_deposits)) +} diff --git a/crates/sui-deepbook-indexer/src/sui_deepbook_indexer.rs b/crates/sui-deepbook-indexer/src/sui_deepbook_indexer.rs index 92eac3b0ad449..0e71f8040ba2a 100644 --- a/crates/sui-deepbook-indexer/src/sui_deepbook_indexer.rs +++ b/crates/sui-deepbook-indexer/src/sui_deepbook_indexer.rs @@ -318,19 +318,23 @@ impl DataMapper for SuiDeepBookDataMapper { match &data.events { Some(events) => { let processed_sui_events = - events.data.iter().try_fold(vec![], |mut result, ev| { - if let Some(data) = process_sui_event( - ev, - &data, - checkpoint_num, - // timestamp_ms, - self.package_id, - )? { - result.push(data); - } - Ok::<_, anyhow::Error>(result) - })?; - + events + .data + .iter() + .enumerate() + .try_fold(vec![], |mut result, (i, ev)| { + if let Some(data) = process_sui_event( + ev, + i, + &data, + checkpoint_num, + timestamp_ms, + self.package_id, + )?
{ + result.push(data); + } + Ok::<_, anyhow::Error>(result) + })?; if !processed_sui_events.is_empty() { info!( "SUI: Extracted {} deepbook data entries for tx {}.", @@ -367,9 +371,10 @@ impl DataMapper for SuiDeepBookDataMapper { fn process_sui_event( ev: &Event, + event_index: usize, tx: &CheckpointTransaction, checkpoint: u64, - // timestamp_ms: u64, + checkpoint_timestamp_ms: u64, package_id: ObjectID, ) -> Result, anyhow::Error> { Ok(if ev.type_.address == *package_id { @@ -383,10 +388,14 @@ fn process_sui_event( } else { "".to_string() }; + let mut event_digest = tx.transaction.digest().to_string(); + event_digest.push_str(&event_index.to_string()); let txn_data = Some(ProcessedTxnData::OrderUpdate(OrderUpdate { digest: tx.transaction.digest().to_string(), sender: tx.transaction.sender_address().to_string(), + event_digest, checkpoint, + checkpoint_timestamp_ms, package, status: OrderUpdateStatus::Placed, pool_id: move_event.pool_id.to_string(), @@ -414,10 +423,14 @@ fn process_sui_event( } else { "".to_string() }; + let mut event_digest = tx.transaction.digest().to_string(); + event_digest.push_str(&event_index.to_string()); let txn_data = Some(ProcessedTxnData::OrderUpdate(OrderUpdate { digest: tx.transaction.digest().to_string(), + event_digest, sender: tx.transaction.sender_address().to_string(), checkpoint, + checkpoint_timestamp_ms, package, status: OrderUpdateStatus::Modified, pool_id: move_event.pool_id.to_string(), @@ -445,10 +458,14 @@ fn process_sui_event( } else { "".to_string() }; + let mut event_digest = tx.transaction.digest().to_string(); + event_digest.push_str(&event_index.to_string()); let txn_data = Some(ProcessedTxnData::OrderUpdate(OrderUpdate { digest: tx.transaction.digest().to_string(), + event_digest, sender: tx.transaction.sender_address().to_string(), checkpoint, + checkpoint_timestamp_ms, package, status: OrderUpdateStatus::Canceled, pool_id: move_event.pool_id.to_string(), @@ -477,10 +494,14 @@ fn process_sui_event( } else { "".to_string() }; + let mut event_digest = tx.transaction.digest().to_string(); + event_digest.push_str(&event_index.to_string()); let txn_data = Some(ProcessedTxnData::OrderUpdate(OrderUpdate { digest: tx.transaction.digest().to_string(), + event_digest, sender: tx.transaction.sender_address().to_string(), checkpoint, + checkpoint_timestamp_ms, package, status: OrderUpdateStatus::Expired, pool_id: move_event.pool_id.to_string(), @@ -509,10 +530,14 @@ fn process_sui_event( } else { "".to_string() }; + let mut event_digest = tx.transaction.digest().to_string(); + event_digest.push_str(&event_index.to_string()); let txn_data = Some(ProcessedTxnData::OrderFill(OrderFill { digest: tx.transaction.digest().to_string(), + event_digest, sender: tx.transaction.sender_address().to_string(), checkpoint, + checkpoint_timestamp_ms, package, pool_id: move_event.pool_id.to_string(), maker_order_id: move_event.maker_order_id, @@ -544,10 +569,14 @@ fn process_sui_event( } else { "".to_string() }; + let mut event_digest = tx.transaction.digest().to_string(); + event_digest.push_str(&event_index.to_string()); let txn_data = Some(ProcessedTxnData::Flashloan(Flashloan { digest: tx.transaction.digest().to_string(), + event_digest, sender: tx.transaction.sender_address().to_string(), checkpoint, + checkpoint_timestamp_ms, package, pool_id: move_event.pool_id.to_string(), borrow_quantity: move_event.borrow_quantity, @@ -567,10 +596,14 @@ fn process_sui_event( } else { "".to_string() }; + let mut event_digest = tx.transaction.digest().to_string(); + 
event_digest.push_str(&event_index.to_string()); let txn_data = Some(ProcessedTxnData::PoolPrice(PoolPrice { digest: tx.transaction.digest().to_string(), + event_digest, sender: tx.transaction.sender_address().to_string(), checkpoint, + checkpoint_timestamp_ms, package, target_pool: move_event.target_pool.to_string(), conversion_rate: move_event.conversion_rate, @@ -589,10 +622,14 @@ fn process_sui_event( } else { "".to_string() }; + let mut event_digest = tx.transaction.digest().to_string(); + event_digest.push_str(&event_index.to_string()); let txn_data = Some(ProcessedTxnData::Balances(Balances { digest: tx.transaction.digest().to_string(), + event_digest, sender: tx.transaction.sender_address().to_string(), checkpoint, + checkpoint_timestamp_ms, package, balance_manager_id: move_event.balance_manager_id.to_string(), asset: move_event.asset.to_string(), @@ -612,11 +649,16 @@ fn process_sui_event( } else { "".to_string() }; + let mut event_digest = tx.transaction.digest().to_string(); + event_digest.push_str(&event_index.to_string()); let txn_data = Some(ProcessedTxnData::Proposals(Proposals { digest: tx.transaction.digest().to_string(), + event_digest, sender: tx.transaction.sender_address().to_string(), checkpoint, + checkpoint_timestamp_ms, package, + pool_id: move_event.pool_id.to_string(), balance_manager_id: move_event.balance_manager_id.to_string(), epoch: move_event.epoch, taker_fee: move_event.taker_fee, @@ -636,10 +678,14 @@ fn process_sui_event( } else { "".to_string() }; + let mut event_digest = tx.transaction.digest().to_string(); + event_digest.push_str(&event_index.to_string()); let txn_data = Some(ProcessedTxnData::Rebates(Rebates { digest: tx.transaction.digest().to_string(), + event_digest, sender: tx.transaction.sender_address().to_string(), checkpoint, + checkpoint_timestamp_ms, package, pool_id: move_event.pool_id.to_string(), balance_manager_id: move_event.balance_manager_id.to_string(), @@ -659,10 +705,14 @@ fn process_sui_event( } else { "".to_string() }; + let mut event_digest = tx.transaction.digest().to_string(); + event_digest.push_str(&event_index.to_string()); let txn_data = Some(ProcessedTxnData::Stakes(Stakes { digest: tx.transaction.digest().to_string(), + event_digest, sender: tx.transaction.sender_address().to_string(), checkpoint, + checkpoint_timestamp_ms, package, pool_id: move_event.pool_id.to_string(), balance_manager_id: move_event.balance_manager_id.to_string(), @@ -683,6 +733,8 @@ fn process_sui_event( } else { "".to_string() }; + let mut event_digest = tx.transaction.digest().to_string(); + event_digest.push_str(&event_index.to_string()); let shared_objects = &tx.input_objects; let mut pool_id = "0x0".to_string(); for obj in shared_objects.iter() { @@ -697,8 +749,10 @@ fn process_sui_event( } let txn_data = Some(ProcessedTxnData::TradeParamsUpdate(TradeParamsUpdate { digest: tx.transaction.digest().to_string(), + event_digest, sender: tx.transaction.sender_address().to_string(), checkpoint, + checkpoint_timestamp_ms, package, pool_id, taker_fee: move_event.taker_fee, @@ -718,10 +772,14 @@ fn process_sui_event( } else { "".to_string() }; + let mut event_digest = tx.transaction.digest().to_string(); + event_digest.push_str(&event_index.to_string()); let txn_data = Some(ProcessedTxnData::Votes(Votes { digest: tx.transaction.digest().to_string(), + event_digest, sender: tx.transaction.sender_address().to_string(), checkpoint, + checkpoint_timestamp_ms, package, pool_id: move_event.pool_id.to_string(), balance_manager_id: 
move_event.balance_manager_id.to_string(), diff --git a/crates/sui-deepbook-indexer/src/types.rs b/crates/sui-deepbook-indexer/src/types.rs index 251299f0b4870..c17fbe01dc7af 100644 --- a/crates/sui-deepbook-indexer/src/types.rs +++ b/crates/sui-deepbook-indexer/src/types.rs @@ -55,9 +55,11 @@ impl Display for OrderUpdateStatus { #[derive(Clone, Debug)] pub struct OrderUpdate { - pub(crate) digest: String, + pub digest: String, + pub(crate) event_digest: String, pub(crate) sender: String, pub(crate) checkpoint: u64, + pub(crate) checkpoint_timestamp_ms: u64, pub(crate) package: String, pub(crate) status: OrderUpdateStatus, pub(crate) pool_id: String, @@ -77,8 +79,10 @@ impl OrderUpdate { pub(crate) fn to_db(&self) -> DBOrderUpdate { DBOrderUpdate { digest: self.digest.clone(), + event_digest: self.event_digest.clone(), sender: self.sender.clone(), checkpoint: self.checkpoint as i64, + checkpoint_timestamp_ms: self.checkpoint_timestamp_ms as i64, package: self.package.clone(), status: self.status.clone().to_string(), pool_id: self.pool_id.clone(), @@ -99,8 +103,10 @@ impl OrderUpdate { #[derive(Clone, Debug)] pub struct OrderFill { pub(crate) digest: String, + pub(crate) event_digest: String, pub(crate) sender: String, pub(crate) checkpoint: u64, + pub(crate) checkpoint_timestamp_ms: u64, pub(crate) package: String, pub(crate) pool_id: String, pub(crate) maker_order_id: u128, @@ -124,8 +130,10 @@ impl OrderFill { pub(crate) fn to_db(&self) -> DBOrderFill { DBOrderFill { digest: self.digest.clone(), + event_digest: self.event_digest.clone(), sender: self.sender.clone(), checkpoint: self.checkpoint as i64, + checkpoint_timestamp_ms: self.checkpoint_timestamp_ms as i64, package: self.package.clone(), pool_id: self.pool_id.clone(), maker_order_id: BigDecimal::from(self.maker_order_id).to_string(), @@ -150,8 +158,10 @@ impl OrderFill { #[derive(Clone, Debug)] pub struct Flashloan { pub(crate) digest: String, + pub(crate) event_digest: String, pub(crate) sender: String, pub(crate) checkpoint: u64, + pub(crate) checkpoint_timestamp_ms: u64, pub(crate) package: String, pub(crate) borrow: bool, pub(crate) pool_id: String, @@ -163,8 +173,10 @@ impl Flashloan { pub(crate) fn to_db(&self) -> DBFlashloan { DBFlashloan { digest: self.digest.clone(), + event_digest: self.event_digest.clone(), sender: self.sender.clone(), checkpoint: self.checkpoint as i64, + checkpoint_timestamp_ms: self.checkpoint_timestamp_ms as i64, package: self.package.clone(), borrow: self.borrow, pool_id: self.pool_id.clone(), @@ -177,8 +189,10 @@ impl Flashloan { #[derive(Clone, Debug)] pub struct PoolPrice { pub(crate) digest: String, + pub(crate) event_digest: String, pub(crate) sender: String, pub(crate) checkpoint: u64, + pub(crate) checkpoint_timestamp_ms: u64, pub(crate) package: String, pub(crate) target_pool: String, pub(crate) reference_pool: String, @@ -189,8 +203,10 @@ impl PoolPrice { pub(crate) fn to_db(&self) -> DBPoolPrice { DBPoolPrice { digest: self.digest.clone(), + event_digest: self.event_digest.clone(), sender: self.sender.clone(), checkpoint: self.checkpoint as i64, + checkpoint_timestamp_ms: self.checkpoint_timestamp_ms as i64, package: self.package.clone(), target_pool: self.target_pool.clone(), reference_pool: self.reference_pool.clone(), @@ -201,22 +217,26 @@ impl PoolPrice { #[derive(Clone, Debug)] pub struct Balances { - pub digest: String, - pub sender: String, - pub checkpoint: u64, - pub package: String, - pub balance_manager_id: String, - pub asset: String, - pub amount: u64, - pub deposit: bool, + 
pub(crate) digest: String, + pub(crate) event_digest: String, + pub(crate) sender: String, + pub(crate) checkpoint: u64, + pub(crate) checkpoint_timestamp_ms: u64, + pub(crate) package: String, + pub(crate) balance_manager_id: String, + pub(crate) asset: String, + pub(crate) amount: u64, + pub(crate) deposit: bool, } impl Balances { pub(crate) fn to_db(&self) -> DBBalances { DBBalances { digest: self.digest.clone(), + event_digest: self.event_digest.clone(), sender: self.sender.clone(), checkpoint: self.checkpoint as i64, + checkpoint_timestamp_ms: self.checkpoint_timestamp_ms as i64, package: self.package.clone(), balance_manager_id: self.balance_manager_id.clone(), asset: self.asset.clone(), @@ -229,9 +249,12 @@ impl Balances { #[derive(Clone, Debug)] pub struct Proposals { pub(crate) digest: String, + pub(crate) event_digest: String, pub(crate) sender: String, pub(crate) checkpoint: u64, + pub(crate) checkpoint_timestamp_ms: u64, pub(crate) package: String, + pub(crate) pool_id: String, pub(crate) balance_manager_id: String, pub(crate) epoch: u64, pub(crate) taker_fee: u64, @@ -243,9 +266,12 @@ impl Proposals { pub(crate) fn to_db(&self) -> DBProposals { DBProposals { digest: self.digest.clone(), + event_digest: self.event_digest.clone(), sender: self.sender.clone(), checkpoint: self.checkpoint as i64, + checkpoint_timestamp_ms: self.checkpoint_timestamp_ms as i64, package: self.package.clone(), + pool_id: self.pool_id.clone(), balance_manager_id: self.balance_manager_id.clone(), epoch: self.epoch as i64, taker_fee: self.taker_fee as i64, @@ -258,8 +284,10 @@ impl Proposals { #[derive(Clone, Debug)] pub struct Rebates { pub(crate) digest: String, + pub(crate) event_digest: String, pub(crate) sender: String, pub(crate) checkpoint: u64, + pub(crate) checkpoint_timestamp_ms: u64, pub(crate) package: String, pub(crate) pool_id: String, pub(crate) balance_manager_id: String, @@ -271,8 +299,10 @@ impl Rebates { pub(crate) fn to_db(&self) -> DBRebates { DBRebates { digest: self.digest.clone(), + event_digest: self.event_digest.clone(), sender: self.sender.clone(), checkpoint: self.checkpoint as i64, + checkpoint_timestamp_ms: self.checkpoint_timestamp_ms as i64, package: self.package.clone(), pool_id: self.pool_id.clone(), balance_manager_id: self.balance_manager_id.clone(), @@ -285,8 +315,10 @@ impl Rebates { #[derive(Clone, Debug)] pub struct Stakes { pub(crate) digest: String, + pub(crate) event_digest: String, pub(crate) sender: String, pub(crate) checkpoint: u64, + pub(crate) checkpoint_timestamp_ms: u64, pub(crate) package: String, pub(crate) pool_id: String, pub(crate) balance_manager_id: String, @@ -299,8 +331,10 @@ impl Stakes { pub(crate) fn to_db(&self) -> DBStakes { DBStakes { digest: self.digest.clone(), + event_digest: self.event_digest.clone(), sender: self.sender.clone(), checkpoint: self.checkpoint as i64, + checkpoint_timestamp_ms: self.checkpoint_timestamp_ms as i64, package: self.package.clone(), pool_id: self.pool_id.clone(), balance_manager_id: self.balance_manager_id.clone(), @@ -314,8 +348,10 @@ impl Stakes { #[derive(Clone, Debug)] pub struct TradeParamsUpdate { pub(crate) digest: String, + pub(crate) event_digest: String, pub(crate) sender: String, pub(crate) checkpoint: u64, + pub(crate) checkpoint_timestamp_ms: u64, pub(crate) package: String, pub(crate) pool_id: String, pub(crate) taker_fee: u64, @@ -327,8 +363,10 @@ impl TradeParamsUpdate { pub(crate) fn to_db(&self) -> DBTradeParamsUpdate { DBTradeParamsUpdate { digest: self.digest.clone(), + event_digest: 
self.event_digest.clone(), sender: self.sender.clone(), checkpoint: self.checkpoint as i64, + checkpoint_timestamp_ms: self.checkpoint_timestamp_ms as i64, package: self.package.clone(), pool_id: self.pool_id.clone(), taker_fee: self.taker_fee as i64, @@ -341,8 +379,10 @@ impl TradeParamsUpdate { #[derive(Clone, Debug)] pub struct Votes { pub(crate) digest: String, + pub(crate) event_digest: String, pub(crate) sender: String, pub(crate) checkpoint: u64, + pub(crate) checkpoint_timestamp_ms: u64, pub(crate) package: String, pub(crate) pool_id: String, pub(crate) balance_manager_id: String, @@ -356,8 +396,10 @@ impl Votes { pub(crate) fn to_db(&self) -> DBVotes { DBVotes { digest: self.digest.clone(), + event_digest: self.event_digest.clone(), sender: self.sender.clone(), checkpoint: self.checkpoint as i64, + checkpoint_timestamp_ms: self.checkpoint_timestamp_ms as i64, package: self.package.clone(), pool_id: self.pool_id.clone(), balance_manager_id: self.balance_manager_id.clone(), diff --git a/crates/sui-e2e-tests/Cargo.toml b/crates/sui-e2e-tests/Cargo.toml index 9d4370817302d..7cfa4e5d1353b 100644 --- a/crates/sui-e2e-tests/Cargo.toml +++ b/crates/sui-e2e-tests/Cargo.toml @@ -31,6 +31,8 @@ anyhow.workspace = true async-trait.workspace = true clap.workspace = true serde_json.workspace = true +reqwest.workspace = true +prost.workspace = true move-binary-format.workspace = true move-package.workspace = true @@ -39,7 +41,6 @@ fastcrypto.workspace = true fastcrypto-zkp.workspace = true move-core-types.workspace = true -sui-bridge.workspace = true sui-core.workspace = true sui-framework.workspace = true sui-json-rpc.workspace = true diff --git a/crates/sui-e2e-tests/tests/passkey_e2e_tests.rs b/crates/sui-e2e-tests/tests/passkey_e2e_tests.rs index 505f240d82edc..30016d0d3a604 100644 --- a/crates/sui-e2e-tests/tests/passkey_e2e_tests.rs +++ b/crates/sui-e2e-tests/tests/passkey_e2e_tests.rs @@ -1,6 +1,6 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use fastcrypto::{hash::HashFunction, traits::ToFromBytes}; +use fastcrypto::traits::ToFromBytes; use p256::pkcs8::DecodePublicKey; use passkey_authenticator::{Authenticator, UserValidationMethod}; use passkey_client::Client; @@ -15,11 +15,12 @@ use passkey_types::{ }, Bytes, Passkey, }; -use shared_crypto::intent::{Intent, IntentMessage, INTENT_PREFIX_LENGTH}; +use shared_crypto::intent::{Intent, IntentMessage}; use std::net::SocketAddr; use sui_core::authority_client::AuthorityAPI; use sui_macros::sim_test; use sui_test_transaction_builder::TestTransactionBuilder; +use sui_types::crypto::Signature; use sui_types::error::UserInputError; use sui_types::error::{SuiError, SuiResult}; use sui_types::signature::GenericSignature; @@ -30,10 +31,6 @@ use sui_types::{ passkey_authenticator::{to_signing_message, PasskeyAuthenticator}, transaction::TransactionData, }; -use sui_types::{ - crypto::{DefaultHash, Signature}, - passkey_authenticator::to_signing_digest, -}; use test_cluster::TestCluster; use test_cluster::TestClusterBuilder; use url::Url; @@ -159,23 +156,22 @@ async fn create_credential_and_sign_test_tx( // Compute the challenge = blake2b_hash(intent_msg(tx)) for passkey credential request. // If change_intent, mangle the intent bytes. If change_tx, mangle the hashed tx bytes. 
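Note on the deepbook indexer changes above: the serial `id` primary key is replaced by an `event_digest` that is unique per event, built as the transaction digest string with the event's index within that transaction appended, and a `checkpoint_timestamp_ms` is carried through every `to_db()` conversion. A minimal sketch of that key scheme; the helper name and the test are illustrative, not part of this diff:

```rust
/// Illustrative helper mirroring the pattern repeated in process_sui_event():
/// event_digest = "<tx digest string><event index within the tx>".
fn make_event_digest(tx_digest: &str, event_index: usize) -> String {
    let mut event_digest = tx_digest.to_string();
    event_digest.push_str(&event_index.to_string());
    event_digest
}

#[cfg(test)]
mod event_digest_tests {
    use super::make_event_digest;

    #[test]
    fn distinct_keys_for_events_in_one_tx() {
        let tx = "FakeTxDigestBase58";
        // Two events emitted by the same transaction now map to distinct rows.
        assert_ne!(make_event_digest(tx, 0), make_event_digest(tx, 1));
        assert_eq!(make_event_digest(tx, 2), format!("{tx}2"));
    }
}
```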
- let mut extended = [0; INTENT_PREFIX_LENGTH + DefaultHash::OUTPUT_SIZE]; - let passkey_digest = if change_intent { - extended[..INTENT_PREFIX_LENGTH].copy_from_slice(&Intent::personal_message().to_bytes()); - extended[INTENT_PREFIX_LENGTH..].copy_from_slice(&to_signing_digest(&intent_msg)); - extended + let passkey_challenge = if change_intent { + to_signing_message(&IntentMessage::new( + Intent::personal_message(), + intent_msg.value.clone(), + )) + .to_vec() } else if change_tx { - extended[..INTENT_PREFIX_LENGTH].copy_from_slice(&intent_msg.intent.to_bytes()); - extended[INTENT_PREFIX_LENGTH..].copy_from_slice(&random_vec(32)); - extended + random_vec(32) } else { - to_signing_message(&intent_msg) + to_signing_message(&intent_msg).to_vec() }; // Request a signature from passkey with challenge set to passkey_digest. let credential_request = CredentialRequestOptions { public_key: PublicKeyCredentialRequestOptions { - challenge: Bytes::from(passkey_digest.to_vec()), + challenge: Bytes::from(passkey_challenge), timeout: None, rp_id: Some(String::from(origin.domain().unwrap())), allow_credentials: None, diff --git a/crates/sui-e2e-tests/tests/rest/checkpoints.rs b/crates/sui-e2e-tests/tests/rest/checkpoints.rs new file mode 100644 index 0000000000000..d72ac52afdf61 --- /dev/null +++ b/crates/sui-e2e-tests/tests/rest/checkpoints.rs @@ -0,0 +1,182 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use prost::Message; +use sui_macros::sim_test; +use sui_rest_api::client::sdk::Client; +use sui_rest_api::client::Client as CoreClient; +use sui_rest_api::{CheckpointResponse, ListCheckpointsQueryParameters}; +use sui_sdk_types::types::SignedCheckpointSummary; +use test_cluster::TestClusterBuilder; + +use crate::transfer_coin; + +#[sim_test] +async fn list_checkpoint() { + let test_cluster = TestClusterBuilder::new().build().await; + + let _transaction_digest = transfer_coin(&test_cluster.wallet).await; + + let client = Client::new(test_cluster.rpc_url()).unwrap(); + let core_client = CoreClient::new(test_cluster.rpc_url()); + + let checkpoints = client + .list_checkpoints(&ListCheckpointsQueryParameters::default()) + .await + .unwrap() + .into_inner(); + + assert!(!checkpoints.is_empty()); + + let _latest = client.get_latest_checkpoint().await.unwrap().into_inner(); + + let _latest = core_client.get_latest_checkpoint().await.unwrap(); + + let client = reqwest::Client::new(); + let url = format!("{}/v2/checkpoints", test_cluster.rpc_url()); + // Make sure list works with json + let _checkpoints = client + .get(&url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_JSON) + .send() + .await + .unwrap() + .json::>() + .await + .unwrap(); + + // Make sure list works with protobuf + let bytes = client + .get(&url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_PROTOBUF) + .send() + .await + .unwrap() + .bytes() + .await + .unwrap(); + let _checkpoints = sui_rest_api::proto::ListCheckpointResponse::decode(bytes).unwrap(); + + // TODO remove this once the BCS format is no longer accepted and clients have migrated to the + // protobuf version + // Make sure list works with BCS and the old format of only a SignedCheckpoint with no contents + let bytes = client + .get(&url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_BCS) + .send() + .await + .unwrap() + .bytes() + .await + .unwrap(); + let _checkpoints = bcs::from_bytes::>(&bytes).unwrap(); +} + +#[sim_test] +async fn get_checkpoint() { + let test_cluster = 
TestClusterBuilder::new().build().await; + + let _transaction_digest = transfer_coin(&test_cluster.wallet).await; + + let client = Client::new(test_cluster.rpc_url()).unwrap(); + + let latest = client.get_latest_checkpoint().await.unwrap().into_inner(); + let _ = client + .get_checkpoint(latest.checkpoint.sequence_number) + .await + .unwrap(); + + let client = reqwest::Client::new(); + let url = format!( + "{}/v2/checkpoints/{}", + test_cluster.rpc_url(), + latest.checkpoint.sequence_number + ); + // Make sure list works with json + let _checkpoints = client + .get(&url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_JSON) + .send() + .await + .unwrap() + .json::() + .await + .unwrap(); + + // Make sure it works with protobuf + let bytes = client + .get(&url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_PROTOBUF) + .send() + .await + .unwrap() + .bytes() + .await + .unwrap(); + let _checkpoints = sui_rest_api::proto::GetCheckpointResponse::decode(bytes).unwrap(); + + // TODO remove this once the BCS format is no longer accepted and clients have migrated to the + // protobuf version + let bytes = client + .get(&url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_BCS) + .send() + .await + .unwrap() + .bytes() + .await + .unwrap(); + let _checkpoints = bcs::from_bytes::(&bytes).unwrap(); +} + +#[sim_test] +async fn get_full_checkpoint() { + let test_cluster = TestClusterBuilder::new().build().await; + + let _transaction_digest = transfer_coin(&test_cluster.wallet).await; + + let client = Client::new(test_cluster.rpc_url()).unwrap(); + let core_client = CoreClient::new(test_cluster.rpc_url()); + + let latest = client.get_latest_checkpoint().await.unwrap().into_inner(); + let _ = client + .get_full_checkpoint(latest.checkpoint.sequence_number) + .await + .unwrap(); + let _ = core_client + .get_full_checkpoint(latest.checkpoint.sequence_number) + .await + .unwrap(); + + let client = reqwest::Client::new(); + let url = format!( + "{}/v2/checkpoints/{}/full", + test_cluster.rpc_url(), + latest.checkpoint.sequence_number + ); + // Make sure it works with protobuf + let bytes = client + .get(&url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_PROTOBUF) + .send() + .await + .unwrap() + .bytes() + .await + .unwrap(); + let _checkpoints = sui_rest_api::proto::FullCheckpoint::decode(bytes).unwrap(); + + // TODO remove this once the BCS format is no longer accepted and clients have migrated to the + // protobuf version + let bytes = client + .get(&url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_BCS) + .send() + .await + .unwrap() + .bytes() + .await + .unwrap(); + let _checkpoints = + bcs::from_bytes::(&bytes).unwrap(); +} diff --git a/crates/sui-e2e-tests/tests/rest/committee.rs b/crates/sui-e2e-tests/tests/rest/committee.rs new file mode 100644 index 0000000000000..6e4b2227483fe --- /dev/null +++ b/crates/sui-e2e-tests/tests/rest/committee.rs @@ -0,0 +1,65 @@ +// Copyright (c) Mysten Labs, Inc. 
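Note on the deepbook-indexer routes added earlier in this diff: the volume endpoints now return a JSON object keyed by pool id rather than a single number, and the balance endpoints return an asset-keyed map. A minimal client sketch under assumed conditions (the base URL/port and the pool/manager ids are placeholders, not defined by this diff; requires `reqwest` with its `json` feature plus `tokio`):

```rust
use std::collections::HashMap;

// Placeholder: point this at a running sui-deepbook-indexer HTTP server.
const BASE: &str = "http://127.0.0.1:9008";

#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    // 24h volume per pool; pool ids are passed comma-separated in the path.
    let volumes: HashMap<String, u64> =
        reqwest::get(format!("{BASE}/get_24hr_volume/0xPOOL_A,0xPOOL_B"))
            .await?
            .json()
            .await?;
    println!("24h volume by pool: {volumes:?}");

    // Net balance per asset held by one balance manager.
    let balances: HashMap<String, i64> =
        reqwest::get(format!("{BASE}/get_manager_balance/0xMANAGER"))
            .await?
            .json()
            .await?;
    println!("manager balances: {balances:?}");
    Ok(())
}
```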
+// SPDX-License-Identifier: Apache-2.0 + +use prost::Message; +use sui_macros::sim_test; +use sui_rest_api::client::sdk::Client; +use sui_sdk_types::types::ValidatorCommittee; +use test_cluster::TestClusterBuilder; + +#[sim_test] +async fn get_committee() { + let test_cluster = TestClusterBuilder::new().build().await; + + let client = Client::new(test_cluster.rpc_url()).unwrap(); + + let _committee = client.get_committee(0).await.unwrap(); + let _committee = client.get_current_committee().await.unwrap(); + + async fn raw_request(url: &str) { + let client = reqwest::Client::new(); + + // Make sure list works with json + let _object = client + .get(url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_JSON) + .send() + .await + .unwrap() + .json::() + .await + .unwrap(); + + // Make sure it works with protobuf + let bytes = client + .get(url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_PROTOBUF) + .send() + .await + .unwrap() + .bytes() + .await + .unwrap(); + let _object = sui_rest_api::proto::ValidatorCommittee::decode(bytes).unwrap(); + + // TODO remove this once the BCS format is no longer accepted and clients have migrated to the + // protobuf version + let bytes = client + .get(url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_BCS) + .send() + .await + .unwrap() + .bytes() + .await + .unwrap(); + let _object = bcs::from_bytes::(&bytes).unwrap(); + } + + let url = format!("{}/v2/system/committee", test_cluster.rpc_url(),); + + raw_request(&url).await; + + let url = format!("{}/v2/system/committee/0", test_cluster.rpc_url()); + raw_request(&url).await; +} diff --git a/crates/sui-e2e-tests/tests/rest.rs b/crates/sui-e2e-tests/tests/rest/execute.rs similarity index 100% rename from crates/sui-e2e-tests/tests/rest.rs rename to crates/sui-e2e-tests/tests/rest/execute.rs diff --git a/crates/sui-e2e-tests/tests/rest/main.rs b/crates/sui-e2e-tests/tests/rest/main.rs new file mode 100644 index 0000000000000..da5c338f13c82 --- /dev/null +++ b/crates/sui-e2e-tests/tests/rest/main.rs @@ -0,0 +1,27 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +mod checkpoints; +mod committee; +mod execute; +mod objects; +mod resolve; +mod transactions; + +async fn transfer_coin( + context: &sui_sdk::wallet_context::WalletContext, +) -> sui_sdk_types::types::TransactionDigest { + let gas_price = context.get_reference_gas_price().await.unwrap(); + let accounts_and_objs = context.get_all_accounts_and_gas_objects().await.unwrap(); + let sender = accounts_and_objs[0].0; + let receiver = accounts_and_objs[1].0; + let gas_object = accounts_and_objs[0].1[0]; + let object_to_send = accounts_and_objs[0].1[1]; + let txn = context.sign_transaction( + &sui_test_transaction_builder::TestTransactionBuilder::new(sender, gas_object, gas_price) + .transfer(object_to_send, receiver) + .build(), + ); + let resp = context.execute_transaction_must_succeed(txn).await; + resp.digest.into() +} diff --git a/crates/sui-e2e-tests/tests/rest/objects.rs b/crates/sui-e2e-tests/tests/rest/objects.rs new file mode 100644 index 0000000000000..afbcddd1f0fae --- /dev/null +++ b/crates/sui-e2e-tests/tests/rest/objects.rs @@ -0,0 +1,79 @@ +// Copyright (c) Mysten Labs, Inc. 
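Note: every new REST e2e test in this diff exercises the same three `Accept` headers (JSON, protobuf, BCS) against a route. A sketch of that round-trip factored into one helper, assuming only the `sui_rest_api` media-type constants already used in these tests and a node URL from the test cluster:

```rust
/// Fetch `path` from a running node's REST API with an explicit Accept header
/// and return the raw body; decoding (serde_json / prost / bcs) is left to the caller.
async fn fetch_as(base_url: &str, path: &str, accept: &str) -> Vec<u8> {
    reqwest::Client::new()
        .get(format!("{base_url}{path}"))
        .header(reqwest::header::ACCEPT, accept)
        .send()
        .await
        .unwrap()
        .bytes()
        .await
        .unwrap()
        .to_vec()
}

// Usage inside a test, mirroring raw_request() above:
// let json_body  = fetch_as(test_cluster.rpc_url(), "/v2/system/committee", sui_rest_api::APPLICATION_JSON).await;
// let proto_body = fetch_as(test_cluster.rpc_url(), "/v2/system/committee", sui_rest_api::APPLICATION_PROTOBUF).await;
// let bcs_body   = fetch_as(test_cluster.rpc_url(), "/v2/system/committee", sui_rest_api::APPLICATION_BCS).await;
```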
+// SPDX-License-Identifier: Apache-2.0 + +use prost::Message; +use sui_macros::sim_test; +use sui_rest_api::client::sdk::Client; +use sui_rest_api::client::Client as CoreClient; +use sui_rest_api::ObjectResponse; +use sui_sdk_types::types::Object; +use test_cluster::TestClusterBuilder; + +#[sim_test] +async fn get_object() { + let test_cluster = TestClusterBuilder::new().build().await; + + let client = Client::new(test_cluster.rpc_url()).unwrap(); + let core_client = CoreClient::new(test_cluster.rpc_url()); + + let _object = client.get_object("0x5".parse().unwrap()).await.unwrap(); + let _object = core_client + .get_object("0x5".parse().unwrap()) + .await + .unwrap(); + + let _object = client + .get_object_with_version("0x5".parse().unwrap(), 1) + .await + .unwrap(); + let _object = core_client + .get_object_with_version("0x5".parse().unwrap(), 1.into()) + .await + .unwrap(); + + async fn raw_request(url: &str) { + let client = reqwest::Client::new(); + + // Make sure list works with json + let _object = client + .get(url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_JSON) + .send() + .await + .unwrap() + .json::() + .await + .unwrap(); + + // Make sure it works with protobuf + let bytes = client + .get(url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_PROTOBUF) + .send() + .await + .unwrap() + .bytes() + .await + .unwrap(); + let _object = sui_rest_api::proto::GetObjectResponse::decode(bytes).unwrap(); + + // TODO remove this once the BCS format is no longer accepted and clients have migrated to the + // protobuf version + let bytes = client + .get(url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_BCS) + .send() + .await + .unwrap() + .bytes() + .await + .unwrap(); + let _object = bcs::from_bytes::(&bytes).unwrap(); + } + + let url = format!("{}/v2/objects/0x5", test_cluster.rpc_url()); + raw_request(&url).await; + + let url = format!("{}/v2/objects/0x5/version/1", test_cluster.rpc_url()); + raw_request(&url).await; +} diff --git a/crates/sui-e2e-tests/tests/rest/resolve.rs b/crates/sui-e2e-tests/tests/rest/resolve.rs new file mode 100644 index 0000000000000..482b022e52a6e --- /dev/null +++ b/crates/sui-e2e-tests/tests/rest/resolve.rs @@ -0,0 +1,437 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use shared_crypto::intent::Intent; +use sui_keys::keystore::AccountKeystore; +use sui_macros::sim_test; +use sui_rest_api::client::reqwest::StatusCode; +use sui_rest_api::transactions::ResolveTransactionQueryParameters; +use sui_rest_api::Client; +use sui_rest_api::ExecuteTransactionQueryParameters; +use sui_sdk_types::types::Argument; +use sui_sdk_types::types::Command; +use sui_sdk_types::types::TransactionExpiration; +use sui_sdk_types::types::UnresolvedGasPayment; +use sui_sdk_types::types::UnresolvedInputArgument; +use sui_sdk_types::types::UnresolvedProgrammableTransaction; +use sui_sdk_types::types::UnresolvedTransaction; +use sui_sdk_types::types::UnresolvedValue; +use sui_types::base_types::SuiAddress; +use sui_types::effects::TransactionEffectsAPI; +use test_cluster::TestClusterBuilder; + +#[sim_test] +async fn resolve_transaction_simple_transfer() { + let test_cluster = TestClusterBuilder::new().build().await; + + let client = Client::new(test_cluster.rpc_url()); + let recipient = SuiAddress::random_for_testing_only(); + + let (sender, mut gas) = test_cluster.wallet.get_one_account().await.unwrap(); + gas.sort_by_key(|object_ref| object_ref.0); + let obj_to_send = gas.first().unwrap().0; + + let unresolved_transaction = UnresolvedTransaction { + ptb: UnresolvedProgrammableTransaction { + inputs: vec![ + UnresolvedInputArgument { + object_id: Some(obj_to_send.into()), + ..Default::default() + }, + UnresolvedInputArgument { + value: Some(UnresolvedValue::String(recipient.to_string())), + ..Default::default() + }, + ], + commands: vec![Command::TransferObjects( + sui_sdk_types::types::TransferObjects { + objects: vec![Argument::Input(0)], + address: Argument::Input(1), + }, + )], + }, + sender: sender.into(), + gas_payment: None, + expiration: TransactionExpiration::None, + }; + + let resolved = client + .inner() + .resolve_transaction_with_parameters( + &unresolved_transaction, + &ResolveTransactionQueryParameters { + simulate: true, + ..Default::default() + }, + ) + .await + .unwrap() + .into_inner(); + + let signed_transaction = test_cluster + .wallet + .sign_transaction(&resolved.transaction.try_into().unwrap()); + let effects = client + .execute_transaction( + &ExecuteTransactionQueryParameters::default(), + &signed_transaction, + ) + .await + .unwrap() + .effects; + + assert!(effects.status().is_ok()); + assert_eq!( + resolved.simulation.unwrap().effects, + effects.try_into().unwrap() + ); +} + +#[sim_test] +async fn resolve_transaction_transfer_with_sponsor() { + let test_cluster = TestClusterBuilder::new().build().await; + + let client = Client::new(test_cluster.rpc_url()); + let recipient = SuiAddress::random_for_testing_only(); + + let (sender, gas) = test_cluster.wallet.get_one_account().await.unwrap(); + let obj_to_send = gas.first().unwrap().0; + let sponsor = test_cluster.wallet.get_addresses()[1]; + + let unresolved_transaction = UnresolvedTransaction { + ptb: UnresolvedProgrammableTransaction { + inputs: vec![ + UnresolvedInputArgument { + object_id: Some(obj_to_send.into()), + ..Default::default() + }, + UnresolvedInputArgument { + value: Some(UnresolvedValue::String(recipient.to_string())), + ..Default::default() + }, + ], + commands: vec![Command::TransferObjects( + sui_sdk_types::types::TransferObjects { + objects: vec![Argument::Input(0)], + address: Argument::Input(1), + }, + )], + }, + sender: sender.into(), + gas_payment: Some(UnresolvedGasPayment { + objects: vec![], + owner: sponsor.into(), + price: None, + 
budget: None, + }), + expiration: TransactionExpiration::None, + }; + + let resolved = client + .inner() + .resolve_transaction_with_parameters( + &unresolved_transaction, + &ResolveTransactionQueryParameters { + simulate: true, + ..Default::default() + }, + ) + .await + .unwrap() + .into_inner(); + + let transaction_data = resolved.transaction.clone().try_into().unwrap(); + let sender_sig = test_cluster + .wallet + .config + .keystore + .sign_secure(&sender, &transaction_data, Intent::sui_transaction()) + .unwrap(); + let sponsor_sig = test_cluster + .wallet + .config + .keystore + .sign_secure(&sponsor, &transaction_data, Intent::sui_transaction()) + .unwrap(); + + let signed_transaction = sui_types::transaction::Transaction::from_data( + transaction_data, + vec![sender_sig, sponsor_sig], + ); + let effects = client + .execute_transaction( + &ExecuteTransactionQueryParameters::default(), + &signed_transaction, + ) + .await + .unwrap() + .effects; + + assert!(effects.status().is_ok()); + assert_eq!( + resolved.simulation.unwrap().effects, + effects.try_into().unwrap() + ); +} + +#[sim_test] +async fn resolve_transaction_borrowed_shared_object() { + let test_cluster = TestClusterBuilder::new().build().await; + + let client = Client::new(test_cluster.rpc_url()); + + let sender = test_cluster.wallet.get_addresses()[0]; + + let unresolved_transaction = UnresolvedTransaction { + ptb: UnresolvedProgrammableTransaction { + inputs: vec![UnresolvedInputArgument { + object_id: Some("0x6".parse().unwrap()), + ..Default::default() + }], + commands: vec![Command::MoveCall(sui_sdk_types::types::MoveCall { + package: "0x2".parse().unwrap(), + module: "clock".parse().unwrap(), + function: "timestamp_ms".parse().unwrap(), + type_arguments: vec![], + arguments: vec![Argument::Input(0)], + })], + }, + sender: sender.into(), + gas_payment: None, + expiration: TransactionExpiration::None, + }; + + let resolved = client + .inner() + .resolve_transaction_with_parameters( + &unresolved_transaction, + &ResolveTransactionQueryParameters { + simulate: true, + ..Default::default() + }, + ) + .await + .unwrap() + .into_inner(); + + let signed_transaction = test_cluster + .wallet + .sign_transaction(&resolved.transaction.try_into().unwrap()); + let effects = client + .execute_transaction( + &ExecuteTransactionQueryParameters::default(), + &signed_transaction, + ) + .await + .unwrap() + .effects; + + assert!(effects.status().is_ok()); +} + +#[sim_test] +async fn resolve_transaction_mutable_shared_object() { + let test_cluster = TestClusterBuilder::new().build().await; + + let client = Client::new(test_cluster.rpc_url()); + + let (sender, mut gas) = test_cluster.wallet.get_one_account().await.unwrap(); + gas.sort_by_key(|object_ref| object_ref.0); + let obj_to_stake = gas.first().unwrap().0; + let validator_address = client + .inner() + .get_system_state_summary() + .await + .unwrap() + .inner() + .active_validators + .first() + .unwrap() + .address; + + let unresolved_transaction = UnresolvedTransaction { + ptb: UnresolvedProgrammableTransaction { + inputs: vec![ + UnresolvedInputArgument { + object_id: Some("0x5".parse().unwrap()), + ..Default::default() + }, + UnresolvedInputArgument { + object_id: Some(obj_to_stake.into()), + ..Default::default() + }, + UnresolvedInputArgument { + value: Some(UnresolvedValue::String(validator_address.to_string())), + ..Default::default() + }, + ], + commands: vec![Command::MoveCall(sui_sdk_types::types::MoveCall { + package: "0x3".parse().unwrap(), + module: 
"sui_system".parse().unwrap(), + function: "request_add_stake".parse().unwrap(), + type_arguments: vec![], + arguments: vec![Argument::Input(0), Argument::Input(1), Argument::Input(2)], + })], + }, + sender: sender.into(), + gas_payment: None, + expiration: TransactionExpiration::None, + }; + + let resolved = client + .inner() + .resolve_transaction_with_parameters( + &unresolved_transaction, + &ResolveTransactionQueryParameters { + simulate: true, + ..Default::default() + }, + ) + .await + .unwrap() + .into_inner(); + + let signed_transaction = test_cluster + .wallet + .sign_transaction(&resolved.transaction.try_into().unwrap()); + let effects = client + .execute_transaction( + &ExecuteTransactionQueryParameters::default(), + &signed_transaction, + ) + .await + .unwrap() + .effects; + + assert!(effects.status().is_ok()); + assert_eq!( + resolved.simulation.unwrap().effects, + effects.try_into().unwrap() + ); +} + +#[sim_test] +async fn resolve_transaction_insufficient_gas() { + let test_cluster = TestClusterBuilder::new().build().await; + let client = Client::new(test_cluster.rpc_url()); + + // Test the case where we don't have enough coins/gas for the required budget + let unresolved_transaction = UnresolvedTransaction { + ptb: UnresolvedProgrammableTransaction { + inputs: vec![UnresolvedInputArgument { + object_id: Some("0x6".parse().unwrap()), + ..Default::default() + }], + commands: vec![Command::MoveCall(sui_sdk_types::types::MoveCall { + package: "0x2".parse().unwrap(), + module: "clock".parse().unwrap(), + function: "timestamp_ms".parse().unwrap(), + type_arguments: vec![], + arguments: vec![Argument::Input(0)], + })], + }, + sender: SuiAddress::random_for_testing_only().into(), // random account with no gas + gas_payment: None, + expiration: TransactionExpiration::None, + }; + + let error = client + .inner() + .resolve_transaction(&unresolved_transaction) + .await + .unwrap_err(); + + assert_eq!(error.status(), Some(StatusCode::BAD_REQUEST)); + assert_contains( + error.message().unwrap_or_default(), + "unable to select sufficient gas", + ); +} + +fn assert_contains(haystack: &str, needle: &str) { + if !haystack.contains(needle) { + panic!("{haystack:?} does not contain {needle:?}"); + } +} + +#[sim_test] +async fn resolve_transaction_with_raw_json() { + let test_cluster = TestClusterBuilder::new().build().await; + + let client = Client::new(test_cluster.rpc_url()); + let recipient = SuiAddress::random_for_testing_only(); + + let (sender, mut gas) = test_cluster.wallet.get_one_account().await.unwrap(); + gas.sort_by_key(|object_ref| object_ref.0); + let obj_to_send = gas.first().unwrap().0; + + let unresolved_transaction = serde_json::json!({ + "inputs": [ + { + "object_id": obj_to_send + }, + { + "value": 1 + }, + { + "value": recipient + } + ], + + "commands": [ + { + "command": "split_coins", + "coin": { "input": 0 }, + "amounts": [ + { + "input": 1, + }, + { + "input": 1, + } + ] + }, + { + "command": "transfer_objects", + "objects": [ + { "result": [0, 1] }, + { "result": [0, 0] } + ], + "address": { "input": 2 } + } + ], + + "sender": sender + }); + + let resolved = client + .inner() + .resolve_transaction_with_parameters( + &serde_json::from_value(unresolved_transaction).unwrap(), + &ResolveTransactionQueryParameters { + simulate: true, + ..Default::default() + }, + ) + .await + .unwrap() + .into_inner(); + + let signed_transaction = test_cluster + .wallet + .sign_transaction(&resolved.transaction.try_into().unwrap()); + let effects = client + .execute_transaction( + 
&ExecuteTransactionQueryParameters::default(), + &signed_transaction, + ) + .await + .unwrap() + .effects; + + assert!(effects.status().is_ok(), "{:?}", effects.status()); + assert_eq!( + resolved.simulation.unwrap().effects, + effects.try_into().unwrap() + ); +} diff --git a/crates/sui-e2e-tests/tests/rest/transactions.rs b/crates/sui-e2e-tests/tests/rest/transactions.rs new file mode 100644 index 0000000000000..13b2d9925ff73 --- /dev/null +++ b/crates/sui-e2e-tests/tests/rest/transactions.rs @@ -0,0 +1,118 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use prost::Message; +use sui_macros::sim_test; +use sui_rest_api::client::sdk::Client; +use sui_rest_api::transactions::{ListTransactionsQueryParameters, TransactionResponse}; +use test_cluster::TestClusterBuilder; + +use crate::transfer_coin; + +#[sim_test] +async fn get_transaction() { + let test_cluster = TestClusterBuilder::new().build().await; + + let transaction_digest = transfer_coin(&test_cluster.wallet).await; + + let client = Client::new(test_cluster.rpc_url()).unwrap(); + + let _transaction = client.get_transaction(&transaction_digest).await.unwrap(); + + let client = reqwest::Client::new(); + let url = format!( + "{}/v2/transactions/{}", + test_cluster.rpc_url(), + transaction_digest, + ); + // Make sure it works with json + let _transaction = client + .get(&url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_JSON) + .send() + .await + .unwrap() + .json::() + .await + .unwrap(); + + // Make sure it works with protobuf + let bytes = client + .get(&url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_PROTOBUF) + .send() + .await + .unwrap() + .bytes() + .await + .unwrap(); + let _transaction = sui_rest_api::proto::GetTransactionResponse::decode(bytes).unwrap(); + + // TODO remove this once the BCS format is no longer accepted and clients have migrated to the + // protobuf version + let bytes = client + .get(&url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_BCS) + .send() + .await + .unwrap() + .bytes() + .await + .unwrap(); + let _transaction = bcs::from_bytes::(&bytes).unwrap(); +} + +#[sim_test] +async fn list_checkpoint() { + let test_cluster = TestClusterBuilder::new().build().await; + + let _transaction_digest = transfer_coin(&test_cluster.wallet).await; + + let client = Client::new(test_cluster.rpc_url()).unwrap(); + + let transactions = client + .list_transactions(&ListTransactionsQueryParameters::default()) + .await + .unwrap() + .into_inner(); + + assert!(!transactions.is_empty()); + + let client = reqwest::Client::new(); + let url = format!("{}/v2/transactions", test_cluster.rpc_url()); + // Make sure it works with json + let _transactions = client + .get(&url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_JSON) + .send() + .await + .unwrap() + .json::>() + .await + .unwrap(); + + // Make sure it works with protobuf + let bytes = client + .get(&url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_PROTOBUF) + .send() + .await + .unwrap() + .bytes() + .await + .unwrap(); + let _transactions = sui_rest_api::proto::ListTransactionsResponse::decode(bytes).unwrap(); + + // TODO remove this once the BCS format is no longer accepted and clients have migrated to the + // protobuf version + let bytes = client + .get(&url) + .header(reqwest::header::ACCEPT, sui_rest_api::APPLICATION_BCS) + .send() + .await + .unwrap() + .bytes() + .await + .unwrap(); + let _transactions = bcs::from_bytes::>(&bytes).unwrap(); +} diff --git 
a/crates/sui-e2e-tests/tests/traffic_control_tests.rs b/crates/sui-e2e-tests/tests/traffic_control_tests.rs index 46d1ab050245d..27985b8b1d589 100644 --- a/crates/sui-e2e-tests/tests/traffic_control_tests.rs +++ b/crates/sui-e2e-tests/tests/traffic_control_tests.rs @@ -608,6 +608,63 @@ async fn test_fullnode_traffic_control_error_blocked() -> Result<(), anyhow::Err panic!("Expected spam policy to trigger within {txn_count} requests"); } +#[tokio::test] +async fn test_fullnode_traffic_control_error_blocked() -> Result<(), anyhow::Error> { + let txn_count = 5; + let policy_config = PolicyConfig { + connection_blocklist_ttl_sec: 3, + error_policy_type: PolicyType::TestNConnIP(txn_count - 1), + dry_run: false, + ..Default::default() + }; + let test_cluster = TestClusterBuilder::new() + .with_fullnode_policy_config(Some(policy_config)) + .build() + .await; + + let jsonrpc_client = &test_cluster.fullnode_handle.rpc_client; + let context = test_cluster.wallet; + + let mut txns = batch_make_transfer_transactions(&context, txn_count as usize).await; + assert!( + txns.len() >= txn_count as usize, + "Expect at least {} txns. Do we generate enough gas objects during genesis?", + txn_count, + ); + + // it should take no more than 4 requests to be added to the blocklist + for _ in 0..txn_count { + let txn = txns.swap_remove(0); + let tx_digest = txn.digest(); + let (tx_bytes, _signatures) = txn.to_tx_bytes_and_signatures(); + // create invalid (empty) client signature + let signatures: Vec = vec![]; + let params = rpc_params![ + tx_bytes, + signatures, + SuiTransactionBlockResponseOptions::new(), + ExecuteTransactionRequestType::WaitForLocalExecution + ]; + let response: RpcResult = jsonrpc_client + .request("sui_executeTransactionBlock", params.clone()) + .await; + if let Err(err) = response { + if err.to_string().contains("Too many requests") { + return Ok(()); + } + } else { + let SuiTransactionBlockResponse { + digest, + confirmed_local_execution, + .. 
+ } = response.unwrap(); + assert_eq!(&digest, tx_digest); + assert!(confirmed_local_execution.unwrap()); + } + } + panic!("Expected spam policy to trigger within {txn_count} requests"); +} + #[tokio::test] async fn test_validator_traffic_control_error_delegated() -> Result<(), anyhow::Error> { let n = 5; diff --git a/crates/sui-faucet/Cargo.toml b/crates/sui-faucet/Cargo.toml index fee9e1d93af61..fea887f1371cc 100644 --- a/crates/sui-faucet/Cargo.toml +++ b/crates/sui-faucet/Cargo.toml @@ -10,6 +10,7 @@ publish = false anyhow.workspace = true async-trait.workspace = true axum.workspace = true +bin-version.workspace = true clap.workspace = true thiserror.workspace = true tokio = { workspace = true, features = ["full"] } diff --git a/crates/sui-faucet/src/main.rs b/crates/sui-faucet/src/main.rs index 89d49cf34c6a9..210b5ddb07d1f 100644 --- a/crates/sui-faucet/src/main.rs +++ b/crates/sui-faucet/src/main.rs @@ -12,6 +12,9 @@ use tracing::info; const CONCURRENCY_LIMIT: usize = 30; const PROM_PORT_ADDR: &str = "0.0.0.0:9184"; +// Define the `GIT_REVISION` and `VERSION` consts +bin_version::bin_version!(); + #[tokio::main] async fn main() -> Result<(), anyhow::Error> { // initialize tracing @@ -38,6 +41,10 @@ async fn main() -> Result<(), anyhow::Error> { info!("Starting Prometheus HTTP endpoint at {}", prom_binding); let registry_service = mysten_metrics::start_prometheus_server(prom_binding); let prometheus_registry = registry_service.default_registry(); + prometheus_registry + .register(mysten_metrics::uptime_metric("faucet", VERSION, "unknown")) + .unwrap(); + let app_state = Arc::new(AppState { faucet: SimpleFaucet::new( context, diff --git a/crates/sui-faucet/src/metrics.rs b/crates/sui-faucet/src/metrics.rs index 9571701835e20..b28902a0854a7 100644 --- a/crates/sui-faucet/src/metrics.rs +++ b/crates/sui-faucet/src/metrics.rs @@ -135,47 +135,83 @@ impl FaucetMetrics { impl MetricsCallbackProvider for RequestMetrics { fn on_request(&self, path: String) { + let normalized_path = normalize_path(&path); + if !is_path_tracked(normalized_path) { + return; + } + self.total_requests_received - .with_label_values(&[path.as_str()]) + .with_label_values(&[normalized_path]) .inc(); } fn on_response(&self, path: String, latency: Duration, _status: u16, grpc_status_code: Code) { + let normalized_path = normalize_path(&path); + if !is_path_tracked(normalized_path) { + return; + } + self.process_latency - .with_label_values(&[path.as_str()]) + .with_label_values(&[normalized_path]) .observe(latency.as_secs_f64()); match grpc_status_code { Code::Ok => { self.total_requests_succeeded - .with_label_values(&[path.as_str()]) + .with_label_values(&[normalized_path]) .inc(); } Code::Unavailable | Code::ResourceExhausted => { self.total_requests_shed - .with_label_values(&[path.as_str()]) + .with_label_values(&[normalized_path]) .inc(); } _ => { self.total_requests_failed - .with_label_values(&[path.as_str()]) + .with_label_values(&[normalized_path]) .inc(); } } } fn on_start(&self, path: &str) { + let normalized_path = normalize_path(path); + if !is_path_tracked(normalized_path) { + return; + } + self.current_requests_in_flight - .with_label_values(&[path]) + .with_label_values(&[normalized_path]) .inc(); } fn on_drop(&self, path: &str) { + let normalized_path = normalize_path(path); + if !is_path_tracked(normalized_path) { + return; + } + self.total_requests_disconnected - .with_label_values(&[path]) + .with_label_values(&[normalized_path]) .inc(); self.current_requests_in_flight - .with_label_values(&[path]) 
+ .with_label_values(&[normalized_path]) .dec(); } } + +/// Normalizes the given path to handle variations across different deployments. +/// Specifically, it trims dynamic segments from the `/v1/status/` endpoint. +pub fn normalize_path(path: &str) -> &str { + if path.starts_with("/v1/status/") { + return "/v1/status"; + } + + path +} + +/// Determines whether the given path should be tracked for metrics collection. +/// Only specified paths relevant to monitoring are included. +pub fn is_path_tracked(path: &str) -> bool { + matches!(path, "/v1/gas" | "/gas" | "/v1/status") +} diff --git a/crates/sui-faucet/src/metrics_layer.rs b/crates/sui-faucet/src/metrics_layer.rs index 7e7dfe3569af9..0b8106603d359 100644 --- a/crates/sui-faucet/src/metrics_layer.rs +++ b/crates/sui-faucet/src/metrics_layer.rs @@ -13,7 +13,7 @@ use prometheus::{HistogramTimer, Registry}; use tower::{load_shed::error::Overloaded, BoxError, Layer, Service, ServiceExt}; use tracing::{error, info, warn}; -use crate::metrics::RequestMetrics; +use crate::metrics::{is_path_tracked, normalize_path, RequestMetrics}; use http::Request; /// Tower Layer for tracking metrics in Prometheus related to number, success-rate and latency of @@ -81,16 +81,19 @@ where let future = Box::pin(async move { let resp = inner.oneshot(req).await; - match &resp { - Ok(resp) if !resp.status().is_success() => { - metrics.failed(None, Some(resp.status())) - } - Ok(_) => metrics.succeeded(), - Err(err) => { - if err.is::<Overloaded>() { - metrics.shed(); - } else { - metrics.failed(Some(err), None); + + if let Some(metrics) = metrics { + match &resp { + Ok(resp) if !resp.status().is_success() => { + metrics.failed(None, Some(resp.status())) + } + Ok(_) => metrics.succeeded(), + Err(err) => { + if err.is::<Overloaded>() { + metrics.shed(); + } else { + metrics.failed(Some(err), None); + } } } } @@ -110,25 +113,31 @@ impl Future for RequestMetricsFuture { } impl MetricsGuard { - fn new(metrics: Arc<RequestMetrics>, path: &str) -> Self { + fn new(metrics: Arc<RequestMetrics>, path: &str) -> Option<Self> { + let normalized_path = normalize_path(path); + + if !is_path_tracked(normalized_path) { + return None; + } + metrics .total_requests_received - .with_label_values(&[path]) + .with_label_values(&[normalized_path]) .inc(); metrics .current_requests_in_flight - .with_label_values(&[path]) + .with_label_values(&[normalized_path]) .inc(); - MetricsGuard { + Some(MetricsGuard { timer: Some( metrics .process_latency - .with_label_values(&[path]) + .with_label_values(&[normalized_path]) .start_timer(), ), metrics, - path: path.to_string(), - } + path: normalized_path.to_string(), + }) } fn succeeded(mut self) { @@ -183,22 +192,28 @@ impl MetricsGuard { impl Drop for MetricsGuard { fn drop(&mut self) { - self.metrics + if self + .metrics .current_requests_in_flight - .with_label_values(&[&self.path])
- if let Some(timer) = self.timer.take() { - let elapsed = timer.stop_and_record(); + .get_metric_with_label_values(&[&self.path]) + .is_ok() + { self.metrics - .total_requests_disconnected + .current_requests_in_flight .with_label_values(&[&self.path]) - .inc(); - info!( - "Request disconnected for path {} in {:.2}s", - self.path, elapsed - ); + .dec(); + + if let Some(timer) = self.timer.take() { + let elapsed = timer.stop_and_record(); + self.metrics + .total_requests_disconnected + .with_label_values(&[&self.path]) + .inc(); + info!( + "Request disconnected for path {} in {:.2}s", + self.path, elapsed + ); + } } } } diff --git a/crates/sui-field-count-derive/Cargo.toml b/crates/sui-field-count-derive/Cargo.toml new file mode 100644 index 0000000000000..40f188937500f --- /dev/null +++ b/crates/sui-field-count-derive/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "sui-field-count-derive" +version.workspace = true +authors = ["Mysten Labs "] +license = "Apache-2.0" +publish = false +edition = "2021" + +[lib] +proc-macro = true + +[dependencies] +syn.workspace = true +quote.workspace = true diff --git a/crates/sui-field-count-derive/src/lib.rs b/crates/sui-field-count-derive/src/lib.rs new file mode 100644 index 0000000000000..a34c2ef1d3ff6 --- /dev/null +++ b/crates/sui-field-count-derive/src/lib.rs @@ -0,0 +1,29 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 +use proc_macro::TokenStream; +use quote::quote; +use syn::{parse_macro_input, DeriveInput}; + +#[proc_macro_derive(FieldCount)] +pub fn field_count_derive(input: TokenStream) -> TokenStream { + let input = parse_macro_input!(input as DeriveInput); + let name = input.ident; + let generics = input.generics; + let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); + + let fields_count = if let syn::Data::Struct(data_struct) = input.data { + data_struct.fields.len() + } else { + panic!("FieldCount can only be derived for structs"); + }; + + let expanded = quote! { + impl #impl_generics FieldCount for #name #ty_generics #where_clause { + fn field_count() -> usize { + #fields_count + } + } + }; + + TokenStream::from(expanded) +} diff --git a/crates/sui-field-count-main/Cargo.toml b/crates/sui-field-count-main/Cargo.toml new file mode 100644 index 0000000000000..df609bcd40b0a --- /dev/null +++ b/crates/sui-field-count-main/Cargo.toml @@ -0,0 +1,7 @@ +[package] +name = "sui-field-count-main" +version.workspace = true +authors = ["Mysten Labs "] +license = "Apache-2.0" +publish = false +edition = "2021" diff --git a/crates/sui-field-count-main/src/lib.rs b/crates/sui-field-count-main/src/lib.rs new file mode 100644 index 0000000000000..6476f99af9085 --- /dev/null +++ b/crates/sui-field-count-main/src/lib.rs @@ -0,0 +1,6 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +pub trait FieldCount { + fn field_count() -> usize; +} diff --git a/crates/sui-field-count/Cargo.toml b/crates/sui-field-count/Cargo.toml new file mode 100644 index 0000000000000..5006aa3afb2d9 --- /dev/null +++ b/crates/sui-field-count/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "sui-field-count" +version.workspace = true +authors = ["Mysten Labs "] +license = "Apache-2.0" +publish = false +edition = "2021" + +[dependencies] +sui-field-count-derive.workspace = true +sui-field-count-main.workspace = true diff --git a/crates/sui-field-count/src/lib.rs b/crates/sui-field-count/src/lib.rs new file mode 100644 index 0000000000000..9a71ea4aa83c4 --- /dev/null +++ b/crates/sui-field-count/src/lib.rs @@ -0,0 +1,5 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +pub use sui_field_count_derive::*; +pub use sui_field_count_main::*; diff --git a/crates/sui-framework-snapshot/bytecode_snapshot/66/0x0000000000000000000000000000000000000000000000000000000000000001 b/crates/sui-framework-snapshot/bytecode_snapshot/66/0x0000000000000000000000000000000000000000000000000000000000000001 new file mode 100644 index 0000000000000..b9845e76190ce Binary files /dev/null and b/crates/sui-framework-snapshot/bytecode_snapshot/66/0x0000000000000000000000000000000000000000000000000000000000000001 differ diff --git a/crates/sui-framework-snapshot/bytecode_snapshot/66/0x0000000000000000000000000000000000000000000000000000000000000002 b/crates/sui-framework-snapshot/bytecode_snapshot/66/0x0000000000000000000000000000000000000000000000000000000000000002 new file mode 100644 index 0000000000000..0153f6cc52752 Binary files /dev/null and b/crates/sui-framework-snapshot/bytecode_snapshot/66/0x0000000000000000000000000000000000000000000000000000000000000002 differ diff --git a/crates/sui-framework-snapshot/bytecode_snapshot/66/0x0000000000000000000000000000000000000000000000000000000000000003 b/crates/sui-framework-snapshot/bytecode_snapshot/66/0x0000000000000000000000000000000000000000000000000000000000000003 new file mode 100644 index 0000000000000..1b524192a948f Binary files /dev/null and b/crates/sui-framework-snapshot/bytecode_snapshot/66/0x0000000000000000000000000000000000000000000000000000000000000003 differ diff --git a/crates/sui-framework-snapshot/bytecode_snapshot/66/0x000000000000000000000000000000000000000000000000000000000000000b b/crates/sui-framework-snapshot/bytecode_snapshot/66/0x000000000000000000000000000000000000000000000000000000000000000b new file mode 100644 index 0000000000000..48d06a9c45a89 Binary files /dev/null and b/crates/sui-framework-snapshot/bytecode_snapshot/66/0x000000000000000000000000000000000000000000000000000000000000000b differ diff --git a/crates/sui-framework-snapshot/bytecode_snapshot/66/0x000000000000000000000000000000000000000000000000000000000000dee9 b/crates/sui-framework-snapshot/bytecode_snapshot/66/0x000000000000000000000000000000000000000000000000000000000000dee9 new file mode 100644 index 0000000000000..d6568ff2626fa Binary files /dev/null and b/crates/sui-framework-snapshot/bytecode_snapshot/66/0x000000000000000000000000000000000000000000000000000000000000dee9 differ diff --git a/crates/sui-framework-snapshot/bytecode_snapshot/67/0x0000000000000000000000000000000000000000000000000000000000000001 b/crates/sui-framework-snapshot/bytecode_snapshot/67/0x0000000000000000000000000000000000000000000000000000000000000001 new file mode 100644 index 0000000000000..8b84a69c97c00 Binary files /dev/null and 
b/crates/sui-framework-snapshot/bytecode_snapshot/67/0x0000000000000000000000000000000000000000000000000000000000000001 differ diff --git a/crates/sui-framework-snapshot/bytecode_snapshot/67/0x0000000000000000000000000000000000000000000000000000000000000002 b/crates/sui-framework-snapshot/bytecode_snapshot/67/0x0000000000000000000000000000000000000000000000000000000000000002 new file mode 100644 index 0000000000000..0153f6cc52752 Binary files /dev/null and b/crates/sui-framework-snapshot/bytecode_snapshot/67/0x0000000000000000000000000000000000000000000000000000000000000002 differ diff --git a/crates/sui-framework-snapshot/bytecode_snapshot/67/0x0000000000000000000000000000000000000000000000000000000000000003 b/crates/sui-framework-snapshot/bytecode_snapshot/67/0x0000000000000000000000000000000000000000000000000000000000000003 new file mode 100644 index 0000000000000..1b524192a948f Binary files /dev/null and b/crates/sui-framework-snapshot/bytecode_snapshot/67/0x0000000000000000000000000000000000000000000000000000000000000003 differ diff --git a/crates/sui-framework-snapshot/bytecode_snapshot/67/0x000000000000000000000000000000000000000000000000000000000000000b b/crates/sui-framework-snapshot/bytecode_snapshot/67/0x000000000000000000000000000000000000000000000000000000000000000b new file mode 100644 index 0000000000000..48d06a9c45a89 Binary files /dev/null and b/crates/sui-framework-snapshot/bytecode_snapshot/67/0x000000000000000000000000000000000000000000000000000000000000000b differ diff --git a/crates/sui-framework-snapshot/bytecode_snapshot/67/0x000000000000000000000000000000000000000000000000000000000000dee9 b/crates/sui-framework-snapshot/bytecode_snapshot/67/0x000000000000000000000000000000000000000000000000000000000000dee9 new file mode 100644 index 0000000000000..d6568ff2626fa Binary files /dev/null and b/crates/sui-framework-snapshot/bytecode_snapshot/67/0x000000000000000000000000000000000000000000000000000000000000dee9 differ diff --git a/crates/sui-framework-snapshot/bytecode_snapshot/68/0x0000000000000000000000000000000000000000000000000000000000000001 b/crates/sui-framework-snapshot/bytecode_snapshot/68/0x0000000000000000000000000000000000000000000000000000000000000001 new file mode 100644 index 0000000000000..39348f37acc90 Binary files /dev/null and b/crates/sui-framework-snapshot/bytecode_snapshot/68/0x0000000000000000000000000000000000000000000000000000000000000001 differ diff --git a/crates/sui-framework-snapshot/bytecode_snapshot/68/0x0000000000000000000000000000000000000000000000000000000000000002 b/crates/sui-framework-snapshot/bytecode_snapshot/68/0x0000000000000000000000000000000000000000000000000000000000000002 new file mode 100644 index 0000000000000..ae6e7d936c827 Binary files /dev/null and b/crates/sui-framework-snapshot/bytecode_snapshot/68/0x0000000000000000000000000000000000000000000000000000000000000002 differ diff --git a/crates/sui-framework-snapshot/bytecode_snapshot/68/0x0000000000000000000000000000000000000000000000000000000000000003 b/crates/sui-framework-snapshot/bytecode_snapshot/68/0x0000000000000000000000000000000000000000000000000000000000000003 new file mode 100644 index 0000000000000..bbddd71deebaa Binary files /dev/null and b/crates/sui-framework-snapshot/bytecode_snapshot/68/0x0000000000000000000000000000000000000000000000000000000000000003 differ diff --git a/crates/sui-framework-snapshot/bytecode_snapshot/68/0x000000000000000000000000000000000000000000000000000000000000000b 
b/crates/sui-framework-snapshot/bytecode_snapshot/68/0x000000000000000000000000000000000000000000000000000000000000000b new file mode 100644 index 0000000000000..48d06a9c45a89 Binary files /dev/null and b/crates/sui-framework-snapshot/bytecode_snapshot/68/0x000000000000000000000000000000000000000000000000000000000000000b differ diff --git a/crates/sui-framework-snapshot/bytecode_snapshot/68/0x000000000000000000000000000000000000000000000000000000000000dee9 b/crates/sui-framework-snapshot/bytecode_snapshot/68/0x000000000000000000000000000000000000000000000000000000000000dee9 new file mode 100644 index 0000000000000..d6568ff2626fa Binary files /dev/null and b/crates/sui-framework-snapshot/bytecode_snapshot/68/0x000000000000000000000000000000000000000000000000000000000000dee9 differ diff --git a/crates/sui-framework-snapshot/manifest.json b/crates/sui-framework-snapshot/manifest.json index 9a70b136f33ce..fcb4cfb612836 100644 --- a/crates/sui-framework-snapshot/manifest.json +++ b/crates/sui-framework-snapshot/manifest.json @@ -486,5 +486,35 @@ "0x000000000000000000000000000000000000000000000000000000000000dee9", "0x000000000000000000000000000000000000000000000000000000000000000b" ] + }, + "66": { + "git_revision": "86fa6e86b62a", + "package_ids": [ + "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x0000000000000000000000000000000000000000000000000000000000000002", + "0x0000000000000000000000000000000000000000000000000000000000000003", + "0x000000000000000000000000000000000000000000000000000000000000dee9", + "0x000000000000000000000000000000000000000000000000000000000000000b" + ] + }, + "67": { + "git_revision": "3ada97c109cc", + "package_ids": [ + "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x0000000000000000000000000000000000000000000000000000000000000002", + "0x0000000000000000000000000000000000000000000000000000000000000003", + "0x000000000000000000000000000000000000000000000000000000000000dee9", + "0x000000000000000000000000000000000000000000000000000000000000000b" + ] + }, + "68": { + "git_revision": "ef0d78c638e3", + "package_ids": [ + "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x0000000000000000000000000000000000000000000000000000000000000002", + "0x0000000000000000000000000000000000000000000000000000000000000003", + "0x000000000000000000000000000000000000000000000000000000000000dee9", + "0x000000000000000000000000000000000000000000000000000000000000000b" + ] } } \ No newline at end of file diff --git a/crates/sui-framework/docs/move-stdlib/vector.md b/crates/sui-framework/docs/move-stdlib/vector.md index 0832eebeb7d27..ec746effa9560 100644 --- a/crates/sui-framework/docs/move-stdlib/vector.md +++ b/crates/sui-framework/docs/move-stdlib/vector.md @@ -314,7 +314,7 @@ Pushes all of the elements of the other vector into the lhspublic fun append<Element>(lhs: &mut vector<Element>, mut other: vector<Element>) { other.reverse(); - while (!other.is_empty()) lhs.push_back(other.pop_back()); + while (other.length() != 0) lhs.push_back(other.pop_back()); other.destroy_empty(); } @@ -504,7 +504,7 @@ Aborts if i is out of bounds.
public fun swap_remove<Element>(v: &mut vector<Element>, i: u64): Element {
-    assert!(!v.is_empty(), EINDEX_OUT_OF_BOUNDS);
+    assert!(v.length() != 0, EINDEX_OUT_OF_BOUNDS);
     let last_idx = v.length() - 1;
     v.swap(i, last_idx);
     v.pop_back()
diff --git a/crates/sui-framework/docs/sui-framework/bls12381.md b/crates/sui-framework/docs/sui-framework/bls12381.md
index 5950e23b99f8f..e1bb511793f89 100644
--- a/crates/sui-framework/docs/sui-framework/bls12381.md
+++ b/crates/sui-framework/docs/sui-framework/bls12381.md
@@ -9,6 +9,7 @@ Group operations of BLS12-381.
 -  [Struct `G1`](#0x2_bls12381_G1)
 -  [Struct `G2`](#0x2_bls12381_G2)
 -  [Struct `GT`](#0x2_bls12381_GT)
+-  [Struct `UncompressedG1`](#0x2_bls12381_UncompressedG1)
 -  [Constants](#@Constants_0)
 -  [Function `bls12381_min_sig_verify`](#0x2_bls12381_bls12381_min_sig_verify)
 -  [Function `bls12381_min_pk_verify`](#0x2_bls12381_bls12381_min_pk_verify)
@@ -32,6 +33,7 @@ Group operations of BLS12-381.
 -  [Function `g1_neg`](#0x2_bls12381_g1_neg)
 -  [Function `hash_to_g1`](#0x2_bls12381_hash_to_g1)
 -  [Function `g1_multi_scalar_multiplication`](#0x2_bls12381_g1_multi_scalar_multiplication)
+-  [Function `g1_to_uncompressed_g1`](#0x2_bls12381_g1_to_uncompressed_g1)
 -  [Function `g2_from_bytes`](#0x2_bls12381_g2_from_bytes)
 -  [Function `g2_identity`](#0x2_bls12381_g2_identity)
 -  [Function `g2_generator`](#0x2_bls12381_g2_generator)
@@ -50,6 +52,8 @@ Group operations of BLS12-381.
 -  [Function `gt_div`](#0x2_bls12381_gt_div)
 -  [Function `gt_neg`](#0x2_bls12381_gt_neg)
 -  [Function `pairing`](#0x2_bls12381_pairing)
+-  [Function `uncompressed_g1_to_g1`](#0x2_bls12381_uncompressed_g1_to_g1)
+-  [Function `uncompressed_g1_sum`](#0x2_bls12381_uncompressed_g1_sum)
 
 
 
use 0x2::group_ops;
@@ -149,6 +153,33 @@ Group operations of BLS12-381.
 
 
 
+
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `UncompressedG1` + + + +
struct UncompressedG1
+
+ + +
Fields @@ -278,6 +309,15 @@ Group operations of BLS12-381. + + + + +
const UNCOMPRESSED_G1_TYPE: u8 = 4;
+
+ + + ## Function `bls12381_min_sig_verify` @@ -835,6 +875,31 @@ Aborts with EInputTooLong if the vectors are larger than 32 (may in +
+ + + +## Function `g1_to_uncompressed_g1` + +Convert an Element<G1> to uncompressed form. + + +
public fun g1_to_uncompressed_g1(e: &group_ops::Element<bls12381::G1>): group_ops::Element<bls12381::UncompressedG1>
+
+ + + +
+Implementation + + +
public fun g1_to_uncompressed_g1(e: &Element<G1>): Element<UncompressedG1> {
+    group_ops::convert(G1_TYPE, UNCOMPRESSED_G1_TYPE, e)
+}
+
+ + +
@@ -1280,4 +1345,56 @@ Returns e2 / e1, fails if scalar is zero. + + + + +## Function `uncompressed_g1_to_g1` + +UncompressedG1 group operations /// +Create a Element<G1> from its uncompressed form. + + +
public fun uncompressed_g1_to_g1(e: &group_ops::Element<bls12381::UncompressedG1>): group_ops::Element<bls12381::G1>
+
+ + + +
+Implementation + + +
public fun uncompressed_g1_to_g1(e: &Element<UncompressedG1>): Element<G1> {
+    group_ops::convert(UNCOMPRESSED_G1_TYPE, G1_TYPE, e)
+}
+
+ + + +
+ + + +## Function `uncompressed_g1_sum` + +Compute the sum of a list of uncompressed elements. +This is significantly faster and cheaper than summing the elements. + + +
public fun uncompressed_g1_sum(terms: &vector<group_ops::Element<bls12381::UncompressedG1>>): group_ops::Element<bls12381::UncompressedG1>
+
+ + + +
+Implementation + + +
public fun uncompressed_g1_sum(terms: &vector<Element<UncompressedG1>>): Element<UncompressedG1> {
+    group_ops::sum(UNCOMPRESSED_G1_TYPE, terms)
+}
+
+ + +
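(Editor's note, not part of the patch.) The new `UncompressedG1` API documented above is exercised end-to-end in the `bls12381_tests` changes later in this diff. As a quick orientation, here is a minimal, hypothetical sketch of the intended usage pattern: convert points once, sum them cheaply in uncompressed form, then convert the result back. The `example::uncompressed_sum_demo` module and `sum_points` function are illustrative names only.

```move
module example::uncompressed_sum_demo {
    use sui::bls12381;
    use sui::group_ops;

    /// Sum g^s for each scalar s, doing the additions in uncompressed form.
    public fun sum_points(scalars: vector<u64>): group_ops::Element<bls12381::G1> {
        let mut terms = vector[];
        scalars.do!(|s| {
            let p = bls12381::g1_mul(&bls12381::scalar_from_u64(s), &bls12381::g1_generator());
            // Convert each point once; uncompressed elements are cheaper to add up.
            terms.push_back(bls12381::g1_to_uncompressed_g1(&p));
        });
        // Sum in uncompressed form, then convert the result back to a G1 element.
        bls12381::uncompressed_g1_to_g1(&bls12381::uncompressed_g1_sum(&terms))
    }
}
```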
diff --git a/crates/sui-framework/docs/sui-framework/group_ops.md b/crates/sui-framework/docs/sui-framework/group_ops.md index de2f6a81a5828..84cbd99078404 100644 --- a/crates/sui-framework/docs/sui-framework/group_ops.md +++ b/crates/sui-framework/docs/sui-framework/group_ops.md @@ -17,6 +17,8 @@ Generic Move and native functions for group operations. - [Function `hash_to`](#0x2_group_ops_hash_to) - [Function `multi_scalar_multiplication`](#0x2_group_ops_multi_scalar_multiplication) - [Function `pairing`](#0x2_group_ops_pairing) +- [Function `convert`](#0x2_group_ops_convert) +- [Function `sum`](#0x2_group_ops_sum) - [Function `internal_validate`](#0x2_group_ops_internal_validate) - [Function `internal_add`](#0x2_group_ops_internal_add) - [Function `internal_sub`](#0x2_group_ops_internal_sub) @@ -25,6 +27,8 @@ Generic Move and native functions for group operations. - [Function `internal_hash_to`](#0x2_group_ops_internal_hash_to) - [Function `internal_multi_scalar_mul`](#0x2_group_ops_internal_multi_scalar_mul) - [Function `internal_pairing`](#0x2_group_ops_internal_pairing) +- [Function `internal_convert`](#0x2_group_ops_internal_convert) +- [Function `internal_sum`](#0x2_group_ops_internal_sum) - [Function `set_as_prefix`](#0x2_group_ops_set_as_prefix) @@ -364,6 +368,54 @@ Aborts with EInputTooLo + + + + +## Function `convert` + + + +
public(friend) fun convert<From, To>(from_type_: u8, to_type_: u8, e: &group_ops::Element<From>): group_ops::Element<To>
+
+ + + +
+Implementation + + +
public(package) fun convert<From, To>(from_type_: u8, to_type_: u8, e: &Element<From>): Element<To> {
+    Element<To> { bytes: internal_convert(from_type_, to_type_, &e.bytes) }
+}
+
+ + + +
+ + + +## Function `sum` + + + +
public(friend) fun sum<G>(type_: u8, terms: &vector<group_ops::Element<G>>): group_ops::Element<G>
+
+ + + +
+Implementation + + +
public(package) fun sum<G>(type_: u8, terms: &vector<Element<G>>): Element<G> {
+    Element<G> { bytes: internal_sum(type_, &(*terms).map!(|x| x.bytes)) }
+}
+
+ + +
@@ -544,6 +596,50 @@ Aborts with EInputTooLo + + + + +## Function `internal_convert` + + + +
fun internal_convert(from_type_: u8, to_type_: u8, e: &vector<u8>): vector<u8>
+
+ + + +
+Implementation + + +
native fun internal_convert(from_type_: u8, to_type_: u8, e: &vector<u8>): vector<u8>;
+
+ + + +
+ + + +## Function `internal_sum` + + + +
fun internal_sum(type_: u8, e: &vector<vector<u8>>): vector<u8>
+
+ + + +
+Implementation + + +
native fun internal_sum(type_: u8, e: &vector<vector<u8>>): vector<u8>;
+
+ + +
diff --git a/crates/sui-framework/docs/sui-framework/vec_map.md b/crates/sui-framework/docs/sui-framework/vec_map.md index 96b7c340f3f7f..ec5749359e1c4 100644 --- a/crates/sui-framework/docs/sui-framework/vec_map.md +++ b/crates/sui-framework/docs/sui-framework/vec_map.md @@ -262,7 +262,7 @@ Pop the most recently inserted entry from the map. Aborts if the map is empty.
public fun pop<K: copy, V>(self: &mut VecMap<K, V>): (K, V) {
-    assert!(!self.contents.is_empty(), EMapEmpty);
+    assert!(self.contents.length() != 0, EMapEmpty);
     let Entry { key, value } = self.contents.pop_back();
     (key, value)
 }
@@ -526,7 +526,7 @@ and are *not* sorted.
     keys.reverse();
     values.reverse();
     let mut map = empty();
-    while (!keys.is_empty()) map.insert(keys.pop_back(), values.pop_back());
+    while (keys.length() != 0) map.insert(keys.pop_back(), values.pop_back());
     keys.destroy_empty();
     values.destroy_empty();
     map
diff --git a/crates/sui-framework/docs/sui-framework/vec_set.md b/crates/sui-framework/docs/sui-framework/vec_set.md
index 25007f9b3a490..c6ee170d15863 100644
--- a/crates/sui-framework/docs/sui-framework/vec_set.md
+++ b/crates/sui-framework/docs/sui-framework/vec_set.md
@@ -309,7 +309,7 @@ and are *not* sorted.
 
public fun from_keys<K: copy + drop>(mut keys: vector<K>): VecSet<K> {
     keys.reverse();
     let mut set = empty();
-    while (!keys.is_empty()) set.insert(keys.pop_back());
+    while (keys.length() != 0) set.insert(keys.pop_back());
     set
 }
 
diff --git a/crates/sui-framework/docs/sui-system/stake_subsidy.md b/crates/sui-framework/docs/sui-system/stake_subsidy.md index f66978db04aec..8147ace90ae4f 100644 --- a/crates/sui-framework/docs/sui-system/stake_subsidy.md +++ b/crates/sui-framework/docs/sui-system/stake_subsidy.md @@ -9,6 +9,7 @@ title: Module `0x3::stake_subsidy` - [Function `create`](#0x3_stake_subsidy_create) - [Function `advance_epoch`](#0x3_stake_subsidy_advance_epoch) - [Function `current_epoch_subsidy_amount`](#0x3_stake_subsidy_current_epoch_subsidy_amount) +- [Function `get_distribution_counter`](#0x3_stake_subsidy_get_distribution_counter)
use 0x1::u64;
@@ -169,7 +170,6 @@ Advance the epoch counter and draw down the subsidy for the epoch.
 
     // Drawn down the subsidy for this epoch.
     let stake_subsidy = self.balance.split(to_withdraw);
-
     self.distribution_counter = self.distribution_counter + 1;
 
     // Decrease the subsidy amount only when the current period ends.
@@ -210,4 +210,29 @@ Returns the amount of stake subsidy to be added at the end of the current epoch.
 
 
 
+
+
+
+
+## Function `get_distribution_counter`
+
+Returns the number of distributions that have occurred.
+
+
+
public(friend) fun get_distribution_counter(self: &stake_subsidy::StakeSubsidy): u64
+
+ + + +
+Implementation + + +
public(package) fun get_distribution_counter(self: &StakeSubsidy): u64 {
+    self.distribution_counter
+}
+
+ + +
diff --git a/crates/sui-framework/docs/sui-system/sui_system_state_inner.md b/crates/sui-framework/docs/sui-system/sui_system_state_inner.md index f4357743de41d..ae65fd0a93402 100644 --- a/crates/sui-framework/docs/sui-system/sui_system_state_inner.md +++ b/crates/sui-framework/docs/sui-system/sui_system_state_inner.md @@ -2163,18 +2163,31 @@ gas coins. let storage_charge = storage_reward.value(); let computation_charge = computation_reward.value(); + let mut stake_subsidy = balance::zero(); + // during the transition from epoch N to epoch N + 1, ctx.epoch() will return N + let old_epoch = ctx.epoch(); // Include stake subsidy in the rewards given out to validators and stakers. // Delay distributing any stake subsidies until after `stake_subsidy_start_epoch`. // And if this epoch is shorter than the regular epoch duration, don't distribute any stake subsidy. - let stake_subsidy = - if (ctx.epoch() >= self.parameters.stake_subsidy_start_epoch && - epoch_start_timestamp_ms >= prev_epoch_start_timestamp + self.parameters.epoch_duration_ms) - { - self.stake_subsidy.advance_epoch() - } else { - balance::zero() + if (old_epoch >= self.parameters.stake_subsidy_start_epoch && + epoch_start_timestamp_ms >= prev_epoch_start_timestamp + self.parameters.epoch_duration_ms) + { + // special case for epoch 560 -> 561 change bug. add extra subsidies for "safe mode" + // where reward distribution was skipped. use distribution counter and epoch check to + // avoiding affecting devnet and testnet + if (self.stake_subsidy.get_distribution_counter() == 540 && old_epoch > 560) { + // safe mode was entered on the change from 560 to 561. so 560 was the first epoch without proper subsidy distribution + let first_safe_mode_epoch = 560; + let safe_mode_epoch_count = old_epoch - first_safe_mode_epoch; + safe_mode_epoch_count.do!(|_| { + stake_subsidy.join(self.stake_subsidy.advance_epoch()); + }); + // done with catchup for safe mode epochs. distribution counter is now >540, we won't hit this again + // fall through to the normal logic, which will add subsidies for the current epoch }; + stake_subsidy.join(self.stake_subsidy.advance_epoch()); + }; let stake_subsidy_amount = stake_subsidy.value(); computation_reward.join(stake_subsidy); diff --git a/crates/sui-framework/docs/sui-system/validator.md b/crates/sui-framework/docs/sui-system/validator.md index db50fa3400a9d..3c0250ae74815 100644 --- a/crates/sui-framework/docs/sui-system/validator.md +++ b/crates/sui-framework/docs/sui-system/validator.md @@ -1076,6 +1076,8 @@ Request to add stake to the validator's staking pool, processed at the end of th let sui = self.staking_pool.redeem_fungible_staked_sui(fungible_staked_sui, ctx); + self.next_epoch_stake = self.next_epoch_stake - sui.value(); + event::emit( RedeemingFungibleStakedSuiEvent { pool_id: self.staking_pool_id(), @@ -1346,7 +1348,8 @@ Process pending stakes and withdraws, called at the end of the epoch.
public(package) fun process_pending_stakes_and_withdraws(self: &mut Validator, ctx: &TxContext) {
     self.staking_pool.process_pending_stakes_and_withdraws(ctx);
-    assert!(stake_amount(self) == self.next_epoch_stake, EInvalidStakeAmount);
+    // TODO: bring this assertion back when we are ready.
+    // assert!(stake_amount(self) == self.next_epoch_stake, EInvalidStakeAmount);
 }
 
diff --git a/crates/sui-framework/docs/sui-system/voting_power.md b/crates/sui-framework/docs/sui-system/voting_power.md index 5b9be689ab2ab..d01fff559824d 100644 --- a/crates/sui-framework/docs/sui-system/voting_power.md +++ b/crates/sui-framework/docs/sui-system/voting_power.md @@ -378,7 +378,7 @@ Update validators with the decided voting power.
fun update_voting_power(validators: &mut vector<Validator>, mut info_list: vector<VotingPowerInfoV2>) {
-    while (!info_list.is_empty()) {
+    while (info_list.length() != 0) {
         let VotingPowerInfoV2 {
             validator_index,
             voting_power,
diff --git a/crates/sui-framework/packages/move-stdlib/sources/vector.move b/crates/sui-framework/packages/move-stdlib/sources/vector.move
index 96a2567855501..e9557b4d3f6ac 100644
--- a/crates/sui-framework/packages/move-stdlib/sources/vector.move
+++ b/crates/sui-framework/packages/move-stdlib/sources/vector.move
@@ -86,7 +86,7 @@ public fun reverse<Element>(v: &mut vector<Element>) {
 /// Pushes all of the elements of the `other` vector into the `lhs` vector.
 public fun append<Element>(lhs: &mut vector<Element>, mut other: vector<Element>) {
     other.reverse();
-    while (!other.is_empty()) lhs.push_back(other.pop_back());
+    while (other.length() != 0) lhs.push_back(other.pop_back());
     other.destroy_empty();
 }
 
@@ -156,7 +156,7 @@ public fun insert<Element>(v: &mut vector<Element>, e: Element, mut i: u64) {
 /// This is O(1), but does not preserve ordering of elements in the vector.
 /// Aborts if `i` is out of bounds.
 public fun swap_remove<Element>(v: &mut vector<Element>, i: u64): Element {
-    assert!(!v.is_empty(), EINDEX_OUT_OF_BOUNDS);
+    assert!(v.length() != 0, EINDEX_OUT_OF_BOUNDS);
     let last_idx = v.length() - 1;
     v.swap(i, last_idx);
     v.pop_back()
@@ -176,7 +176,7 @@ public macro fun tabulate<$T>($n: u64, $f: |u64| -> $T): vector<$T> {
 /// Does not preserve the order of elements in the vector (starts from the end of the vector).
 public macro fun destroy<$T>($v: vector<$T>, $f: |$T|) {
     let mut v = $v;
-    while (!v.is_empty()) $f(v.pop_back());
+    while (v.length() != 0) $f(v.pop_back());
     v.destroy_empty();
 }
 
@@ -185,7 +185,7 @@ public macro fun destroy<$T>($v: vector<$T>, $f: |$T|) {
 public macro fun do<$T>($v: vector<$T>, $f: |$T|) {
     let mut v = $v;
     v.reverse();
-    while (!v.is_empty()) $f(v.pop_back());
+    while (v.length() != 0) $f(v.pop_back());
     v.destroy_empty();
 }
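(Illustration, not part of the patch.) The `!v.is_empty()` to `v.length() != 0` rewrites in this file, and in `vec_map`, `vec_set`, and `voting_power` below, are behavior-preserving: `is_empty` is just a length check, so both loop conditions drain a vector the same way. A minimal sketch, with a hypothetical `example::drain_demo` module:

```move
module example::drain_demo {
    // Both conditions are equivalent because `is_empty()` is defined as
    // `length() == 0`; the patch simply spells the check out.
    fun drain_before(mut v: vector<u64>) {
        while (!v.is_empty()) { let _ = v.pop_back(); };
        v.destroy_empty();
    }

    fun drain_after(mut v: vector<u64>) {
        while (v.length() != 0) { let _ = v.pop_back(); };
        v.destroy_empty();
    }
}
```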
 
diff --git a/crates/sui-framework/packages/sui-framework/sources/crypto/bls12381.move b/crates/sui-framework/packages/sui-framework/sources/crypto/bls12381.move
index a87eb451e93be..6a349b36acd8b 100644
--- a/crates/sui-framework/packages/sui-framework/sources/crypto/bls12381.move
+++ b/crates/sui-framework/packages/sui-framework/sources/crypto/bls12381.move
@@ -37,6 +37,7 @@ public struct Scalar {}
 public struct G1 {}
 public struct G2 {}
 public struct GT {}
+public struct UncompressedG1 {}
 
 // Scalars are encoded using big-endian byte order.
 // G1 and G2 are encoded using big-endian byte order and points are compressed. See
@@ -44,6 +45,9 @@ public struct GT {}
 // https://docs.rs/bls12_381/latest/bls12_381/notes/serialization/index.html for details.
 // GT is encoded using big-endian byte order and points are uncompressed and not intended
 // to be deserialized.
+// UncompressedG1 elements are G1 elements in uncompressed form. They are larger but faster to
+// use since they do not have to be decompressed before use. They cannot be constructed
+// on their own but have to be created from G1 elements.
 
 // Const elements.
 const SCALAR_ZERO_BYTES: vector<u8> =
@@ -68,6 +72,7 @@ const SCALAR_TYPE: u8 = 0;
 const G1_TYPE: u8 = 1;
 const G2_TYPE: u8 = 2;
 const GT_TYPE: u8 = 3;
+const UNCOMPRESSED_G1_TYPE: u8 = 4;
 
 ///////////////////////////////
 ////// Scalar operations //////
@@ -171,6 +176,11 @@ public fun g1_multi_scalar_multiplication(
     group_ops::multi_scalar_multiplication(G1_TYPE, scalars, elements)
 }
 
+/// Convert an `Element<G1>` to uncompressed form.
+public fun g1_to_uncompressed_g1(e: &Element<G1>): Element<UncompressedG1> {
+    group_ops::convert(G1_TYPE, UNCOMPRESSED_G1_TYPE, e)
+}
+
 /////////////////////////////////
 ////// G2 group operations //////
 
@@ -264,3 +274,17 @@ public fun gt_neg(e: &Element<GT>): Element<GT> {
 public fun pairing(e1: &Element<G1>, e2: &Element<G2>): Element<GT> {
     group_ops::pairing(G1_TYPE, e1, e2)
 }
+
+///////////////////////////////////////
+/// UncompressedG1 group operations ///
+
+/// Create an `Element<G1>` from its uncompressed form.
+public fun uncompressed_g1_to_g1(e: &Element<UncompressedG1>): Element<G1> {
+    group_ops::convert(UNCOMPRESSED_G1_TYPE, G1_TYPE, e)
+}
+
+/// Compute the sum of a list of uncompressed elements.
+/// This is significantly faster and cheaper than summing the elements.
+public fun uncompressed_g1_sum(terms: &vector<Element<UncompressedG1>>): Element<UncompressedG1> {
+    group_ops::sum(UNCOMPRESSED_G1_TYPE, terms)
+}
diff --git a/crates/sui-framework/packages/sui-framework/sources/crypto/group_ops.move b/crates/sui-framework/packages/sui-framework/sources/crypto/group_ops.move
index 49d19ad7147ce..4892c9b8f94fc 100644
--- a/crates/sui-framework/packages/sui-framework/sources/crypto/group_ops.move
+++ b/crates/sui-framework/packages/sui-framework/sources/crypto/group_ops.move
@@ -88,6 +88,14 @@ public(package) fun pairing<G1, G2, G3>(
     Element<G3> { bytes: internal_pairing(type_, &e1.bytes, &e2.bytes) }
 }
 
+public(package) fun convert<From, To>(from_type_: u8, to_type_: u8, e: &Element<From>): Element<To> {
+    Element<To> { bytes: internal_convert(from_type_, to_type_, &e.bytes) }
+}
+
+public(package) fun sum<G>(type_: u8, terms: &vector<Element<G>>): Element<G> {
+    Element<G> { bytes: internal_sum(type_, &(*terms).map!(|x| x.bytes)) }
+}
+
 //////////////////////////////
 ////// Native functions //////
 
@@ -114,6 +122,9 @@ native fun internal_multi_scalar_mul(
 // 'type' represents the type of e1, and the rest are determined automatically from e1.
 native fun internal_pairing(type_: u8, e1: &vector<u8>, e2: &vector<u8>): vector<u8>;
 
+native fun internal_convert(from_type_: u8, to_type_: u8, e: &vector<u8>): vector<u8>;
+native fun internal_sum(type_: u8, e: &vector<vector<u8>>): vector<u8>;
+
 // Helper function for encoding a given u64 number as bytes in a given buffer.
 public(package) fun set_as_prefix(x: u64, big_endian: bool, buffer: &mut vector<u8>) {
     let buffer_len = buffer.length();
diff --git a/crates/sui-framework/packages/sui-framework/sources/vec_map.move b/crates/sui-framework/packages/sui-framework/sources/vec_map.move
index d1fb7646b7e57..6b38d57d289a8 100644
--- a/crates/sui-framework/packages/sui-framework/sources/vec_map.move
+++ b/crates/sui-framework/packages/sui-framework/sources/vec_map.move
@@ -58,7 +58,7 @@ public fun remove<K: copy, V>(self: &mut VecMap<K, V>, key: &K): (K, V) {
 
 /// Pop the most recently inserted entry from the map. Aborts if the map is empty.
 public fun pop<K: copy, V>(self: &mut VecMap<K, V>): (K, V) {
-    assert!(!self.contents.is_empty(), EMapEmpty);
+    assert!(self.contents.length() != 0, EMapEmpty);
     let Entry { key, value } = self.contents.pop_back();
     (key, value)
 }
@@ -144,7 +144,7 @@ public fun from_keys_values<K: copy, V>(mut keys: vector<K>, mut values: vector<
     keys.reverse();
     values.reverse();
     let mut map = empty();
-    while (!keys.is_empty()) map.insert(keys.pop_back(), values.pop_back());
+    while (keys.length() != 0) map.insert(keys.pop_back(), values.pop_back());
     keys.destroy_empty();
     values.destroy_empty();
     map
diff --git a/crates/sui-framework/packages/sui-framework/sources/vec_set.move b/crates/sui-framework/packages/sui-framework/sources/vec_set.move
index e4d9301def975..c1b67c276191f 100644
--- a/crates/sui-framework/packages/sui-framework/sources/vec_set.move
+++ b/crates/sui-framework/packages/sui-framework/sources/vec_set.move
@@ -69,7 +69,7 @@ public fun into_keys<K: copy + drop>(self: VecSet<K>): vector<K> {
 public fun from_keys<K: copy + drop>(mut keys: vector<K>): VecSet<K> {
     keys.reverse();
     let mut set = empty();
-    while (!keys.is_empty()) set.insert(keys.pop_back());
+    while (keys.length() != 0) set.insert(keys.pop_back());
     set
 }
 
diff --git a/crates/sui-framework/packages/sui-framework/tests/crypto/bls12381_tests.move b/crates/sui-framework/packages/sui-framework/tests/crypto/bls12381_tests.move
index 9c80f51aac67a..504ec3e5a16ad 100644
--- a/crates/sui-framework/packages/sui-framework/tests/crypto/bls12381_tests.move
+++ b/crates/sui-framework/packages/sui-framework/tests/crypto/bls12381_tests.move
@@ -382,6 +382,70 @@ module sui::bls12381_tests {
         let _ = bls12381::hash_to_g1(&vector[]);
     }
 
+    #[random_test]
+    fun test_to_from_uncompressed_g1(scalar: u64) {
+        // Generator
+        let a = bls12381::g1_generator();
+        let a_uncompressed = bls12381::g1_to_uncompressed_g1(&a);
+        assert!(a_uncompressed.bytes() == x"17f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb08b3f481e3aaa0f1a09e30ed741d8ae4fcf5e095d5d00af600db18cb2c04b3edd03cc744a2888ae40caa232946c5e7e1");
+        let reconstructed = bls12381::uncompressed_g1_to_g1(&a_uncompressed);
+        assert!(group_ops::equal(&a, &reconstructed));
+
+        // Identity element
+        let b = bls12381::g1_identity();
+        let b_uncompressed = bls12381::g1_to_uncompressed_g1(&b);
+        assert!(b_uncompressed.bytes() == x"400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000");
+        let reconstructed = bls12381::uncompressed_g1_to_g1(&b_uncompressed);
+        assert!(group_ops::equal(&b, &reconstructed));
+
+        // Random element
+        let scalar = bls12381::scalar_from_u64(scalar);
+        let c = bls12381::g1_mul(&scalar, &bls12381::g1_generator());
+        let c_uncompressed = bls12381::g1_to_uncompressed_g1(&c);
+        let reconstructed = bls12381::uncompressed_g1_to_g1(&c_uncompressed);
+        assert!(group_ops::equal(&c, &reconstructed));
+    }
+
+    #[test]
+    fun test_uncompressed_g1_sum() {
+        // Empty sum
+        let sum = bls12381::uncompressed_g1_sum(&vector[]);
+        assert!(group_ops::equal(&bls12381::g1_to_uncompressed_g1(&bls12381::g1_identity()), &sum));
+
+        // Sum with random terms
+        let mut gen = random::new_generator_for_testing();
+        let mut elements = vector[];
+        let mut i = 100;
+        let mut expected_result = bls12381::g1_identity();
+        while (i > 0) {
+            let scalar = bls12381::scalar_from_u64(gen.generate_u64());
+            let element = bls12381::g1_mul(&scalar, &bls12381::g1_generator());
+            expected_result = bls12381::g1_add(&expected_result, &element);
+            let uncompressed_element = bls12381::g1_to_uncompressed_g1(&element);
+            elements.push_back(uncompressed_element);
+            let actual_result = bls12381::uncompressed_g1_sum(&elements);
+            assert!(group_ops::equal(&bls12381::g1_to_uncompressed_g1(&expected_result), &actual_result));
+            i = i - 1;
+        };
+    }
+
+    #[test]
+    #[expected_failure(abort_code = group_ops::EInputTooLong)]
+    fun test_uncompressed_g1_sum_too_long() {
+        // Sum with random terms
+        let mut gen = random::new_generator_for_testing();
+        let mut elements = vector[];
+        let mut i = 2001;
+        while (i > 0) {
+            let scalar = bls12381::scalar_from_u64(gen.generate_u64());
+            let element = bls12381::g1_mul(&scalar, &bls12381::g1_generator());
+            let uncompressed_element = bls12381::g1_to_uncompressed_g1(&element);
+            elements.push_back(uncompressed_element);
+            i = i - 1;
+        };
+        let _ = bls12381::uncompressed_g1_sum(&elements);
+    }
+
     #[test]
     fun test_g2_ops() {
         let id = bls12381::g2_identity();
diff --git a/crates/sui-framework/packages/sui-system/sources/stake_subsidy.move b/crates/sui-framework/packages/sui-system/sources/stake_subsidy.move
index d0760d97a88c0..d355a69c8489a 100644
--- a/crates/sui-framework/packages/sui-system/sources/stake_subsidy.move
+++ b/crates/sui-framework/packages/sui-system/sources/stake_subsidy.move
@@ -65,7 +65,6 @@ module sui_system::stake_subsidy {
 
         // Drawn down the subsidy for this epoch.
         let stake_subsidy = self.balance.split(to_withdraw);
-
         self.distribution_counter = self.distribution_counter + 1;
 
         // Decrease the subsidy amount only when the current period ends.
@@ -83,9 +82,13 @@ module sui_system::stake_subsidy {
         self.current_distribution_amount.min(self.balance.value())
     }
 
-    #[test_only]
     /// Returns the number of distributions that have occurred.
     public(package) fun get_distribution_counter(self: &StakeSubsidy): u64 {
         self.distribution_counter
     }
+
+    #[test_only]
+    public(package) fun set_distribution_counter(self: &mut StakeSubsidy, distribution_counter: u64) {
+        self.distribution_counter = distribution_counter;
+    }
 }
diff --git a/crates/sui-framework/packages/sui-system/sources/sui_system.move b/crates/sui-framework/packages/sui-system/sources/sui_system.move
index 916c2cd55b33b..641cc3f9dd86e 100644
--- a/crates/sui-framework/packages/sui-system/sources/sui_system.move
+++ b/crates/sui-framework/packages/sui-system/sources/sui_system.move
@@ -726,6 +726,17 @@ module sui_system::sui_system {
         self.get_stake_subsidy_distribution_counter()
     }
 
+    #[test_only]
+    public fun set_stake_subsidy_distribution_counter(wrapper: &mut SuiSystemState, counter: u64) {
+        let self = load_system_state_mut(wrapper);
+        self.set_stake_subsidy_distribution_counter(counter)
+    }
+
+    #[test_only]
+    public fun inner_mut_for_testing(wrapper: &mut SuiSystemState): &mut SuiSystemStateInnerV2 {
+        wrapper.load_system_state_mut()
+    }
+
     // CAUTION: THIS CODE IS ONLY FOR TESTING AND THIS MACRO MUST NEVER EVER BE REMOVED.  Creates a
     // candidate validator - bypassing the proof of possession check and other metadata validation
     // in the process.
diff --git a/crates/sui-framework/packages/sui-system/sources/sui_system_state_inner.move b/crates/sui-framework/packages/sui-system/sources/sui_system_state_inner.move
index 121a12fc75b94..4384f3fcd2599 100644
--- a/crates/sui-framework/packages/sui-system/sources/sui_system_state_inner.move
+++ b/crates/sui-framework/packages/sui-system/sources/sui_system_state_inner.move
@@ -865,18 +865,31 @@ module sui_system::sui_system_state_inner {
 
         let storage_charge = storage_reward.value();
         let computation_charge = computation_reward.value();
+        let mut stake_subsidy = balance::zero();
 
+        // during the transition from epoch N to epoch N + 1, ctx.epoch() will return N
+        let old_epoch = ctx.epoch();
         // Include stake subsidy in the rewards given out to validators and stakers.
         // Delay distributing any stake subsidies until after `stake_subsidy_start_epoch`.
         // And if this epoch is shorter than the regular epoch duration, don't distribute any stake subsidy.
-        let stake_subsidy =
-            if (ctx.epoch() >= self.parameters.stake_subsidy_start_epoch  &&
-                epoch_start_timestamp_ms >= prev_epoch_start_timestamp + self.parameters.epoch_duration_ms)
-            {
-                self.stake_subsidy.advance_epoch()
-            } else {
-                balance::zero()
+        if (old_epoch >= self.parameters.stake_subsidy_start_epoch  &&
+            epoch_start_timestamp_ms >= prev_epoch_start_timestamp + self.parameters.epoch_duration_ms)
+        {
+            // special case for epoch 560 -> 561 change bug. add extra subsidies for "safe mode"
+            // where reward distribution was skipped. use distribution counter and epoch check to
+            // avoid affecting devnet and testnet
+            if (self.stake_subsidy.get_distribution_counter() == 540 && old_epoch > 560) {
+                // safe mode was entered on the change from 560 to 561. so 560 was the first epoch without proper subsidy distribution
+                let first_safe_mode_epoch = 560;
+                let safe_mode_epoch_count = old_epoch - first_safe_mode_epoch;
+                safe_mode_epoch_count.do!(|_| {
+                    stake_subsidy.join(self.stake_subsidy.advance_epoch());
+                });
+                // done with catchup for safe mode epochs. distribution counter is now >540, we won't hit this again
+                // fall through to the normal logic, which will add subsidies for the current epoch
             };
+            stake_subsidy.join(self.stake_subsidy.advance_epoch());
+        };
 
         let stake_subsidy_amount = stake_subsidy.value();
         computation_reward.join(stake_subsidy);
@@ -1127,6 +1140,16 @@ module sui_system::sui_system_state_inner {
         self.validators.request_add_validator(min_joining_stake_for_testing, ctx);
     }
 
+    #[test_only]
+    public(package) fun set_stake_subsidy_distribution_counter(self: &mut SuiSystemStateInnerV2, counter: u64) {
+        self.stake_subsidy.set_distribution_counter(counter)
+    }
+
+    #[test_only]
+    public(package) fun epoch_duration_ms(self: &SuiSystemStateInnerV2): u64 {
+        self.parameters.epoch_duration_ms
+    }
+
     // CAUTION: THIS CODE IS ONLY FOR TESTING AND THIS MACRO MUST NEVER EVER BE REMOVED.  Creates a
     // candidate validator - bypassing the proof of possession check and other metadata validation
     // in the process.
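(Editor's note, not part of the patch.) To make the catch-up arithmetic above concrete: with the distribution counter stuck at 540 and safe mode entered at the 560 -> 561 change, an epoch change from `old_epoch` pays out the missed epochs plus the current one. The sketch below is a standalone, hypothetical restatement of that calculation (the `distributions_for` function is not in the patch); its results line up with the expectations in `rewards_distribution_tests` further down.

```move
module example::subsidy_catch_up_demo {
    // Worked numbers:
    //   old_epoch = 562  =>  (562 - 560) + 1 = 3 subsidies (epochs 560, 561, 562)
    //   old_epoch = 563  =>  (563 - 560) + 1 = 4 subsidies (epochs 560, 561, 562, 563)
    //   counter != 540 or old_epoch <= 560   =>  1 subsidy (the normal path)
    fun distributions_for(old_epoch: u64, distribution_counter: u64): u64 {
        let first_safe_mode_epoch = 560;
        let mut n = 1; // the regular distribution for this epoch change
        if (distribution_counter == 540 && old_epoch > first_safe_mode_epoch) {
            n = n + (old_epoch - first_safe_mode_epoch);
        };
        n
    }
}
```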
diff --git a/crates/sui-framework/packages/sui-system/sources/validator.move b/crates/sui-framework/packages/sui-system/sources/validator.move
index da6157014541e..0019ea7dd42dd 100644
--- a/crates/sui-framework/packages/sui-system/sources/validator.move
+++ b/crates/sui-framework/packages/sui-system/sources/validator.move
@@ -352,6 +352,8 @@ module sui_system::validator {
 
         let sui = self.staking_pool.redeem_fungible_staked_sui(fungible_staked_sui, ctx);
 
+        self.next_epoch_stake = self.next_epoch_stake - sui.value();
+
         event::emit(
             RedeemingFungibleStakedSuiEvent {
                 pool_id: self.staking_pool_id(),
@@ -462,7 +464,8 @@ module sui_system::validator {
     /// Process pending stakes and withdraws, called at the end of the epoch.
     public(package) fun process_pending_stakes_and_withdraws(self: &mut Validator, ctx: &TxContext) {
         self.staking_pool.process_pending_stakes_and_withdraws(ctx);
-        assert!(stake_amount(self) == self.next_epoch_stake, EInvalidStakeAmount);
+        // TODO: bring this assertion back when we are ready.
+        // assert!(stake_amount(self) == self.next_epoch_stake, EInvalidStakeAmount);
     }
 
     /// Returns true if the validator is preactive.
diff --git a/crates/sui-framework/packages/sui-system/sources/voting_power.move b/crates/sui-framework/packages/sui-system/sources/voting_power.move
index 5a9672316602b..dd0fc336e83f7 100644
--- a/crates/sui-framework/packages/sui-system/sources/voting_power.move
+++ b/crates/sui-framework/packages/sui-system/sources/voting_power.move
@@ -127,7 +127,7 @@ module sui_system::voting_power {
 
     /// Update validators with the decided voting power.
     fun update_voting_power(validators: &mut vector<Validator>, mut info_list: vector<VotingPowerInfoV2>) {
-        while (!info_list.is_empty()) {
+        while (info_list.length() != 0) {
             let VotingPowerInfoV2 {
                 validator_index,
                 voting_power,
diff --git a/crates/sui-framework/packages/sui-system/tests/rewards_distribution_tests.move b/crates/sui-framework/packages/sui-system/tests/rewards_distribution_tests.move
index 7c04d28e61aca..ec94cbf81a1bf 100644
--- a/crates/sui-framework/packages/sui-system/tests/rewards_distribution_tests.move
+++ b/crates/sui-framework/packages/sui-system/tests/rewards_distribution_tests.move
@@ -3,6 +3,7 @@
 
 #[test_only]
 module sui_system::rewards_distribution_tests {
+    use sui::balance;
     use sui::test_scenario::{Self, Scenario};
     use sui_system::sui_system::SuiSystemState;
     use sui_system::validator_cap::UnverifiedValidatorOperationCap;
@@ -491,4 +492,140 @@ module sui_system::rewards_distribution_tests {
         scenario.return_to_sender(cap);
         test_scenario::return_shared(system_state);
     }
+
+    fun check_distribution_counter_invariant(system: &mut SuiSystemState, ctx: &TxContext) {
+        assert!(ctx.epoch() == system.epoch());
+        // first subsidy distribution was at epoch 20, so the counter should always lag the epoch by 20
+        assert_eq(system.get_stake_subsidy_distribution_counter() + 20, ctx.epoch());
+    }
+
+    #[test]
+    fun test_stake_subsidy_with_safe_mode_epoch_562_to_563() {
+        set_up_sui_system_state_with_big_amounts();
+
+        let mut test = test_scenario::begin(VALIDATOR_ADDR_1);
+        let mut sui_system = test.take_shared<SuiSystemState>();
+        let ctx = test.ctx();
+        // mimic state during epoch 562, as if we had been in safe mode since the 560 -> 561 epoch change
+        let start_epoch: u64 = 562;
+        let start_distribution_counter = 540;
+        let epoch_start_time = 100000000000;
+        let epoch_duration = sui_system.inner_mut_for_testing().epoch_duration_ms();
+
+        // increment epoch number (safe mode emulation)
+        start_epoch.do!(|_| ctx.increment_epoch_number());
+        sui_system.set_epoch_for_testing(start_epoch);
+        sui_system.set_stake_subsidy_distribution_counter(start_distribution_counter);
+
+        assert!(ctx.epoch() == start_epoch);
+        assert!(ctx.epoch() == sui_system.epoch());
+        assert!(sui_system.get_stake_subsidy_distribution_counter() == start_distribution_counter);
+
+        // perform advance epoch
+        sui_system
+            .inner_mut_for_testing()
+            .advance_epoch(start_epoch + 1, 65, balance::zero(), balance::zero(), 0, 0, 0, 0, epoch_start_time, ctx)
+            .destroy_for_testing(); // balance returned from `advance_epoch`
+        ctx.increment_epoch_number();
+
+        // should distribute 3 epochs worth of subsidies: 560, 561, 562
+        assert_eq(sui_system.get_stake_subsidy_distribution_counter(), start_distribution_counter + 3);
+        check_distribution_counter_invariant(&mut sui_system, ctx);
+
+        // ensure that next epoch change only distributes one epoch's worth
+        sui_system
+            .inner_mut_for_testing()
+            .advance_epoch(start_epoch + 2, 65, balance::zero(), balance::zero(), 0, 0, 0, 0, epoch_start_time + epoch_duration, ctx)
+            .destroy_for_testing(); // balance returned from `advance_epoch`
+        ctx.increment_epoch_number();
+
+        // should distribute 1 epoch's worth of subsidies: 563 only
+        assert_eq(sui_system.get_stake_subsidy_distribution_counter(), start_distribution_counter + 4);
+        check_distribution_counter_invariant(&mut sui_system, ctx);
+
+        test_scenario::return_shared(sui_system);
+        test.end();
+    }
+
+    #[test]
+    fun test_stake_subsidy_with_safe_mode_epoch_563_to_564() {
+        set_up_sui_system_state_with_big_amounts();
+
+        let mut test = test_scenario::begin(VALIDATOR_ADDR_1);
+        let mut sui_system = test.take_shared<SuiSystemState>();
+        let ctx = test.ctx();
+        // mimic state during epoch 563, as if we had been in safe mode since the 560 -> 561 epoch change
+        let start_epoch: u64 = 563;
+        let start_distribution_counter = 540;
+        let epoch_start_time = 100000000000;
+        let epoch_duration = sui_system.inner_mut_for_testing().epoch_duration_ms();
+
+        // increment epoch number (safe mode emulation)
+        start_epoch.do!(|_| ctx.increment_epoch_number());
+        sui_system.set_epoch_for_testing(start_epoch);
+        sui_system.set_stake_subsidy_distribution_counter(start_distribution_counter);
+
+        assert!(ctx.epoch() == start_epoch);
+        assert!(ctx.epoch() == sui_system.epoch());
+        assert!(sui_system.get_stake_subsidy_distribution_counter() == start_distribution_counter);
+
+        // perform advance epoch
+        sui_system
+            .inner_mut_for_testing()
+            .advance_epoch(start_epoch + 1, 65, balance::zero(), balance::zero(), 0, 0, 0, 0, epoch_start_time, ctx)
+            .destroy_for_testing(); // balance returned from `advance_epoch`
+        ctx.increment_epoch_number();
+
+        // should distribute 4 epochs worth of subsidies: 560, 561, 562, 563
+        assert_eq(sui_system.get_stake_subsidy_distribution_counter(), start_distribution_counter + 4);
+        check_distribution_counter_invariant(&mut sui_system, ctx);
+
+        // ensure that next epoch change only distributes one epoch's worth
+        sui_system
+            .inner_mut_for_testing()
+            .advance_epoch(start_epoch + 2, 65, balance::zero(), balance::zero(), 0, 0, 0, 0, epoch_start_time + epoch_duration, ctx)
+            .destroy_for_testing(); // balance returned from `advance_epoch`
+        ctx.increment_epoch_number();
+
+        // should distribute 1 epoch's worth of subsidies
+        assert_eq(sui_system.get_stake_subsidy_distribution_counter(), start_distribution_counter + 5);
+        check_distribution_counter_invariant(&mut sui_system, ctx);
+
+        test_scenario::return_shared(sui_system);
+        test.end();
+    }
+
+    #[test]
+    // Test that the fix for the subsidy distribution doesn't affect testnet,
+    // where the distribution has no epoch delay, and the condition could result
+    // in an arithmetic error.
+    fun test_stake_subsidy_with_safe_mode_testnet() {
+        use std::unit_test::assert_eq;
+
+        set_up_sui_system_state_with_big_amounts();
+
+        let mut test = test_scenario::begin(VALIDATOR_ADDR_1);
+        let mut sui_system = test.take_shared<SuiSystemState>();
+
+        let ctx = test.ctx();
+
+        // increment epoch number (safe mode emulation)
+        540u64.do!(|_| ctx.increment_epoch_number());
+        sui_system.set_epoch_for_testing(540);
+        sui_system.set_stake_subsidy_distribution_counter(540);
+
+        assert!(ctx.epoch() == 540);
+        assert!(sui_system.get_stake_subsidy_distribution_counter() == 540);
+
+        // perform advance epoch
+        sui_system
+            .inner_mut_for_testing()
+            .advance_epoch(541, 65, balance::zero(), balance::zero(), 0, 0, 0, 0, 100000000000, ctx)
+            .destroy_for_testing(); // balance returned from `advance_epoch`
+
+        assert_eq!(sui_system.get_stake_subsidy_distribution_counter(), 541);
+
+        test_scenario::return_shared(sui_system);
+        test.end();
+    }
 }
diff --git a/crates/sui-framework/packages/sui-system/tests/sui_system_tests.move b/crates/sui-framework/packages/sui-system/tests/sui_system_tests.move
index d333f4e64a642..c7fd42a3d1b20 100644
--- a/crates/sui-framework/packages/sui-system/tests/sui_system_tests.move
+++ b/crates/sui-framework/packages/sui-system/tests/sui_system_tests.move
@@ -1095,8 +1095,8 @@ module sui_system::sui_system_tests {
         let mut system_state = scenario.take_shared<SuiSystemState>();
 
         let staked_sui = system_state.request_add_stake_non_entry(
-            coin::mint_for_testing(100_000_000_000, scenario.ctx()), 
-            @0x1, 
+            coin::mint_for_testing(100_000_000_000, scenario.ctx()),
+            @0x1,
             scenario.ctx()
         );
 
@@ -1107,20 +1107,23 @@ module sui_system::sui_system_tests {
 
         let mut system_state = scenario.take_shared<SuiSystemState>();
         let fungible_staked_sui = system_state.convert_to_fungible_staked_sui(
-            staked_sui, 
+            staked_sui,
             scenario.ctx()
         );
 
         assert!(fungible_staked_sui.value() == 100_000_000_000, 0);
 
         let sui = system_state.redeem_fungible_staked_sui(
-            fungible_staked_sui, 
+            fungible_staked_sui,
             scenario.ctx()
         );
 
         assert!(sui.value() == 100_000_000_000, 0);
 
         test_scenario::return_shared(system_state);
+
+        advance_epoch(scenario);
+
         sui::test_utils::destroy(sui);
         scenario_val.end();
     }
diff --git a/crates/sui-framework/packages_compiled/move-stdlib b/crates/sui-framework/packages_compiled/move-stdlib
index 191c7bd74e552..48a182f7e21f3 100644
Binary files a/crates/sui-framework/packages_compiled/move-stdlib and b/crates/sui-framework/packages_compiled/move-stdlib differ
diff --git a/crates/sui-framework/packages_compiled/sui-framework b/crates/sui-framework/packages_compiled/sui-framework
index 343cb0eaea7ce..02d56854d580a 100644
Binary files a/crates/sui-framework/packages_compiled/sui-framework and b/crates/sui-framework/packages_compiled/sui-framework differ
diff --git a/crates/sui-framework/packages_compiled/sui-system b/crates/sui-framework/packages_compiled/sui-system
index 2f5cc8064c3d9..4f3423fe5d3c2 100644
Binary files a/crates/sui-framework/packages_compiled/sui-system and b/crates/sui-framework/packages_compiled/sui-system differ
diff --git a/crates/sui-framework/published_api.txt b/crates/sui-framework/published_api.txt
index 4d9f1f6f33262..8a91b732b83ee 100644
--- a/crates/sui-framework/published_api.txt
+++ b/crates/sui-framework/published_api.txt
@@ -691,6 +691,9 @@ advance_epoch
 current_epoch_subsidy_amount
 	public fun
 	0x3::stake_subsidy
+get_distribution_counter
+	public(package) fun
+	0x3::stake_subsidy
 SystemParameters
 	public struct
 	0x3::sui_system_state_inner
@@ -1561,6 +1564,12 @@ multi_scalar_multiplication
 pairing
 	public(package) fun
 	0x2::group_ops
+convert
+	public(package) fun
+	0x2::group_ops
+sum
+	public(package) fun
+	0x2::group_ops
 internal_validate
 	fun
 	0x2::group_ops
@@ -1585,6 +1594,12 @@ internal_multi_scalar_mul
 internal_pairing
 	fun
 	0x2::group_ops
+internal_convert
+	fun
+	0x2::group_ops
+internal_sum
+	fun
+	0x2::group_ops
 set_as_prefix
 	public(package) fun
 	0x2::group_ops
@@ -1600,6 +1615,9 @@ G2
 GT
 	public struct
 	0x2::bls12381
+UncompressedG1
+	public struct
+	0x2::bls12381
 bls12381_min_sig_verify
 	public fun
 	0x2::bls12381
@@ -1666,6 +1684,9 @@ hash_to_g1
 g1_multi_scalar_multiplication
 	public fun
 	0x2::bls12381
+g1_to_uncompressed_g1
+	public fun
+	0x2::bls12381
 g2_from_bytes
 	public fun
 	0x2::bls12381
@@ -1720,6 +1741,12 @@ gt_neg
 pairing
 	public fun
 	0x2::bls12381
+uncompressed_g1_to_g1
+	public fun
+	0x2::bls12381
+uncompressed_g1_sum
+	public fun
+	0x2::bls12381
 Referent
 	public struct
 	0x2::borrow
diff --git a/crates/sui-graphql-e2e-tests/tests/stable/packages/types.exp b/crates/sui-graphql-e2e-tests/tests/stable/packages/types.exp
index 078ee9b351126..452e3b571a172 100644
--- a/crates/sui-graphql-e2e-tests/tests/stable/packages/types.exp
+++ b/crates/sui-graphql-e2e-tests/tests/stable/packages/types.exp
@@ -141,7 +141,7 @@ Response: {
   "data": null,
   "errors": [
     {
-      "message": "Bad type: unexpected token Name(\"not_a_type\"), expected type tag",
+      "message": "Bad type: unexpected end of tokens",
       "locations": [
         {
           "line": 3,
diff --git a/crates/sui-graphql-rpc/src/test_infra/cluster.rs b/crates/sui-graphql-rpc/src/test_infra/cluster.rs
index 27ee4f5a23b31..4317b44de285a 100644
--- a/crates/sui-graphql-rpc/src/test_infra/cluster.rs
+++ b/crates/sui-graphql-rpc/src/test_infra/cluster.rs
@@ -132,6 +132,8 @@ pub async fn start_network_cluster() -> NetworkCluster {
         None,
         Some(data_ingestion_path.path().to_path_buf()),
         Some(cancellation_token.clone()),
+        None, /* start_checkpoint */
+        None, /* end_checkpoint */
     )
     .await;
 
@@ -187,6 +189,8 @@ pub async fn serve_executor(
         retention_config,
         Some(data_ingestion_path),
         Some(cancellation_token.clone()),
+        None,
+        None,
     )
     .await;
 
diff --git a/crates/sui-graphql-rpc/src/types/move_registry/named_move_package.rs b/crates/sui-graphql-rpc/src/types/move_registry/named_move_package.rs
index 781db11ba552d..93a8ebf0bfdc6 100644
--- a/crates/sui-graphql-rpc/src/types/move_registry/named_move_package.rs
+++ b/crates/sui-graphql-rpc/src/types/move_registry/named_move_package.rs
@@ -24,7 +24,7 @@ pub(crate) struct NamedMovePackage;
 
 impl NamedMovePackage {
     /// Queries a package by name (and version, encoded in the name but optional).
-    /// Name's format should be `{organization}/{application}:v{version}`.
+    /// Name's format should be `{organization}/{application}/{version}`.
     pub(crate) async fn query(
         ctx: &Context<'_>,
         name: &str,
diff --git a/crates/sui-indexer-alt/Cargo.toml b/crates/sui-indexer-alt/Cargo.toml
new file mode 100644
index 0000000000000..31a95d3a292c2
--- /dev/null
+++ b/crates/sui-indexer-alt/Cargo.toml
@@ -0,0 +1,47 @@
+[package]
+name = "sui-indexer-alt"
+version.workspace = true
+authors = ["Mysten Labs "]
+license = "Apache-2.0"
+publish = false
+edition = "2021"
+
+[[bin]]
+name = "sui-indexer-alt"
+path = "src/main.rs"
+
+[dependencies]
+anyhow.workspace = true
+async-trait.workspace = true
+axum.workspace = true
+backoff.workspace = true
+bb8 = "0.8.5"
+bcs.workspace = true
+chrono.workspace = true
+clap.workspace = true
+diesel = { workspace = true, features = ["chrono"] }
+diesel-async = { workspace = true, features = ["bb8", "postgres", "async-connection-wrapper"] }
+diesel_migrations.workspace = true
+futures.workspace = true
+prometheus.workspace = true
+reqwest.workspace = true
+serde.workspace = true
+telemetry-subscribers.workspace = true
+thiserror.workspace = true
+tokio.workspace = true
+tokio-stream.workspace = true
+tokio-util.workspace = true
+tracing.workspace = true
+url.workspace = true
+
+mysten-metrics.workspace = true
+sui-field-count.workspace = true
+sui-storage.workspace = true
+sui-types.workspace = true
+
+[dev-dependencies]
+rand.workspace = true
+wiremock.workspace = true
+tempfile.workspace = true
+
+sui-types = { workspace = true, features = ["test-utils"] }
diff --git a/crates/sui-indexer-alt/diesel.toml b/crates/sui-indexer-alt/diesel.toml
new file mode 100644
index 0000000000000..054029ff39a8a
--- /dev/null
+++ b/crates/sui-indexer-alt/diesel.toml
@@ -0,0 +1,6 @@
+[print_schema]
+file = "src/schema.rs"
+patch_file = "schema.patch"
+
+[migrations_directory]
+dir = "migrations"
diff --git a/crates/sui-indexer-alt/generate_schema.sh b/crates/sui-indexer-alt/generate_schema.sh
new file mode 100755
index 0000000000000..c65b178011d33
--- /dev/null
+++ b/crates/sui-indexer-alt/generate_schema.sh
@@ -0,0 +1,77 @@
+#!/bin/bash
+# Copyright (c) Mysten Labs, Inc.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Update sui-indexer-alt's generated src/schema.rs based on the schema after
+# running all its migrations on a clean database. Expects the first argument to
+# be a port to run the temporary database on (defaults to 5433).
+
+set -x
+set -e
+
+if ! command -v git &> /dev/null; then
+    echo "Please install git: e.g. brew install git" >&2
+    exit 1
+fi
+
+for PG in psql initdb postgres pg_isready pg_ctl; do
+    if ! command -v $PG &> /dev/null; then
+        echo "Could not find $PG. Please install postgres: e.g. brew install postgresql@15" >&2
+        exit 1
+    fi
+done
+
+if ! command -v diesel &> /dev/null; then
+    echo "Please install diesel: e.g. cargo install diesel_cli --features postgres" >&2
+    exit 1
+fi
+
+REPO=$(git rev-parse --show-toplevel)
+
+# Create a temporary directory to store the ephemeral DB.
+TMP=$(mktemp -d)
+
+# Set-up a trap to clean everything up on EXIT (stop DB, delete temp directory)
+function cleanup {
+  pg_ctl stop -D "$TMP" -mfast
+  set +x
+  echo "Postgres STDOUT:"
+  cat "$TMP/db.stdout"
+  echo "Postgres STDERR:"
+  cat "$TMP/db.stderr"
+  set -x
+  rm -rf "$TMP"
+}
+trap cleanup EXIT
+
+# Create a new database in the temporary directory
+initdb -D "$TMP" --user postgres
+
+# Run the DB in the background, on the port provided and capture its output
+PORT=${1:-5433}
+postgres -D "$TMP" -p "$PORT" -c unix_socket_directories=                      \
+   > "$TMP/db.stdout"                                                          \
+  2> "$TMP/db.stderr"                                                          &
+
+# Wait for postgres to report as ready
+RETRIES=0
+while ! pg_isready -p "$PORT" --host "localhost" --username "postgres"; do
+  if [ $RETRIES -gt 5 ]; then
+    echo "Postgres failed to start" >&2
+    exit 1
+  fi
+  sleep 1
+  RETRIES=$((RETRIES + 1))
+done
+
+# Run all migrations on the new database
+diesel migration run                                                          \
+  --database-url "postgres://postgres:postgrespw@localhost:$PORT"             \
+  --migration-dir "$REPO/crates/sui-indexer-alt/migrations"
+
+# Generate the schema.rs file, applying the patch that adds the copyright
+# notice.
+diesel print-schema                                                           \
+  --database-url "postgres://postgres:postgrespw@localhost:$PORT"             \
+  --patch-file "$REPO/crates/sui-indexer-alt/schema.patch"                    \
+  > "$REPO/crates/sui-indexer-alt/src/schema.rs"
diff --git a/crates/sui-indexer-alt/migrations/00000000000000_diesel_initial_setup/down.sql b/crates/sui-indexer-alt/migrations/00000000000000_diesel_initial_setup/down.sql
new file mode 100644
index 0000000000000..a9f526091194b
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/00000000000000_diesel_initial_setup/down.sql
@@ -0,0 +1,6 @@
+-- This file was automatically created by Diesel to setup helper functions
+-- and other internal bookkeeping. This file is safe to edit, any future
+-- changes will be added to existing projects as new migrations.
+
+DROP FUNCTION IF EXISTS diesel_manage_updated_at(_tbl regclass);
+DROP FUNCTION IF EXISTS diesel_set_updated_at();
diff --git a/crates/sui-indexer-alt/migrations/00000000000000_diesel_initial_setup/up.sql b/crates/sui-indexer-alt/migrations/00000000000000_diesel_initial_setup/up.sql
new file mode 100644
index 0000000000000..d68895b1a7b7d
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/00000000000000_diesel_initial_setup/up.sql
@@ -0,0 +1,36 @@
+-- This file was automatically created by Diesel to setup helper functions
+-- and other internal bookkeeping. This file is safe to edit, any future
+-- changes will be added to existing projects as new migrations.
+
+
+
+
+-- Sets up a trigger for the given table to automatically set a column called
+-- `updated_at` whenever the row is modified (unless `updated_at` was included
+-- in the modified columns)
+--
+-- # Example
+--
+-- ```sql
+-- CREATE TABLE users (id SERIAL PRIMARY KEY, updated_at TIMESTAMP NOT NULL DEFAULT NOW());
+--
+-- SELECT diesel_manage_updated_at('users');
+-- ```
+CREATE OR REPLACE FUNCTION diesel_manage_updated_at(_tbl regclass) RETURNS VOID AS $$
+BEGIN
+    EXECUTE format('CREATE TRIGGER set_updated_at BEFORE UPDATE ON %s
+                    FOR EACH ROW EXECUTE PROCEDURE diesel_set_updated_at()', _tbl);
+END;
+$$ LANGUAGE plpgsql;
+
+CREATE OR REPLACE FUNCTION diesel_set_updated_at() RETURNS trigger AS $$
+BEGIN
+    IF (
+        NEW IS DISTINCT FROM OLD AND
+        NEW.updated_at IS NOT DISTINCT FROM OLD.updated_at
+    ) THEN
+        NEW.updated_at := current_timestamp;
+    END IF;
+    RETURN NEW;
+END;
+$$ LANGUAGE plpgsql;
diff --git a/crates/sui-indexer-alt/migrations/2024-10-14-123213_checkpoints/down.sql b/crates/sui-indexer-alt/migrations/2024-10-14-123213_checkpoints/down.sql
new file mode 100644
index 0000000000000..837ea1e8355cc
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-14-123213_checkpoints/down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS kv_checkpoints;
diff --git a/crates/sui-indexer-alt/migrations/2024-10-14-123213_checkpoints/up.sql b/crates/sui-indexer-alt/migrations/2024-10-14-123213_checkpoints/up.sql
new file mode 100644
index 0000000000000..f177da0844341
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-14-123213_checkpoints/up.sql
@@ -0,0 +1,6 @@
+CREATE TABLE IF NOT EXISTS kv_checkpoints
+(
+    sequence_number                     BIGINT       PRIMARY KEY,
+    certified_checkpoint                BYTEA        NOT NULL,
+    checkpoint_contents                 BYTEA        NOT NULL
+);
diff --git a/crates/sui-indexer-alt/migrations/2024-10-15-143704_objects/down.sql b/crates/sui-indexer-alt/migrations/2024-10-15-143704_objects/down.sql
new file mode 100644
index 0000000000000..5d09c2f77e34c
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-15-143704_objects/down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS kv_objects;
diff --git a/crates/sui-indexer-alt/migrations/2024-10-15-143704_objects/up.sql b/crates/sui-indexer-alt/migrations/2024-10-15-143704_objects/up.sql
new file mode 100644
index 0000000000000..471144af9840e
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-15-143704_objects/up.sql
@@ -0,0 +1,7 @@
+CREATE TABLE IF NOT EXISTS kv_objects
+(
+    object_id                   bytea         NOT NULL,
+    object_version              bigint        NOT NULL,
+    serialized_object           bytea,
+    PRIMARY KEY (object_id, object_version)
+);
diff --git a/crates/sui-indexer-alt/migrations/2024-10-15-170316_transactions/down.sql b/crates/sui-indexer-alt/migrations/2024-10-15-170316_transactions/down.sql
new file mode 100644
index 0000000000000..fa46db0f19143
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-15-170316_transactions/down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS kv_transactions;
diff --git a/crates/sui-indexer-alt/migrations/2024-10-15-170316_transactions/up.sql b/crates/sui-indexer-alt/migrations/2024-10-15-170316_transactions/up.sql
new file mode 100644
index 0000000000000..cb0cdd5d68b01
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-15-170316_transactions/up.sql
@@ -0,0 +1,12 @@
+CREATE TABLE IF NOT EXISTS kv_transactions
+(
+    tx_digest                   BYTEA         PRIMARY KEY,
+    cp_sequence_number          BIGINT        NOT NULL,
+    timestamp_ms                BIGINT        NOT NULL,
+    -- BCS serialized TransactionData
+    raw_transaction             BYTEA         NOT NULL,
+    -- BCS serialized TransactionEffects
+    raw_effects                 BYTEA         NOT NULL,
+    -- BCS serialized array of Events
+    events                      BYTEA         NOT NULL
+);
diff --git a/crates/sui-indexer-alt/migrations/2024-10-16-002409_tx_affected_objects/down.sql b/crates/sui-indexer-alt/migrations/2024-10-16-002409_tx_affected_objects/down.sql
new file mode 100644
index 0000000000000..b0868da73b0f2
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-16-002409_tx_affected_objects/down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS tx_affected_objects;
diff --git a/crates/sui-indexer-alt/migrations/2024-10-16-002409_tx_affected_objects/up.sql b/crates/sui-indexer-alt/migrations/2024-10-16-002409_tx_affected_objects/up.sql
new file mode 100644
index 0000000000000..5eeb8f40a3893
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-16-002409_tx_affected_objects/up.sql
@@ -0,0 +1,13 @@
+CREATE TABLE IF NOT EXISTS tx_affected_objects (
+    tx_sequence_number          BIGINT       NOT NULL,
+    -- Object ID of the object touched by this transaction.
+    affected                    BYTEA        NOT NULL,
+    sender                      BYTEA        NOT NULL,
+    PRIMARY KEY(affected, tx_sequence_number)
+);
+
+CREATE INDEX IF NOT EXISTS tx_affected_objects_tx_sequence_number
+ON tx_affected_objects (tx_sequence_number);
+
+CREATE INDEX IF NOT EXISTS tx_affected_objects_sender
+ON tx_affected_objects (sender, affected, tx_sequence_number);
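
The primary key and indexes above suggest cursor-based pagination over the transactions that touched a given object, narrowed to a sender. A minimal sketch of that query shape (the bind parameters `:object_id`, `:sender`, `:cursor`, and `:page_size` are placeholders, not part of this change):

```sql
-- Transactions from :sender that touched :object_id, newest first, resuming from :cursor.
SELECT tx_sequence_number
FROM   tx_affected_objects
WHERE  sender = :sender
  AND  affected = :object_id
  AND  tx_sequence_number < :cursor
ORDER BY tx_sequence_number DESC
LIMIT  :page_size;
```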
diff --git a/crates/sui-indexer-alt/migrations/2024-10-16-211445_tx_balance_changes/down.sql b/crates/sui-indexer-alt/migrations/2024-10-16-211445_tx_balance_changes/down.sql
new file mode 100644
index 0000000000000..e36b0a7736cc2
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-16-211445_tx_balance_changes/down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS tx_balance_changes;
diff --git a/crates/sui-indexer-alt/migrations/2024-10-16-211445_tx_balance_changes/up.sql b/crates/sui-indexer-alt/migrations/2024-10-16-211445_tx_balance_changes/up.sql
new file mode 100644
index 0000000000000..790c5aa14d543
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-16-211445_tx_balance_changes/up.sql
@@ -0,0 +1,6 @@
+CREATE TABLE IF NOT EXISTS tx_balance_changes
+(
+    tx_sequence_number          BIGINT        PRIMARY KEY,
+    -- BCS serialized array of BalanceChanges
+    balance_changes             BYTEA         NOT NULL
+);
diff --git a/crates/sui-indexer-alt/migrations/2024-10-16-225607_watermarks/down.sql b/crates/sui-indexer-alt/migrations/2024-10-16-225607_watermarks/down.sql
new file mode 100644
index 0000000000000..e9de336153f62
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-16-225607_watermarks/down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS watermarks;
diff --git a/crates/sui-indexer-alt/migrations/2024-10-16-225607_watermarks/up.sql b/crates/sui-indexer-alt/migrations/2024-10-16-225607_watermarks/up.sql
new file mode 100644
index 0000000000000..1fd0d890d29b1
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-16-225607_watermarks/up.sql
@@ -0,0 +1,38 @@
+CREATE TABLE IF NOT EXISTS watermarks
+(
+    -- The pipeline governed by this watermark, e.g. `epochs`, `checkpoints`,
+    -- `transactions`.
+    pipeline                    TEXT          PRIMARY KEY,
+    -- Inclusive upper epoch bound for this entity's data. Committer updates
+    -- this field. Pruner uses this to determine if pruning is necessary based
+    -- on the retention policy.
+    epoch_hi_inclusive          BIGINT        NOT NULL,
+    -- Inclusive upper checkpoint bound for this entity's data. Committer
+    -- updates this field. All data of this entity in the checkpoint must be
+    -- persisted before advancing this watermark. The committer refers to this
+    -- on disaster recovery to resume writing.
+    checkpoint_hi_inclusive     BIGINT        NOT NULL,
+    -- Exclusive upper transaction sequence number bound for this entity's
+    -- data. Committer updates this field.
+    tx_hi                       BIGINT        NOT NULL,
+    -- Inclusive upper timestamp bound (in milliseconds). Committer updates
+    -- this field once it can guarantee that all checkpoints at or before this
+    -- timestamp have been written to the database.
+    timestamp_ms_hi_inclusive   BIGINT        NOT NULL,
+    -- Inclusive lower epoch bound for this entity's data. Pruner updates this
+    -- field when the epoch range exceeds the retention policy.
+    epoch_lo                    BIGINT        NOT NULL,
+    -- Inclusive low watermark that the pruner advances. Corresponds to the
+    -- epoch id, checkpoint sequence number, or tx sequence number depending on
+    -- the entity. Data before this watermark is considered pruned by a reader.
+    -- The underlying data may still exist in the db instance.
+    reader_lo                   BIGINT        NOT NULL,
+    -- Updated using the database's current timestamp when the pruner sees that
+    -- some data needs to be dropped. The pruner uses this column to determine
+    -- whether to prune or wait long enough that all in-flight reads complete
+    -- or timeout before it acts on an updated watermark.
+    pruner_timestamp_ms         BIGINT        NOT NULL,
+    -- Column used by the pruner to track its true progress. Data below this
+    -- watermark can be immediately pruned.
+    pruner_hi                   BIGINT        NOT NULL
+);
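
Putting the column comments above together, a reader would typically consult a pipeline's watermark row before serving data, treating `reader_lo` as the inclusive lower bound and the `*_hi*` columns as upper bounds. A minimal sketch (the pipeline name comes from a handler's `NAME` constant, e.g. `kv_transactions`):

```sql
-- Bounds of readable data for the kv_transactions pipeline.
SELECT reader_lo, checkpoint_hi_inclusive, tx_hi, epoch_hi_inclusive
FROM   watermarks
WHERE  pipeline = 'kv_transactions';
```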
diff --git a/crates/sui-indexer-alt/migrations/2024-10-19-113135_ev_indices/down.sql b/crates/sui-indexer-alt/migrations/2024-10-19-113135_ev_indices/down.sql
new file mode 100644
index 0000000000000..b1948d3bcfee0
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-19-113135_ev_indices/down.sql
@@ -0,0 +1,2 @@
+DROP TABLE IF EXISTS ev_emit_mod;
+DROP TABLE IF EXISTS ev_struct_inst;
diff --git a/crates/sui-indexer-alt/migrations/2024-10-19-113135_ev_indices/up.sql b/crates/sui-indexer-alt/migrations/2024-10-19-113135_ev_indices/up.sql
new file mode 100644
index 0000000000000..8e553a30bfa0c
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-19-113135_ev_indices/up.sql
@@ -0,0 +1,56 @@
+CREATE TABLE IF NOT EXISTS ev_emit_mod
+(
+    package                     BYTEA,
+    module                      TEXT,
+    tx_sequence_number          BIGINT,
+    sender                      BYTEA         NOT NULL,
+    PRIMARY KEY(package, module, tx_sequence_number)
+);
+
+CREATE INDEX IF NOT EXISTS ev_emit_mod_tx_sequence_number
+ON ev_emit_mod (tx_sequence_number);
+
+CREATE INDEX IF NOT EXISTS ev_emit_mod_sender
+ON ev_emit_mod (sender, package, module, tx_sequence_number);
+
+CREATE INDEX IF NOT EXISTS ev_emit_pkg
+ON ev_emit_mod (package, tx_sequence_number);
+
+CREATE INDEX IF NOT EXISTS ev_emit_pkg_sender
+ON ev_emit_mod (sender, package, tx_sequence_number);
+
+CREATE TABLE IF NOT EXISTS ev_struct_inst
+(
+    package                     BYTEA,
+    module                      TEXT,
+    name                        TEXT,
+    -- BCS encoded array of TypeTags for type parameters.
+    instantiation               BYTEA,
+    tx_sequence_number          BIGINT,
+    sender                      BYTEA         NOT NULL,
+    PRIMARY KEY(package, module, name, instantiation, tx_sequence_number)
+);
+
+CREATE INDEX IF NOT EXISTS ev_struct_inst_tx_sequence_number
+ON ev_struct_inst (tx_sequence_number);
+
+CREATE INDEX IF NOT EXISTS ev_struct_inst_sender
+ON ev_struct_inst (sender, package, module, name, instantiation, tx_sequence_number);
+
+CREATE INDEX IF NOT EXISTS ev_struct_name
+ON ev_struct_inst (package, module, name, tx_sequence_number);
+
+CREATE INDEX IF NOT EXISTS ev_struct_name_sender
+ON ev_struct_inst (sender, package, module, name, tx_sequence_number);
+
+CREATE INDEX IF NOT EXISTS ev_struct_mod
+ON ev_struct_inst (package, module, tx_sequence_number);
+
+CREATE INDEX IF NOT EXISTS ev_struct_mod_sender
+ON ev_struct_inst (sender, package, module, tx_sequence_number);
+
+CREATE INDEX IF NOT EXISTS ev_struct_pkg
+ON ev_struct_inst (package, tx_sequence_number);
+
+CREATE INDEX IF NOT EXISTS ev_struct_pkg_sender
+ON ev_struct_inst (sender, package, tx_sequence_number);
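
These indexes are laid out so that event filters by sender, package, module, name, and instantiation can all be paginated by `tx_sequence_number`. As one hedged example (bind parameters are placeholders), the transactions in which a given sender emitted events from a given module might be fetched as:

```sql
SELECT tx_sequence_number
FROM   ev_emit_mod
WHERE  sender  = :sender
  AND  package = :package
  AND  module  = :module
  AND  tx_sequence_number > :cursor
ORDER BY tx_sequence_number
LIMIT  :page_size;
```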
diff --git a/crates/sui-indexer-alt/migrations/2024-10-27-150938_sum_obj_types/down.sql b/crates/sui-indexer-alt/migrations/2024-10-27-150938_sum_obj_types/down.sql
new file mode 100644
index 0000000000000..4056a62d85e54
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-27-150938_sum_obj_types/down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS sum_obj_types;
diff --git a/crates/sui-indexer-alt/migrations/2024-10-27-150938_sum_obj_types/up.sql b/crates/sui-indexer-alt/migrations/2024-10-27-150938_sum_obj_types/up.sql
new file mode 100644
index 0000000000000..4658689f7823a
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-27-150938_sum_obj_types/up.sql
@@ -0,0 +1,62 @@
+-- A summary table of live objects, with owner and type information
+--
+-- This can be used to paginate the live object set at an instant in time,
+-- filtering by a combination of owner and/or type.
+CREATE TABLE IF NOT EXISTS sum_obj_types
+(
+    object_id                   BYTEA         PRIMARY KEY,
+    object_version              BIGINT        NOT NULL,
+    -- An enum describing the object's ownership model:
+    --
+    --   Immutable = 0,
+    --   Address-owned = 1,
+    --   Object-owned (dynamic field) = 2,
+    --   Shared = 3.
+    --
+    -- Note that there is a distinction between an object that is owned by
+    -- another object (kind 2), which relates to dynamic fields, and an object
+    -- that is owned by another object's address (kind 1), which relates to
+    -- transfer-to-object.
+    owner_kind                  SMALLINT      NOT NULL,
+    -- The address for address-owned objects, and the parent object for
+    -- object-owned objects.
+    owner_id                    BYTEA,
+    -- The following fields relate to the object's type. These only apply to
+    -- Move Objects. For Move Packages they will all be NULL.
+    --
+    -- The type's package ID.
+    package                     BYTEA,
+    -- The type's module name.
+    module                      TEXT,
+    -- The type's name.
+    name                        TEXT,
+    -- The type's type parameters, as a BCS-encoded array of TypeTags.
+    instantiation               BYTEA
+);
+
+CREATE INDEX IF NOT EXISTS sum_obj_types_owner
+ON sum_obj_types (owner_kind, owner_id, object_id, object_version);
+
+CREATE INDEX IF NOT EXISTS sum_obj_types_pkg
+ON sum_obj_types (package, object_id, object_version);
+
+CREATE INDEX IF NOT EXISTS sum_obj_types_mod
+ON sum_obj_types (package, module, object_id, object_version);
+
+CREATE INDEX IF NOT EXISTS sum_obj_types_name
+ON sum_obj_types (package, module, name, object_id, object_version);
+
+CREATE INDEX IF NOT EXISTS sum_obj_types_inst
+ON sum_obj_types (package, module, name, instantiation, object_id, object_version);
+
+CREATE INDEX IF NOT EXISTS sum_obj_types_owner_pkg
+ON sum_obj_types (owner_kind, owner_id, package, object_id, object_version);
+
+CREATE INDEX IF NOT EXISTS sum_obj_types_owner_mod
+ON sum_obj_types (owner_kind, owner_id, package, module, object_id, object_version);
+
+CREATE INDEX IF NOT EXISTS sum_obj_types_owner_name
+ON sum_obj_types (owner_kind, owner_id, package, module, name, object_id, object_version);
+
+CREATE INDEX IF NOT EXISTS sum_obj_types_owner_inst
+ON sum_obj_types (owner_kind, owner_id, package, module, name, instantiation, object_id, object_version);
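
As the header comment says, this table is meant for paginating the live object set by owner and/or type. A sketch of one such page, assuming placeholder bind parameters (`:owner`, `:package`, `:module`, `:name`, `:cursor`, `:page_size`):

```sql
-- Live, address-owned objects of a given Move type, paginated by object ID.
SELECT object_id, object_version
FROM   sum_obj_types
WHERE  owner_kind = 1            -- address-owned
  AND  owner_id   = :owner
  AND  package    = :package
  AND  module     = :module
  AND  name       = :name
  AND  object_id  > :cursor
ORDER BY object_id
LIMIT  :page_size;
```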
diff --git a/crates/sui-indexer-alt/migrations/2024-10-28-144002_sum_coin_balances/down.sql b/crates/sui-indexer-alt/migrations/2024-10-28-144002_sum_coin_balances/down.sql
new file mode 100644
index 0000000000000..68b45da3c6d9a
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-28-144002_sum_coin_balances/down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS sum_coin_balances;
diff --git a/crates/sui-indexer-alt/migrations/2024-10-28-144002_sum_coin_balances/up.sql b/crates/sui-indexer-alt/migrations/2024-10-28-144002_sum_coin_balances/up.sql
new file mode 100644
index 0000000000000..dbd93cc74539c
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-28-144002_sum_coin_balances/up.sql
@@ -0,0 +1,20 @@
+-- A summary table for coins owned by addresses
+--
+-- This can be used to paginate the coin balances of a given address at an
+-- instant in time, returning coins in descending balance order.
+CREATE TABLE IF NOT EXISTS sum_coin_balances
+(
+    object_id                   BYTEA         PRIMARY KEY,
+    object_version              BIGINT        NOT NULL,
+    -- The address that owns this version of the coin (it is guaranteed to be
+    -- address-owned).
+    owner_id                    BYTEA         NOT NULL,
+    -- The type of the coin, as a BCS-serialized `TypeTag`. This is only the
+    -- marker type, and not the full object type (e.g. `0x0...02::sui::SUI`).
+    coin_type                   BYTEA         NOT NULL,
+    -- The balance of the coin at this version.
+    coin_balance                BIGINT        NOT NULL
+);
+
+CREATE INDEX IF NOT EXISTS sum_coin_balances_owner_type
+ON sum_coin_balances (owner_id, coin_type, coin_balance, object_id, object_version);
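
The single index above matches the stated access pattern: an owner's coins of a given type, in descending balance order. A minimal sketch with placeholder bind parameters:

```sql
SELECT object_id, object_version, coin_balance
FROM   sum_coin_balances
WHERE  owner_id  = :owner
  AND  coin_type = :coin_type
ORDER BY coin_balance DESC, object_id DESC
LIMIT  :page_size;
```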
diff --git a/crates/sui-indexer-alt/migrations/2024-10-30-142219_obj_versions/down.sql b/crates/sui-indexer-alt/migrations/2024-10-30-142219_obj_versions/down.sql
new file mode 100644
index 0000000000000..f32188e89f020
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-30-142219_obj_versions/down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS obj_versions;
diff --git a/crates/sui-indexer-alt/migrations/2024-10-30-142219_obj_versions/up.sql b/crates/sui-indexer-alt/migrations/2024-10-30-142219_obj_versions/up.sql
new file mode 100644
index 0000000000000..31939132ae0b5
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-30-142219_obj_versions/up.sql
@@ -0,0 +1,14 @@
+-- This table is used to answer queries of the form: Give me the latest version
+-- of an object O with version less than or equal to V at checkpoint C. These
+-- are useful for looking up dynamic fields on objects (live or historical).
+CREATE TABLE IF NOT EXISTS obj_versions
+(
+    object_id                   BYTEA         NOT NULL,
+    object_version              BIGINT        NOT NULL,
+    object_digest               BYTEA         NOT NULL,
+    cp_sequence_number          BIGINT        NOT NULL,
+    PRIMARY KEY (object_id, object_version)
+);
+
+CREATE INDEX IF NOT EXISTS obj_versions_cp_sequence_number
+ON obj_versions (cp_sequence_number);
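
A hedged sketch of the query shape described in the header comment, i.e. the latest version of object `O` no greater than `V` that existed by checkpoint `C` (all three are placeholder bind parameters):

```sql
SELECT object_id, object_version, object_digest
FROM   obj_versions
WHERE  object_id = :object_id
  AND  object_version <= :version_bound
  AND  cp_sequence_number <= :checkpoint
ORDER BY object_version DESC
LIMIT  1;
```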
diff --git a/crates/sui-indexer-alt/migrations/2024-10-30-214852_wal_obj_types/down.sql b/crates/sui-indexer-alt/migrations/2024-10-30-214852_wal_obj_types/down.sql
new file mode 100644
index 0000000000000..e9a511867961e
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-30-214852_wal_obj_types/down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS wal_obj_types;
diff --git a/crates/sui-indexer-alt/migrations/2024-10-30-214852_wal_obj_types/up.sql b/crates/sui-indexer-alt/migrations/2024-10-30-214852_wal_obj_types/up.sql
new file mode 100644
index 0000000000000..b7149fc50b29c
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-30-214852_wal_obj_types/up.sql
@@ -0,0 +1,76 @@
+-- Write-ahead log for `sum_obj_types`.
+--
+-- It contains the same columns and indices as `sum_obj_types`, but with the
+-- following changes:
+--
+-- - A `cp_sequence_number` column (and an index on it), to support pruning by
+--   checkpoint.
+--
+-- - The primary key includes the version, as the table may contain multiple
+--   versions per object ID.
+--
+-- - The `owner_kind` column is nullable, because this table also tracks
+--   deleted and wrapped objects (where all the fields except the ID, version,
+--   and checkpoint are NULL).
+--
+-- - There is an additional index on ID and version for querying the latest
+--   version of every object.
+--
+-- This table is used in conjunction with `sum_obj_types` to support consistent
+-- live object set queries: `sum_obj_types` holds the state of the live object
+-- set at some checkpoint `C < T` where `T` is the tip of the chain, and
+-- `wal_obj_types` stores all the updates and deletes between `C` and `T`.
+--
+-- To reconstruct the live object set at some snapshot checkpoint `S`
+-- between `C` and `T`, a query can be constructed that starts with the set
+-- from `sum_obj_types` and adds updates in `wal_obj_types` from
+-- `cp_sequence_number <= S`.
+--
+-- See `up.sql` for the original `sum_obj_types` table for documentation on
+-- columns.
+CREATE TABLE IF NOT EXISTS wal_obj_types
+(
+    object_id                   BYTEA         NOT NULL,
+    object_version              BIGINT        NOT NULL,
+    owner_kind                  SMALLINT,
+    owner_id                    BYTEA,
+    package                     BYTEA,
+    module                      TEXT,
+    name                        TEXT,
+    instantiation               BYTEA,
+    cp_sequence_number          BIGINT        NOT NULL,
+    PRIMARY KEY (object_id, object_version)
+);
+
+CREATE INDEX IF NOT EXISTS wal_obj_types_cp_sequence_number
+ON wal_obj_types (cp_sequence_number);
+
+CREATE INDEX IF NOT EXISTS wal_obj_types_version
+ON wal_obj_types (object_id, object_version);
+
+CREATE INDEX IF NOT EXISTS wal_obj_types_owner
+ON wal_obj_types (owner_kind, owner_id, object_id, object_version);
+
+CREATE INDEX IF NOT EXISTS wal_obj_types_pkg
+ON wal_obj_types (package, object_id, object_version);
+
+CREATE INDEX IF NOT EXISTS wal_obj_types_mod
+ON wal_obj_types (package, module, object_id, object_version);
+
+CREATE INDEX IF NOT EXISTS wal_obj_types_name
+ON wal_obj_types (package, module, name, object_id, object_version);
+
+CREATE INDEX IF NOT EXISTS wal_obj_types_inst
+ON wal_obj_types (package, module, name, instantiation, object_id, object_version);
+
+CREATE INDEX IF NOT EXISTS wal_obj_types_owner_pkg
+ON wal_obj_types (owner_kind, owner_id, package, object_id, object_version);
+
+CREATE INDEX IF NOT EXISTS wal_obj_types_owner_mod
+ON wal_obj_types (owner_kind, owner_id, package, module, object_id, object_version);
+
+CREATE INDEX IF NOT EXISTS wal_obj_types_owner_name
+ON wal_obj_types (owner_kind, owner_id, package, module, name, object_id, object_version);
+
+CREATE INDEX IF NOT EXISTS wal_obj_types_owner_inst
+ON wal_obj_types (owner_kind, owner_id, package, module, name, instantiation, object_id, object_version);
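
To make the reconstruction described in the header comment concrete, here is one possible sketch (not necessarily the indexer's actual reader query) of the live object set at a snapshot checkpoint `:s`: take each object's newest write-ahead record at or before `:s`, fall back to `sum_obj_types` where the WAL has no record, and drop objects whose winning record is a deletion/wrap (signalled by a NULL `owner_kind`):

```sql
WITH latest_wal AS (
    SELECT DISTINCT ON (object_id)
           object_id, object_version, owner_kind, owner_id
    FROM   wal_obj_types
    WHERE  cp_sequence_number <= :s
    ORDER BY object_id, object_version DESC
),
combined AS (
    SELECT object_id, object_version, owner_kind, owner_id FROM latest_wal
    UNION ALL
    SELECT s.object_id, s.object_version, s.owner_kind, s.owner_id
    FROM   sum_obj_types s
    WHERE  NOT EXISTS (SELECT 1 FROM latest_wal w WHERE w.object_id = s.object_id)
)
SELECT object_id, object_version, owner_kind, owner_id
FROM   combined
WHERE  owner_kind IS NOT NULL;  -- objects deleted or wrapped by :s are excluded
```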
diff --git a/crates/sui-indexer-alt/migrations/2024-10-30-232206_wal_coin_balances/down.sql b/crates/sui-indexer-alt/migrations/2024-10-30-232206_wal_coin_balances/down.sql
new file mode 100644
index 0000000000000..a60919b661e84
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-30-232206_wal_coin_balances/down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS wal_coin_balances;
diff --git a/crates/sui-indexer-alt/migrations/2024-10-30-232206_wal_coin_balances/up.sql b/crates/sui-indexer-alt/migrations/2024-10-30-232206_wal_coin_balances/up.sql
new file mode 100644
index 0000000000000..9a78eeea9303b
--- /dev/null
+++ b/crates/sui-indexer-alt/migrations/2024-10-30-232206_wal_coin_balances/up.sql
@@ -0,0 +1,49 @@
+-- Write-ahead log for `sum_coin_balances`.
+--
+-- It contains the same columns and indices as `sum_coin_balances`, but with
+-- the following changes:
+--
+-- - A `cp_sequence_number` column (and an index on it), to support pruning by
+--   checkpoint.
+--
+-- - The primary key includes the version, as the table may contain multiple
+--   versions per object ID.
+--
+-- - The other fields are nullable, because this table also tracks deleted and
+--   wrapped objects.
+--
+-- - There is an additional index on ID and version for querying the latest
+--   version of every object.
+--
+-- This table is used in conjunction with `sum_coin_balances` to support
+-- consistent live object set queries: `sum_coin_balances` holds the state of
+-- the live object set at some checkpoint `C < T` where `T` is the tip of the
+-- chain, and `wal_coin_balances` stores all the updates and deletes between
+-- `C` and `T`.
+--
+-- To reconstruct the live object set at some snapshot checkpoint `S`
+-- between `C` and `T`, a query can be constructed that starts with the set
+-- from `sum_coin_balances` and adds updates in `wal_coin_balances` from
+-- `cp_sequence_number <= S`.
+--
+-- See `up.sql` for the original `sum_coin_balances` table for documentation on
+-- columns.
+CREATE TABLE IF NOT EXISTS wal_coin_balances
+(
+    object_id                   BYTEA         NOT NULL,
+    object_version              BIGINT        NOT NULL,
+    owner_id                    BYTEA,
+    coin_type                   BYTEA,
+    coin_balance                BIGINT,
+    cp_sequence_number          BIGINT        NOT NULL,
+    PRIMARY KEY (object_id, object_version)
+);
+
+CREATE INDEX IF NOT EXISTS wal_coin_balances_cp_sequence_number
+ON wal_coin_balances (cp_sequence_number);
+
+CREATE INDEX IF NOT EXISTS wal_coin_balances_version
+ON wal_coin_balances (object_id, object_version);
+
+CREATE INDEX IF NOT EXISTS wal_coin_balances_owner_type
+ON wal_coin_balances (owner_id, coin_type, coin_balance, object_id, object_version);
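
The same overlay pattern applies here; as a hedged example, an owner's total balance per coin type at snapshot `:s` (placeholder bind parameters again) could be sketched as:

```sql
WITH latest_wal AS (
    SELECT DISTINCT ON (object_id)
           object_id, object_version, owner_id, coin_type, coin_balance
    FROM   wal_coin_balances
    WHERE  cp_sequence_number <= :s
    ORDER BY object_id, object_version DESC
),
combined AS (
    SELECT * FROM latest_wal
    UNION ALL
    SELECT s.object_id, s.object_version, s.owner_id, s.coin_type, s.coin_balance
    FROM   sum_coin_balances s
    WHERE  NOT EXISTS (SELECT 1 FROM latest_wal w WHERE w.object_id = s.object_id)
)
SELECT coin_type, SUM(coin_balance) AS total_balance
FROM   combined
WHERE  owner_id = :owner
  AND  coin_type IS NOT NULL       -- NULL columns mark deleted/wrapped coins
GROUP BY coin_type;
```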
diff --git a/crates/sui-indexer-alt/schema.patch b/crates/sui-indexer-alt/schema.patch
new file mode 100644
index 0000000000000..ee683461f7a7d
--- /dev/null
+++ b/crates/sui-indexer-alt/schema.patch
@@ -0,0 +1,7 @@
+diff --git a/crates/sui-indexer-alt/src/schema.rs b/crates/sui-indexer-alt/src/schema.rs
+--- a/crates/sui-indexer-alt/src/schema.rs
++++ b/crates/sui-indexer-alt/src/schema.rs
+@@ -1 +1,3 @@
++// Copyright (c) Mysten Labs, Inc.
++// SPDX-License-Identifier: Apache-2.0
+ // @generated automatically by Diesel CLI.
diff --git a/crates/sui-indexer-alt/src/args.rs b/crates/sui-indexer-alt/src/args.rs
new file mode 100644
index 0000000000000..fd6fa3e20bd84
--- /dev/null
+++ b/crates/sui-indexer-alt/src/args.rs
@@ -0,0 +1,37 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use crate::db::DbConfig;
+use crate::IndexerConfig;
+use clap::Subcommand;
+
+#[derive(clap::Parser, Debug, Clone)]
+pub struct Args {
+    #[command(flatten)]
+    pub db_config: DbConfig,
+
+    #[command(subcommand)]
+    pub command: Command,
+}
+
+#[allow(clippy::large_enum_variant)]
+#[derive(Subcommand, Clone, Debug)]
+pub enum Command {
+    /// Run the indexer.
+    Indexer {
+        #[command(flatten)]
+        indexer: IndexerConfig,
+
+        /// Number of checkpoints to delay indexing summary tables for.
+        #[clap(long)]
+        consistent_range: Option<u64>,
+    },
+
+    /// Wipe the database of its contents
+    ResetDatabase {
+        /// If true, only drop all tables but do not run the migrations.
+        /// That is, no tables will exist in the DB after the reset.
+        #[clap(long, default_value_t = false)]
+        skip_migrations: bool,
+    },
+}
diff --git a/crates/sui-indexer-alt/src/db.rs b/crates/sui-indexer-alt/src/db.rs
new file mode 100644
index 0000000000000..cc2658099c742
--- /dev/null
+++ b/crates/sui-indexer-alt/src/db.rs
@@ -0,0 +1,158 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use anyhow::anyhow;
+use diesel::migration::MigrationVersion;
+use diesel_async::async_connection_wrapper::AsyncConnectionWrapper;
+use diesel_async::{
+    pooled_connection::{
+        bb8::{Pool, PooledConnection, RunError},
+        AsyncDieselConnectionManager, PoolError,
+    },
+    AsyncPgConnection, RunQueryDsl,
+};
+use diesel_migrations::{embed_migrations, EmbeddedMigrations};
+use std::time::Duration;
+use tracing::info;
+use url::Url;
+
+const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations");
+
+#[derive(Clone)]
+pub struct Db {
+    pool: Pool<AsyncPgConnection>,
+}
+
+#[derive(clap::Args, Debug, Clone)]
+pub struct DbConfig {
+    /// The URL of the database to connect to.
+    #[arg(long)]
+    database_url: Url,
+
+    /// Number of connections to keep in the pool.
+    #[arg(long, default_value_t = 100)]
+    connection_pool_size: u32,
+
+    /// Time spent waiting for a connection from the pool to become available.
+    #[arg(
+        long,
+        default_value = "60",
+        value_name = "SECONDS",
+        value_parser = |s: &str| s.parse().map(Duration::from_secs)
+    )]
+    connection_timeout: Duration,
+}
+
+pub type Connection<'p> = PooledConnection<'p, AsyncPgConnection>;
+
+impl Db {
+    /// Construct a new DB connection pool. Instances of [Db] can be cloned to share access to the
+    /// same pool.
+    pub async fn new(config: DbConfig) -> Result<Self, PoolError> {
+        let manager = AsyncDieselConnectionManager::new(config.database_url.as_str());
+
+        let pool = Pool::builder()
+            .max_size(config.connection_pool_size)
+            .connection_timeout(config.connection_timeout)
+            .build(manager)
+            .await?;
+
+        Ok(Self { pool })
+    }
+
+    /// Retrieves a connection from the pool. Can fail with a timeout if a connection cannot be
+    /// established before the [DbConfig::connection_timeout] has elapsed.
+    pub(crate) async fn connect(&self) -> Result<Connection<'_>, RunError> {
+        self.pool.get().await
+    }
+
+    /// Statistics about the connection pool
+    pub(crate) fn state(&self) -> bb8::State {
+        self.pool.state()
+    }
+
+    async fn clear_database(&self) -> Result<(), anyhow::Error> {
+        info!("Clearing the database...");
+        let mut conn = self.connect().await?;
+        let drop_all_tables = "
+        DO $$ DECLARE
+            r RECORD;
+        BEGIN
+        FOR r IN (SELECT tablename FROM pg_tables WHERE schemaname = 'public')
+            LOOP
+                EXECUTE 'DROP TABLE IF EXISTS ' || quote_ident(r.tablename) || ' CASCADE';
+            END LOOP;
+        END $$;";
+        diesel::sql_query(drop_all_tables)
+            .execute(&mut conn)
+            .await?;
+        info!("Dropped all tables.");
+
+        let drop_all_procedures = "
+        DO $$ DECLARE
+            r RECORD;
+        BEGIN
+            FOR r IN (SELECT proname, oidvectortypes(proargtypes) as argtypes
+                      FROM pg_proc INNER JOIN pg_namespace ns ON (pg_proc.pronamespace = ns.oid)
+                      WHERE ns.nspname = 'public' AND prokind = 'p')
+            LOOP
+                EXECUTE 'DROP PROCEDURE IF EXISTS ' || quote_ident(r.proname) || '(' || r.argtypes || ') CASCADE';
+            END LOOP;
+        END $$;";
+        diesel::sql_query(drop_all_procedures)
+            .execute(&mut conn)
+            .await?;
+        info!("Dropped all procedures.");
+
+        let drop_all_functions = "
+        DO $$ DECLARE
+            r RECORD;
+        BEGIN
+            FOR r IN (SELECT proname, oidvectortypes(proargtypes) as argtypes
+                      FROM pg_proc INNER JOIN pg_namespace ON (pg_proc.pronamespace = pg_namespace.oid)
+                      WHERE pg_namespace.nspname = 'public' AND prokind = 'f')
+            LOOP
+                EXECUTE 'DROP FUNCTION IF EXISTS ' || quote_ident(r.proname) || '(' || r.argtypes || ') CASCADE';
+            END LOOP;
+        END $$;";
+        diesel::sql_query(drop_all_functions)
+            .execute(&mut conn)
+            .await?;
+        info!("Database cleared.");
+        Ok(())
+    }
+
+    pub(crate) async fn run_migrations(
+        &self,
+    ) -> Result<Vec<MigrationVersion<'static>>, anyhow::Error> {
+        use diesel_migrations::MigrationHarness;
+
+        info!("Running migrations ...");
+        let conn = self.pool.dedicated_connection().await?;
+        let mut wrapper: AsyncConnectionWrapper<AsyncPgConnection> =
+            diesel_async::async_connection_wrapper::AsyncConnectionWrapper::from(conn);
+
+        let finished_migrations = tokio::task::spawn_blocking(move || {
+            wrapper
+                .run_pending_migrations(MIGRATIONS)
+                .map(|versions| versions.iter().map(MigrationVersion::as_owned).collect())
+        })
+        .await?
+        .map_err(|e| anyhow!("Failed to run migrations: {:?}", e))?;
+        info!("Migrations complete.");
+        Ok(finished_migrations)
+    }
+}
+
+/// Drop all tables and re-run migrations.
+pub async fn reset_database(
+    db_config: DbConfig,
+    skip_migrations: bool,
+) -> Result<(), anyhow::Error> {
+    let db = Db::new(db_config).await?;
+    db.clear_database().await?;
+    if !skip_migrations {
+        db.run_migrations().await?;
+    }
+    Ok(())
+}
diff --git a/crates/sui-indexer-alt/src/handlers/ev_emit_mod.rs b/crates/sui-indexer-alt/src/handlers/ev_emit_mod.rs
new file mode 100644
index 0000000000000..bc3e5f607c2c8
--- /dev/null
+++ b/crates/sui-indexer-alt/src/handlers/ev_emit_mod.rs
@@ -0,0 +1,62 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::{collections::BTreeSet, sync::Arc};
+
+use anyhow::Result;
+use diesel_async::RunQueryDsl;
+use sui_types::full_checkpoint_content::CheckpointData;
+
+use crate::{
+    db, models::events::StoredEvEmitMod, pipeline::concurrent::Handler, pipeline::Processor,
+    schema::ev_emit_mod,
+};
+pub struct EvEmitMod;
+
+impl Processor for EvEmitMod {
+    const NAME: &'static str = "ev_emit_mod";
+
+    type Value = StoredEvEmitMod;
+
+    fn process(checkpoint: &Arc<CheckpointData>) -> Result<Vec<Self::Value>> {
+        let CheckpointData {
+            transactions,
+            checkpoint_summary,
+            ..
+        } = checkpoint.as_ref();
+
+        let mut values = BTreeSet::new();
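+        // `network_total_transactions` includes this checkpoint's transactions, so subtracting
+        // the checkpoint's own count gives the sequence number of its first transaction.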
+        let first_tx = checkpoint_summary.network_total_transactions as usize - transactions.len();
+
+        for (i, tx) in transactions.iter().enumerate() {
+            values.extend(
+                tx.events
+                    .iter()
+                    .flat_map(|evs| &evs.data)
+                    .map(|ev| StoredEvEmitMod {
+                        package: ev.package_id.to_vec(),
+                        module: ev.transaction_module.to_string(),
+                        tx_sequence_number: (first_tx + i) as i64,
+                        sender: ev.sender.to_vec(),
+                    }),
+            );
+        }
+
+        Ok(values.into_iter().collect())
+    }
+}
+
+#[async_trait::async_trait]
+impl Handler for EvEmitMod {
+    const MIN_EAGER_ROWS: usize = 100;
+    const MAX_CHUNK_ROWS: usize = 1000;
+    const MAX_PENDING_ROWS: usize = 10000;
+
+    async fn commit(values: &[Self::Value], conn: &mut db::Connection<'_>) -> Result<usize> {
+        Ok(diesel::insert_into(ev_emit_mod::table)
+            .values(values)
+            .on_conflict_do_nothing()
+            .execute(conn)
+            .await?)
+    }
+}
diff --git a/crates/sui-indexer-alt/src/handlers/ev_struct_inst.rs b/crates/sui-indexer-alt/src/handlers/ev_struct_inst.rs
new file mode 100644
index 0000000000000..0a55d60172752
--- /dev/null
+++ b/crates/sui-indexer-alt/src/handlers/ev_struct_inst.rs
@@ -0,0 +1,66 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::{collections::BTreeSet, sync::Arc};
+
+use anyhow::{Context, Result};
+use diesel_async::RunQueryDsl;
+use sui_types::full_checkpoint_content::CheckpointData;
+
+use crate::{
+    db, models::events::StoredEvStructInst, pipeline::concurrent::Handler, pipeline::Processor,
+    schema::ev_struct_inst,
+};
+
+pub struct EvStructInst;
+
+impl Processor for EvStructInst {
+    const NAME: &'static str = "ev_struct_inst";
+
+    type Value = StoredEvStructInst;
+
+    fn process(checkpoint: &Arc<CheckpointData>) -> Result<Vec<Self::Value>> {
+        let CheckpointData {
+            transactions,
+            checkpoint_summary,
+            ..
+        } = checkpoint.as_ref();
+
+        let mut values = BTreeSet::new();
+        let first_tx = checkpoint_summary.network_total_transactions as usize - transactions.len();
+
+        for (i, tx) in transactions.iter().enumerate() {
+            let tx_sequence_number = (first_tx + i) as i64;
+            for (j, ev) in tx.events.iter().flat_map(|evs| evs.data.iter().enumerate()) {
+                values.insert(StoredEvStructInst {
+                    package: ev.type_.address.to_vec(),
+                    module: ev.type_.module.to_string(),
+                    name: ev.type_.name.to_string(),
+                    instantiation: bcs::to_bytes(&ev.type_.type_params)
+                        .with_context(|| format!(
+                            "Failed to serialize type parameters for event ({tx_sequence_number}, {j})"
+                        ))?,
+                    tx_sequence_number: (first_tx + i) as i64,
+                    sender: ev.sender.to_vec(),
+                });
+            }
+        }
+
+        Ok(values.into_iter().collect())
+    }
+}
+
+#[async_trait::async_trait]
+impl Handler for EvStructInst {
+    const MIN_EAGER_ROWS: usize = 100;
+    const MAX_CHUNK_ROWS: usize = 1000;
+    const MAX_PENDING_ROWS: usize = 10000;
+
+    async fn commit(values: &[Self::Value], conn: &mut db::Connection<'_>) -> Result<usize> {
+        Ok(diesel::insert_into(ev_struct_inst::table)
+            .values(values)
+            .on_conflict_do_nothing()
+            .execute(conn)
+            .await?)
+    }
+}
diff --git a/crates/sui-indexer-alt/src/handlers/kv_checkpoints.rs b/crates/sui-indexer-alt/src/handlers/kv_checkpoints.rs
new file mode 100644
index 0000000000000..ede9640b1f44b
--- /dev/null
+++ b/crates/sui-indexer-alt/src/handlers/kv_checkpoints.rs
@@ -0,0 +1,43 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::sync::Arc;
+
+use anyhow::{Context, Result};
+use diesel_async::RunQueryDsl;
+use sui_types::full_checkpoint_content::CheckpointData;
+
+use crate::{
+    db, models::checkpoints::StoredCheckpoint, pipeline::concurrent::Handler, pipeline::Processor,
+    schema::kv_checkpoints,
+};
+
+pub struct KvCheckpoints;
+
+impl Processor for KvCheckpoints {
+    const NAME: &'static str = "kv_checkpoints";
+
+    type Value = StoredCheckpoint;
+
+    fn process(checkpoint: &Arc<CheckpointData>) -> Result<Vec<Self::Value>> {
+        let sequence_number = checkpoint.checkpoint_summary.sequence_number as i64;
+        Ok(vec![StoredCheckpoint {
+            sequence_number,
+            certified_checkpoint: bcs::to_bytes(&checkpoint.checkpoint_summary)
+                .with_context(|| format!("Serializing checkpoint {sequence_number} summary"))?,
+            checkpoint_contents: bcs::to_bytes(&checkpoint.checkpoint_contents)
+                .with_context(|| format!("Serializing checkpoint {sequence_number} contents"))?,
+        }])
+    }
+}
+
+#[async_trait::async_trait]
+impl Handler for KvCheckpoints {
+    async fn commit(values: &[Self::Value], conn: &mut db::Connection<'_>) -> Result<usize> {
+        Ok(diesel::insert_into(kv_checkpoints::table)
+            .values(values)
+            .on_conflict_do_nothing()
+            .execute(conn)
+            .await?)
+    }
+}
diff --git a/crates/sui-indexer-alt/src/handlers/kv_objects.rs b/crates/sui-indexer-alt/src/handlers/kv_objects.rs
new file mode 100644
index 0000000000000..f645cceab347f
--- /dev/null
+++ b/crates/sui-indexer-alt/src/handlers/kv_objects.rs
@@ -0,0 +1,69 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::sync::Arc;
+
+use anyhow::{Context, Result};
+use diesel_async::RunQueryDsl;
+use sui_types::full_checkpoint_content::CheckpointData;
+
+use crate::{
+    db, models::objects::StoredObject, pipeline::concurrent::Handler, pipeline::Processor,
+    schema::kv_objects,
+};
+
+pub struct KvObjects;
+
+impl Processor for KvObjects {
+    const NAME: &'static str = "kv_objects";
+    type Value = StoredObject;
+
+    fn process(checkpoint: &Arc<CheckpointData>) -> Result<Vec<Self::Value>> {
+        let deleted_objects = checkpoint
+            .eventually_removed_object_refs_post_version()
+            .into_iter()
+            .map(|(id, version, _)| {
+                Ok(StoredObject {
+                    object_id: id.to_vec(),
+                    object_version: version.value() as i64,
+                    serialized_object: None,
+                })
+            });
+
+        let created_objects =
+            checkpoint
+                .transactions
+                .iter()
+                .flat_map(|txn| txn.output_objects.iter())
+                .map(|o| {
+                    let id = o.id();
+                    let version = o.version().value();
+                    Ok(StoredObject {
+                        object_id: id.to_vec(),
+                        object_version: version as i64,
+                        serialized_object: Some(bcs::to_bytes(o).with_context(|| {
+                            format!("Serializing object {id} version {version}")
+                        })?),
+                    })
+                });
+
+        deleted_objects
+            .chain(created_objects)
+            .collect::<Result<Vec<_>, _>>()
+    }
+}
+
+#[async_trait::async_trait]
+impl Handler for KvObjects {
+    const MIN_EAGER_ROWS: usize = 100;
+    const MAX_CHUNK_ROWS: usize = 1000;
+    const MAX_PENDING_ROWS: usize = 10000;
+
+    async fn commit(values: &[Self::Value], conn: &mut db::Connection<'_>) -> Result<usize> {
+        Ok(diesel::insert_into(kv_objects::table)
+            .values(values)
+            .on_conflict_do_nothing()
+            .execute(conn)
+            .await?)
+    }
+}
diff --git a/crates/sui-indexer-alt/src/handlers/kv_transactions.rs b/crates/sui-indexer-alt/src/handlers/kv_transactions.rs
new file mode 100644
index 0000000000000..d3144032705d6
--- /dev/null
+++ b/crates/sui-indexer-alt/src/handlers/kv_transactions.rs
@@ -0,0 +1,72 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::sync::Arc;
+
+use anyhow::{Context, Result};
+use diesel_async::RunQueryDsl;
+use sui_types::full_checkpoint_content::CheckpointData;
+
+use crate::{
+    db, models::transactions::StoredTransaction, pipeline::concurrent::Handler,
+    pipeline::Processor, schema::kv_transactions,
+};
+
+pub struct KvTransactions;
+
+impl Processor for KvTransactions {
+    const NAME: &'static str = "kv_transactions";
+
+    type Value = StoredTransaction;
+
+    fn process(checkpoint: &Arc<CheckpointData>) -> Result<Vec<Self::Value>> {
+        let CheckpointData {
+            transactions,
+            checkpoint_summary,
+            ..
+        } = checkpoint.as_ref();
+
+        let cp_sequence_number = checkpoint_summary.sequence_number as i64;
+
+        let mut values = Vec::with_capacity(transactions.len());
+        for (i, tx) in transactions.iter().enumerate() {
+            let tx_digest = tx.transaction.digest();
+            let transaction = &tx.transaction.data().intent_message().value;
+
+            let effects = &tx.effects;
+            let events: Vec<_> = tx.events.iter().flat_map(|e| e.data.iter()).collect();
+
+            values.push(StoredTransaction {
+                tx_digest: tx_digest.inner().into(),
+                cp_sequence_number,
+                timestamp_ms: checkpoint_summary.timestamp_ms as i64,
+                raw_transaction: bcs::to_bytes(transaction).with_context(|| {
+                    format!("Serializing transaction {tx_digest} (cp {cp_sequence_number}, tx {i})")
+                })?,
+                raw_effects: bcs::to_bytes(effects).with_context(|| {
+                    format!("Serializing effects for transaction {tx_digest} (cp {cp_sequence_number}, tx {i})")
+                })?,
+                events: bcs::to_bytes(&events).with_context(|| {
+                    format!("Serializing events for transaction {tx_digest} (cp {cp_sequence_number}, tx {i})")
+                })?,
+            });
+        }
+
+        Ok(values)
+    }
+}
+
+#[async_trait::async_trait]
+impl Handler for KvTransactions {
+    const MIN_EAGER_ROWS: usize = 100;
+    const MAX_CHUNK_ROWS: usize = 1000;
+    const MAX_PENDING_ROWS: usize = 10000;
+
+    async fn commit(values: &[Self::Value], conn: &mut db::Connection<'_>) -> Result<usize> {
+        Ok(diesel::insert_into(kv_transactions::table)
+            .values(values)
+            .on_conflict_do_nothing()
+            .execute(conn)
+            .await?)
+    }
+}
diff --git a/crates/sui-indexer-alt/src/handlers/mod.rs b/crates/sui-indexer-alt/src/handlers/mod.rs
new file mode 100644
index 0000000000000..055ceb870d8a9
--- /dev/null
+++ b/crates/sui-indexer-alt/src/handlers/mod.rs
@@ -0,0 +1,15 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+pub mod ev_emit_mod;
+pub mod ev_struct_inst;
+pub mod kv_checkpoints;
+pub mod kv_objects;
+pub mod kv_transactions;
+pub mod obj_versions;
+pub mod sum_coin_balances;
+pub mod sum_obj_types;
+pub mod tx_affected_objects;
+pub mod tx_balance_changes;
+pub mod wal_coin_balances;
+pub mod wal_obj_types;
diff --git a/crates/sui-indexer-alt/src/handlers/obj_versions.rs b/crates/sui-indexer-alt/src/handlers/obj_versions.rs
new file mode 100644
index 0000000000000..7ca0973cc03f0
--- /dev/null
+++ b/crates/sui-indexer-alt/src/handlers/obj_versions.rs
@@ -0,0 +1,62 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::sync::Arc;
+
+use anyhow::Result;
+use diesel_async::RunQueryDsl;
+use sui_types::full_checkpoint_content::CheckpointData;
+
+use crate::{
+    db,
+    models::objects::StoredObjVersion,
+    pipeline::{concurrent::Handler, Processor},
+    schema::obj_versions,
+};
+
+pub struct ObjVersions;
+
+impl Processor for ObjVersions {
+    const NAME: &'static str = "obj_versions";
+    type Value = StoredObjVersion;
+
+    fn process(checkpoint: &Arc<CheckpointData>) -> Result<Vec<Self::Value>> {
+        let CheckpointData {
+            transactions,
+            checkpoint_summary,
+            ..
+        } = checkpoint.as_ref();
+
+        let cp_sequence_number = checkpoint_summary.sequence_number as i64;
+        Ok(transactions
+            .iter()
+            .flat_map(|txn| txn.output_objects.iter())
+            .map(|o| {
+                let id = o.id();
+                let version = o.version().value();
+                let digest = o.digest();
+                StoredObjVersion {
+                    object_id: id.to_vec(),
+                    object_version: version as i64,
+                    object_digest: digest.inner().into(),
+                    cp_sequence_number,
+                }
+            })
+            .collect())
+    }
+}
+
+#[async_trait::async_trait]
+impl Handler for ObjVersions {
+    const MIN_EAGER_ROWS: usize = 100;
+    const MAX_CHUNK_ROWS: usize = 1000;
+    const MAX_PENDING_ROWS: usize = 10000;
+
+    async fn commit(values: &[Self::Value], conn: &mut db::Connection<'_>) -> Result<usize> {
+        Ok(diesel::insert_into(obj_versions::table)
+            .values(values)
+            .on_conflict_do_nothing()
+            .execute(conn)
+            .await?)
+    }
+}
diff --git a/crates/sui-indexer-alt/src/handlers/sum_coin_balances.rs b/crates/sui-indexer-alt/src/handlers/sum_coin_balances.rs
new file mode 100644
index 0000000000000..745430dba9353
--- /dev/null
+++ b/crates/sui-indexer-alt/src/handlers/sum_coin_balances.rs
@@ -0,0 +1,188 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::{
+    collections::{btree_map::Entry, BTreeMap},
+    sync::Arc,
+};
+
+use anyhow::{anyhow, bail, ensure};
+use diesel::{upsert::excluded, ExpressionMethods};
+use diesel_async::RunQueryDsl;
+use futures::future::try_join_all;
+use sui_types::{
+    base_types::ObjectID, effects::TransactionEffectsAPI, full_checkpoint_content::CheckpointData,
+    object::Owner,
+};
+
+use crate::{
+    db,
+    models::objects::{StoredObjectUpdate, StoredSumCoinBalance},
+    pipeline::{sequential::Handler, Processor},
+    schema::sum_coin_balances,
+};
+
+/// Each insert or update will include at most this many rows -- the size is chosen to maximize the
+/// rows without hitting the limit on bind parameters.
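+/// (A `StoredSumCoinBalance` row binds five columns, so chunks of `i16::MAX / 5` rows keep the
+/// bind-parameter count of a single statement within `i16::MAX`.)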
+const UPDATE_CHUNK_ROWS: usize = i16::MAX as usize / 5;
+
+/// Each deletion will include at most this many rows.
+const DELETE_CHUNK_ROWS: usize = i16::MAX as usize;
+
+pub struct SumCoinBalances;
+
+impl Processor for SumCoinBalances {
+    const NAME: &'static str = "sum_coin_balances";
+
+    type Value = StoredObjectUpdate<StoredSumCoinBalance>;
+
+    fn process(checkpoint: &Arc<CheckpointData>) -> anyhow::Result<Vec<Self::Value>> {
+        let CheckpointData {
+            transactions,
+            checkpoint_summary,
+            ..
+        } = checkpoint.as_ref();
+
+        let cp_sequence_number = checkpoint_summary.sequence_number;
+        let mut values: BTreeMap<ObjectID, Self::Value> = BTreeMap::new();
+        let mut coin_types: BTreeMap<ObjectID, Vec<u8>> = BTreeMap::new();
+
+        // Iterate over transactions in reverse so we see the latest version of each object first.
+        for tx in transactions.iter().rev() {
+            // Find all coins in the transaction's inputs and outputs.
+            for object in tx.input_objects.iter().chain(tx.output_objects.iter()) {
+                if let Some(coin_type) = object.type_().and_then(|t| t.coin_type_maybe()) {
+                    let serialized = bcs::to_bytes(&coin_type)
+                        .map_err(|_| anyhow!("Failed to serialize type for {}", object.id()))?;
+
+                    coin_types.insert(object.id(), serialized);
+                }
+            }
+
+            // Deleted and wrapped coins
+            for change in tx.effects.object_changes() {
+                // The object was not deleted/wrapped by this transaction, or if it was, it had
+                // no input version (it was created or unwrapped within the same transaction).
+                if change.output_digest.is_some() || change.input_version.is_none() {
+                    continue;
+                }
+
+                // Object is not a coin
+                if !coin_types.contains_key(&change.id) {
+                    continue;
+                }
+
+                let object_id = change.id;
+                let object_version = tx.effects.lamport_version().value();
+                match values.entry(object_id) {
+                    Entry::Occupied(entry) => {
+                        ensure!(entry.get().object_version > object_version);
+                    }
+
+                    Entry::Vacant(entry) => {
+                        entry.insert(StoredObjectUpdate {
+                            object_id,
+                            object_version,
+                            cp_sequence_number,
+                            update: None,
+                        });
+                    }
+                }
+            }
+
+            // Modified and created coins.
+            for object in &tx.output_objects {
+                let object_id = object.id();
+                let object_version = object.version().value();
+
+                let Some(coin_type) = coin_types.get(&object_id) else {
+                    continue;
+                };
+
+                // Coin balance only tracks address-owned objects
+                let Owner::AddressOwner(owner_id) = object.owner() else {
+                    continue;
+                };
+
+                let Some(coin) = object.as_coin_maybe() else {
+                    bail!("Failed to deserialize Coin for {object_id}");
+                };
+
+                match values.entry(object_id) {
+                    Entry::Occupied(entry) => {
+                        ensure!(entry.get().object_version > object_version);
+                    }
+
+                    Entry::Vacant(entry) => {
+                        entry.insert(StoredObjectUpdate {
+                            object_id,
+                            object_version,
+                            cp_sequence_number,
+                            update: Some(StoredSumCoinBalance {
+                                object_id: object_id.to_vec(),
+                                object_version: object_version as i64,
+                                owner_id: owner_id.to_vec(),
+                                coin_type: coin_type.clone(),
+                                coin_balance: coin.balance.value() as i64,
+                            }),
+                        });
+                    }
+                }
+            }
+        }
+
+        Ok(values.into_values().collect())
+    }
+}
+
+#[async_trait::async_trait]
+impl Handler for SumCoinBalances {
+    type Batch = BTreeMap<ObjectID, Self::Value>;
+
+    fn batch(batch: &mut Self::Batch, updates: Vec<Self::Value>) {
+        // `updates` are guaranteed to be provided in checkpoint order, so blindly inserting them
+        // will result in the batch containing the most up-to-date update for each object.
+        for update in updates {
+            batch.insert(update.object_id, update);
+        }
+    }
+
+    async fn commit(batch: &Self::Batch, conn: &mut db::Connection<'_>) -> anyhow::Result<usize> {
+        let mut updates = vec![];
+        let mut deletes = vec![];
+
+        for update in batch.values() {
+            if let Some(update) = &update.update {
+                updates.push(update.clone());
+            } else {
+                deletes.push(update.object_id.to_vec());
+            }
+        }
+
+        let update_chunks = updates.chunks(UPDATE_CHUNK_ROWS).map(|chunk| {
+            diesel::insert_into(sum_coin_balances::table)
+                .values(chunk)
+                .on_conflict(sum_coin_balances::object_id)
+                .do_update()
+                .set((
+                    sum_coin_balances::object_version
+                        .eq(excluded(sum_coin_balances::object_version)),
+                    sum_coin_balances::owner_id.eq(excluded(sum_coin_balances::owner_id)),
+                    sum_coin_balances::coin_balance.eq(excluded(sum_coin_balances::coin_balance)),
+                ))
+                .execute(conn)
+        });
+
+        let updated: usize = try_join_all(update_chunks).await?.into_iter().sum();
+
+        let delete_chunks = deletes.chunks(DELETE_CHUNK_ROWS).map(|chunk| {
+            diesel::delete(sum_coin_balances::table)
+                .filter(sum_coin_balances::object_id.eq_any(chunk.iter().cloned()))
+                .execute(conn)
+        });
+
+        let deleted: usize = try_join_all(delete_chunks).await?.into_iter().sum();
+
+        Ok(updated + deleted)
+    }
+}
diff --git a/crates/sui-indexer-alt/src/handlers/sum_obj_types.rs b/crates/sui-indexer-alt/src/handlers/sum_obj_types.rs
new file mode 100644
index 0000000000000..c118a684f26df
--- /dev/null
+++ b/crates/sui-indexer-alt/src/handlers/sum_obj_types.rs
@@ -0,0 +1,184 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::{
+    collections::{btree_map::Entry, BTreeMap},
+    sync::Arc,
+};
+
+use anyhow::{anyhow, ensure};
+use diesel::{upsert::excluded, ExpressionMethods};
+use diesel_async::RunQueryDsl;
+use futures::future::try_join_all;
+use sui_types::{
+    base_types::ObjectID, effects::TransactionEffectsAPI, full_checkpoint_content::CheckpointData,
+    object::Owner,
+};
+
+use crate::{
+    db,
+    models::objects::{StoredObjectUpdate, StoredOwnerKind, StoredSumObjType},
+    pipeline::{sequential::Handler, Processor},
+    schema::sum_obj_types,
+};
+
+/// Each insert or update will include at most this many rows -- the size is chosen to maximize the
+/// rows without hitting the limit on bind parameters.
+const UPDATE_CHUNK_ROWS: usize = i16::MAX as usize / 8;
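+// (Illustrative arithmetic, not part of the original: `StoredSumObjType` binds 8 columns per row --
+// object_id, object_version, owner_kind, owner_id, package, module, name, instantiation -- so with
+// an i16::MAX = 32,767 bind-parameter budget each chunk holds at most 32,767 / 8 = 4,095 rows.)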
+
+/// Each deletion will include at most this many rows.
+const DELETE_CHUNK_ROWS: usize = i16::MAX as usize;
+
+pub struct SumObjTypes;
+
+impl Processor for SumObjTypes {
+    const NAME: &'static str = "sum_obj_types";
+
+    type Value = StoredObjectUpdate<StoredSumObjType>;
+
+    fn process(checkpoint: &Arc<CheckpointData>) -> anyhow::Result<Vec<Self::Value>> {
+        let CheckpointData {
+            transactions,
+            checkpoint_summary,
+            ..
+        } = checkpoint.as_ref();
+
+        let cp_sequence_number = checkpoint_summary.sequence_number;
+        let mut values: BTreeMap<ObjectID, Self::Value> = BTreeMap::new();
+
+        // Iterate over transactions in reverse so we see the latest version of each object first.
+        for tx in transactions.iter().rev() {
+            // Deleted and wrapped objects -- objects that show up without a digest in
+            // `object_changes` are either deleted or wrapped. Objects without an input version
+            // must have been unwrapped and deleted, meaning they do not need to be deleted from
+            // our records.
+            for change in tx.effects.object_changes() {
+                if change.output_digest.is_some() || change.input_version.is_none() {
+                    continue;
+                }
+
+                let object_id = change.id;
+                let object_version = tx.effects.lamport_version().value();
+                match values.entry(object_id) {
+                    Entry::Occupied(entry) => {
+                        ensure!(entry.get().object_version > object_version);
+                    }
+
+                    Entry::Vacant(entry) => {
+                        entry.insert(StoredObjectUpdate {
+                            object_id,
+                            object_version,
+                            cp_sequence_number,
+                            update: None,
+                        });
+                    }
+                }
+            }
+
+            // Modified and created objects.
+            for object in &tx.output_objects {
+                let object_id = object.id();
+                let object_version = object.version().value();
+                match values.entry(object_id) {
+                    Entry::Occupied(entry) => {
+                        ensure!(entry.get().object_version > object_version);
+                    }
+
+                    Entry::Vacant(entry) => {
+                        let type_ = object.type_();
+                        entry.insert(StoredObjectUpdate {
+                            object_id,
+                            object_version,
+                            cp_sequence_number,
+                            update: Some(StoredSumObjType {
+                                object_id: object_id.to_vec(),
+                                object_version: object_version as i64,
+
+                                owner_kind: match object.owner() {
+                                    Owner::AddressOwner(_) => StoredOwnerKind::Address,
+                                    Owner::ObjectOwner(_) => StoredOwnerKind::Object,
+                                    Owner::Shared { .. } => StoredOwnerKind::Shared,
+                                    Owner::Immutable => StoredOwnerKind::Immutable,
+                                },
+
+                                owner_id: match object.owner() {
+                                    Owner::AddressOwner(a) => Some(a.to_vec()),
+                                    Owner::ObjectOwner(o) => Some(o.to_vec()),
+                                    _ => None,
+                                },
+
+                                package: type_.map(|t| t.address().to_vec()),
+                                module: type_.map(|t| t.module().to_string()),
+                                name: type_.map(|t| t.name().to_string()),
+                                instantiation: type_
+                                    .map(|t| bcs::to_bytes(&t.type_params()))
+                                    .transpose()
+                                    .map_err(|e| {
+                                        anyhow!(
+                                            "Failed to serialize type parameters for {}: {e}",
+                                            object
+                                                .id()
+                                                .to_canonical_display(/* with_prefix */ true),
+                                        )
+                                    })?,
+                            }),
+                        });
+                    }
+                }
+            }
+        }
+
+        Ok(values.into_values().collect())
+    }
+}
+
+#[async_trait::async_trait]
+impl Handler for SumObjTypes {
+    type Batch = BTreeMap<ObjectID, Self::Value>;
+
+    fn batch(batch: &mut Self::Batch, updates: Vec<Self::Value>) {
+        // `updates` are guaranteed to be provided in checkpoint order, so blindly inserting them
+        // will result in the batch containing the most up-to-date update for each object.
+        for update in updates {
+            batch.insert(update.object_id, update);
+        }
+    }
+
+    async fn commit(values: &Self::Batch, conn: &mut db::Connection<'_>) -> anyhow::Result<usize> {
+        let mut updates = vec![];
+        let mut deletes = vec![];
+
+        for update in values.values() {
+            if let Some(update) = &update.update {
+                updates.push(update.clone());
+            } else {
+                deletes.push(update.object_id.to_vec());
+            }
+        }
+
+        let update_chunks = updates.chunks(UPDATE_CHUNK_ROWS).map(|chunk| {
+            diesel::insert_into(sum_obj_types::table)
+                .values(chunk)
+                .on_conflict(sum_obj_types::object_id)
+                .do_update()
+                .set((
+                    sum_obj_types::object_version.eq(excluded(sum_obj_types::object_version)),
+                    sum_obj_types::owner_kind.eq(excluded(sum_obj_types::owner_kind)),
+                    sum_obj_types::owner_id.eq(excluded(sum_obj_types::owner_id)),
+                ))
+                .execute(conn)
+        });
+
+        let updated: usize = try_join_all(update_chunks).await?.into_iter().sum();
+
+        let delete_chunks = deletes.chunks(DELETE_CHUNK_ROWS).map(|chunk| {
+            diesel::delete(sum_obj_types::table)
+                .filter(sum_obj_types::object_id.eq_any(chunk.iter().cloned()))
+                .execute(conn)
+        });
+
+        let deleted: usize = try_join_all(delete_chunks).await?.into_iter().sum();
+
+        Ok(updated + deleted)
+    }
+}
diff --git a/crates/sui-indexer-alt/src/handlers/tx_affected_objects.rs b/crates/sui-indexer-alt/src/handlers/tx_affected_objects.rs
new file mode 100644
index 0000000000000..309af2c08a300
--- /dev/null
+++ b/crates/sui-indexer-alt/src/handlers/tx_affected_objects.rs
@@ -0,0 +1,65 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::sync::Arc;
+
+use anyhow::Result;
+use diesel_async::RunQueryDsl;
+use sui_types::{effects::TransactionEffectsAPI, full_checkpoint_content::CheckpointData};
+
+use crate::{
+    db, models::transactions::StoredTxAffectedObject, pipeline::concurrent::Handler,
+    pipeline::Processor, schema::tx_affected_objects,
+};
+
+pub struct TxAffectedObjects;
+
+impl Processor for TxAffectedObjects {
+    const NAME: &'static str = "tx_affected_objects";
+
+    type Value = StoredTxAffectedObject;
+
+    fn process(checkpoint: &Arc<CheckpointData>) -> Result<Vec<Self::Value>> {
+        let CheckpointData {
+            transactions,
+            checkpoint_summary,
+            ..
+        } = checkpoint.as_ref();
+
+        let mut values = Vec::new();
+        let first_tx = checkpoint_summary.network_total_transactions as usize - transactions.len();
+
+        for (i, tx) in transactions.iter().enumerate() {
+            let tx_sequence_number = (first_tx + i) as i64;
+            let sender = tx.transaction.sender_address();
+
+            values.extend(
+                tx.effects
+                    .object_changes()
+                    .iter()
+                    .map(|o| StoredTxAffectedObject {
+                        tx_sequence_number,
+                        affected: o.id.to_vec(),
+                        sender: sender.to_vec(),
+                    }),
+            );
+        }
+
+        Ok(values)
+    }
+}
+
+#[async_trait::async_trait]
+impl Handler for TxAffectedObjects {
+    const MIN_EAGER_ROWS: usize = 100;
+    const MAX_CHUNK_ROWS: usize = 1000;
+    const MAX_PENDING_ROWS: usize = 10000;
+
+    async fn commit(values: &[Self::Value], conn: &mut db::Connection<'_>) -> Result<usize> {
+        Ok(diesel::insert_into(tx_affected_objects::table)
+            .values(values)
+            .on_conflict_do_nothing()
+            .execute(conn)
+            .await?)
+    }
+}
diff --git a/crates/sui-indexer-alt/src/handlers/tx_balance_changes.rs b/crates/sui-indexer-alt/src/handlers/tx_balance_changes.rs
new file mode 100644
index 0000000000000..1f97e806fd1ec
--- /dev/null
+++ b/crates/sui-indexer-alt/src/handlers/tx_balance_changes.rs
@@ -0,0 +1,105 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::{collections::BTreeMap, sync::Arc};
+
+use anyhow::{Context, Result};
+use diesel_async::RunQueryDsl;
+use sui_types::{
+    coin::Coin,
+    effects::TransactionEffectsAPI,
+    full_checkpoint_content::{CheckpointData, CheckpointTransaction},
+    gas_coin::GAS,
+};
+
+use crate::{
+    db,
+    models::transactions::{BalanceChange, StoredTxBalanceChange},
+    pipeline::concurrent::Handler,
+    pipeline::Processor,
+    schema::tx_balance_changes,
+};
+
+pub struct TxBalanceChanges;
+
+impl Processor for TxBalanceChanges {
+    const NAME: &'static str = "tx_balance_changes";
+
+    type Value = StoredTxBalanceChange;
+
+    fn process(checkpoint: &Arc<CheckpointData>) -> Result<Vec<Self::Value>> {
+        let CheckpointData {
+            transactions,
+            checkpoint_summary,
+            ..
+        } = checkpoint.as_ref();
+
+        let mut values = Vec::new();
+        let first_tx = checkpoint_summary.network_total_transactions as usize - transactions.len();
+
+        for (i, tx) in transactions.iter().enumerate() {
+            let tx_sequence_number = (first_tx + i) as i64;
+            let balance_changes = balance_changes(tx).with_context(|| {
+                format!("Calculating balance changes for transaction {tx_sequence_number}")
+            })?;
+
+            values.push(StoredTxBalanceChange {
+                tx_sequence_number,
+                balance_changes: bcs::to_bytes(&balance_changes).with_context(|| {
+                    format!("Serializing balance changes for transaction {tx_sequence_number}")
+                })?,
+            });
+        }
+
+        Ok(values)
+    }
+}
+
+#[async_trait::async_trait]
+impl Handler for TxBalanceChanges {
+    const MIN_EAGER_ROWS: usize = 100;
+    const MAX_CHUNK_ROWS: usize = 1000;
+    const MAX_PENDING_ROWS: usize = 10000;
+
+    async fn commit(values: &[Self::Value], conn: &mut db::Connection<'_>) -> Result<usize> {
+        Ok(diesel::insert_into(tx_balance_changes::table)
+            .values(values)
+            .on_conflict_do_nothing()
+            .execute(conn)
+            .await?)
+    }
+}
+
+/// Calculate balance changes based on the transaction's input and output objects.
+fn balance_changes(transaction: &CheckpointTransaction) -> Result<Vec<BalanceChange>> {
+    // Shortcut if the transaction failed -- we know that only gas was charged.
+    if transaction.effects.status().is_err() {
+        return Ok(vec![BalanceChange::V1 {
+            owner: transaction.effects.gas_object().1,
+            coin_type: GAS::type_tag().to_canonical_string(/* with_prefix */ true),
+            amount: -(transaction.effects.gas_cost_summary().net_gas_usage() as i128),
+        }]);
+    }
+
+    let mut changes = BTreeMap::new();
+    for object in &transaction.input_objects {
+        if let Some((type_, balance)) = Coin::extract_balance_if_coin(object)? {
+            *changes.entry((object.owner(), type_)).or_insert(0i128) -= balance as i128;
+        }
+    }
+
+    for object in &transaction.output_objects {
+        if let Some((type_, balance)) = Coin::extract_balance_if_coin(object)? {
+            *changes.entry((object.owner(), type_)).or_insert(0i128) += balance as i128;
+        }
+    }
+
+    Ok(changes
+        .into_iter()
+        .map(|((owner, coin_type), amount)| BalanceChange::V1 {
+            owner: *owner,
+            coin_type: coin_type.to_canonical_string(/* with_prefix */ true),
+            amount,
+        })
+        .collect())
+}
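+
+// Worked example (illustrative, not part of the original): if a transaction consumes a coin of
+// type 0x2::sui::SUI worth 1_000 owned by address A, and outputs a 400 coin owned by A and a 600
+// coin owned by B, the map accumulates (A, SUI): -1_000 + 400 = -600 and (B, SUI): +600,
+// producing two `BalanceChange::V1` entries.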
diff --git a/crates/sui-indexer-alt/src/handlers/wal_coin_balances.rs b/crates/sui-indexer-alt/src/handlers/wal_coin_balances.rs
new file mode 100644
index 0000000000000..6482d6fc94bb6
--- /dev/null
+++ b/crates/sui-indexer-alt/src/handlers/wal_coin_balances.rs
@@ -0,0 +1,59 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::sync::Arc;
+
+use anyhow::Result;
+use diesel_async::RunQueryDsl;
+use sui_types::full_checkpoint_content::CheckpointData;
+
+use crate::{
+    db,
+    models::objects::{StoredObjectUpdate, StoredSumCoinBalance, StoredWalCoinBalance},
+    pipeline::{concurrent::Handler, Processor},
+    schema::wal_coin_balances,
+};
+
+use super::sum_coin_balances::SumCoinBalances;
+
+pub struct WalCoinBalances;
+
+impl Processor for WalCoinBalances {
+    const NAME: &'static str = "wal_coin_balances";
+
+    type Value = StoredObjectUpdate<StoredSumCoinBalance>;
+
+    fn process(checkpoint: &Arc<CheckpointData>) -> Result<Vec<Self::Value>> {
+        SumCoinBalances::process(checkpoint)
+    }
+}
+
+#[async_trait::async_trait]
+impl Handler for WalCoinBalances {
+    const MIN_EAGER_ROWS: usize = 100;
+    const MAX_CHUNK_ROWS: usize = 1000;
+    const MAX_PENDING_ROWS: usize = 10000;
+
+    async fn commit(values: &[Self::Value], conn: &mut db::Connection<'_>) -> Result<usize> {
+        let values: Vec<_> = values
+            .iter()
+            .map(|value| StoredWalCoinBalance {
+                object_id: value.object_id.to_vec(),
+                object_version: value.object_version as i64,
+
+                owner_id: value.update.as_ref().map(|o| o.owner_id.clone()),
+
+                coin_type: value.update.as_ref().map(|o| o.coin_type.clone()),
+                coin_balance: value.update.as_ref().map(|o| o.coin_balance),
+
+                cp_sequence_number: value.cp_sequence_number as i64,
+            })
+            .collect();
+
+        Ok(diesel::insert_into(wal_coin_balances::table)
+            .values(&values)
+            .on_conflict_do_nothing()
+            .execute(conn)
+            .await?)
+    }
+}
diff --git a/crates/sui-indexer-alt/src/handlers/wal_obj_types.rs b/crates/sui-indexer-alt/src/handlers/wal_obj_types.rs
new file mode 100644
index 0000000000000..68cdb2b39945f
--- /dev/null
+++ b/crates/sui-indexer-alt/src/handlers/wal_obj_types.rs
@@ -0,0 +1,62 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::sync::Arc;
+
+use anyhow::Result;
+use diesel_async::RunQueryDsl;
+use sui_types::full_checkpoint_content::CheckpointData;
+
+use crate::{
+    db,
+    models::objects::{StoredObjectUpdate, StoredSumObjType, StoredWalObjType},
+    pipeline::{concurrent::Handler, Processor},
+    schema::wal_obj_types,
+};
+
+use super::sum_obj_types::SumObjTypes;
+
+pub struct WalObjTypes;
+
+impl Processor for WalObjTypes {
+    const NAME: &'static str = "wal_obj_types";
+
+    type Value = StoredObjectUpdate<StoredSumObjType>;
+
+    fn process(checkpoint: &Arc<CheckpointData>) -> Result<Vec<Self::Value>> {
+        SumObjTypes::process(checkpoint)
+    }
+}
+
+#[async_trait::async_trait]
+impl Handler for WalObjTypes {
+    const MIN_EAGER_ROWS: usize = 100;
+    const MAX_CHUNK_ROWS: usize = 1000;
+    const MAX_PENDING_ROWS: usize = 10000;
+
+    async fn commit(values: &[Self::Value], conn: &mut db::Connection<'_>) -> Result<usize> {
+        let values: Vec<_> = values
+            .iter()
+            .map(|value| StoredWalObjType {
+                object_id: value.object_id.to_vec(),
+                object_version: value.object_version as i64,
+
+                owner_kind: value.update.as_ref().map(|o| o.owner_kind),
+                owner_id: value.update.as_ref().and_then(|o| o.owner_id.clone()),
+
+                package: value.update.as_ref().and_then(|o| o.package.clone()),
+                module: value.update.as_ref().and_then(|o| o.module.clone()),
+                name: value.update.as_ref().and_then(|o| o.name.clone()),
+                instantiation: value.update.as_ref().and_then(|o| o.instantiation.clone()),
+
+                cp_sequence_number: value.cp_sequence_number as i64,
+            })
+            .collect();
+
+        Ok(diesel::insert_into(wal_obj_types::table)
+            .values(&values)
+            .on_conflict_do_nothing()
+            .execute(conn)
+            .await?)
+    }
+}
diff --git a/crates/sui-indexer-alt/src/ingestion/broadcaster.rs b/crates/sui-indexer-alt/src/ingestion/broadcaster.rs
new file mode 100644
index 0000000000000..8b20b2693415e
--- /dev/null
+++ b/crates/sui-indexer-alt/src/ingestion/broadcaster.rs
@@ -0,0 +1,103 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::sync::Arc;
+
+use backoff::backoff::Constant;
+use futures::{future::try_join_all, TryStreamExt};
+use mysten_metrics::spawn_monitored_task;
+use sui_types::full_checkpoint_content::CheckpointData;
+use tokio::{sync::mpsc, task::JoinHandle};
+use tokio_stream::{wrappers::ReceiverStream, StreamExt};
+use tokio_util::sync::CancellationToken;
+use tracing::{debug, error, info};
+
+use crate::{ingestion::error::Error, metrics::IndexerMetrics};
+
+use super::{client::IngestionClient, IngestionConfig};
+
+/// The broadcaster task is responsible for taking a stream of checkpoint sequence numbers from
+/// `checkpoint_rx`, fetching them using the `client` and disseminating them to all subscribers in
+/// `subscribers`.
+///
+/// The task will shut down if the `cancel` token is signalled, or if the `checkpoint_rx` channel
+/// closes.
+pub(super) fn broadcaster(
+    config: IngestionConfig,
+    client: IngestionClient,
+    metrics: Arc<IndexerMetrics>,
+    checkpoint_rx: mpsc::Receiver<u64>,
+    subscribers: Vec<mpsc::Sender<Arc<CheckpointData>>>,
+    cancel: CancellationToken,
+) -> JoinHandle<()> {
+    spawn_monitored_task!(async move {
+        info!("Starting ingestion broadcaster");
+
+        match ReceiverStream::new(checkpoint_rx)
+            .map(Ok)
+            .try_for_each_concurrent(/* limit */ config.ingest_concurrency, |cp| {
+                let client = client.clone();
+                let metrics = metrics.clone();
+                let subscribers = subscribers.clone();
+
+                // One clone is for the supervisor to signal a cancel if it detects a
+                // subscriber that wants to wind down ingestion, and the other is to pass to
+                // each worker to detect cancellation.
+                let supervisor_cancel = cancel.clone();
+                let cancel = cancel.clone();
+
+                // Repeatedly retry if the checkpoint is not found, assuming that we are at the
+                // tip of the network and it will become available soon.
+                let backoff = Constant::new(config.retry_interval);
+                let fetch = move || {
+                    let client = client.clone();
+                    let metrics = metrics.clone();
+                    let cancel = cancel.clone();
+
+                    async move {
+                        use backoff::Error as BE;
+                        if cancel.is_cancelled() {
+                            return Err(BE::permanent(Error::Cancelled));
+                        }
+
+                        client.fetch(cp, &cancel).await.map_err(|e| match e {
+                            Error::NotFound(checkpoint) => {
+                                debug!(checkpoint, "Checkpoint not found, retrying...");
+                                metrics.total_ingested_not_found_retries.inc();
+                                BE::transient(e)
+                            }
+                            e => BE::permanent(e),
+                        })
+                    }
+                };
+
+                async move {
+                    let checkpoint = backoff::future::retry(backoff, fetch).await?;
+                    let futures = subscribers.iter().map(|s| s.send(checkpoint.clone()));
+
+                    if try_join_all(futures).await.is_err() {
+                        info!("Subscription dropped, signalling shutdown");
+                        supervisor_cancel.cancel();
+                        Err(Error::Cancelled)
+                    } else {
+                        Ok(())
+                    }
+                }
+            })
+            .await
+        {
+            Ok(()) => {
+                info!("Checkpoints done, stopping ingestion broadcaster");
+            }
+
+            Err(Error::Cancelled) => {
+                info!("Shutdown received, stopping ingestion broadcaster");
+            }
+
+            Err(e) => {
+                error!("Ingestion broadcaster failed: {}", e);
+                cancel.cancel();
+            }
+        }
+    })
+}
diff --git a/crates/sui-indexer-alt/src/ingestion/client.rs b/crates/sui-indexer-alt/src/ingestion/client.rs
new file mode 100644
index 0000000000000..b16a7c51daef1
--- /dev/null
+++ b/crates/sui-indexer-alt/src/ingestion/client.rs
@@ -0,0 +1,155 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use crate::ingestion::local_client::LocalIngestionClient;
+use crate::ingestion::remote_client::RemoteIngestionClient;
+use crate::ingestion::Error as IngestionError;
+use crate::ingestion::Result as IngestionResult;
+use crate::metrics::IndexerMetrics;
+use backoff::Error as BE;
+use backoff::ExponentialBackoff;
+use std::path::PathBuf;
+use std::sync::Arc;
+use std::time::Duration;
+use sui_storage::blob::Blob;
+use sui_types::full_checkpoint_content::CheckpointData;
+use tokio_util::bytes::Bytes;
+use tokio_util::sync::CancellationToken;
+use tracing::debug;
+use url::Url;
+
+/// Wait at most this long between retries for transient errors.
+const MAX_TRANSIENT_RETRY_INTERVAL: Duration = Duration::from_secs(60);
+
+#[async_trait::async_trait]
+pub(crate) trait IngestionClientTrait: Send + Sync {
+    async fn fetch(&self, checkpoint: u64) -> FetchResult;
+}
+
+#[derive(thiserror::Error, Debug)]
+pub enum FetchError {
+    #[error("Checkpoint not found")]
+    NotFound,
+    #[error("Failed to fetch checkpoint due to permanent error: {0}")]
+    Permanent(#[from] anyhow::Error),
+    #[error("Failed to fetch checkpoint due to {reason}: {error}")]
+    Transient {
+        reason: &'static str,
+        #[source]
+        error: anyhow::Error,
+    },
+}
+
+pub type FetchResult = Result<Bytes, FetchError>;
+
+#[derive(Clone)]
+pub(crate) struct IngestionClient {
+    client: Arc<dyn IngestionClientTrait>,
+    /// Wrap the metrics in an `Arc` to keep copies of the client cheap.
+    metrics: Arc<IndexerMetrics>,
+}
+
+impl IngestionClient {
+    pub(crate) fn new_remote(url: Url, metrics: Arc<IndexerMetrics>) -> IngestionResult<Self> {
+        let client = Arc::new(RemoteIngestionClient::new(url)?);
+        Ok(IngestionClient { client, metrics })
+    }
+
+    pub(crate) fn new_local(path: PathBuf, metrics: Arc<IndexerMetrics>) -> Self {
+        let client = Arc::new(LocalIngestionClient::new(path));
+        IngestionClient { client, metrics }
+    }
+
+    /// Repeatedly retries transient errors with an exponential backoff (capped at
+    /// [MAX_TRANSIENT_RETRY_INTERVAL]). Transient errors are either reported by the client
+    /// implementation as a `FetchError::Transient` variant, or raised within this function if the
+    /// result fails to deserialize as [CheckpointData].
+    ///
+    /// The function returns immediately on:
+    /// - non-transient errors determined by the client implementation, i.e. both the
+    ///   `FetchError::NotFound` and `FetchError::Permanent` variants;
+    /// - cancellation of the supplied `cancel` token.
+    pub(crate) async fn fetch(
+        &self,
+        checkpoint: u64,
+        cancel: &CancellationToken,
+    ) -> IngestionResult<Arc<CheckpointData>> {
+        let client = self.client.clone();
+        let request = move || {
+            let client = client.clone();
+            async move {
+                if cancel.is_cancelled() {
+                    return Err(BE::permanent(IngestionError::Cancelled));
+                }
+
+                let bytes = client.fetch(checkpoint).await.map_err(|err| match err {
+                    FetchError::NotFound => BE::permanent(IngestionError::NotFound(checkpoint)),
+                    FetchError::Permanent(error) => {
+                        BE::permanent(IngestionError::FetchError(checkpoint, error))
+                    }
+                    FetchError::Transient { reason, error } => self.metrics.inc_retry(
+                        checkpoint,
+                        reason,
+                        IngestionError::FetchError(checkpoint, error),
+                    ),
+                })?;
+
+                self.metrics.total_ingested_bytes.inc_by(bytes.len() as u64);
+                let data: CheckpointData = Blob::from_bytes(&bytes).map_err(|e| {
+                    self.metrics.inc_retry(
+                        checkpoint,
+                        "deserialization",
+                        IngestionError::DeserializationError(checkpoint, e),
+                    )
+                })?;
+
+                Ok(data)
+            }
+        };
+
+        // Keep backing off until we are waiting for the max interval, but don't give up.
+        let backoff = ExponentialBackoff {
+            max_interval: MAX_TRANSIENT_RETRY_INTERVAL,
+            max_elapsed_time: None,
+            ..Default::default()
+        };
+
+        let guard = self.metrics.ingested_checkpoint_latency.start_timer();
+        let data = backoff::future::retry(backoff, request).await?;
+        let elapsed = guard.stop_and_record();
+
+        debug!(
+            checkpoint,
+            elapsed_ms = elapsed * 1000.0,
+            "Fetched checkpoint"
+        );
+
+        self.metrics.total_ingested_checkpoints.inc();
+
+        self.metrics
+            .total_ingested_transactions
+            .inc_by(data.transactions.len() as u64);
+
+        self.metrics.total_ingested_events.inc_by(
+            data.transactions
+                .iter()
+                .map(|tx| tx.events.as_ref().map_or(0, |evs| evs.data.len()) as u64)
+                .sum(),
+        );
+
+        self.metrics.total_ingested_inputs.inc_by(
+            data.transactions
+                .iter()
+                .map(|tx| tx.input_objects.len() as u64)
+                .sum(),
+        );
+
+        self.metrics.total_ingested_outputs.inc_by(
+            data.transactions
+                .iter()
+                .map(|tx| tx.output_objects.len() as u64)
+                .sum(),
+        );
+
+        Ok(Arc::new(data))
+    }
+}
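+
+// Usage sketch (illustrative only; assumes a `url`, a `metrics` handle, and a cancellation token
+// are already in scope):
+//
+//     let client = IngestionClient::new_remote(url, metrics.clone())?;
+//     let checkpoint = client.fetch(42, &CancellationToken::new()).await?;
+//     println!("fetched {} transactions", checkpoint.transactions.len());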
diff --git a/crates/sui-indexer-alt/src/ingestion/error.rs b/crates/sui-indexer-alt/src/ingestion/error.rs
new file mode 100644
index 0000000000000..17cafe495aa80
--- /dev/null
+++ b/crates/sui-indexer-alt/src/ingestion/error.rs
@@ -0,0 +1,25 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+pub type Result<T> = std::result::Result<T, Error>;
+
+#[derive(thiserror::Error, Debug)]
+pub enum Error {
+    #[error("Checkpoint {0} not found")]
+    NotFound(u64),
+
+    #[error("Failed to deserialize checkpoint {0}: {1}")]
+    DeserializationError(u64, #[source] anyhow::Error),
+
+    #[error("Failed to fetch checkpoint {0}: {1}")]
+    FetchError(u64, #[source] anyhow::Error),
+
+    #[error(transparent)]
+    ReqwestError(#[from] reqwest::Error),
+
+    #[error("No subscribers for ingestion service")]
+    NoSubscribers,
+
+    #[error("Shutdown signal received, stopping ingestion service")]
+    Cancelled,
+}
diff --git a/crates/sui-indexer-alt/src/ingestion/local_client.rs b/crates/sui-indexer-alt/src/ingestion/local_client.rs
new file mode 100644
index 0000000000000..2efb6708939ff
--- /dev/null
+++ b/crates/sui-indexer-alt/src/ingestion/local_client.rs
@@ -0,0 +1,65 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use crate::ingestion::client::{FetchError, FetchResult, IngestionClientTrait};
+use axum::body::Bytes;
+use std::path::PathBuf;
+
+pub struct LocalIngestionClient {
+    path: PathBuf,
+}
+
+impl LocalIngestionClient {
+    pub fn new(path: PathBuf) -> Self {
+        LocalIngestionClient { path }
+    }
+}
+
+#[async_trait::async_trait]
+impl IngestionClientTrait for LocalIngestionClient {
+    async fn fetch(&self, checkpoint: u64) -> FetchResult {
+        let path = self.path.join(format!("{}.chk", checkpoint));
+        let bytes = tokio::fs::read(path).await.map_err(|e| {
+            if e.kind() == std::io::ErrorKind::NotFound {
+                FetchError::NotFound
+            } else {
+                FetchError::Transient {
+                    reason: "io_error",
+                    error: e.into(),
+                }
+            }
+        })?;
+        Ok(Bytes::from(bytes))
+    }
+}
+
+#[cfg(test)]
+pub(crate) mod tests {
+    use crate::ingestion::client::IngestionClient;
+    use crate::ingestion::test_utils::test_checkpoint_data;
+    use crate::metrics::tests::test_metrics;
+    use std::sync::Arc;
+    use sui_storage::blob::{Blob, BlobEncoding};
+    use tokio_util::sync::CancellationToken;
+
+    #[tokio::test]
+    async fn local_test_fetch() {
+        let tempdir = tempfile::tempdir().unwrap().into_path();
+        let path = tempdir.join("1.chk");
+        let test_checkpoint = test_checkpoint_data(1);
+        tokio::fs::write(&path, &test_checkpoint).await.unwrap();
+
+        let metrics = Arc::new(test_metrics());
+        let local_client = IngestionClient::new_local(tempdir, metrics);
+        let checkpoint = local_client
+            .fetch(1, &CancellationToken::new())
+            .await
+            .unwrap();
+        assert_eq!(
+            Blob::encode(&*checkpoint, BlobEncoding::Bcs)
+                .unwrap()
+                .to_bytes(),
+            test_checkpoint
+        );
+    }
+}
diff --git a/crates/sui-indexer-alt/src/ingestion/mod.rs b/crates/sui-indexer-alt/src/ingestion/mod.rs
new file mode 100644
index 0000000000000..46e24b2f33d4e
--- /dev/null
+++ b/crates/sui-indexer-alt/src/ingestion/mod.rs
@@ -0,0 +1,432 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+// Allow use of `unbounded_channel` in `ingestion` -- it is used by the regulator task to receive
+// feedback. Traffic through this task should be minimal, but if a bound is applied to it and that
+// bound is hit, the indexer could deadlock.
+#![allow(clippy::disallowed_methods)]
+
+use std::{path::PathBuf, sync::Arc, time::Duration};
+
+use sui_types::full_checkpoint_content::CheckpointData;
+use tokio::{sync::mpsc, task::JoinHandle};
+use tokio_util::sync::CancellationToken;
+use url::Url;
+
+use crate::ingestion::broadcaster::broadcaster;
+use crate::ingestion::client::IngestionClient;
+use crate::ingestion::error::{Error, Result};
+use crate::ingestion::regulator::regulator;
+use crate::metrics::IndexerMetrics;
+
+mod broadcaster;
+mod client;
+pub mod error;
+mod local_client;
+mod regulator;
+mod remote_client;
+#[cfg(test)]
+mod test_utils;
+
+pub struct IngestionService {
+    config: IngestionConfig,
+    client: IngestionClient,
+    metrics: Arc<IndexerMetrics>,
+    ingest_hi_tx: mpsc::UnboundedSender<(&'static str, u64)>,
+    ingest_hi_rx: mpsc::UnboundedReceiver<(&'static str, u64)>,
+    subscribers: Vec<mpsc::Sender<Arc<CheckpointData>>>,
+    cancel: CancellationToken,
+}
+
+#[derive(clap::Args, Debug, Clone)]
+pub struct IngestionConfig {
+    /// Remote Store to fetch checkpoints from.
+    #[arg(long, required = true, group = "source")]
+    remote_store_url: Option<Url>,
+
+    /// Path to the local ingestion directory.
+    /// If both remote_store_url and local_ingestion_path are provided, remote_store_url will be used.
+    #[arg(long, required = true, group = "source")]
+    local_ingestion_path: Option<PathBuf>,
+
+    /// Maximum size of checkpoint backlog across all workers downstream of the ingestion service.
+    #[arg(long, default_value_t = 5000)]
+    checkpoint_buffer_size: usize,
+
+    /// Maximum number of checkpoints to attempt to fetch concurrently.
+    #[arg(long, default_value_t = 200)]
+    ingest_concurrency: usize,
+
+    /// Polling interval to retry fetching checkpoints that do not exist.
+    #[arg(
+        long,
+        default_value = "200",
+        value_name = "MILLISECONDS",
+        value_parser = |s: &str| s.parse().map(Duration::from_millis)
+    )]
+    retry_interval: Duration,
+}
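+
+// Example invocation (illustrative; the flag names follow clap's kebab-case derivation of the
+// fields above, while the binary name and URL are placeholders):
+//
+//     sui-indexer-alt --remote-store-url https://example.com/checkpoints \
+//         --checkpoint-buffer-size 5000 --ingest-concurrency 200 --retry-interval 200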
+
+impl IngestionService {
+    pub fn new(
+        config: IngestionConfig,
+        metrics: Arc<IndexerMetrics>,
+        cancel: CancellationToken,
+    ) -> Result<Self> {
+        // TODO: Potentially support a hybrid mode where we can fetch from both local and remote.
+        let client = if let Some(url) = config.remote_store_url.as_ref() {
+            IngestionClient::new_remote(url.clone(), metrics.clone())?
+        } else if let Some(path) = config.local_ingestion_path.as_ref() {
+            IngestionClient::new_local(path.clone(), metrics.clone())
+        } else {
+            panic!("Either remote_store_url or local_ingestion_path must be provided");
+        };
+        let subscribers = Vec::new();
+        let (ingest_hi_tx, ingest_hi_rx) = mpsc::unbounded_channel();
+        Ok(Self {
+            config,
+            client,
+            metrics,
+            ingest_hi_tx,
+            ingest_hi_rx,
+            subscribers,
+            cancel,
+        })
+    }
+
+    /// Add a new subscription to the ingestion service. Note that the service is susceptible to
+    /// the "slow receiver" problem: If one receiver is slower to process checkpoints than the
+    /// checkpoint ingestion rate, it will eventually hold up all receivers.
+    ///
+    /// The ingestion service can optionally receive checkpoint high watermarks from its
+    /// subscribers. If a subscriber provides a watermark, the ingestion service commits to not
+    /// running ahead of that watermark by more than the configured `checkpoint_buffer_size`.
+    ///
+    /// Returns the channel to receive checkpoints from and the channel to accept watermarks from.
+    pub fn subscribe(
+        &mut self,
+    ) -> (
+        mpsc::Receiver<Arc<CheckpointData>>,
+        mpsc::UnboundedSender<(&'static str, u64)>,
+    ) {
+        let (sender, receiver) = mpsc::channel(self.config.checkpoint_buffer_size);
+        self.subscribers.push(sender);
+        (receiver, self.ingest_hi_tx.clone())
+    }
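+
+    // Usage sketch for a subscriber (illustrative only; the `pipeline` modules wire this up in
+    // practice, and the pipeline name here is a placeholder):
+    //
+    //     let (mut rx, watermark_tx) = service.subscribe();
+    //     while let Some(checkpoint) = rx.recv().await {
+    //         // ... process the checkpoint ...
+    //         let _ = watermark_tx.send(("my_pipeline", checkpoint.checkpoint_summary.sequence_number));
+    //     }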
+
+    /// Start the ingestion service as a background task, consuming it in the process.
+    ///
+    /// Checkpoints are fetched concurrently from the `checkpoints` iterator, and pushed to
+    /// subscribers' channels (potentially out-of-order). Subscribers can communicate with the
+    /// ingestion service via their channels in the following ways:
+    ///
+    /// - If a subscriber is lagging (not receiving checkpoints fast enough), it will eventually
+    ///   provide back-pressure to the ingestion service, which will stop fetching new checkpoints.
+    /// - If a subscriber closes its channel, the ingestion service will interpret that as a signal
+    ///   to shutdown as well.
+    ///
+    /// If ingestion reaches the leading edge of the network, it will encounter checkpoints that do
+    /// not exist yet. These will be retried repeatedly on a fixed `retry_interval` until they
+    /// become available.
+    pub async fn run<I>(self, checkpoints: I) -> Result<(JoinHandle<()>, JoinHandle<()>)>
+    where
+        I: IntoIterator<Item = u64> + Send + Sync + 'static,
+        I::IntoIter: Send + Sync + 'static,
+    {
+        let IngestionService {
+            config,
+            client,
+            metrics,
+            ingest_hi_tx: _,
+            ingest_hi_rx,
+            subscribers,
+            cancel,
+        } = self;
+
+        if subscribers.is_empty() {
+            return Err(Error::NoSubscribers);
+        }
+
+        let (checkpoint_tx, checkpoint_rx) = mpsc::channel(config.ingest_concurrency);
+
+        let regulator = regulator(
+            checkpoints,
+            config.checkpoint_buffer_size,
+            ingest_hi_rx,
+            checkpoint_tx,
+            cancel.clone(),
+        );
+
+        let broadcaster = broadcaster(
+            config,
+            client,
+            metrics,
+            checkpoint_rx,
+            subscribers,
+            cancel.clone(),
+        );
+
+        Ok((regulator, broadcaster))
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::sync::Mutex;
+
+    use mysten_metrics::spawn_monitored_task;
+    use reqwest::StatusCode;
+    use wiremock::{MockServer, Request};
+
+    use crate::ingestion::remote_client::tests::{respond_with, status};
+    use crate::ingestion::test_utils::test_checkpoint_data;
+    use crate::metrics::tests::test_metrics;
+
+    use super::*;
+
+    async fn test_ingestion(
+        uri: String,
+        checkpoint_buffer_size: usize,
+        ingest_concurrency: usize,
+        cancel: CancellationToken,
+    ) -> IngestionService {
+        IngestionService::new(
+            IngestionConfig {
+                remote_store_url: Some(Url::parse(&uri).unwrap()),
+                local_ingestion_path: None,
+                checkpoint_buffer_size,
+                ingest_concurrency,
+                retry_interval: Duration::from_millis(200),
+            },
+            Arc::new(test_metrics()),
+            cancel,
+        )
+        .unwrap()
+    }
+
+    async fn test_subscriber(
+        stop_after: usize,
+        mut rx: mpsc::Receiver<Arc<CheckpointData>>,
+        cancel: CancellationToken,
+    ) -> JoinHandle<Vec<u64>> {
+        spawn_monitored_task!(async move {
+            let mut seqs = vec![];
+            for _ in 0..stop_after {
+                tokio::select! {
+                    _ = cancel.cancelled() => break,
+                    Some(checkpoint) = rx.recv() => {
+                        seqs.push(checkpoint.checkpoint_summary.sequence_number);
+                    }
+                }
+            }
+
+            rx.close();
+            seqs
+        })
+    }
+
+    /// If the ingestion service has no subscribers, it will fail fast (before fetching any
+    /// checkpoints).
+    #[tokio::test]
+    async fn fail_on_no_subscribers() {
+        telemetry_subscribers::init_for_testing();
+
+        // The mock server will repeatedly return 404, so if the service does try to fetch a
+        // checkpoint, it will be stuck repeatedly retrying.
+        let server = MockServer::start().await;
+        respond_with(&server, status(StatusCode::NOT_FOUND)).await;
+
+        let cancel = CancellationToken::new();
+        let ingestion_service = test_ingestion(server.uri(), 1, 1, cancel.clone()).await;
+
+        let err = ingestion_service.run(0..).await.unwrap_err();
+        assert!(matches!(err, Error::NoSubscribers));
+    }
+
+    /// The subscriber has no effective limit, and the mock server will always return checkpoint
+    /// information, but the ingestion service can still be stopped using the cancellation token.
+    #[tokio::test]
+    async fn shutdown_on_cancel() {
+        telemetry_subscribers::init_for_testing();
+
+        let server = MockServer::start().await;
+        respond_with(
+            &server,
+            status(StatusCode::OK).set_body_bytes(test_checkpoint_data(42)),
+        )
+        .await;
+
+        let cancel = CancellationToken::new();
+        let mut ingestion_service = test_ingestion(server.uri(), 1, 1, cancel.clone()).await;
+
+        let (rx, _) = ingestion_service.subscribe();
+        let subscriber = test_subscriber(usize::MAX, rx, cancel.clone()).await;
+        let (regulator, broadcaster) = ingestion_service.run(0..).await.unwrap();
+
+        cancel.cancel();
+        subscriber.await.unwrap();
+        regulator.await.unwrap();
+        broadcaster.await.unwrap();
+    }
+
+    /// The subscriber will stop after receiving a single checkpoint, and this will trigger the
+    /// ingestion service to stop as well, even if there are more checkpoints to fetch.
+    #[tokio::test]
+    async fn shutdown_on_subscriber_drop() {
+        telemetry_subscribers::init_for_testing();
+
+        let server = MockServer::start().await;
+        respond_with(
+            &server,
+            status(StatusCode::OK).set_body_bytes(test_checkpoint_data(42)),
+        )
+        .await;
+
+        let cancel = CancellationToken::new();
+        let mut ingestion_service = test_ingestion(server.uri(), 1, 1, cancel.clone()).await;
+
+        let (rx, _) = ingestion_service.subscribe();
+        let subscriber = test_subscriber(1, rx, cancel.clone()).await;
+        let (regulator, broadcaster) = ingestion_service.run(0..).await.unwrap();
+
+        cancel.cancelled().await;
+        subscriber.await.unwrap();
+        regulator.await.unwrap();
+        broadcaster.await.unwrap();
+    }
+
+    /// If fetching the checkpoint throws an unexpected error, the whole pipeline will be shut
+    /// down.
+    #[tokio::test]
+    async fn shutdown_on_unexpected_error() {
+        telemetry_subscribers::init_for_testing();
+
+        let server = MockServer::start().await;
+        respond_with(&server, status(StatusCode::IM_A_TEAPOT)).await;
+
+        let cancel = CancellationToken::new();
+        let mut ingestion_service = test_ingestion(server.uri(), 1, 1, cancel.clone()).await;
+
+        let (rx, _) = ingestion_service.subscribe();
+        let subscriber = test_subscriber(usize::MAX, rx, cancel.clone()).await;
+        let (regulator, broadcaster) = ingestion_service.run(0..).await.unwrap();
+
+        cancel.cancelled().await;
+        subscriber.await.unwrap();
+        regulator.await.unwrap();
+        broadcaster.await.unwrap();
+    }
+
+    /// The service will retry fetching a checkpoint that does not exist. In this test, the 4th
+    /// checkpoint returns 404 a couple of times before eventually succeeding.
+    #[tokio::test]
+    async fn retry_on_not_found() {
+        telemetry_subscribers::init_for_testing();
+
+        let server = MockServer::start().await;
+        let times: Mutex<u64> = Mutex::new(0);
+        respond_with(&server, move |_: &Request| {
+            let mut times = times.lock().unwrap();
+            *times += 1;
+            match *times {
+                1..4 => status(StatusCode::OK).set_body_bytes(test_checkpoint_data(*times)),
+                4..6 => status(StatusCode::NOT_FOUND),
+                _ => status(StatusCode::OK).set_body_bytes(test_checkpoint_data(*times)),
+            }
+        })
+        .await;
+
+        let cancel = CancellationToken::new();
+        let mut ingestion_service = test_ingestion(server.uri(), 1, 1, cancel.clone()).await;
+
+        let (rx, _) = ingestion_service.subscribe();
+        let subscriber = test_subscriber(5, rx, cancel.clone()).await;
+        let (regulator, broadcaster) = ingestion_service.run(0..).await.unwrap();
+
+        cancel.cancelled().await;
+        let seqs = subscriber.await.unwrap();
+        regulator.await.unwrap();
+        broadcaster.await.unwrap();
+
+        assert_eq!(seqs, vec![1, 2, 3, 6, 7]);
+    }
+
+    /// Similar to the previous test, but now it's a transient error that causes the retry.
+    #[tokio::test]
+    async fn retry_on_transient_error() {
+        telemetry_subscribers::init_for_testing();
+
+        let server = MockServer::start().await;
+        let times: Mutex<u64> = Mutex::new(0);
+        respond_with(&server, move |_: &Request| {
+            let mut times = times.lock().unwrap();
+            *times += 1;
+            match *times {
+                1..4 => status(StatusCode::OK).set_body_bytes(test_checkpoint_data(*times)),
+                4..6 => status(StatusCode::REQUEST_TIMEOUT),
+                _ => status(StatusCode::OK).set_body_bytes(test_checkpoint_data(*times)),
+            }
+        })
+        .await;
+
+        let cancel = CancellationToken::new();
+        let mut ingestion_service = test_ingestion(server.uri(), 1, 1, cancel.clone()).await;
+
+        let (rx, _) = ingestion_service.subscribe();
+        let subscriber = test_subscriber(5, rx, cancel.clone()).await;
+        let (regulator, broadcaster) = ingestion_service.run(0..).await.unwrap();
+
+        cancel.cancelled().await;
+        let seqs = subscriber.await.unwrap();
+        regulator.await.unwrap();
+        broadcaster.await.unwrap();
+
+        assert_eq!(seqs, vec![1, 2, 3, 6, 7]);
+    }
+
+    /// One subscriber is going to stop processing checkpoints, so even though the service can keep
+    /// fetching checkpoints, it will stop short because of the slow receiver. Other subscribers
+    /// can keep processing checkpoints that were buffered for the slow one.
+    #[tokio::test]
+    async fn back_pressure_and_buffering() {
+        telemetry_subscribers::init_for_testing();
+
+        let server = MockServer::start().await;
+        let times: Mutex<u64> = Mutex::new(0);
+        respond_with(&server, move |_: &Request| {
+            let mut times = times.lock().unwrap();
+            *times += 1;
+            status(StatusCode::OK).set_body_bytes(test_checkpoint_data(*times))
+        })
+        .await;
+
+        let cancel = CancellationToken::new();
+        let mut ingestion_service =
+            test_ingestion(server.uri(), /* buffer */ 3, 1, cancel.clone()).await;
+
+        // This subscriber will take its sweet time processing checkpoints.
+        let (mut laggard, _) = ingestion_service.subscribe();
+        async fn unblock(laggard: &mut mpsc::Receiver<Arc<CheckpointData>>) -> u64 {
+            let checkpoint = laggard.recv().await.unwrap();
+            checkpoint.checkpoint_summary.sequence_number
+        }
+
+        let (rx, _) = ingestion_service.subscribe();
+        let subscriber = test_subscriber(5, rx, cancel.clone()).await;
+        let (regulator, broadcaster) = ingestion_service.run(0..).await.unwrap();
+
+        // At this point, the service will have been able to pass 3 checkpoints to the non-lagging
+        // subscriber, while the laggard's buffer fills up. Now the laggard will pull two
+        // checkpoints, which will allow the rest of the pipeline to progress enough for the live
+        // subscriber to receive its quota.
+        assert_eq!(unblock(&mut laggard).await, 1);
+        assert_eq!(unblock(&mut laggard).await, 2);
+
+        cancel.cancelled().await;
+        let seqs = subscriber.await.unwrap();
+        regulator.await.unwrap();
+        broadcaster.await.unwrap();
+
+        assert_eq!(seqs, vec![1, 2, 3, 4, 5]);
+    }
+}
diff --git a/crates/sui-indexer-alt/src/ingestion/regulator.rs b/crates/sui-indexer-alt/src/ingestion/regulator.rs
new file mode 100644
index 0000000000000..de2e6e738a506
--- /dev/null
+++ b/crates/sui-indexer-alt/src/ingestion/regulator.rs
@@ -0,0 +1,257 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::collections::HashMap;
+
+use mysten_metrics::spawn_monitored_task;
+use tokio::{sync::mpsc, task::JoinHandle};
+use tokio_util::sync::CancellationToken;
+use tracing::info;
+
+/// The regulator task is responsible for writing out checkpoint sequence numbers from the
+/// `checkpoints` iterator to `checkpoint_tx`, bounded by the high watermark dictated by
+/// subscribers.
+///
+/// Subscribers can share their high watermarks on `ingest_hi_rx`. The regulator remembers these,
+/// and stops serving checkpoints if they are over the minimum subscriber watermark plus the
+/// ingestion `buffer_size`.
+///
+/// This offers a form of back-pressure that is sensitive to ordering, which is useful for
+/// subscribers that need to commit information in order: Without it, those subscribers may need to
+/// buffer unboundedly many updates from checkpoints while they wait for the checkpoint that they
+/// need to commit.
+///
+/// Note that back-pressure is optional, and will only be applied if a subscriber provides a
+/// watermark, at which point it must keep updating the watermark to allow the ingestion service to
+/// continue making progress.
+///
+/// The task will shut down if the `cancel` token is signalled, or if the `checkpoints` iterator
+/// runs out.
+pub(super) fn regulator<I>(
+    checkpoints: I,
+    buffer_size: usize,
+    mut ingest_hi_rx: mpsc::UnboundedReceiver<(&'static str, u64)>,
+    checkpoint_tx: mpsc::Sender<u64>,
+    cancel: CancellationToken,
+) -> JoinHandle<()>
+where
+    I: IntoIterator<Item = u64> + Send + Sync + 'static,
+    I::IntoIter: Send + Sync + 'static,
+{
+    spawn_monitored_task!(async move {
+        let mut ingest_hi = None;
+        let mut subscribers_hi = HashMap::new();
+        let mut checkpoints = checkpoints.into_iter().peekable();
+
+        info!("Starting ingestion regulator");
+
+        loop {
+            let Some(cp) = checkpoints.peek() else {
+                info!("Checkpoints done, stopping regulator");
+                break;
+            };
+
+            tokio::select! {
+                _ = cancel.cancelled() => {
+                    info!("Shutdown received, stopping regulator");
+                    break;
+                }
+
+                Some((name, hi)) = ingest_hi_rx.recv() => {
+                    subscribers_hi.insert(name, hi);
+                    ingest_hi = subscribers_hi.values().copied().min().map(|hi| hi + buffer_size as u64);
+                }
+
+                res = checkpoint_tx.send(*cp), if ingest_hi.map_or(true, |hi| *cp <= hi) => if res.is_ok() {
+                    checkpoints.next();
+                } else {
+                    info!("Checkpoint channel closed, stopping regulator");
+                    break;
+                }
+            }
+        }
+    })
+}
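+
+// Example of the gating rule above (illustrative): with `buffer_size = 3` and subscriber
+// watermarks {"a": 2, "b": 5}, the minimum is 2, so `ingest_hi = Some(2 + 3) = Some(5)` and
+// checkpoints up to 5 are sent; checkpoint 6 is held back until "a" reports a higher watermark.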
+
+#[cfg(test)]
+mod tests {
+    use std::time::Duration;
+
+    use tokio::time::{error::Elapsed, timeout};
+
+    use super::*;
+
+    /// Wait up to a second for a response on the channel, and return it, expecting this operation
+    /// to succeed.
+    async fn expect_recv(rx: &mut mpsc::Receiver<u64>) -> Option<u64> {
+        timeout(Duration::from_secs(1), rx.recv()).await.unwrap()
+    }
+
+    /// Wait up to a second for a response on the channel, but expecting this operation to timeout.
+    async fn expect_timeout(rx: &mut mpsc::Receiver<u64>) -> Elapsed {
+        timeout(Duration::from_secs(1), rx.recv())
+            .await
+            .unwrap_err()
+    }
+
+    #[tokio::test]
+    async fn finite_list_of_checkpoints() {
+        let (_, hi_rx) = mpsc::unbounded_channel();
+        let (cp_tx, mut cp_rx) = mpsc::channel(1);
+        let cancel = CancellationToken::new();
+
+        let cps = 0..5;
+        let h_regulator = regulator(cps, 0, hi_rx, cp_tx, cancel.clone());
+
+        for i in 0..5 {
+            assert_eq!(Some(i), expect_recv(&mut cp_rx).await);
+        }
+
+        h_regulator.await.unwrap();
+    }
+
+    #[tokio::test]
+    async fn shutdown_on_sender_closed() {
+        let (_, hi_rx) = mpsc::unbounded_channel();
+        let (cp_tx, mut cp_rx) = mpsc::channel(1);
+        let cancel = CancellationToken::new();
+
+        let h_regulator = regulator(0.., 0, hi_rx, cp_tx, cancel.clone());
+
+        for i in 0..5 {
+            assert_eq!(Some(i), expect_recv(&mut cp_rx).await);
+        }
+
+        drop(cp_rx);
+        h_regulator.await.unwrap();
+    }
+
+    #[tokio::test]
+    async fn shutdown_on_cancel() {
+        let (_, hi_rx) = mpsc::unbounded_channel();
+        let (cp_tx, mut cp_rx) = mpsc::channel(1);
+        let cancel = CancellationToken::new();
+
+        let h_regulator = regulator(0.., 0, hi_rx, cp_tx, cancel.clone());
+
+        for i in 0..5 {
+            assert_eq!(Some(i), expect_recv(&mut cp_rx).await);
+        }
+
+        cancel.cancel();
+        h_regulator.await.unwrap();
+    }
+
+    #[tokio::test]
+    async fn halted() {
+        let (hi_tx, hi_rx) = mpsc::unbounded_channel();
+        let (cp_tx, mut cp_rx) = mpsc::channel(1);
+        let cancel = CancellationToken::new();
+
+        hi_tx.send(("test", 4)).unwrap();
+
+        let h_regulator = regulator(0.., 0, hi_rx, cp_tx, cancel.clone());
+
+        for _ in 0..=4 {
+            expect_recv(&mut cp_rx).await;
+        }
+
+        // Regulator stopped because of watermark.
+        expect_timeout(&mut cp_rx).await;
+
+        cancel.cancel();
+        h_regulator.await.unwrap();
+    }
+
+    #[tokio::test]
+    async fn halted_buffered() {
+        let (hi_tx, hi_rx) = mpsc::unbounded_channel();
+        let (cp_tx, mut cp_rx) = mpsc::channel(1);
+        let cancel = CancellationToken::new();
+
+        hi_tx.send(("test", 2)).unwrap();
+
+        let h_regulator = regulator(0.., 2, hi_rx, cp_tx, cancel.clone());
+
+        for i in 0..=4 {
+            assert_eq!(Some(i), expect_recv(&mut cp_rx).await);
+        }
+
+        // Regulator stopped because of watermark (plus buffering).
+        expect_timeout(&mut cp_rx).await;
+
+        cancel.cancel();
+        h_regulator.await.unwrap();
+    }
+
+    #[tokio::test]
+    async fn resumption() {
+        let (hi_tx, hi_rx) = mpsc::unbounded_channel();
+        let (cp_tx, mut cp_rx) = mpsc::channel(1);
+        let cancel = CancellationToken::new();
+
+        hi_tx.send(("test", 2)).unwrap();
+
+        let h_regulator = regulator(0.., 0, hi_rx, cp_tx, cancel.clone());
+
+        for i in 0..=2 {
+            assert_eq!(Some(i), expect_recv(&mut cp_rx).await);
+        }
+
+        // Regulator stopped because of watermark, but resumes when that watermark is updated.
+        expect_timeout(&mut cp_rx).await;
+        hi_tx.send(("test", 4)).unwrap();
+
+        for i in 3..=4 {
+            assert_eq!(Some(i), expect_recv(&mut cp_rx).await);
+        }
+
+        // Halted again.
+        expect_timeout(&mut cp_rx).await;
+
+        cancel.cancel();
+        h_regulator.await.unwrap();
+    }
+
+    #[tokio::test]
+    async fn multiple_subscribers() {
+        let (hi_tx, hi_rx) = mpsc::unbounded_channel();
+        let (cp_tx, mut cp_rx) = mpsc::channel(1);
+        let cancel = CancellationToken::new();
+
+        hi_tx.send(("a", 2)).unwrap();
+        hi_tx.send(("b", 3)).unwrap();
+
+        let cps = 0..10;
+        let h_regulator = regulator(cps, 0, hi_rx, cp_tx, cancel.clone());
+
+        for i in 0..=2 {
+            assert_eq!(Some(i), expect_recv(&mut cp_rx).await);
+        }
+
+        // Regulator halted because of a's watermark.
+        expect_timeout(&mut cp_rx).await;
+
+        // Updating b's watermark doesn't make a difference.
+        hi_tx.send(("b", 4)).unwrap();
+        expect_timeout(&mut cp_rx).await;
+
+        // But updating a's watermark does.
+        hi_tx.send(("a", 3)).unwrap();
+        assert_eq!(Some(3), expect_recv(&mut cp_rx).await);
+
+        // ...by one checkpoint.
+        expect_timeout(&mut cp_rx).await;
+
+        // And we can make more progress by updating it again.
+        hi_tx.send(("a", 4)).unwrap();
+        assert_eq!(Some(4), expect_recv(&mut cp_rx).await);
+
+        // But another update to "a" will now not make a difference, because "b" is still behind.
+        hi_tx.send(("a", 5)).unwrap();
+        expect_timeout(&mut cp_rx).await;
+
+        cancel.cancel();
+        h_regulator.await.unwrap();
+    }
+}
diff --git a/crates/sui-indexer-alt/src/ingestion/remote_client.rs b/crates/sui-indexer-alt/src/ingestion/remote_client.rs
new file mode 100644
index 0000000000000..c4f91fee57990
--- /dev/null
+++ b/crates/sui-indexer-alt/src/ingestion/remote_client.rs
@@ -0,0 +1,292 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use crate::ingestion::client::{FetchError, FetchResult, IngestionClientTrait};
+use crate::ingestion::Result as IngestionResult;
+use reqwest::{Client, StatusCode};
+use tracing::{debug, error};
+use url::Url;
+
+#[derive(thiserror::Error, Debug, Eq, PartialEq)]
+pub enum HttpError {
+    #[error("HTTP error with status code: {0}")]
+    Http(StatusCode),
+}
+
+fn status_code_to_error(code: StatusCode) -> anyhow::Error {
+    HttpError::Http(code).into()
+}
+
+pub(crate) struct RemoteIngestionClient {
+    url: Url,
+    client: Client,
+}
+
+impl RemoteIngestionClient {
+    pub(crate) fn new(url: Url) -> IngestionResult<Self> {
+        Ok(Self {
+            url,
+            client: Client::builder().build()?,
+        })
+    }
+}
+
+#[async_trait::async_trait]
+impl IngestionClientTrait for RemoteIngestionClient {
+    /// Fetch a checkpoint from the remote store.
+    ///
+    /// Transient errors include:
+    ///
+    /// - failures to issue a request (network errors, redirect issues, etc.),
+    /// - request timeouts,
+    /// - rate limiting,
+    /// - server errors (5xx),
+    /// - issues getting a full response.
+    async fn fetch(&self, checkpoint: u64) -> FetchResult {
+        // SAFETY: The path being joined is statically known to be valid.
+        let url = self
+            .url
+            .join(&format!("/{checkpoint}.chk"))
+            .expect("Unexpected invalid URL");
+
+        let response = self
+            .client
+            .get(url)
+            .send()
+            .await
+            .map_err(|e| FetchError::Transient {
+                reason: "request",
+                error: e.into(),
+            })?;
+
+        match response.status() {
+            code if code.is_success() => {
+                // Failure to extract all the bytes from the payload, or to deserialize the
+                // checkpoint from them is considered a transient error -- the store being
+                // fetched from needs to be corrected, and ingestion will keep retrying it
+                // until it is.
+                response.bytes().await.map_err(|e| FetchError::Transient {
+                    reason: "bytes",
+                    error: e.into(),
+                })
+            }
+
+            // Treat 404s as a special case so we can match on this error type.
+            code @ StatusCode::NOT_FOUND => {
+                debug!(checkpoint, %code, "Checkpoint not found");
+                Err(FetchError::NotFound)
+            }
+
+            // Timeouts are a client error but they are usually transient.
+            code @ StatusCode::REQUEST_TIMEOUT => Err(FetchError::Transient {
+                reason: "timeout",
+                error: status_code_to_error(code),
+            }),
+
+            // Rate limiting is also a client error, but the backoff will eventually widen the
+            // interval appropriately.
+            code @ StatusCode::TOO_MANY_REQUESTS => Err(FetchError::Transient {
+                reason: "too_many_requests",
+                error: status_code_to_error(code),
+            }),
+
+            // Assume that if the server is facing difficulties, it will recover eventually.
+            code if code.is_server_error() => Err(FetchError::Transient {
+                reason: "server_error",
+                error: status_code_to_error(code),
+            }),
+
+            // For everything else, assume it's a permanent error and don't retry.
+            code => {
+                error!(checkpoint, %code, "Permanent error, giving up!");
+                Err(FetchError::Permanent(status_code_to_error(code)))
+            }
+        }
+    }
+}
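+
+// Illustrative sketch of how a caller might react to each `FetchError` variant (the helper
+// functions named here are hypothetical): retry transient failures, poll again on `NotFound`,
+// and give up on permanent errors.
+//
+//     match client.fetch(checkpoint).await {
+//         Ok(bytes) => process(bytes),
+//         Err(FetchError::NotFound) => poll_again_later(),
+//         Err(FetchError::Transient { reason, .. }) => retry_with_backoff(reason),
+//         Err(FetchError::Permanent(e)) => return Err(e),
+//     }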
+
+#[cfg(test)]
+pub(crate) mod tests {
+    use super::*;
+    use crate::ingestion::client::IngestionClient;
+    use crate::ingestion::error::Error;
+    use crate::ingestion::test_utils::test_checkpoint_data;
+    use crate::metrics::tests::test_metrics;
+    use axum::http::StatusCode;
+    use std::sync::{Arc, Mutex};
+    use tokio_util::sync::CancellationToken;
+    use wiremock::{
+        matchers::{method, path_regex},
+        Mock, MockServer, Request, Respond, ResponseTemplate,
+    };
+
+    pub(crate) async fn respond_with(server: &MockServer, response: impl Respond + 'static) {
+        Mock::given(method("GET"))
+            .and(path_regex(r"/\d+.chk"))
+            .respond_with(response)
+            .mount(server)
+            .await;
+    }
+
+    pub(crate) fn status(code: StatusCode) -> ResponseTemplate {
+        ResponseTemplate::new(code.as_u16())
+    }
+
+    fn remote_test_client(uri: String) -> IngestionClient {
+        IngestionClient::new_remote(Url::parse(&uri).unwrap(), Arc::new(test_metrics())).unwrap()
+    }
+
+    fn assert_http_error(error: Error, checkpoint: u64, code: StatusCode) {
+        let Error::FetchError(c, inner) = error else {
+            panic!("Expected FetchError, got: {:?}", error);
+        };
+        assert_eq!(c, checkpoint);
+        let Some(http_error) = inner.downcast_ref::<HttpError>() else {
+            panic!("Expected HttpError, got: {:?}", inner);
+        };
+        assert_eq!(http_error, &HttpError::Http(code));
+    }
+
+    #[tokio::test]
+    async fn fail_on_not_found() {
+        let server = MockServer::start().await;
+        respond_with(&server, status(StatusCode::NOT_FOUND)).await;
+
+        let client = remote_test_client(server.uri());
+        let error = client
+            .fetch(42, &CancellationToken::new())
+            .await
+            .unwrap_err();
+
+        assert!(matches!(error, Error::NotFound(42)));
+    }
+
+    #[tokio::test]
+    async fn fail_on_client_error() {
+        let server = MockServer::start().await;
+        respond_with(&server, status(StatusCode::IM_A_TEAPOT)).await;
+
+        let client = remote_test_client(server.uri());
+        let error = client
+            .fetch(42, &CancellationToken::new())
+            .await
+            .unwrap_err();
+
+        assert_http_error(error, 42, StatusCode::IM_A_TEAPOT);
+    }
+
+    /// Even if the server is repeatedly returning transient errors, it is possible to cancel the
+    /// fetch request via its cancellation token.
+    #[tokio::test]
+    async fn fail_on_cancel() {
+        let cancel = CancellationToken::new();
+        let server = MockServer::start().await;
+
+        // This mock server repeatedly returns internal server errors, but it will also trigger a
+        // cancellation after serving a couple of requests (a somewhat contrived test set-up).
+        let times: Mutex<u64> = Mutex::new(0);
+        let server_cancel = cancel.clone();
+        respond_with(&server, move |_: &Request| {
+            let mut times = times.lock().unwrap();
+            *times += 1;
+
+            if *times > 2 {
+                server_cancel.cancel();
+            }
+
+            status(StatusCode::INTERNAL_SERVER_ERROR)
+        })
+        .await;
+
+        let client = remote_test_client(server.uri());
+        let error = client.fetch(42, &cancel.clone()).await.unwrap_err();
+
+        assert!(matches!(error, Error::Cancelled));
+    }
+
+    /// Assume that failures to send the request to the remote store are due to temporary
+    /// connectivity issues, and retry them.
+    #[tokio::test]
+    async fn retry_on_request_error() {
+        let server = MockServer::start().await;
+
+        let times: Mutex<u64> = Mutex::new(0);
+        respond_with(&server, move |r: &Request| {
+            let mut times = times.lock().unwrap();
+            *times += 1;
+            match (*times, r.url.path()) {
+                // The first request will trigger a redirect to 0.chk no matter what the original
+                // request was for -- triggering a request error.
+                (1, _) => status(StatusCode::MOVED_PERMANENTLY).append_header("Location", "/0.chk"),
+
+                // Set up checkpoint 0 as an infinite redirect loop.
+                (_, "/0.chk") => {
+                    status(StatusCode::MOVED_PERMANENTLY).append_header("Location", r.url.as_str())
+                }
+
+                // Subsequently, requests will fail with a permanent error, which is what we
+                // expect to see.
+                _ => status(StatusCode::IM_A_TEAPOT),
+            }
+        })
+        .await;
+
+        let client = remote_test_client(server.uri());
+        let error = client
+            .fetch(42, &CancellationToken::new())
+            .await
+            .unwrap_err();
+
+        assert_http_error(error, 42, StatusCode::IM_A_TEAPOT);
+    }
+
+    /// Assume that certain errors will recover by themselves, and keep retrying with an
+    /// exponential back-off. These errors include: 5xx (server) errors, 408 (timeout), and 429
+    /// (rate limiting).
+    #[tokio::test]
+    async fn retry_on_transient_server_error() {
+        let server = MockServer::start().await;
+        let times: Mutex<u64> = Mutex::new(0);
+        respond_with(&server, move |_: &Request| {
+            let mut times = times.lock().unwrap();
+            *times += 1;
+            status(match *times {
+                1 => StatusCode::INTERNAL_SERVER_ERROR,
+                2 => StatusCode::REQUEST_TIMEOUT,
+                3 => StatusCode::TOO_MANY_REQUESTS,
+                _ => StatusCode::IM_A_TEAPOT,
+            })
+        })
+        .await;
+
+        let client = remote_test_client(server.uri());
+        let error = client
+            .fetch(42, &CancellationToken::new())
+            .await
+            .unwrap_err();
+
+        assert_http_error(error, 42, StatusCode::IM_A_TEAPOT);
+    }
+
+    /// Treat deserialization failure as another kind of transient error -- all checkpoint data
+    /// that is fetched should be valid (deserializable as a `CheckpointData`).
+    #[tokio::test]
+    async fn retry_on_deserialization_error() {
+        let server = MockServer::start().await;
+        let times: Mutex<u64> = Mutex::new(0);
+        respond_with(&server, move |_: &Request| {
+            let mut times = times.lock().unwrap();
+            *times += 1;
+            if *times < 3 {
+                status(StatusCode::OK).set_body_bytes(vec![])
+            } else {
+                status(StatusCode::OK).set_body_bytes(test_checkpoint_data(42))
+            }
+        })
+        .await;
+
+        let client = remote_test_client(server.uri());
+        let checkpoint = client.fetch(42, &CancellationToken::new()).await.unwrap();
+        assert_eq!(42, checkpoint.checkpoint_summary.sequence_number)
+    }
+}
diff --git a/crates/sui-indexer-alt/src/ingestion/test_utils.rs b/crates/sui-indexer-alt/src/ingestion/test_utils.rs
new file mode 100644
index 0000000000000..99f130927d0bf
--- /dev/null
+++ b/crates/sui-indexer-alt/src/ingestion/test_utils.rs
@@ -0,0 +1,56 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use rand::prelude::StdRng;
+use rand::SeedableRng;
+use sui_storage::blob::{Blob, BlobEncoding};
+use sui_types::crypto::KeypairTraits;
+use sui_types::full_checkpoint_content::CheckpointData;
+use sui_types::gas::GasCostSummary;
+use sui_types::messages_checkpoint::{
+    CertifiedCheckpointSummary, CheckpointContents, CheckpointSummary, SignedCheckpointSummary,
+};
+use sui_types::supported_protocol_versions::ProtocolConfig;
+use sui_types::utils::make_committee_key;
+
+const RNG_SEED: [u8; 32] = [
+    21, 23, 199, 200, 234, 250, 252, 178, 94, 15, 202, 178, 62, 186, 88, 137, 233, 192, 130, 157,
+    179, 179, 65, 9, 31, 249, 221, 123, 225, 112, 199, 247,
+];
+
+pub(crate) fn test_checkpoint_data(cp: u64) -> Vec<u8> {
+    let mut rng = StdRng::from_seed(RNG_SEED);
+    let (keys, committee) = make_committee_key(&mut rng);
+    let contents = CheckpointContents::new_with_digests_only_for_tests(vec![]);
+    let summary = CheckpointSummary::new(
+        &ProtocolConfig::get_for_max_version_UNSAFE(),
+        0,
+        cp,
+        0,
+        &contents,
+        None,
+        GasCostSummary::default(),
+        None,
+        0,
+        Vec::new(),
+    );
+
+    let sign_infos: Vec<_> = keys
+        .iter()
+        .map(|k| {
+            let name = k.public().into();
+            SignedCheckpointSummary::sign(committee.epoch, &summary, k, name)
+        })
+        .collect();
+
+    let checkpoint_data = CheckpointData {
+        checkpoint_summary: CertifiedCheckpointSummary::new(summary, sign_infos, &committee)
+            .unwrap(),
+        checkpoint_contents: contents,
+        transactions: vec![],
+    };
+
+    Blob::encode(&checkpoint_data, BlobEncoding::Bcs)
+        .unwrap()
+        .to_bytes()
+}
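+
+// Illustrative round-trip sketch, assuming `Blob::from_bytes` is the inverse of
+// `Blob::encode(..).to_bytes()` in `sui_storage::blob`:
+//
+//     let bytes = test_checkpoint_data(42);
+//     let data: CheckpointData = Blob::from_bytes(&bytes).unwrap();
+//     assert_eq!(data.checkpoint_summary.sequence_number, 42);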
diff --git a/crates/sui-indexer-alt/src/lib.rs b/crates/sui-indexer-alt/src/lib.rs
new file mode 100644
index 0000000000000..fdfe8057966dd
--- /dev/null
+++ b/crates/sui-indexer-alt/src/lib.rs
@@ -0,0 +1,274 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::{collections::BTreeSet, net::SocketAddr, sync::Arc};
+
+use anyhow::{Context, Result};
+use db::{Db, DbConfig};
+use ingestion::{IngestionConfig, IngestionService};
+use metrics::{IndexerMetrics, MetricsService};
+use models::watermarks::CommitterWatermark;
+use pipeline::{concurrent, sequential, PipelineConfig, Processor};
+use task::graceful_shutdown;
+use tokio::task::JoinHandle;
+use tokio_util::sync::CancellationToken;
+use tracing::info;
+
+pub mod args;
+pub mod db;
+pub mod handlers;
+pub mod ingestion;
+pub mod metrics;
+pub mod models;
+pub mod pipeline;
+pub mod schema;
+pub mod task;
+
+pub struct Indexer {
+    /// Connection pool to the database.
+    db: Db,
+
+    /// Prometheus Metrics.
+    metrics: Arc<IndexerMetrics>,
+
+    /// Service for serving Prometheus metrics.
+    metrics_service: MetricsService,
+
+    /// Service for downloading and disseminating checkpoint data.
+    ingestion_service: IngestionService,
+
+    /// Parameters for the committers of each pipeline.
+    pipeline_config: PipelineConfig,
+
+    /// Optional override of the checkpoint lowerbound.
+    first_checkpoint: Option<u64>,
+
+    /// Optional override of the checkpoint upperbound.
+    last_checkpoint: Option<u64>,
+
+    /// Optional override of enabled pipelines.
+    enabled_pipelines: BTreeSet<String>,
+
+    /// Cancellation token shared among all continuous tasks in the service.
+    cancel: CancellationToken,
+
+    /// The checkpoint lowerbound derived from watermarks of pipelines added to the indexer. When
+    /// the indexer runs, it will start from this point, unless this has been overridden by
+    /// [Self::first_checkpoint].
+    first_checkpoint_from_watermark: u64,
+
+    /// The handles for every task spawned by this indexer, used to manage graceful shutdown.
+    handles: Vec<JoinHandle<()>>,
+}
+
+#[derive(clap::Args, Debug, Clone)]
+pub struct IndexerConfig {
+    #[command(flatten)]
+    pub ingestion_config: IngestionConfig,
+
+    #[command(flatten)]
+    pub pipeline_config: PipelineConfig,
+
+    /// Override for the checkpoint to start ingestion from -- useful for backfills. By default,
+    /// ingestion will start just after the lowest checkpoint watermark across all active
+    /// pipelines.
+    #[arg(long)]
+    first_checkpoint: Option<u64>,
+
+    /// Override for the checkpoint to end ingestion at (inclusive) -- useful for backfills. By
+    /// default, ingestion will not stop, and will continue to poll for new checkpoints.
+    #[arg(long)]
+    last_checkpoint: Option<u64>,
+
+    /// Only run the following pipelines -- useful for backfills. If not provided, all pipelines
+    /// will be run.
+    #[arg(long, action = clap::ArgAction::Append)]
+    pipeline: Vec<String>,
+
+    /// Address to serve Prometheus Metrics from.
+    #[arg(long, default_value = "0.0.0.0:9184")]
+    pub metrics_address: SocketAddr,
+}
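+
+// Illustrative invocation of the flags above (the checkpoint range and pipeline names are
+// hypothetical values, chosen only for the example):
+//
+//     sui-indexer-alt indexer --first-checkpoint 1000000 --last-checkpoint 2000000 \
+//         --pipeline tx_affected_objects --pipeline tx_balance_changes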
+
+impl Indexer {
+    pub async fn new(
+        db_config: DbConfig,
+        indexer_config: IndexerConfig,
+        cancel: CancellationToken,
+    ) -> Result<Self> {
+        let IndexerConfig {
+            ingestion_config,
+            pipeline_config,
+            first_checkpoint,
+            last_checkpoint,
+            pipeline,
+            metrics_address,
+        } = indexer_config;
+
+        let db = Db::new(db_config)
+            .await
+            .context("Failed to connect to database")?;
+
+        // At indexer initialization, we ensure that the DB schema is up-to-date.
+        db.run_migrations()
+            .await
+            .context("Failed to run pending migrations")?;
+
+        let (metrics, metrics_service) =
+            MetricsService::new(metrics_address, db.clone(), cancel.clone())?;
+        let ingestion_service =
+            IngestionService::new(ingestion_config, metrics.clone(), cancel.clone())?;
+
+        Ok(Self {
+            db,
+            metrics,
+            metrics_service,
+            ingestion_service,
+            pipeline_config,
+            first_checkpoint,
+            last_checkpoint,
+            enabled_pipelines: pipeline.into_iter().collect(),
+            cancel,
+            first_checkpoint_from_watermark: u64::MAX,
+            handles: vec![],
+        })
+    }
+
+    /// Adds a new pipeline to this indexer and starts it up. Although its tasks have started,
+    /// they will be idle until the ingestion service starts and serves them checkpoint data.
+    ///
+    /// Concurrent pipelines commit checkpoint data out-of-order to maximise throughput, and they
+    /// keep the watermark table up-to-date with the highest checkpoint for which they can
+    /// guarantee that all of their data has been written.
+    pub async fn concurrent_pipeline<H: concurrent::Handler + 'static>(&mut self) -> Result<()> {
+        let Some(watermark) = self.add_pipeline::<H>().await? else {
+            return Ok(());
+        };
+
+        let (processor, collector, committer, watermark) = concurrent::pipeline::<H>(
+            watermark,
+            self.pipeline_config.clone(),
+            self.db.clone(),
+            self.ingestion_service.subscribe().0,
+            self.metrics.clone(),
+            self.cancel.clone(),
+        );
+
+        self.handles.push(processor);
+        self.handles.push(collector);
+        self.handles.push(committer);
+        self.handles.push(watermark);
+
+        Ok(())
+    }
+
+    /// Adds a new pipeline to this indexer and starts it up. Although its tasks have started,
+    /// they will be idle until the ingestion service starts and serves them checkpoint data.
+    ///
+    /// Sequential pipelines commit checkpoint data in order, which sacrifices throughput, but may
+    /// be required for pipelines that modify data in-place (where each update is not a plain
+    /// insert but may modify an existing row, so ordering between updates matters).
+    ///
+    /// The pipeline can optionally be configured to lag behind the ingestion service by a fixed
+    /// number of checkpoints (configured by `checkpoint_lag`).
+    pub async fn sequential_pipeline<H: sequential::Handler + 'static>(
+        &mut self,
+        checkpoint_lag: Option<u64>,
+    ) -> Result<()> {
+        let Some(watermark) = self.add_pipeline::<H>().await? else {
+            return Ok(());
+        };
+
+        let (checkpoint_rx, watermark_tx) = self.ingestion_service.subscribe();
+
+        let (processor, committer) = sequential::pipeline::<H>(
+            watermark,
+            self.pipeline_config.clone(),
+            checkpoint_lag,
+            self.db.clone(),
+            checkpoint_rx,
+            watermark_tx,
+            self.metrics.clone(),
+            self.cancel.clone(),
+        );
+
+        self.handles.push(processor);
+        self.handles.push(committer);
+
+        Ok(())
+    }
+
+    /// Start ingesting checkpoints. Ingestion either starts from the configured
+    /// `first_checkpoint`, or it is calculated based on the watermarks of all active pipelines.
+    /// Ingestion will stop after consuming the configured `last_checkpoint`, if one is provided,
+    /// or will continue until it tracks the tip of the network.
+    pub async fn run(mut self) -> Result<JoinHandle<()>> {
+        let metrics_handle = self
+            .metrics_service
+            .run()
+            .await
+            .context("Failed to start metrics service")?;
+
+        // If an override has been provided, start ingestion from there, otherwise start ingestion
+        // from just after the lowest committer watermark across all enabled pipelines.
+        let first_checkpoint = self
+            .first_checkpoint
+            .unwrap_or(self.first_checkpoint_from_watermark);
+
+        let last_checkpoint = self.last_checkpoint.unwrap_or(u64::MAX);
+
+        info!(first_checkpoint, last_checkpoint = ?self.last_checkpoint, "Ingestion range");
+
+        let (regulator_handle, broadcaster_handle) = self
+            .ingestion_service
+            .run(first_checkpoint..=last_checkpoint)
+            .await
+            .context("Failed to start ingestion service")?;
+
+        self.handles.push(regulator_handle);
+        self.handles.push(broadcaster_handle);
+
+        let cancel = self.cancel.clone();
+        Ok(tokio::spawn(async move {
+            // Wait for the ingestion service and all its related tasks to wind down gracefully:
+            // If ingestion has been configured to only handle a specific range of checkpoints, we
+            // want to make sure that tasks are allowed to run to completion before shutting them
+            // down.
+            graceful_shutdown(self.handles, self.cancel).await;
+
+            info!("Indexing pipeline gracefully shut down");
+
+            // Pick off any stragglers (in this case, just the metrics service).
+            cancel.cancel();
+            metrics_handle.await.unwrap();
+        }))
+    }
+
+    /// Update the indexer's first checkpoint based on the watermark for the pipeline handled by
+    /// `P` (as long as it's enabled). Returns `Ok(None)` if the pipeline is disabled,
+    /// `Ok(Some(None))` if the pipeline is enabled but its watermark is not found, and
+    /// `Ok(Some(Some(watermark)))` if the pipeline is enabled and the watermark is found.
+    async fn add_pipeline<P: Processor + 'static>(
+        &mut self,
+    ) -> Result<Option<Option<CommitterWatermark<'static>>>> {
+        if !self.enabled_pipelines.is_empty() && !self.enabled_pipelines.contains(P::NAME) {
+            info!("Skipping pipeline {}", P::NAME);
+            return Ok(None);
+        }
+
+        let mut conn = self.db.connect().await.context("Failed DB connection")?;
+
+        let watermark = CommitterWatermark::get(&mut conn, P::NAME)
+            .await
+            .with_context(|| format!("Failed to get watermark for {}", P::NAME))?;
+
+        // TODO(amnn): Test this (depends on supporting migrations and tempdb).
+        self.first_checkpoint_from_watermark = watermark
+            .as_ref()
+            .map_or(0, |w| w.checkpoint_hi_inclusive as u64 + 1)
+            .min(self.first_checkpoint_from_watermark);
+
+        Ok(Some(watermark))
+    }
+}
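+
+// Illustrative note: the nested `Option` returned by `add_pipeline` distinguishes three cases,
+// which callers handle roughly as follows:
+//
+//     match self.add_pipeline::<P>().await? {
+//         None => { /* pipeline disabled via the --pipeline filter */ }
+//         Some(None) => { /* enabled, but no watermark recorded yet: ingest from checkpoint 0 */ }
+//         Some(Some(w)) => { /* enabled; resume from w.checkpoint_hi_inclusive + 1 */ }
+//     }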
diff --git a/crates/sui-indexer-alt/src/main.rs b/crates/sui-indexer-alt/src/main.rs
new file mode 100644
index 0000000000000..fd8e30cc8bee5
--- /dev/null
+++ b/crates/sui-indexer-alt/src/main.rs
@@ -0,0 +1,63 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use anyhow::{Context, Result};
+use clap::Parser;
+use sui_indexer_alt::args::Command;
+use sui_indexer_alt::db::reset_database;
+use sui_indexer_alt::{
+    args::Args,
+    handlers::{
+        ev_emit_mod::EvEmitMod, ev_struct_inst::EvStructInst, kv_checkpoints::KvCheckpoints,
+        kv_objects::KvObjects, kv_transactions::KvTransactions, obj_versions::ObjVersions,
+        sum_coin_balances::SumCoinBalances, sum_obj_types::SumObjTypes,
+        tx_affected_objects::TxAffectedObjects, tx_balance_changes::TxBalanceChanges,
+        wal_coin_balances::WalCoinBalances, wal_obj_types::WalObjTypes,
+    },
+    Indexer,
+};
+use tokio_util::sync::CancellationToken;
+
+#[tokio::main]
+async fn main() -> Result<()> {
+    let args = Args::parse();
+
+    // Enable tracing, configured by environment variables.
+    let _guard = telemetry_subscribers::TelemetryConfig::new()
+        .with_env()
+        .init();
+
+    let cancel = CancellationToken::new();
+
+    match args.command {
+        Command::Indexer {
+            indexer,
+            consistent_range: lag,
+        } => {
+            let mut indexer = Indexer::new(args.db_config, indexer, cancel.clone()).await?;
+
+            indexer.concurrent_pipeline::<EvEmitMod>().await?;
+            indexer.concurrent_pipeline::<EvStructInst>().await?;
+            indexer.concurrent_pipeline::<KvCheckpoints>().await?;
+            indexer.concurrent_pipeline::<KvObjects>().await?;
+            indexer.concurrent_pipeline::<KvTransactions>().await?;
+            indexer.concurrent_pipeline::<ObjVersions>().await?;
+            indexer.concurrent_pipeline::<TxAffectedObjects>().await?;
+            indexer.concurrent_pipeline::<TxBalanceChanges>().await?;
+            indexer.concurrent_pipeline::<WalCoinBalances>().await?;
+            indexer.concurrent_pipeline::<WalObjTypes>().await?;
+            indexer.sequential_pipeline::<SumCoinBalances>(lag).await?;
+            indexer.sequential_pipeline::<SumObjTypes>(lag).await?;
+
+            let h_indexer = indexer.run().await.context("Failed to start indexer")?;
+
+            cancel.cancelled().await;
+            let _ = h_indexer.await;
+        }
+        Command::ResetDatabase { skip_migrations } => {
+            reset_database(args.db_config, skip_migrations).await?;
+        }
+    }
+
+    Ok(())
+}
diff --git a/crates/sui-indexer-alt/src/metrics.rs b/crates/sui-indexer-alt/src/metrics.rs
new file mode 100644
index 0000000000000..819298f4b915e
--- /dev/null
+++ b/crates/sui-indexer-alt/src/metrics.rs
@@ -0,0 +1,592 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::{net::SocketAddr, sync::Arc};
+
+use anyhow::Result;
+use axum::{extract::Extension, routing::get, Router};
+use mysten_metrics::RegistryService;
+use prometheus::{
+    core::{Collector, Desc},
+    proto::{Counter, Gauge, LabelPair, Metric, MetricFamily, MetricType, Summary},
+    register_histogram_vec_with_registry, register_histogram_with_registry,
+    register_int_counter_vec_with_registry, register_int_counter_with_registry,
+    register_int_gauge_vec_with_registry, Histogram, HistogramVec, IntCounter, IntCounterVec,
+    IntGaugeVec, Registry,
+};
+use tokio::{net::TcpListener, task::JoinHandle};
+use tokio_util::sync::CancellationToken;
+use tracing::{info, warn};
+
+use crate::{db::Db, ingestion::error::Error};
+
+/// Histogram buckets for the distribution of checkpoint fetching latencies.
+const INGESTION_LATENCY_SEC_BUCKETS: &[f64] = &[
+    0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.0, 20.0, 50.0, 100.0,
+];
+
+/// Histogram buckets for the distribution of latencies for processing a checkpoint in the indexer
+/// (without having to call out to other services).
+const PROCESSING_LATENCY_SEC_BUCKETS: &[f64] = &[
+    0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.0, 20.0, 50.0, 100.0,
+];
+
+/// Histogram buckets for the distribution of latencies for writing to the database.
+const DB_UPDATE_LATENCY_SEC_BUCKETS: &[f64] = &[
+    0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.0, 20.0, 50.0, 100.0, 200.0, 500.0, 1000.0,
+    2000.0, 5000.0, 10000.0,
+];
+
+/// Histogram buckets for the distribution of batch sizes (number of rows) written to the database.
+const BATCH_SIZE_BUCKETS: &[f64] = &[
+    1.0, 2.0, 5.0, 10.0, 20.0, 50.0, 100.0, 200.0, 500.0, 1000.0, 2000.0, 5000.0, 10000.0, 20000.0,
+];
+
+/// Service to expose prometheus metrics from the indexer.
+pub struct MetricsService {
+    addr: SocketAddr,
+    service: RegistryService,
+    cancel: CancellationToken,
+}
+
+#[derive(Clone)]
+pub struct IndexerMetrics {
+    // Statistics related to fetching data from the remote store.
+    pub total_ingested_checkpoints: IntCounter,
+    pub total_ingested_transactions: IntCounter,
+    pub total_ingested_events: IntCounter,
+    pub total_ingested_inputs: IntCounter,
+    pub total_ingested_outputs: IntCounter,
+    pub total_ingested_bytes: IntCounter,
+    pub total_ingested_transient_retries: IntCounterVec,
+    pub total_ingested_not_found_retries: IntCounter,
+
+    pub ingested_checkpoint_latency: Histogram,
+
+    // Statistics related to individual ingestion pipelines' handlers.
+    pub total_handler_checkpoints_received: IntCounterVec,
+    pub total_handler_checkpoints_processed: IntCounterVec,
+    pub total_handler_rows_created: IntCounterVec,
+
+    pub handler_checkpoint_latency: HistogramVec,
+
+    // Statistics related to individual ingestion pipelines' committers.
+    pub total_collector_rows_received: IntCounterVec,
+    pub total_collector_batches_created: IntCounterVec,
+    pub total_committer_batches_attempted: IntCounterVec,
+    pub total_committer_batches_succeeded: IntCounterVec,
+    pub total_committer_rows_committed: IntCounterVec,
+    pub total_committer_rows_affected: IntCounterVec,
+    pub total_watermarks_out_of_order: IntCounterVec,
+
+    pub collector_gather_latency: HistogramVec,
+    pub collector_batch_size: HistogramVec,
+    pub committer_commit_latency: HistogramVec,
+    pub watermark_gather_latency: HistogramVec,
+    pub watermark_commit_latency: HistogramVec,
+
+    pub watermark_epoch: IntGaugeVec,
+    pub watermark_checkpoint: IntGaugeVec,
+    pub watermark_transaction: IntGaugeVec,
+    pub watermark_timestamp_ms: IntGaugeVec,
+
+    pub watermark_epoch_in_db: IntGaugeVec,
+    pub watermark_checkpoint_in_db: IntGaugeVec,
+    pub watermark_transaction_in_db: IntGaugeVec,
+    pub watermark_timestamp_in_db_ms: IntGaugeVec,
+}
+
+/// Collects information about the database connection pool.
+struct DbConnectionStatsCollector {
+    db: Db,
+    desc: Vec<(MetricType, Desc)>,
+}
+
+impl MetricsService {
+    /// Create a new metrics service, exposing Mysten-wide metrics, and Indexer-specific metrics.
+    /// Returns the Indexer-specific metrics and the service itself (which must be run with
+    /// [Self::run]).
+    pub fn new(
+        addr: SocketAddr,
+        db: Db,
+        cancel: CancellationToken,
+    ) -> Result<(Arc<IndexerMetrics>, MetricsService)> {
+        let registry = Registry::new_custom(Some("indexer_alt".to_string()), None)?;
+
+        let metrics = IndexerMetrics::new(&registry);
+        mysten_metrics::init_metrics(&registry);
+        registry.register(Box::new(DbConnectionStatsCollector::new(db)))?;
+
+        let service = Self {
+            addr,
+            service: RegistryService::new(registry),
+            cancel,
+        };
+
+        Ok((Arc::new(metrics), service))
+    }
+
+    /// Start the service. The service will run until the cancellation token is triggered.
+    pub async fn run(self) -> Result<JoinHandle<()>> {
+        let listener = TcpListener::bind(&self.addr).await?;
+        let app = Router::new()
+            .route("/metrics", get(mysten_metrics::metrics))
+            .layer(Extension(self.service));
+
+        Ok(tokio::spawn(async move {
+            info!("Starting metrics service on {}", self.addr);
+            axum::serve(listener, app)
+                .with_graceful_shutdown(async move {
+                    self.cancel.cancelled().await;
+                    info!("Shutdown received, stopping metrics service");
+                })
+                .await
+                .unwrap();
+        }))
+    }
+}
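+
+// Illustrative wiring sketch, mirroring what `Indexer::new` and `Indexer::run` do in lib.rs:
+//
+//     let (metrics, service) = MetricsService::new(addr, db.clone(), cancel.clone())?;
+//     let metrics_handle = service.run().await?;   // serves /metrics until `cancel` fires
+//     metrics.total_ingested_checkpoints.inc();    // counters are shared via the returned Arc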
+
+impl IndexerMetrics {
+    pub fn new(registry: &Registry) -> Self {
+        Self {
+            total_ingested_checkpoints: register_int_counter_with_registry!(
+                "indexer_total_ingested_checkpoints",
+                "Total number of checkpoints fetched from the remote store",
+                registry,
+            )
+            .unwrap(),
+            total_ingested_transactions: register_int_counter_with_registry!(
+                "indexer_total_ingested_transactions",
+                "Total number of transactions fetched from the remote store",
+                registry,
+            )
+            .unwrap(),
+            total_ingested_events: register_int_counter_with_registry!(
+                "indexer_total_ingested_events",
+                "Total number of events fetched from the remote store",
+                registry,
+            )
+            .unwrap(),
+            total_ingested_inputs: register_int_counter_with_registry!(
+                "indexer_total_ingested_inputs",
+                "Total number of input objects fetched from the remote store",
+                registry,
+            )
+            .unwrap(),
+            total_ingested_outputs: register_int_counter_with_registry!(
+                "indexer_total_ingested_outputs",
+                "Total number of output objects fetched from the remote store",
+                registry,
+            )
+            .unwrap(),
+            total_ingested_bytes: register_int_counter_with_registry!(
+                "indexer_total_ingested_bytes",
+                "Total number of bytes fetched from the remote store",
+                registry,
+            )
+            .unwrap(),
+            total_ingested_transient_retries: register_int_counter_vec_with_registry!(
+                "indexer_total_ingested_retries",
+                "Total number of retries due to transient errors while fetching data from the \
+                 remote store",
+                &["reason"],
+                registry,
+            )
+            .unwrap(),
+            total_ingested_not_found_retries: register_int_counter_with_registry!(
+                "indexer_total_ingested_not_found_retries",
+                "Total number of retries due to the not found errors while fetching data from the \
+                 remote store",
+                registry,
+            )
+            .unwrap(),
+            ingested_checkpoint_latency: register_histogram_with_registry!(
+                "indexer_ingested_checkpoint_latency",
+                "Time taken to fetch a checkpoint from the remote store, including retries",
+                INGESTION_LATENCY_SEC_BUCKETS.to_vec(),
+                registry,
+            )
+            .unwrap(),
+            total_handler_checkpoints_received: register_int_counter_vec_with_registry!(
+                "indexer_total_handler_checkpoints_received",
+                "Total number of checkpoints received by this handler",
+                &["pipeline"],
+                registry,
+            )
+            .unwrap(),
+            total_handler_checkpoints_processed: register_int_counter_vec_with_registry!(
+                "indexer_total_handler_checkpoints_processed",
+                "Total number of checkpoints processed (converted into rows) by this handler",
+                &["pipeline"],
+                registry,
+            )
+            .unwrap(),
+            total_handler_rows_created: register_int_counter_vec_with_registry!(
+                "indexer_total_handler_rows_created",
+                "Total number of rows created by this handler",
+                &["pipeline"],
+                registry,
+            )
+            .unwrap(),
+            handler_checkpoint_latency: register_histogram_vec_with_registry!(
+                "indexer_handler_checkpoint_latency",
+                "Time taken to process a checkpoint by this handler",
+                &["pipeline"],
+                PROCESSING_LATENCY_SEC_BUCKETS.to_vec(),
+                registry,
+            )
+            .unwrap(),
+            total_collector_rows_received: register_int_counter_vec_with_registry!(
+                "indexer_total_collector_rows_received",
+                "Total number of rows received by this collector",
+                &["pipeline"],
+                registry,
+            )
+            .unwrap(),
+            total_collector_batches_created: register_int_counter_vec_with_registry!(
+                "indexer_total_collector_batches_created",
+                "Total number of batches created by this collector",
+                &["pipeline"],
+                registry,
+            )
+            .unwrap(),
+            total_committer_batches_attempted: register_int_counter_vec_with_registry!(
+                "indexer_total_committer_batches_attempted",
+                "Total number of batches writes attempted by this committer",
+                &["pipeline"],
+                registry,
+            )
+            .unwrap(),
+            total_committer_batches_succeeded: register_int_counter_vec_with_registry!(
+                "indexer_total_committer_batches_succeeded",
+                "Total number of successful batches writes by this committer",
+                &["pipeline"],
+                registry,
+            )
+            .unwrap(),
+            total_committer_rows_committed: register_int_counter_vec_with_registry!(
+                "indexer_total_committer_rows_committed",
+                "Total number of rows sent to the database by this committer",
+                &["pipeline"],
+                registry,
+            )
+            .unwrap(),
+            total_committer_rows_affected: register_int_counter_vec_with_registry!(
+                "indexer_total_committer_rows_affected",
+                "Total number of rows actually written to the database by this committer",
+                &["pipeline"],
+                registry,
+            )
+            .unwrap(),
+            total_watermarks_out_of_order: register_int_counter_vec_with_registry!(
+                "indexer_watermark_out_of_order",
+                "Number of times this committer encountered a batch for a checkpoint before its watermark",
+                &["pipeline"],
+                registry,
+            )
+            .unwrap(),
+            collector_gather_latency: register_histogram_vec_with_registry!(
+                "indexer_collector_gather_latency",
+                "Time taken to gather rows into a batch by this collector",
+                &["pipeline"],
+                PROCESSING_LATENCY_SEC_BUCKETS.to_vec(),
+                registry,
+            )
+            .unwrap(),
+            collector_batch_size: register_histogram_vec_with_registry!(
+                "indexer_collector_batch_size",
+                "Number of rows in a batch written to the database by this collector",
+                &["pipeline"],
+                BATCH_SIZE_BUCKETS.to_vec(),
+                registry,
+            )
+            .unwrap(),
+            committer_commit_latency: register_histogram_vec_with_registry!(
+                "indexer_committer_commit_latency",
+                "Time taken to write a batch of rows to the database by this committer",
+                &["pipeline"],
+                DB_UPDATE_LATENCY_SEC_BUCKETS.to_vec(),
+                registry,
+            )
+            .unwrap(),
+            watermark_gather_latency: register_histogram_vec_with_registry!(
+                "indexer_watermark_gather_latency",
+                "Time taken to calculate the new high watermark after a write by this committer",
+                &["pipeline"],
+                PROCESSING_LATENCY_SEC_BUCKETS.to_vec(),
+                registry,
+            )
+            .unwrap(),
+            watermark_commit_latency: register_histogram_vec_with_registry!(
+                "indexer_watermark_commit_latency",
+                "Time taken to write the new high watermark to the database by this committer",
+                &["pipeline"],
+                DB_UPDATE_LATENCY_SEC_BUCKETS.to_vec(),
+                registry,
+            )
+            .unwrap(),
+            watermark_epoch: register_int_gauge_vec_with_registry!(
+                "indexer_watermark_epoch",
+                "Current epoch high watermark for this committer",
+                &["pipeline"],
+                registry,
+            )
+            .unwrap(),
+            watermark_checkpoint: register_int_gauge_vec_with_registry!(
+                "indexer_watermark_checkpoint",
+                "Current checkpoint high watermark for this committer",
+                &["pipeline"],
+                registry,
+            )
+            .unwrap(),
+            watermark_transaction: register_int_gauge_vec_with_registry!(
+                "indexer_watermark_transaction",
+                "Current transaction high watermark for this committer",
+                &["pipeline"],
+                registry,
+            )
+            .unwrap(),
+            watermark_timestamp_ms: register_int_gauge_vec_with_registry!(
+                "indexer_watermark_timestamp_ms",
+                "Current timestamp high watermark for this committer, in milliseconds",
+                &["pipeline"],
+                registry,
+            )
+            .unwrap(),
+            watermark_epoch_in_db: register_int_gauge_vec_with_registry!(
+                "indexer_watermark_epoch_in_db",
+                "Last epoch high watermark this committer wrote to the DB",
+                &["pipeline"],
+                registry,
+            )
+            .unwrap(),
+            watermark_checkpoint_in_db: register_int_gauge_vec_with_registry!(
+                "indexer_watermark_checkpoint_in_db",
+                "Last checkpoint high watermark this committer wrote to the DB",
+                &["pipeline"],
+                registry,
+            )
+            .unwrap(),
+            watermark_transaction_in_db: register_int_gauge_vec_with_registry!(
+                "indexer_watermark_transaction_in_db",
+                "Last transaction high watermark this committer wrote to the DB",
+                &["pipeline"],
+                registry,
+            )
+            .unwrap(),
+            watermark_timestamp_in_db_ms: register_int_gauge_vec_with_registry!(
+                "indexer_watermark_timestamp_ms_in_db",
+                "Last timestamp high watermark this committer wrote to the DB, in milliseconds",
+                &["pipeline"],
+                registry,
+            )
+            .unwrap(),
+        }
+    }
+
+    /// Register that we're retrying a checkpoint fetch due to a transient error, logging the
+    /// reason and error.
+    pub(crate) fn inc_retry(
+        &self,
+        checkpoint: u64,
+        reason: &str,
+        error: Error,
+    ) -> backoff::Error<Error> {
+        warn!(checkpoint, reason, "Retrying due to error: {error}");
+
+        self.total_ingested_transient_retries
+            .with_label_values(&[reason])
+            .inc();
+
+        backoff::Error::transient(error)
+    }
+}
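+
+// Illustrative sketch of `inc_retry` inside a backoff loop (the `policy` and `fetch_once` names
+// are hypothetical): a transient failure becomes a retryable `backoff::Error` and bumps the
+// per-reason retry counter.
+//
+//     backoff::future::retry(policy, || async {
+//         fetch_once().await.map_err(|e| metrics.inc_retry(checkpoint, "request", e))
+//     })
+//     .await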
+
+impl DbConnectionStatsCollector {
+    fn new(db: Db) -> Self {
+        let desc = vec![
+            (
+                MetricType::GAUGE,
+                desc(
+                    "db_connections",
+                    "Number of connections currently being managed by the pool",
+                ),
+            ),
+            (
+                MetricType::GAUGE,
+                desc(
+                    "db_idle_connections",
+                    "Number of idle connections in the pool",
+                ),
+            ),
+            (
+                MetricType::COUNTER,
+                desc("db_connect_direct", "Connections that did not have to wait"),
+            ),
+            (
+                MetricType::SUMMARY,
+                desc("db_connect_waited", "Connections that had to wait"),
+            ),
+            (
+                MetricType::COUNTER,
+                desc(
+                    "db_connect_timed_out",
+                    "Connections that timed out waiting for a connection",
+                ),
+            ),
+            (
+                MetricType::COUNTER,
+                desc(
+                    "db_connections_created",
+                    "Connections that have been created in the pool",
+                ),
+            ),
+            (
+                MetricType::COUNTER,
+                desc_with_labels(
+                    "db_connections_closed",
+                    "Total connections that were closed",
+                    &["reason"],
+                ),
+            ),
+        ];
+
+        Self { db, desc }
+    }
+}
+
+impl Collector for DbConnectionStatsCollector {
+    fn desc(&self) -> Vec<&Desc> {
+        self.desc.iter().map(|d| &d.1).collect()
+    }
+
+    fn collect(&self) -> Vec<MetricFamily> {
+        let state = self.db.state();
+        let stats = state.statistics;
+
+        vec![
+            gauge(&self.desc[0].1, state.connections as f64),
+            gauge(&self.desc[1].1, state.idle_connections as f64),
+            counter(&self.desc[2].1, stats.get_direct as f64),
+            summary(
+                &self.desc[3].1,
+                stats.get_wait_time.as_millis() as f64,
+                stats.get_waited + stats.get_timed_out,
+            ),
+            counter(&self.desc[4].1, stats.get_timed_out as f64),
+            counter(&self.desc[5].1, stats.connections_created as f64),
+            counter_with_labels(
+                &self.desc[6].1,
+                &[
+                    ("reason", "broken", stats.connections_closed_broken as f64),
+                    ("reason", "invalid", stats.connections_closed_invalid as f64),
+                    (
+                        "reason",
+                        "max_lifetime",
+                        stats.connections_closed_max_lifetime as f64,
+                    ),
+                    (
+                        "reason",
+                        "idle_timeout",
+                        stats.connections_closed_idle_timeout as f64,
+                    ),
+                ],
+            ),
+        ]
+    }
+}
+
+fn desc(name: &str, help: &str) -> Desc {
+    desc_with_labels(name, help, &[])
+}
+
+fn desc_with_labels(name: &str, help: &str, labels: &[&str]) -> Desc {
+    Desc::new(
+        name.to_string(),
+        help.to_string(),
+        labels.iter().map(|s| s.to_string()).collect(),
+        Default::default(),
+    )
+    .expect("Bad metric description")
+}
+
+fn gauge(desc: &Desc, value: f64) -> MetricFamily {
+    let mut g = Gauge::default();
+    let mut m = Metric::default();
+    let mut mf = MetricFamily::new();
+
+    g.set_value(value);
+    m.set_gauge(g);
+
+    mf.mut_metric().push(m);
+    mf.set_name(desc.fq_name.clone());
+    mf.set_help(desc.help.clone());
+    mf.set_field_type(MetricType::GAUGE);
+    mf
+}
+
+fn counter(desc: &Desc, value: f64) -> MetricFamily {
+    let mut c = Counter::default();
+    let mut m = Metric::default();
+    let mut mf = MetricFamily::new();
+
+    c.set_value(value);
+    m.set_counter(c);
+
+    mf.mut_metric().push(m);
+    mf.set_name(desc.fq_name.clone());
+    mf.set_help(desc.help.clone());
+    mf.set_field_type(MetricType::COUNTER);
+    mf
+}
+
+fn counter_with_labels(desc: &Desc, values: &[(&str, &str, f64)]) -> MetricFamily {
+    let mut mf = MetricFamily::new();
+
+    for (name, label, value) in values {
+        let mut c = Counter::default();
+        let mut l = LabelPair::default();
+        let mut m = Metric::default();
+
+        c.set_value(*value);
+        l.set_name(name.to_string());
+        l.set_value(label.to_string());
+
+        m.set_counter(c);
+        m.mut_label().push(l);
+        mf.mut_metric().push(m);
+    }
+
+    mf.set_name(desc.fq_name.clone());
+    mf.set_help(desc.help.clone());
+    mf.set_field_type(MetricType::COUNTER);
+    mf
+}
+
+fn summary(desc: &Desc, sum: f64, count: u64) -> MetricFamily {
+    let mut s = Summary::default();
+    let mut m = Metric::default();
+    let mut mf = MetricFamily::new();
+
+    s.set_sample_sum(sum);
+    s.set_sample_count(count);
+    m.set_summary(s);
+
+    mf.mut_metric().push(m);
+    mf.set_name(desc.fq_name.clone());
+    mf.set_help(desc.help.clone());
+    mf.set_field_type(MetricType::SUMMARY);
+    mf
+}
+
+#[cfg(test)]
+pub(crate) mod tests {
+    use prometheus::Registry;
+
+    use super::IndexerMetrics;
+
+    /// Construct metrics for test purposes.
+    pub fn test_metrics() -> IndexerMetrics {
+        IndexerMetrics::new(&Registry::new())
+    }
+}
diff --git a/crates/sui-indexer-alt/src/models/checkpoints.rs b/crates/sui-indexer-alt/src/models/checkpoints.rs
new file mode 100644
index 0000000000000..0a7c625eb95e6
--- /dev/null
+++ b/crates/sui-indexer-alt/src/models/checkpoints.rs
@@ -0,0 +1,26 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use crate::schema::kv_checkpoints;
+use diesel::prelude::*;
+use sui_field_count::FieldCount;
+
+#[derive(Insertable, Debug, Clone, FieldCount)]
+#[diesel(table_name = kv_checkpoints)]
+pub struct StoredCheckpoint {
+    pub sequence_number: i64,
+    /// BCS serialized CertifiedCheckpointSummary
+    pub certified_checkpoint: Vec<u8>,
+    /// BCS serialized CheckpointContents
+    pub checkpoint_contents: Vec<u8>,
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_stored_checkpoint_field_count() {
+        assert_eq!(StoredCheckpoint::field_count(), 3);
+    }
+}
diff --git a/crates/sui-indexer-alt/src/models/events.rs b/crates/sui-indexer-alt/src/models/events.rs
new file mode 100644
index 0000000000000..ceb25d3e48a1a
--- /dev/null
+++ b/crates/sui-indexer-alt/src/models/events.rs
@@ -0,0 +1,41 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use crate::schema::{ev_emit_mod, ev_struct_inst};
+use diesel::prelude::*;
+use sui_field_count::FieldCount;
+
+#[derive(Insertable, Debug, Clone, Eq, PartialEq, Ord, PartialOrd, FieldCount)]
+#[diesel(table_name = ev_emit_mod)]
+pub struct StoredEvEmitMod {
+    pub package: Vec<u8>,
+    pub module: String,
+    pub tx_sequence_number: i64,
+    pub sender: Vec<u8>,
+}
+
+#[derive(Insertable, Debug, Clone, Eq, PartialEq, Ord, PartialOrd, FieldCount)]
+#[diesel(table_name = ev_struct_inst)]
+pub struct StoredEvStructInst {
+    pub package: Vec<u8>,
+    pub module: String,
+    pub name: String,
+    pub instantiation: Vec<u8>,
+    pub tx_sequence_number: i64,
+    pub sender: Vec<u8>,
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_stored_event_field_count() {
+        assert_eq!(StoredEvEmitMod::field_count(), 4);
+    }
+
+    #[test]
+    fn test_stored_struct_inst_field_count() {
+        assert_eq!(StoredEvStructInst::field_count(), 6);
+    }
+}
diff --git a/crates/sui-indexer-alt/src/models/mod.rs b/crates/sui-indexer-alt/src/models/mod.rs
new file mode 100644
index 0000000000000..b20e260b29176
--- /dev/null
+++ b/crates/sui-indexer-alt/src/models/mod.rs
@@ -0,0 +1,8 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+pub mod checkpoints;
+pub mod events;
+pub mod objects;
+pub mod transactions;
+pub mod watermarks;
diff --git a/crates/sui-indexer-alt/src/models/objects.rs b/crates/sui-indexer-alt/src/models/objects.rs
new file mode 100644
index 0000000000000..46a5ac8d5a03b
--- /dev/null
+++ b/crates/sui-indexer-alt/src/models/objects.rs
@@ -0,0 +1,148 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use diesel::{
+    backend::Backend, deserialize, expression::AsExpression, prelude::*, serialize,
+    sql_types::SmallInt, FromSqlRow,
+};
+use sui_field_count::FieldCount;
+use sui_types::base_types::ObjectID;
+
+use crate::schema::{
+    kv_objects, obj_versions, sum_coin_balances, sum_obj_types, wal_coin_balances, wal_obj_types,
+};
+
+#[derive(Insertable, Debug, Clone, FieldCount)]
+#[diesel(table_name = kv_objects, primary_key(object_id, object_version))]
+pub struct StoredObject {
+    pub object_id: Vec<u8>,
+    pub object_version: i64,
+    pub serialized_object: Option<Vec<u8>>,
+}
+
+#[derive(Insertable, Debug, Clone)]
+#[diesel(table_name = obj_versions, primary_key(object_id, object_version))]
+pub struct StoredObjVersion {
+    pub object_id: Vec<u8>,
+    pub object_version: i64,
+    pub object_digest: Vec<u8>,
+    pub cp_sequence_number: i64,
+}
+
+/// An insert/update or deletion of an object record, keyed on a particular Object ID and version.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
+pub struct StoredObjectUpdate<T> {
+    pub object_id: ObjectID,
+    pub object_version: u64,
+    pub cp_sequence_number: u64,
+    /// `None` means the object was deleted or wrapped at this version, `Some(x)` means it was
+    /// changed to `x`.
+    pub update: Option<T>,
+}
+
+#[derive(AsExpression, FromSqlRow, Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
+#[diesel(sql_type = SmallInt)]
+#[repr(i16)]
+pub enum StoredOwnerKind {
+    Immutable = 0,
+    Address = 1,
+    Object = 2,
+    Shared = 3,
+}
+
+#[derive(Insertable, Debug, Clone, FieldCount)]
+#[diesel(table_name = sum_coin_balances, primary_key(object_id))]
+pub struct StoredSumCoinBalance {
+    pub object_id: Vec<u8>,
+    pub object_version: i64,
+    pub owner_id: Vec<u8>,
+    pub coin_type: Vec<u8>,
+    pub coin_balance: i64,
+}
+
+#[derive(Insertable, Debug, Clone, FieldCount)]
+#[diesel(table_name = sum_obj_types, primary_key(object_id))]
+pub struct StoredSumObjType {
+    pub object_id: Vec<u8>,
+    pub object_version: i64,
+    pub owner_kind: StoredOwnerKind,
+    pub owner_id: Option<Vec<u8>>,
+    pub package: Option<Vec<u8>>,
+    pub module: Option<String>,
+    pub name: Option<String>,
+    pub instantiation: Option<Vec<u8>>,
+}
+
+#[derive(Insertable, Debug, Clone)]
+#[diesel(table_name = wal_coin_balances, primary_key(object_id, object_version))]
+pub struct StoredWalCoinBalance {
+    pub object_id: Vec<u8>,
+    pub object_version: i64,
+    pub owner_id: Option<Vec<u8>>,
+    pub coin_type: Option<Vec<u8>>,
+    pub coin_balance: Option<i64>,
+    pub cp_sequence_number: i64,
+}
+
+#[derive(Insertable, Debug, Clone)]
+#[diesel(table_name = wal_obj_types, primary_key(object_id, object_version))]
+pub struct StoredWalObjType {
+    pub object_id: Vec<u8>,
+    pub object_version: i64,
+    pub owner_kind: Option<StoredOwnerKind>,
+    pub owner_id: Option<Vec<u8>>,
+    pub package: Option<Vec<u8>>,
+    pub module: Option<String>,
+    pub name: Option<String>,
+    pub instantiation: Option<Vec<u8>>,
+    pub cp_sequence_number: i64,
+}
+
+impl<DB: Backend> serialize::ToSql<SmallInt, DB> for StoredOwnerKind
+where
+    i16: serialize::ToSql<SmallInt, DB>,
+{
+    fn to_sql<'b>(&'b self, out: &mut serialize::Output<'b, '_, DB>) -> serialize::Result {
+        match self {
+            StoredOwnerKind::Immutable => 0.to_sql(out),
+            StoredOwnerKind::Address => 1.to_sql(out),
+            StoredOwnerKind::Object => 2.to_sql(out),
+            StoredOwnerKind::Shared => 3.to_sql(out),
+        }
+    }
+}
+
+impl<DB: Backend> deserialize::FromSql<SmallInt, DB> for StoredOwnerKind
+where
+    i16: deserialize::FromSql<SmallInt, DB>,
+{
+    fn from_sql(raw: DB::RawValue<'_>) -> deserialize::Result<Self> {
+        Ok(match i16::from_sql(raw)? {
+            0 => StoredOwnerKind::Immutable,
+            1 => StoredOwnerKind::Address,
+            2 => StoredOwnerKind::Object,
+            3 => StoredOwnerKind::Shared,
+            o => return Err(format!("Unexpected StoredOwnerKind: {o}").into()),
+        })
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_stored_object_field_count() {
+        assert_eq!(StoredObject::field_count(), 3);
+    }
+
+    #[test]
+    fn test_stored_sum_coin_balance_field_count() {
+        assert_eq!(StoredSumCoinBalance::field_count(), 5);
+    }
+
+    #[test]
+    fn test_stored_sum_obj_type_field_count() {
+        assert_eq!(StoredSumObjType::field_count(), 8);
+    }
+}
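For reference, the `ToSql`/`FromSql` pair above is just a total, reversible mapping between `StoredOwnerKind` and `i16`. A dependency-free sketch of the same mapping, using stand-in names and no diesel plumbing:

```rust
// Stand-in sketch of the owner-kind <-> i16 round trip encoded by the diesel impls above.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[repr(i16)]
enum OwnerKind {
    Immutable = 0,
    Address = 1,
    Object = 2,
    Shared = 3,
}

fn decode(raw: i16) -> Result<OwnerKind, String> {
    Ok(match raw {
        0 => OwnerKind::Immutable,
        1 => OwnerKind::Address,
        2 => OwnerKind::Object,
        3 => OwnerKind::Shared,
        o => return Err(format!("Unexpected OwnerKind: {o}")),
    })
}

fn main() {
    for kind in [OwnerKind::Immutable, OwnerKind::Address, OwnerKind::Object, OwnerKind::Shared] {
        // Encoding then decoding recovers the original variant.
        assert_eq!(decode(kind as i16), Ok(kind));
    }
}
```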
diff --git a/crates/sui-indexer-alt/src/models/transactions.rs b/crates/sui-indexer-alt/src/models/transactions.rs
new file mode 100644
index 0000000000000..3a7325c484793
--- /dev/null
+++ b/crates/sui-indexer-alt/src/models/transactions.rs
@@ -0,0 +1,69 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use crate::schema::{kv_transactions, tx_affected_objects, tx_balance_changes};
+use diesel::prelude::*;
+use serde::{Deserialize, Serialize};
+use sui_field_count::FieldCount;
+use sui_types::object::Owner;
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub enum BalanceChange {
+    V1 {
+        /// Owner whose balance changed
+        owner: Owner,
+
+        /// Type of the Coin (just the one-time witness type).
+        coin_type: String,
+
+        /// The amount the balance changed by. A negative amount means the net flow of value is
+        /// from the owner, and a positive amount means the net flow of value is to the owner.
+        amount: i128,
+    },
+}
+
+#[derive(Insertable, Debug, Clone, FieldCount)]
+#[diesel(table_name = kv_transactions)]
+pub struct StoredTransaction {
+    pub tx_digest: Vec<u8>,
+    pub cp_sequence_number: i64,
+    pub timestamp_ms: i64,
+    pub raw_transaction: Vec<u8>,
+    pub raw_effects: Vec<u8>,
+    pub events: Vec<u8>,
+}
+
+#[derive(Insertable, Debug, Clone, FieldCount)]
+#[diesel(table_name = tx_affected_objects)]
+pub struct StoredTxAffectedObject {
+    pub tx_sequence_number: i64,
+    pub affected: Vec<u8>,
+    pub sender: Vec<u8>,
+}
+
+#[derive(Insertable, Debug, Clone, FieldCount)]
+#[diesel(table_name = tx_balance_changes)]
+pub struct StoredTxBalanceChange {
+    pub tx_sequence_number: i64,
+    pub balance_changes: Vec<u8>,
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_stored_transaction_field_count() {
+        assert_eq!(StoredTransaction::field_count(), 6);
+    }
+
+    #[test]
+    fn test_stored_tx_affected_object_field_count() {
+        assert_eq!(StoredTxAffectedObject::field_count(), 3);
+    }
+
+    #[test]
+    fn test_stored_tx_balance_change_field_count() {
+        assert_eq!(StoredTxBalanceChange::field_count(), 2);
+    }
+}
diff --git a/crates/sui-indexer-alt/src/models/watermarks.rs b/crates/sui-indexer-alt/src/models/watermarks.rs
new file mode 100644
index 0000000000000..d26d968796ba4
--- /dev/null
+++ b/crates/sui-indexer-alt/src/models/watermarks.rs
@@ -0,0 +1,117 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::borrow::Cow;
+
+use chrono::{DateTime, Utc};
+use diesel::prelude::*;
+use diesel_async::RunQueryDsl;
+use sui_field_count::FieldCount;
+
+use crate::{db::Connection, schema::watermarks};
+
+#[derive(Insertable, Debug, Clone, FieldCount)]
+#[diesel(table_name = watermarks)]
+pub struct StoredWatermark {
+    pub pipeline: String,
+    pub epoch_hi_inclusive: i64,
+    pub checkpoint_hi_inclusive: i64,
+    pub tx_hi: i64,
+    pub timestamp_ms_hi_inclusive: i64,
+    pub epoch_lo: i64,
+    pub reader_lo: i64,
+    pub pruner_timestamp_ms: i64,
+    pub pruner_hi: i64,
+}
+
+/// Fields that the committer is responsible for setting.
+#[derive(AsChangeset, Selectable, Queryable, Debug, Clone, FieldCount)]
+#[diesel(table_name = watermarks)]
+pub struct CommitterWatermark<'p> {
+    pub pipeline: Cow<'p, str>,
+    pub epoch_hi_inclusive: i64,
+    pub checkpoint_hi_inclusive: i64,
+    pub tx_hi: i64,
+    pub timestamp_ms_hi_inclusive: i64,
+}
+
+impl CommitterWatermark<'static> {
+    /// Get the current high watermark for the pipeline.
+    pub async fn get(
+        conn: &mut Connection<'_>,
+        pipeline: &'static str,
+    ) -> QueryResult<Option<Self>> {
+        watermarks::table
+            .select(CommitterWatermark::as_select())
+            .filter(watermarks::pipeline.eq(pipeline))
+            .first(conn)
+            .await
+            .optional()
+    }
+}
+
+impl<'p> CommitterWatermark<'p> {
+    /// A new watermark with the given pipeline name indicating zero progress.
+    pub fn initial(pipeline: Cow<'p, str>) -> Self {
+        CommitterWatermark {
+            pipeline,
+            epoch_hi_inclusive: 0,
+            checkpoint_hi_inclusive: 0,
+            tx_hi: 0,
+            timestamp_ms_hi_inclusive: 0,
+        }
+    }
+
+    /// The consensus timestamp associated with this checkpoint.
+    pub fn timestamp(&self) -> DateTime<Utc> {
+        DateTime::from_timestamp_millis(self.timestamp_ms_hi_inclusive).unwrap_or_default()
+    }
+
+    /// Upsert the high watermark as long as it raises the watermark stored in the database.
+    /// Returns a boolean indicating whether the watermark was actually updated or not.
+    ///
+    /// TODO(amnn): Test this (depends on supporting migrations and tempdb).
+    pub async fn update(&self, conn: &mut Connection<'_>) -> QueryResult {
+        use diesel::query_dsl::methods::FilterDsl;
+        Ok(diesel::insert_into(watermarks::table)
+            .values(StoredWatermark::from(self.clone()))
+            .on_conflict(watermarks::pipeline)
+            .do_update()
+            .set(self)
+            .filter(watermarks::checkpoint_hi_inclusive.lt(self.checkpoint_hi_inclusive))
+            .execute(conn)
+            .await?
+            > 0)
+    }
+}
+
+impl<'p> From<CommitterWatermark<'p>> for StoredWatermark {
+    fn from(watermark: CommitterWatermark<'p>) -> Self {
+        StoredWatermark {
+            pipeline: watermark.pipeline.into_owned(),
+            epoch_hi_inclusive: watermark.epoch_hi_inclusive,
+            checkpoint_hi_inclusive: watermark.checkpoint_hi_inclusive,
+            tx_hi: watermark.tx_hi,
+            timestamp_ms_hi_inclusive: watermark.timestamp_ms_hi_inclusive,
+            epoch_lo: 0,
+            reader_lo: 0,
+            pruner_timestamp_ms: 0,
+            pruner_hi: 0,
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_stored_watermark_field_count() {
+        assert_eq!(StoredWatermark::field_count(), 9);
+    }
+
+    #[test]
+    fn test_committer_watermark_field_count() {
+        assert_eq!(CommitterWatermark::<'static>::field_count(), 5);
+    }
+}
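The `update` method above relies on the upsert's filter to make watermark writes monotonic: a stale update is silently dropped. A minimal in-memory model of that rule (assumed semantics, not the SQL diesel actually generates):

```rust
// In-memory model of "only raise the watermark": mirrors the effect of the
// INSERT .. ON CONFLICT .. DO UPDATE .. WHERE stored < new query built by `update`.
fn apply_update(stored_checkpoint_hi: &mut i64, new_checkpoint_hi: i64) -> bool {
    if *stored_checkpoint_hi < new_checkpoint_hi {
        *stored_checkpoint_hi = new_checkpoint_hi;
        true // one row updated
    } else {
        false // the filter rejects the write; the stored watermark is untouched
    }
}

fn main() {
    let mut checkpoint_hi = 100;
    assert!(apply_update(&mut checkpoint_hi, 105)); // watermark moves forward
    assert!(!apply_update(&mut checkpoint_hi, 90)); // stale update is a no-op
    assert_eq!(checkpoint_hi, 105);
}
```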
diff --git a/crates/sui-indexer-alt/src/pipeline/concurrent/collector.rs b/crates/sui-indexer-alt/src/pipeline/concurrent/collector.rs
new file mode 100644
index 0000000000000..1bf3459d6817a
--- /dev/null
+++ b/crates/sui-indexer-alt/src/pipeline/concurrent/collector.rs
@@ -0,0 +1,181 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::{collections::BTreeMap, sync::Arc};
+
+use mysten_metrics::spawn_monitored_task;
+use tokio::{
+    sync::mpsc,
+    task::JoinHandle,
+    time::{interval, MissedTickBehavior},
+};
+use tokio_util::sync::CancellationToken;
+use tracing::{debug, info};
+
+use crate::{
+    metrics::IndexerMetrics,
+    pipeline::{Indexed, PipelineConfig, WatermarkPart},
+};
+
+use super::{Batched, Handler};
+
+/// Processed values that are waiting to be written to the database. This is an internal type used
+/// by the concurrent collector to hold data it is waiting to send to the committer.
+struct Pending<H: Handler> {
+    /// Values to be inserted into the database from this checkpoint
+    values: Vec<H::Value>,
+    /// The watermark associated with this checkpoint and the part of it that is left to commit
+    watermark: WatermarkPart,
+}
+
+impl<H: Handler> Pending<H> {
+    /// Whether there are values left to commit from this indexed checkpoint.
+    fn is_empty(&self) -> bool {
+        debug_assert!(self.watermark.batch_rows == 0);
+        self.values.is_empty()
+    }
+
+    /// Adds data from this indexed checkpoint to the `batch`, honoring the handler's bounds on
+    /// chunk size.
+    fn batch_into(&mut self, batch: &mut Batched<H>) {
+        if batch.values.len() + self.values.len() > H::MAX_CHUNK_ROWS {
+            let mut for_batch = self
+                .values
+                .split_off(H::MAX_CHUNK_ROWS - batch.values.len());
+
+            std::mem::swap(&mut self.values, &mut for_batch);
+            batch.watermark.push(self.watermark.take(for_batch.len()));
+            batch.values.extend(for_batch);
+        } else {
+            batch.watermark.push(self.watermark.take(self.values.len()));
+            batch.values.extend(std::mem::take(&mut self.values));
+        }
+    }
+}
+
+impl<H: Handler> From<Indexed<H>> for Pending<H> {
+    fn from(indexed: Indexed<H>) -> Self {
+        Self {
+            watermark: WatermarkPart {
+                watermark: indexed.watermark,
+                batch_rows: indexed.values.len(),
+                total_rows: indexed.values.len(),
+            },
+            values: indexed.values,
+        }
+    }
+}
+
+/// The collector task is responsible for gathering rows into batches which it then sends to a
+/// committer task to write to the database. The task publishes batches in the following
+/// circumstances:
+///
+/// - If `H::MIN_EAGER_ROWS` rows are pending, it will immediately schedule a batch to be gathered.
+///
+/// - If after sending one batch there is more data to be sent, it will immediately schedule the
+///   next batch to be gathered (each batch will contain at most `H::MAX_CHUNK_ROWS` rows).
+///
+/// - Otherwise, it will check for any data to write out at a regular interval (controlled by
+///   `config.collect_interval`).
+///
+/// This task will shutdown if canceled via the `cancel` token, or if any of its channels are
+/// closed.
+pub(super) fn collector<H: Handler + 'static>(
+    config: PipelineConfig,
+    mut rx: mpsc::Receiver<Indexed<H>>,
+    tx: mpsc::Sender<Batched<H>>,
+    metrics: Arc<IndexerMetrics>,
+    cancel: CancellationToken,
+) -> JoinHandle<()> {
+    spawn_monitored_task!(async move {
+        // The `poll` interval controls the maximum time to wait between collecting batches,
+        // regardless of number of rows pending.
+        let mut poll = interval(config.collect_interval);
+        poll.set_missed_tick_behavior(MissedTickBehavior::Delay);
+
+        // Data for checkpoints that haven't been written yet.
+        let mut pending: BTreeMap<u64, Pending<H>> = BTreeMap::new();
+        let mut pending_rows = 0;
+
+        info!(pipeline = H::NAME, "Starting collector");
+
+        loop {
+            tokio::select! {
+                _ = cancel.cancelled() => {
+                    info!(pipeline = H::NAME, "Shutdown received, stopping collector");
+                    break;
+                }
+
+                // Time to create another batch and push it to the committer.
+                _ = poll.tick() => {
+                    let guard = metrics
+                        .collector_gather_latency
+                        .with_label_values(&[H::NAME])
+                        .start_timer();
+
+                    let mut batch = Batched::new();
+                    while !batch.is_full() {
+                        let Some(mut entry) = pending.first_entry() else {
+                            break;
+                        };
+
+                        let indexed = entry.get_mut();
+                        indexed.batch_into(&mut batch);
+                        if indexed.is_empty() {
+                            entry.remove();
+                        }
+                    }
+
+                    pending_rows -= batch.len();
+                    let elapsed = guard.stop_and_record();
+                    debug!(
+                        pipeline = H::NAME,
+                        elapsed_ms = elapsed * 1000.0,
+                        rows = batch.len(),
+                        pending = pending_rows,
+                        "Gathered batch",
+                    );
+
+                    metrics
+                        .total_collector_batches_created
+                        .with_label_values(&[H::NAME])
+                        .inc();
+
+                    metrics
+                        .collector_batch_size
+                        .with_label_values(&[H::NAME])
+                        .observe(batch.len() as f64);
+
+                    if tx.send(batch).await.is_err() {
+                        info!(pipeline = H::NAME, "Committer closed channel, stopping collector");
+                        break;
+                    }
+
+                    if pending_rows > 0 {
+                        poll.reset_immediately();
+                    } else if rx.is_closed() && rx.is_empty() {
+                        info!(
+                            pipeline = H::NAME,
+                            "Processor closed channel, pending rows empty, stopping collector",
+                        );
+                        break;
+                    }
+                }
+
+                Some(indexed) = rx.recv(), if pending_rows < H::MAX_PENDING_ROWS => {
+                    metrics
+                        .total_collector_rows_received
+                        .with_label_values(&[H::NAME])
+                        .inc_by(indexed.values.len() as u64);
+
+                    pending_rows += indexed.values.len();
+                    pending.insert(indexed.checkpoint(), indexed.into());
+
+                    if pending_rows >= H::MIN_EAGER_ROWS {
+                        poll.reset_immediately()
+                    }
+                }
+            }
+        }
+    })
+}
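The splitting rule in `Pending::batch_into` is the core of the collector: top the batch up to the handler's chunk limit and keep the remainder pending. A standalone sketch over plain integers (the constant value here is arbitrary, not the crate's default):

```rust
// Standalone sketch of the chunking rule in `Pending::batch_into`, using plain u64 rows.
const MAX_CHUNK_ROWS: usize = 4; // arbitrary value for illustration

fn batch_into(pending: &mut Vec<u64>, batch: &mut Vec<u64>) {
    if batch.len() + pending.len() > MAX_CHUNK_ROWS {
        // Take just enough rows to fill the batch; the rest stays pending.
        let mut for_batch = pending.split_off(MAX_CHUNK_ROWS - batch.len());
        std::mem::swap(pending, &mut for_batch);
        batch.extend(for_batch);
    } else {
        batch.append(pending);
    }
}

fn main() {
    let mut pending = vec![1, 2, 3, 4, 5];
    let mut batch = vec![0];
    batch_into(&mut pending, &mut batch);
    assert_eq!(batch, vec![0, 1, 2, 3]); // filled up to MAX_CHUNK_ROWS
    assert_eq!(pending, vec![4, 5]); // remainder waits for the next batch
}
```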
diff --git a/crates/sui-indexer-alt/src/pipeline/concurrent/committer.rs b/crates/sui-indexer-alt/src/pipeline/concurrent/committer.rs
new file mode 100644
index 0000000000000..0a6ba850cc24b
--- /dev/null
+++ b/crates/sui-indexer-alt/src/pipeline/concurrent/committer.rs
@@ -0,0 +1,182 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::{sync::Arc, time::Duration};
+
+use backoff::ExponentialBackoff;
+use futures::TryStreamExt;
+use mysten_metrics::spawn_monitored_task;
+use tokio::{sync::mpsc, task::JoinHandle};
+use tokio_stream::{wrappers::ReceiverStream, StreamExt};
+use tokio_util::sync::CancellationToken;
+use tracing::{debug, error, info, warn};
+
+use crate::{
+    db::Db,
+    metrics::IndexerMetrics,
+    pipeline::{Break, PipelineConfig, WatermarkPart},
+};
+
+use super::{Batched, Handler};
+
+/// If the committer needs to retry a commit, it will wait this long initially.
+const INITIAL_RETRY_INTERVAL: Duration = Duration::from_millis(100);
+
+/// If the committer needs to retry a commit, it will wait at most this long between retries.
+const MAX_RETRY_INTERVAL: Duration = Duration::from_secs(1);
+
+/// The committer task is responsible for writing batches of rows to the database. It receives
+/// batches on `rx` and writes them out to the `db` concurrently (`config.write_concurrency`
+/// controls the degree of fan-out).
+///
+/// The writing of each batch will be repeatedly retried on an exponential back-off until it
+/// succeeds. Once the write succeeds, the [WatermarkPart]s for that batch are sent on `tx` to the
+/// watermark task.
+///
+/// This task will shutdown via its `cancel`lation token, or if its receiver or sender channels are
+/// closed.
+pub(super) fn committer<H: Handler + 'static>(
+    config: PipelineConfig,
+    rx: mpsc::Receiver<Batched<H>>,
+    tx: mpsc::Sender<Vec<WatermarkPart>>,
+    db: Db,
+    metrics: Arc<IndexerMetrics>,
+    cancel: CancellationToken,
+) -> JoinHandle<()> {
+    spawn_monitored_task!(async move {
+        info!(pipeline = H::NAME, "Starting committer");
+
+        match ReceiverStream::new(rx)
+            .map(Ok)
+            .try_for_each_concurrent(config.write_concurrency, |Batched { values, watermark }| {
+                let values = Arc::new(values);
+                let tx = tx.clone();
+                let db = db.clone();
+                let metrics = metrics.clone();
+                let cancel = cancel.clone();
+
+                // Repeatedly try to get a connection to the DB and write the batch. Use an
+                // exponential backoff in case the failure is due to contention over the DB
+                // connection pool.
+                let backoff = ExponentialBackoff {
+                    initial_interval: INITIAL_RETRY_INTERVAL,
+                    current_interval: INITIAL_RETRY_INTERVAL,
+                    max_interval: MAX_RETRY_INTERVAL,
+                    max_elapsed_time: None,
+                    ..Default::default()
+                };
+
+                use backoff::Error as BE;
+                let commit = move || {
+                    let values = values.clone();
+                    let db = db.clone();
+                    let metrics = metrics.clone();
+                    async move {
+                        metrics
+                            .total_committer_batches_attempted
+                            .with_label_values(&[H::NAME])
+                            .inc();
+
+                        let affected = if values.is_empty() {
+                            0
+                        } else {
+                            let guard = metrics
+                                .committer_commit_latency
+                                .with_label_values(&[H::NAME])
+                                .start_timer();
+
+                            let mut conn = db.connect().await.map_err(|e| {
+                                warn!(
+                                    pipeline = H::NAME,
+                                    "Committer failed to get connection for DB"
+                                );
+                                BE::transient(Break::Err(e.into()))
+                            })?;
+
+                            let affected = H::commit(values.as_slice(), &mut conn).await;
+                            let elapsed = guard.stop_and_record();
+
+                            match affected {
+                                Ok(affected) => {
+                                    debug!(
+                                        pipeline = H::NAME,
+                                        elapsed_ms = elapsed * 1000.0,
+                                        affected,
+                                        committed = values.len(),
+                                        "Wrote batch",
+                                    );
+
+                                    affected
+                                }
+
+                                Err(e) => {
+                                    warn!(
+                                        pipeline = H::NAME,
+                                        elapsed_ms = elapsed * 1000.0,
+                                        committed = values.len(),
+                                        "Error writing batch: {e}",
+                                    );
+
+                                    return Err(BE::transient(Break::Err(e)));
+                                }
+                            }
+                        };
+
+                        metrics
+                            .total_committer_batches_succeeded
+                            .with_label_values(&[H::NAME])
+                            .inc();
+
+                        metrics
+                            .total_committer_rows_committed
+                            .with_label_values(&[H::NAME])
+                            .inc_by(values.len() as u64);
+
+                        metrics
+                            .total_committer_rows_affected
+                            .with_label_values(&[H::NAME])
+                            .inc_by(affected as u64);
+
+                        Ok(())
+                    }
+                };
+
+                async move {
+                    tokio::select! {
+                        _ = cancel.cancelled() => {
+                            return Err(Break::Cancel);
+                        }
+
+                        // Double check that the commit actually went through (this backoff should
+                        // not produce any permanent errors, but if it does, we need to shutdown
+                        // the pipeline).
+                        commit = backoff::future::retry(backoff, commit) => {
+                            let () = commit?;
+                        }
+                    };
+
+                    if !config.skip_watermark && tx.send(watermark).await.is_err() {
+                        info!(pipeline = H::NAME, "Watermark closed channel");
+                        return Err(Break::Cancel);
+                    }
+
+                    Ok(())
+                }
+            })
+            .await
+        {
+            Ok(()) => {
+                info!(pipeline = H::NAME, "Batches done, stopping committer");
+            }
+
+            Err(Break::Cancel) => {
+                info!(pipeline = H::NAME, "Shutdown received, stopping committer");
+            }
+
+            Err(Break::Err(e)) => {
+                error!(pipeline = H::NAME, "Error from committer: {e}");
+                cancel.cancel();
+            }
+        }
+    })
+}
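The retry schedule is delegated to `backoff::ExponentialBackoff`, bounded by the two constants above. Ignoring jitter and the crate's default multiplier, the shape of the delays is roughly the doubling-with-cap schedule sketched below; this is an approximation for intuition, not the committer's exact timing.

```rust
// Rough shape of the committer's retry delays: grow from INITIAL_RETRY_INTERVAL and cap at
// MAX_RETRY_INTERVAL. The real schedule comes from backoff::ExponentialBackoff (with jitter).
use std::time::Duration;

const INITIAL_RETRY_INTERVAL: Duration = Duration::from_millis(100);
const MAX_RETRY_INTERVAL: Duration = Duration::from_secs(1);

fn retry_delay(attempt: u32) -> Duration {
    INITIAL_RETRY_INTERVAL
        .saturating_mul(2u32.saturating_pow(attempt))
        .min(MAX_RETRY_INTERVAL)
}

fn main() {
    let delays: Vec<_> = (0..5).map(retry_delay).collect();
    assert_eq!(delays[0], Duration::from_millis(100));
    assert_eq!(delays[3], Duration::from_millis(800));
    assert_eq!(delays[4], Duration::from_secs(1)); // capped
}
```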
diff --git a/crates/sui-indexer-alt/src/pipeline/concurrent/mod.rs b/crates/sui-indexer-alt/src/pipeline/concurrent/mod.rs
new file mode 100644
index 0000000000000..3c3f91f0f89da
--- /dev/null
+++ b/crates/sui-indexer-alt/src/pipeline/concurrent/mod.rs
@@ -0,0 +1,153 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::sync::Arc;
+
+use sui_types::full_checkpoint_content::CheckpointData;
+use tokio::{sync::mpsc, task::JoinHandle};
+use tokio_util::sync::CancellationToken;
+
+use crate::{
+    db::{self, Db},
+    metrics::IndexerMetrics,
+    models::watermarks::CommitterWatermark,
+};
+
+use super::{processor::processor, PipelineConfig, Processor, WatermarkPart, PIPELINE_BUFFER};
+
+use self::{collector::collector, committer::committer, watermark::watermark};
+
+mod collector;
+mod committer;
+mod watermark;
+
+/// The maximum number of watermarks that can show up in a single batch. This limit exists to deal
+/// with pipelines that produce no data for a majority of checkpoints -- the size of these
+/// pipelines' batches will be dominated by watermark updates.
+const MAX_WATERMARK_UPDATES: usize = 10_000;
+
+/// Handlers implement the logic for a given indexing pipeline: How to process checkpoint data (by
+/// implementing [Processor]) into rows for their table, and how to write those rows to the database.
+///
+/// The handler is also responsible for tuning the various parameters of the pipeline (provided as
+/// associated values). Reasonable defaults have been chosen to balance concurrency with memory
+/// usage, but each handler may choose to override these defaults, e.g.
+///
+/// - Handlers that produce many small rows may wish to increase their batch/chunk/max-pending
+///   sizes).
+/// - Handlers that do more work during processing may wish to increase their fanout so more of it
+///   can be done concurrently, to preserve throughput.
+///
+/// Concurrent handlers can only be used in concurrent pipelines, where checkpoint data is
+/// processed and committed out-of-order and a watermark table is kept up-to-date with the latest
+/// checkpoint below which all data has been committed.
+///
+/// Back-pressure is handled through the `MAX_PENDING_ROWS` constant -- if more than this many rows
+/// build up, the collector will stop accepting new checkpoints, which will eventually propagate
+/// back to the ingestion service.
+#[async_trait::async_trait]
+pub trait Handler: Processor {
+    /// If at least this many rows are pending, the committer will commit them eagerly.
+    const MIN_EAGER_ROWS: usize = 50;
+
+    /// If there are more than this many rows pending, the committer will only commit this many in
+    /// one operation.
+    const MAX_CHUNK_ROWS: usize = 200;
+
+    /// If there are more than this many rows pending, the collector applies backpressure.
+    const MAX_PENDING_ROWS: usize = 1000;
+
+    /// Take a chunk of values and commit them to the database, returning the number of rows
+    /// affected.
+    async fn commit(values: &[Self::Value], conn: &mut db::Connection<'_>)
+        -> anyhow::Result<usize>;
+}
+
+/// Values ready to be written to the database. This is an internal type used to communicate
+/// between the collector and the committer parts of the pipeline.
+struct Batched<H: Handler> {
+    /// The rows to write
+    values: Vec<H::Value>,
+    /// Proportions of all the watermarks that are represented in this chunk
+    watermark: Vec,
+}
+
+impl<H: Handler> Batched<H> {
+    fn new() -> Self {
+        Self {
+            values: vec![],
+            watermark: vec![],
+        }
+    }
+
+    /// Number of rows in this batch.
+    fn len(&self) -> usize {
+        self.values.len()
+    }
+
+    /// The batch is full if it has more than enough values to write to the database, or more than
+    /// enough watermarks to update.
+    fn is_full(&self) -> bool {
+        self.values.len() >= H::MAX_CHUNK_ROWS || self.watermark.len() >= MAX_WATERMARK_UPDATES
+    }
+}
+
+/// Start a new concurrent (out-of-order) indexing pipeline served by the handler, `H`. Starting
+/// strictly after the `watermark` (or from the beginning if no watermark was provided).
+///
+/// Each pipeline consists of a processor task which takes checkpoint data and breaks it down into
+/// rows, ready for insertion, a collector which batches those rows into an appropriate size for
+/// the database, a committer which writes the rows out concurrently, and a watermark task to
+/// update the high watermark.
+///
+/// Committing is performed out-of-order: the pipeline may write out checkpoints out-of-order,
+/// either because it received the checkpoints out-of-order or because of variance in processing
+/// time.
+///
+/// The pipeline also maintains a row in the `watermarks` table for the pipeline which tracks the
+/// watermark below which all data has been committed (modulo pruning).
+///
+/// Checkpoint data is fed into the pipeline through the `checkpoint_rx` channel, and internal
+/// channels are created to communicate between its various components. The pipeline can be
+/// shutdown using its `cancel` token, and will also shutdown if any of its independent tasks
+/// reports an issue.
+pub(crate) fn pipeline<H: Handler + 'static>(
+    initial_watermark: Option<CommitterWatermark<'static>>,
+    config: PipelineConfig,
+    db: Db,
+    checkpoint_rx: mpsc::Receiver<Arc<CheckpointData>>,
+    metrics: Arc<IndexerMetrics>,
+    cancel: CancellationToken,
+) -> (
+    JoinHandle<()>,
+    JoinHandle<()>,
+    JoinHandle<()>,
+    JoinHandle<()>,
+) {
+    let (processor_tx, collector_rx) = mpsc::channel(H::FANOUT + PIPELINE_BUFFER);
+    let (collector_tx, committer_rx) = mpsc::channel(config.write_concurrency + PIPELINE_BUFFER);
+    let (committer_tx, watermark_rx) = mpsc::channel(config.write_concurrency + PIPELINE_BUFFER);
+
+    let processor = processor::<H>(checkpoint_rx, processor_tx, metrics.clone(), cancel.clone());
+
+    let collector = collector::<H>(
+        config.clone(),
+        collector_rx,
+        collector_tx,
+        metrics.clone(),
+        cancel.clone(),
+    );
+
+    let committer = committer::<H>(
+        config.clone(),
+        committer_rx,
+        committer_tx,
+        db.clone(),
+        metrics.clone(),
+        cancel.clone(),
+    );
+
+    let watermark = watermark::<H>(initial_watermark, config, watermark_rx, db, metrics, cancel);
+
+    (processor, collector, committer, watermark)
+}
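To make the division of labour between `Processor` and `Handler` concrete, here is a self-contained mock with the trait surface reduced to the constants and methods discussed above. The checkpoint type, error type, and the `CheckpointRows` pipeline are stand-ins, not the crate's real API (which is async and commits through a database connection):

```rust
// Mock of the Processor/Handler split. Names and defaults mirror the patch, but the
// checkpoint type and error handling here are simplified stand-ins.
trait Processor {
    const NAME: &'static str;
    const FANOUT: usize = 10;
    type Value: Send + Sync + 'static;
    fn process(checkpoint: &FakeCheckpoint) -> Result<Vec<Self::Value>, String>;
}

trait Handler: Processor {
    const MIN_EAGER_ROWS: usize = 50;
    const MAX_CHUNK_ROWS: usize = 200;
    const MAX_PENDING_ROWS: usize = 1000;
    fn commit(values: &[Self::Value]) -> Result<usize, String>;
}

struct FakeCheckpoint {
    sequence_number: u64,
}

// A hypothetical pipeline that records one row per checkpoint.
struct CheckpointRows;

impl Processor for CheckpointRows {
    const NAME: &'static str = "checkpoint_rows";
    type Value = u64;
    fn process(checkpoint: &FakeCheckpoint) -> Result<Vec<u64>, String> {
        Ok(vec![checkpoint.sequence_number])
    }
}

impl Handler for CheckpointRows {
    // A pipeline with tiny rows can afford larger chunks per write.
    const MAX_CHUNK_ROWS: usize = 1000;
    fn commit(values: &[u64]) -> Result<usize, String> {
        Ok(values.len()) // pretend every row was inserted
    }
}

fn main() {
    let rows = CheckpointRows::process(&FakeCheckpoint { sequence_number: 42 }).unwrap();
    assert_eq!(CheckpointRows::commit(&rows).unwrap(), 1);
}
```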
diff --git a/crates/sui-indexer-alt/src/pipeline/concurrent/watermark.rs b/crates/sui-indexer-alt/src/pipeline/concurrent/watermark.rs
new file mode 100644
index 0000000000000..073bbe298ea61
--- /dev/null
+++ b/crates/sui-indexer-alt/src/pipeline/concurrent/watermark.rs
@@ -0,0 +1,278 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::{
+    cmp::Ordering,
+    collections::{btree_map::Entry, BTreeMap},
+    sync::Arc,
+};
+
+use mysten_metrics::spawn_monitored_task;
+use tokio::{
+    sync::mpsc,
+    task::JoinHandle,
+    time::{interval, MissedTickBehavior},
+};
+use tokio_util::sync::CancellationToken;
+use tracing::{debug, error, info, warn};
+
+use crate::{
+    db::Db,
+    metrics::IndexerMetrics,
+    models::watermarks::CommitterWatermark,
+    pipeline::{
+        PipelineConfig, WatermarkPart, LOUD_WATERMARK_UPDATE_INTERVAL, WARN_PENDING_WATERMARKS,
+    },
+};
+
+use super::Handler;
+
+/// The watermark task is responsible for keeping track of a pipeline's out-of-order commits and
+/// updating its row in the `watermarks` table when a continuous run of checkpoints have landed
+/// since the last watermark update.
+///
+/// It receives watermark "parts" that detail the proportion of each checkpoint's data that has
+/// been written out by the committer and periodically (on a configurable interval) checks if the
+/// watermark for the pipeline can be pushed forward. The watermark can be pushed forward if there
+/// is one or more complete (all data for that checkpoint written out) watermarks spanning
+/// contiguously from the current high watermark into the future.
+///
+/// If it detects that more than [WARN_PENDING_WATERMARKS] watermarks have built up, it will issue
+/// a warning, as this could be the indication of a memory leak, and the caller probably intended
+/// to run the indexer with watermarking disabled (e.g. if they are running a backfill).
+///
+/// The task regularly traces its progress, outputting at a higher log level every
+/// [LOUD_WATERMARK_UPDATE_INTERVAL]-many checkpoints.
+///
+/// The task will shutdown if the `cancel` token is signalled, or if the `rx` channel closes and
+/// the watermark cannot be progressed. If the `config` specifies `skip_watermark`, the task will
+/// shutdown immediately.
+pub(super) fn watermark<H: Handler + 'static>(
+    initial_watermark: Option<CommitterWatermark<'static>>,
+    config: PipelineConfig,
+    mut rx: mpsc::Receiver<Vec<WatermarkPart>>,
+    db: Db,
+    metrics: Arc<IndexerMetrics>,
+    cancel: CancellationToken,
+) -> JoinHandle<()> {
+    spawn_monitored_task!(async move {
+        if config.skip_watermark {
+            info!(pipeline = H::NAME, "Skipping watermark task");
+            return;
+        }
+
+        let mut poll = interval(config.watermark_interval);
+        poll.set_missed_tick_behavior(MissedTickBehavior::Delay);
+
+        // To correctly update the watermark, the task tracks the watermark it last tried to write
+        // and the watermark parts for any checkpoints that have been written since then
+        // ("pre-committed"). After each batch is written, the task will try to progress the
+        // watermark as much as possible without going over any holes in the sequence of
+        // checkpoints (entirely missing watermarks, or incomplete watermarks).
+        let mut precommitted: BTreeMap<u64, WatermarkPart> = BTreeMap::new();
+        let (mut watermark, mut next_checkpoint) = if let Some(watermark) = initial_watermark {
+            let next = watermark.checkpoint_hi_inclusive + 1;
+            (watermark, next)
+        } else {
+            (CommitterWatermark::initial(H::NAME.into()), 0)
+        };
+
+        // The watermark task will periodically output a log message at a higher log level to
+        // demonstrate that the pipeline is making progress.
+        let mut next_loud_watermark_update =
+            watermark.checkpoint_hi_inclusive + LOUD_WATERMARK_UPDATE_INTERVAL;
+
+        info!(pipeline = H::NAME, ?watermark, "Starting watermark");
+
+        loop {
+            tokio::select! {
+                _ = cancel.cancelled() => {
+                    info!(pipeline = H::NAME, "Shutdown received");
+                    break;
+                }
+
+                _ = poll.tick() => {
+                    if precommitted.len() > WARN_PENDING_WATERMARKS {
+                        warn!(
+                            pipeline = H::NAME,
+                            pending = precommitted.len(),
+                            "Pipeline has a large number of pending watermarks",
+                        );
+                    }
+
+                    let Ok(mut conn) = db.connect().await else {
+                        warn!(pipeline = H::NAME, "Committer failed to get connection for DB");
+                        continue;
+                    };
+
+                    // Check if the pipeline's watermark needs to be updated
+                    let guard = metrics
+                        .watermark_gather_latency
+                        .with_label_values(&[H::NAME])
+                        .start_timer();
+
+                    let mut watermark_needs_update = false;
+                    while let Some(pending) = precommitted.first_entry() {
+                        let part = pending.get();
+
+                        // Some rows from the next watermark have not landed yet.
+                        if !part.is_complete() {
+                            break;
+                        }
+
+                        match next_checkpoint.cmp(&part.watermark.checkpoint_hi_inclusive) {
+                            // Next pending checkpoint is from the future.
+                            Ordering::Less => break,
+
+                            // This is the next checkpoint -- include it.
+                            Ordering::Equal => {
+                                watermark = pending.remove().watermark;
+                                watermark_needs_update = true;
+                                next_checkpoint += 1;
+                            }
+
+                            // Next pending checkpoint is in the past. Out of order watermarks can
+                            // be encountered when a pipeline is starting up, because ingestion
+                            // must start at the lowest checkpoint across all pipelines, or because
+                            // of a backfill, where the initial checkpoint has been overridden.
+                            Ordering::Greater => {
+                                // Track how many we see to make sure it doesn't grow without
+                                // bound.
+                                metrics
+                                    .total_watermarks_out_of_order
+                                    .with_label_values(&[H::NAME])
+                                    .inc();
+
+                                pending.remove();
+                            }
+                        }
+                    }
+
+                    let elapsed = guard.stop_and_record();
+
+                    metrics
+                        .watermark_epoch
+                        .with_label_values(&[H::NAME])
+                        .set(watermark.epoch_hi_inclusive);
+
+                    metrics
+                        .watermark_checkpoint
+                        .with_label_values(&[H::NAME])
+                        .set(watermark.checkpoint_hi_inclusive);
+
+                    metrics
+                        .watermark_transaction
+                        .with_label_values(&[H::NAME])
+                        .set(watermark.tx_hi);
+
+                    metrics
+                        .watermark_timestamp_ms
+                        .with_label_values(&[H::NAME])
+                        .set(watermark.timestamp_ms_hi_inclusive);
+
+                    debug!(
+                        pipeline = H::NAME,
+                        elapsed_ms = elapsed * 1000.0,
+                        watermark = watermark.checkpoint_hi_inclusive,
+                        timestamp = %watermark.timestamp(),
+                        pending = precommitted.len(),
+                        "Gathered watermarks",
+                    );
+
+                    if watermark_needs_update {
+                        let guard = metrics
+                            .watermark_commit_latency
+                            .with_label_values(&[H::NAME])
+                            .start_timer();
+
+                        match watermark.update(&mut conn).await {
+                            // If there's an issue updating the watermark, log it but keep going,
+                            // it's OK for the watermark to lag from a correctness perspective.
+                            Err(e) => {
+                                let elapsed = guard.stop_and_record();
+                                error!(
+                                    pipeline = H::NAME,
+                                    elapsed_ms = elapsed * 1000.0,
+                                    ?watermark,
+                                    "Error updating watermark: {e}",
+                                );
+                            }
+
+                            Ok(updated) => {
+                                let elapsed = guard.stop_and_record();
+
+                                if updated {
+                                    metrics
+                                        .watermark_epoch_in_db
+                                        .with_label_values(&[H::NAME])
+                                        .set(watermark.epoch_hi_inclusive);
+
+                                    metrics
+                                        .watermark_checkpoint_in_db
+                                        .with_label_values(&[H::NAME])
+                                        .set(watermark.checkpoint_hi_inclusive);
+
+                                    metrics
+                                        .watermark_transaction_in_db
+                                        .with_label_values(&[H::NAME])
+                                        .set(watermark.tx_hi);
+
+                                    metrics
+                                        .watermark_timestamp_in_db_ms
+                                        .with_label_values(&[H::NAME])
+                                        .set(watermark.timestamp_ms_hi_inclusive);
+                                }
+
+                                if watermark.checkpoint_hi_inclusive > next_loud_watermark_update {
+                                    next_loud_watermark_update += LOUD_WATERMARK_UPDATE_INTERVAL;
+                                    info!(
+                                        pipeline = H::NAME,
+                                        epoch = watermark.epoch_hi_inclusive,
+                                        checkpoint = watermark.checkpoint_hi_inclusive,
+                                        transaction = watermark.tx_hi,
+                                        timestamp = %watermark.timestamp(),
+                                        updated,
+                                        elapsed_ms = elapsed * 1000.0,
+                                        "Watermark",
+                                    );
+                                } else {
+                                    debug!(
+                                        pipeline = H::NAME,
+                                        epoch = watermark.epoch_hi_inclusive,
+                                        checkpoint = watermark.checkpoint_hi_inclusive,
+                                        transaction = watermark.tx_hi,
+                                        timestamp = %watermark.timestamp(),
+                                        updated,
+                                        elapsed_ms = elapsed * 1000.0,
+                                        "Watermark",
+                                    );
+                                }
+                            }
+                        }
+                    }
+
+                    if rx.is_closed() && rx.is_empty() {
+                        info!(pipeline = H::NAME, "Committer closed channel");
+                        break;
+                    }
+                }
+
+                Some(parts) = rx.recv() => {
+                    for part in parts {
+                        match precommitted.entry(part.checkpoint()) {
+                            Entry::Vacant(entry) => {
+                                entry.insert(part);
+                            }
+
+                            Entry::Occupied(mut entry) => {
+                                entry.get_mut().add(part);
+                            }
+                        }
+                    }
+                }
+            }
+        }
+
+        info!(pipeline = H::NAME, ?watermark, "Stopping watermark task");
+    })
+}
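The advancement rule in the polling arm above is: pop precommitted entries while they are complete and contiguous with `next_checkpoint`, dropping stale ones. A standalone sketch of that rule with plain stand-in types:

```rust
// Stand-in sketch of watermark advancement over precommitted checkpoint parts.
use std::collections::BTreeMap;

struct Part {
    batch_rows: usize,
    total_rows: usize,
}

fn advance(next_checkpoint: &mut u64, precommitted: &mut BTreeMap<u64, Part>) -> Option<u64> {
    let mut new_hi = None;
    while let Some((&cp, part)) = precommitted.first_key_value() {
        // Stop at the first incomplete checkpoint or at a hole in the sequence.
        if part.batch_rows != part.total_rows || cp > *next_checkpoint {
            break;
        }
        precommitted.remove(&cp);
        if cp == *next_checkpoint {
            new_hi = Some(cp);
            *next_checkpoint += 1;
        }
        // cp < next_checkpoint: a stale, already-committed checkpoint, just dropped.
    }
    new_hi
}

fn main() {
    let mut pre = BTreeMap::from([
        (10, Part { batch_rows: 3, total_rows: 3 }),
        (11, Part { batch_rows: 1, total_rows: 2 }), // incomplete
        (12, Part { batch_rows: 5, total_rows: 5 }),
    ]);
    let mut next = 10;
    assert_eq!(advance(&mut next, &mut pre), Some(10)); // stops at the incomplete 11
    assert_eq!(next, 11);
}
```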
diff --git a/crates/sui-indexer-alt/src/pipeline/mod.rs b/crates/sui-indexer-alt/src/pipeline/mod.rs
new file mode 100644
index 0000000000000..56005e6ba138e
--- /dev/null
+++ b/crates/sui-indexer-alt/src/pipeline/mod.rs
@@ -0,0 +1,152 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::time::Duration;
+
+use crate::models::watermarks::CommitterWatermark;
+
+pub use processor::Processor;
+
+pub(crate) mod concurrent;
+mod processor;
+pub(crate) mod sequential;
+
+/// A tracing message for the watermark update will be logged at info level at least once every
+/// this many checkpoints.
+const LOUD_WATERMARK_UPDATE_INTERVAL: i64 = 5 * 10;
+
+/// Extra buffer added to channels between tasks in a pipeline. There does not need to be a huge
+/// capacity here because tasks already buffer rows to insert internally.
+const PIPELINE_BUFFER: usize = 5;
+
+/// Issue a warning every time the number of pending watermarks exceeds this number. This can
+/// happen if the pipeline was started with its initial checkpoint overridden to be strictly
+/// greater than its current watermark -- in that case, the pipeline will never be able to update
+/// its watermarks.
+///
+/// This may be a legitimate thing to do when backfilling a table, but in that case
+/// `--skip-watermarks` should be used.
+const WARN_PENDING_WATERMARKS: usize = 10000;
+
+#[derive(clap::Args, Debug, Clone)]
+pub struct PipelineConfig {
+    /// Number of concurrent writers per pipeline
+    #[arg(long, default_value_t = 5)]
+    write_concurrency: usize,
+
+    /// The collector will check for pending data at least this often
+    #[arg(
+        long,
+        default_value = "500",
+        value_name = "MILLISECONDS",
+        value_parser = |s: &str| s.parse().map(Duration::from_millis),
+    )]
+    collect_interval: Duration,
+
+    /// Watermark task will check for pending watermarks this often
+    #[arg(
+        long,
+        default_value = "500",
+        value_name = "MILLISECONDS",
+        value_parser = |s: &str| s.parse().map(Duration::from_millis),
+    )]
+    watermark_interval: Duration,
+
+    /// Avoid writing to the watermark table
+    #[arg(long)]
+    skip_watermark: bool,
+}
+
+/// Processed values associated with a single checkpoint. This is an internal type used to
+/// communicate between the processor and the collector parts of the pipeline.
+struct Indexed<P: Processor> {
+    /// Values to be inserted into the database from this checkpoint
+    values: Vec<P::Value>,
+    /// The watermark associated with this checkpoint
+    watermark: CommitterWatermark<'static>,
+}
+
+/// A representation of the proportion of a watermark.
+#[derive(Debug)]
+struct WatermarkPart {
+    /// The watermark itself
+    watermark: CommitterWatermark<'static>,
+    /// The number of rows from this watermark that are in this part
+    batch_rows: usize,
+    /// The total number of rows from this watermark
+    total_rows: usize,
+}
+
+/// Internal type used by workers to propagate errors or shutdown signals up to their
+/// supervisor.
+#[derive(thiserror::Error, Debug)]
+enum Break {
+    #[error("Shutdown received")]
+    Cancel,
+
+    #[error(transparent)]
+    Err(#[from] anyhow::Error),
+}
+
+impl<P: Processor> Indexed<P> {
+    fn new(
+        epoch: u64,
+        cp_sequence_number: u64,
+        tx_hi: u64,
+        timestamp_ms: u64,
+        values: Vec<P::Value>,
+    ) -> Self {
+        Self {
+            watermark: CommitterWatermark {
+                pipeline: P::NAME.into(),
+                epoch_hi_inclusive: epoch as i64,
+                checkpoint_hi_inclusive: cp_sequence_number as i64,
+                tx_hi: tx_hi as i64,
+                timestamp_ms_hi_inclusive: timestamp_ms as i64,
+            },
+            values,
+        }
+    }
+
+    /// Number of rows from this checkpoint
+    fn len(&self) -> usize {
+        self.values.len()
+    }
+
+    /// The checkpoint sequence number that this data is from
+    fn checkpoint(&self) -> u64 {
+        self.watermark.checkpoint_hi_inclusive as u64
+    }
+}
+
+impl WatermarkPart {
+    fn checkpoint(&self) -> u64 {
+        self.watermark.checkpoint_hi_inclusive as u64
+    }
+
+    /// Check if all the rows from this watermark are represented in this part.
+    fn is_complete(&self) -> bool {
+        self.batch_rows == self.total_rows
+    }
+
+    /// Add the rows from `other` to this part.
+    fn add(&mut self, other: WatermarkPart) {
+        debug_assert_eq!(self.checkpoint(), other.checkpoint());
+        self.batch_rows += other.batch_rows;
+    }
+
+    /// Record that `rows` have been taken from this part.
+    fn take(&mut self, rows: usize) -> WatermarkPart {
+        debug_assert!(
+            self.batch_rows >= rows,
+            "Can't take more rows than are available"
+        );
+
+        self.batch_rows -= rows;
+        WatermarkPart {
+            watermark: self.watermark.clone(),
+            batch_rows: rows,
+            total_rows: self.total_rows,
+        }
+    }
+}
diff --git a/crates/sui-indexer-alt/src/pipeline/processor.rs b/crates/sui-indexer-alt/src/pipeline/processor.rs
new file mode 100644
index 0000000000000..ce5f91194a536
--- /dev/null
+++ b/crates/sui-indexer-alt/src/pipeline/processor.rs
@@ -0,0 +1,128 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::sync::Arc;
+
+use futures::TryStreamExt;
+use mysten_metrics::spawn_monitored_task;
+use sui_types::full_checkpoint_content::CheckpointData;
+use tokio::{sync::mpsc, task::JoinHandle};
+use tokio_stream::{wrappers::ReceiverStream, StreamExt};
+use tokio_util::sync::CancellationToken;
+use tracing::{debug, error, info};
+
+use crate::{metrics::IndexerMetrics, pipeline::Break};
+
+use super::Indexed;
+
+/// Implementors of this trait are responsible for transforming checkpoint data into rows for
+/// their table. The `FANOUT` associated value controls how many concurrent workers will be used
+/// to process checkpoint information.
+pub trait Processor {
+    /// Used to identify the pipeline in logs and metrics.
+    const NAME: &'static str;
+
+    /// How much concurrency to use when processing checkpoint data.
+    const FANOUT: usize = 10;
+
+    /// The type of value being inserted by the handler.
+    type Value: Send + Sync + 'static;
+
+    /// The processing logic for turning a checkpoint into rows of the table.
+    fn process(checkpoint: &Arc<CheckpointData>) -> anyhow::Result<Vec<Self::Value>>;
+}
+
+/// The processor task is responsible for taking checkpoint data and breaking it down into rows
+/// ready to commit. It spins up a supervisor that waits on the `rx` channel for checkpoints, and
+/// distributes them among `P::FANOUT` workers.
+///
+/// Each worker processes a checkpoint into rows and sends them on to the committer using the `tx`
+/// channel.
+///
+/// The task will shutdown if the `cancel` token is cancelled, or if any of the workers encounters
+/// an error -- there is no retry logic at this level.
+pub(super) fn processor<P: Processor + 'static>(
+    rx: mpsc::Receiver<Arc<CheckpointData>>,
+    tx: mpsc::Sender<Indexed<P>>,
+    metrics: Arc<IndexerMetrics>,
+    cancel: CancellationToken,
+) -> JoinHandle<()> {
+    spawn_monitored_task!(async move {
+        info!(pipeline = P::NAME, "Starting processor");
+
+        match ReceiverStream::new(rx)
+            .map(Ok)
+            .try_for_each_concurrent(P::FANOUT, |checkpoint| {
+                let tx = tx.clone();
+                let metrics = metrics.clone();
+                let cancel = cancel.clone();
+                async move {
+                    if cancel.is_cancelled() {
+                        return Err(Break::Cancel);
+                    }
+
+                    metrics
+                        .total_handler_checkpoints_received
+                        .with_label_values(&[P::NAME])
+                        .inc();
+
+                    let guard = metrics
+                        .handler_checkpoint_latency
+                        .with_label_values(&[P::NAME])
+                        .start_timer();
+
+                    let values = P::process(&checkpoint)?;
+                    let elapsed = guard.stop_and_record();
+
+                    let epoch = checkpoint.checkpoint_summary.epoch;
+                    let cp_sequence_number = checkpoint.checkpoint_summary.sequence_number;
+                    let tx_hi = checkpoint.checkpoint_summary.network_total_transactions;
+                    let timestamp_ms = checkpoint.checkpoint_summary.timestamp_ms;
+
+                    debug!(
+                        pipeline = P::NAME,
+                        checkpoint = cp_sequence_number,
+                        elapsed_ms = elapsed * 1000.0,
+                        "Processed checkpoint",
+                    );
+
+                    metrics
+                        .total_handler_checkpoints_processed
+                        .with_label_values(&[P::NAME])
+                        .inc();
+
+                    metrics
+                        .total_handler_rows_created
+                        .with_label_values(&[P::NAME])
+                        .inc_by(values.len() as u64);
+
+                    tx.send(Indexed::new(
+                        epoch,
+                        cp_sequence_number,
+                        tx_hi,
+                        timestamp_ms,
+                        values,
+                    ))
+                    .await
+                    .map_err(|_| Break::Cancel)?;
+
+                    Ok(())
+                }
+            })
+            .await
+        {
+            Ok(()) => {
+                info!(pipeline = P::NAME, "Checkpoints done, stopping processor");
+            }
+
+            Err(Break::Cancel) => {
+                info!(pipeline = P::NAME, "Shutdown received, stopping processor");
+            }
+
+            Err(Break::Err(e)) => {
+                error!(pipeline = P::NAME, "Error from handler: {e}");
+                cancel.cancel();
+            }
+        };
+    })
+}
diff --git a/crates/sui-indexer-alt/src/pipeline/sequential/committer.rs b/crates/sui-indexer-alt/src/pipeline/sequential/committer.rs
new file mode 100644
index 0000000000000..7b392099dd793
--- /dev/null
+++ b/crates/sui-indexer-alt/src/pipeline/sequential/committer.rs
@@ -0,0 +1,408 @@
+// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+use std::{cmp::Ordering, collections::BTreeMap, sync::Arc};
+
+use diesel_async::{scoped_futures::ScopedFutureExt, AsyncConnection};
+use mysten_metrics::spawn_monitored_task;
+use tokio::{
+    sync::mpsc,
+    task::JoinHandle,
+    time::{interval, MissedTickBehavior},
+};
+use tokio_util::sync::CancellationToken;
+use tracing::{debug, info, warn};
+
+use crate::{
+    db::Db,
+    metrics::IndexerMetrics,
+    models::watermarks::CommitterWatermark,
+    pipeline::{Indexed, PipelineConfig, LOUD_WATERMARK_UPDATE_INTERVAL, WARN_PENDING_WATERMARKS},
+};
+
+use super::Handler;
+
+/// The committer task gathers rows into batches and writes them to the database.
+///
+/// Data arrives out of order, grouped by checkpoint, on `rx`. The task orders them and waits to
+/// write them until either a configurable polling interval has passed (controlled by
+/// `config.collect_interval`), or `H::MIN_EAGER_ROWS` rows have been accumulated and we have
+/// received the next expected checkpoint.
+///
+/// Writes are performed on checkpoint boundaries (more than one checkpoint can be present in a
+/// single write), in a single transaction that includes all row updates and an update to the
+/// watermark table.
+///
+/// The committer can optionally be configured to lag behind the ingestion service by a fixed
+/// number of checkpoints (configured by `checkpoint_lag`).
+///
+/// Upon successful write, the task sends its new watermark back to the ingestion service, to
+/// unblock its regulator.
+///
+/// The task can be shutdown using its `cancel` token or if either of its channels are closed.
+pub(super) fn committer<H: Handler + 'static>(
+    config: PipelineConfig,
+    checkpoint_lag: Option<u64>,
+    watermark: Option<CommitterWatermark<'static>>,
+    mut rx: mpsc::Receiver<Indexed<H>>,
+    tx: mpsc::UnboundedSender<(&'static str, u64)>,
+    db: Db,
+    metrics: Arc<IndexerMetrics>,
+    cancel: CancellationToken,
+) -> JoinHandle<()> {
+    spawn_monitored_task!(async move {
+        // The `poll` interval controls the maximum time to wait between commits, regardless of
+        // the amount of data available.
+        let mut poll = interval(config.collect_interval);
+        poll.set_missed_tick_behavior(MissedTickBehavior::Delay);
+
+        // Buffer to gather the next batch to write. A checkpoint's data is only added to the
+        // batch when it is known to come from the next checkpoint after `watermark` (the current
+        // tip of the batch), and data from previous checkpoints will be discarded to avoid double
+        // writes.
+        //
+        // The batch may be non-empty at the top of a tick of the committer's loop if the previous
+        // attempt at a write failed. Attempt is incremented every time a batch write fails, and
+        // is reset when it succeeds.
+        let mut attempt = 0;
+        let mut batch = H::Batch::default();
+        let mut batch_rows = 0;
+        let mut batch_checkpoints = 0;
+
+        // The task keeps track of the highest (inclusive) checkpoint it has added to the batch,
+        // and whether that batch needs to be written out. By extension it also knows the next
+        // checkpoint to expect and add to the batch.
+        let mut watermark_needs_update = false;
+        let (mut watermark, mut next_checkpoint) = if let Some(watermark) = watermark {
+            let next = watermark.checkpoint_hi_inclusive as u64 + 1;
+            (watermark, next)
+        } else {
+            (CommitterWatermark::initial(H::NAME.into()), 0)
+        };
+
+        // The committer task will periodically output a log message at a higher log level to
+        // demonstrate that the pipeline is making progress.
+        let mut next_loud_watermark_update =
+            watermark.checkpoint_hi_inclusive + LOUD_WATERMARK_UPDATE_INTERVAL;
+
+        // Data for checkpoints that haven't been written yet. Note that `pending_rows` includes
+        // rows in `batch`.
+        let mut pending: BTreeMap<u64, Indexed<H>> = BTreeMap::new();
+        let mut pending_rows = 0;
+
+        info!(pipeline = H::NAME, ?watermark, "Starting committer");
+
+        loop {
+            tokio::select! {
+                _ = cancel.cancelled() => {
+                    info!(pipeline = H::NAME, "Shutdown received");
+                    break;
+                }
+
+                _ = poll.tick() => {
+                    if pending.len() > WARN_PENDING_WATERMARKS {
+                        warn!(
+                            pipeline = H::NAME,
+                            pending = pending.len(),
+                            "Pipeline has a large number of pending watermarks",
+                        );
+                    }
+
+                    let Ok(mut conn) = db.connect().await else {
+                        warn!(pipeline = H::NAME, "Failed to get connection for DB");
+                        continue;
+                    };
+
+                    // Determine whether we need to hold back checkpoints from being committed
+                    // because of checkpoint lag.
+                    //
+                    // TODO(amnn): Test this (depends on migrations and tempdb)
+                    let commit_hi_inclusive = match (checkpoint_lag, pending.last_key_value()) {
+                        (Some(lag), None) => {
+                            debug!(pipeline = H::NAME, lag, "No pending checkpoints");
+                            if rx.is_closed() && rx.is_empty() {
+                                info!(pipeline = H::NAME, "Processor closed channel before priming");
+                                break;
+                            } else {
+                                continue;
+                            }
+                        }
+
+                        (Some(lag), Some((pending_hi, _))) if *pending_hi < lag => {
+                            debug!(pipeline = H::NAME, lag, pending_hi, "Priming pipeline");
+                            if rx.is_closed() && rx.is_empty() {
+                                info!(pipeline = H::NAME, "Processor closed channel while priming");
+                                break;
+                            } else {
+                                continue;
+                            }
+                        }
+
+                        (Some(lag), Some((pending_hi, _))) => Some(*pending_hi - lag),
+                        (None, _) => None,
+                    };
+
+                    let guard = metrics
+                        .collector_gather_latency
+                        .with_label_values(&[H::NAME])
+                        .start_timer();
+
+                    // Push data into the next batch as long as it's from contiguous checkpoints,
+                    // outside of the checkpoint lag and we haven't gathered information from too
+                    // many checkpoints already.
+                    //
+                    // We don't worry about overall size because the handler may have optimized
+                    // writes by combining rows, but we will limit the number of checkpoints we try
+                    // and batch together as a way to impose some limit on the size of the batch
+                    // (and therefore the length of the write transaction).
+                    while batch_checkpoints < H::MAX_BATCH_CHECKPOINTS {
+                        let Some(entry) = pending.first_entry() else {
+                            break;
+                        };
+
+                        if matches!(commit_hi_inclusive, Some(hi) if hi < *entry.key()) {
+                            break;
+                        }
+
+                        match next_checkpoint.cmp(entry.key()) {
+                            // Next pending checkpoint is from the future.
+                            Ordering::Less => break,
+
+                            // This is the next checkpoint -- include it.
+                            Ordering::Equal => {
+                                let indexed = entry.remove();
+                                batch_rows += indexed.len();
+                                batch_checkpoints += 1;
+                                H::batch(&mut batch, indexed.values);
+                                watermark = indexed.watermark;
+                                watermark_needs_update = true;
+                                next_checkpoint += 1;
+                            }
+
+                            // Next pending checkpoint is in the past, ignore it to avoid double
+                            // writes.
+                            Ordering::Greater => {
+                                metrics
+                                    .total_watermarks_out_of_order
+                                    .with_label_values(&[H::NAME])
+                                    .inc();
+                                let indexed = entry.remove();
+                                pending_rows -= indexed.len();
+                                continue;
+                            }
+                        }
+                    }
+
+                    let elapsed = guard.stop_and_record();
+                    debug!(
+                        pipeline = H::NAME,
+                        elapsed_ms = elapsed * 1000.0,
+                        rows = batch_rows,
+                        pending = pending_rows,
+                        "Gathered batch",
+                    );
+
+                    metrics
+                        .collector_batch_size
+                        .with_label_values(&[H::NAME])
+                        .observe(batch_rows as f64);
+
+                    metrics
+                        .total_committer_batches_attempted
+                        .with_label_values(&[H::NAME])
+                        .inc();
+
+                    metrics
+                        .watermark_epoch
+                        .with_label_values(&[H::NAME])
+                        .set(watermark.epoch_hi_inclusive);
+
+                    metrics
+                        .watermark_checkpoint
+                        .with_label_values(&[H::NAME])
+                        .set(watermark.checkpoint_hi_inclusive);
+
+                    metrics
+                        .watermark_transaction
+                        .with_label_values(&[H::NAME])
+                        .set(watermark.tx_hi);
+
+                    metrics
+                        .watermark_timestamp_ms
+                        .with_label_values(&[H::NAME])
+                        .set(watermark.timestamp_ms_hi_inclusive);
+
+                    let guard = metrics
+                        .committer_commit_latency
+                        .with_label_values(&[H::NAME])
+                        .start_timer();
+
+                    // Write all the object updates out along with the watermark update, in a
+                    // single transaction. The handler's `commit` implementation is responsible for
+                    // chunking up the writes into a manageable size.
+ let affected = conn.transaction::<_, anyhow::Error, _>(|conn| async { + watermark.update(conn).await?; + H::commit(&batch, conn).await + }.scope_boxed()).await; + + // Drop the connection eagerly to avoid it holding on to references borrowed by + // the transaction closure. + drop(conn); + + let elapsed = guard.stop_and_record(); + + let affected = match affected { + Ok(affected) => affected, + + Err(e) => { + warn!( + pipeline = H::NAME, + elapsed_ms = elapsed * 1000.0, + attempt, + committed = batch_rows, + pending = pending_rows, + "Error writing batch: {e}", + ); + + attempt += 1; + continue; + } + }; + + debug!( + pipeline = H::NAME, + elapsed_ms = elapsed * 1000.0, + attempt, + affected, + committed = batch_rows, + pending = pending_rows, + "Wrote batch", + ); + + metrics + .total_committer_batches_succeeded + .with_label_values(&[H::NAME]) + .inc(); + + metrics + .total_committer_rows_committed + .with_label_values(&[H::NAME]) + .inc_by(batch_rows as u64); + + metrics + .total_committer_rows_affected + .with_label_values(&[H::NAME]) + .inc_by(affected as u64); + + metrics + .watermark_epoch_in_db + .with_label_values(&[H::NAME]) + .set(watermark.epoch_hi_inclusive); + + metrics + .watermark_checkpoint_in_db + .with_label_values(&[H::NAME]) + .set(watermark.checkpoint_hi_inclusive); + + metrics + .watermark_transaction_in_db + .with_label_values(&[H::NAME]) + .set(watermark.tx_hi); + + metrics + .watermark_timestamp_in_db_ms + .with_label_values(&[H::NAME]) + .set(watermark.timestamp_ms_hi_inclusive); + + if watermark.checkpoint_hi_inclusive > next_loud_watermark_update { + next_loud_watermark_update += LOUD_WATERMARK_UPDATE_INTERVAL; + info!( + pipeline = H::NAME, + epoch = watermark.epoch_hi_inclusive, + checkpoint = watermark.checkpoint_hi_inclusive, + transaction = watermark.tx_hi, + timestamp = %watermark.timestamp(), + "Watermark", + ); + } else { + debug!( + pipeline = H::NAME, + epoch = watermark.epoch_hi_inclusive, + checkpoint = watermark.checkpoint_hi_inclusive, + transaction = watermark.tx_hi, + timestamp = %watermark.timestamp(), + "Watermark", + ); + } + + if watermark_needs_update { + // Ignore the result -- the ingestion service will close this channel + // once it is done, but there may still be checkpoints buffered that need + // processing. + let _ = tx.send((H::NAME, watermark.checkpoint_hi_inclusive as u64)); + } + + let _ = std::mem::take(&mut batch); + watermark_needs_update = false; + pending_rows -= batch_rows; + batch_checkpoints = 0; + batch_rows = 0; + attempt = 0; + + // If there is a pending checkpoint, no greater than the expected next + // checkpoint, and less than or equal to the inclusive upperbound due to + // checkpoint lag, then the pipeline can do more work immediately (without + // waiting). + // + // Otherwise, if its channels have been closed, we know that it is guaranteed + // not to make any more progress, and we can stop the task. + if pending + .first_key_value() + .is_some_and(|(next, _)| { + *next <= next_checkpoint && commit_hi_inclusive.map_or(true, |hi| *next <= hi) + }) + { + poll.reset_immediately(); + } else if rx.is_closed() && rx.is_empty() { + info!(pipeline = H::NAME, "Processor closed channel, pending rows empty"); + break; + } + } + + Some(indexed) = rx.recv() => { + pending_rows += indexed.len(); + pending.insert(indexed.checkpoint(), indexed); + + // Once data has been inserted, check if we need to schedule a write before the + // next polling interval. 
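Once a batch has been written, the loop either resets the poll timer immediately (when the next due checkpoint is already buffered and inside the lag bound) or exits if its input channel is closed and drained. A minimal sketch of that "more work ready now?" predicate, lifted out of the loop with assumed simplified types:

```rust
use std::collections::BTreeMap;

/// After a successful write, decide whether the committer should poll again
/// immediately: there is a pending checkpoint that is already due (no gap in
/// front of it) and not held back by the checkpoint-lag bound.
fn has_immediate_work<T>(
    pending: &BTreeMap<u64, T>,
    next_checkpoint: u64,
    commit_hi_inclusive: Option<u64>,
) -> bool {
    pending.first_key_value().is_some_and(|(next, _)| {
        *next <= next_checkpoint && commit_hi_inclusive.map_or(true, |hi| *next <= hi)
    })
}

fn main() {
    let pending: BTreeMap<u64, ()> = [(10, ()), (11, ())].into();
    // Checkpoint 10 is exactly the next expected one and under the lag bound.
    assert!(has_immediate_work(&pending, 10, Some(12)));
    // Everything pending is held back by the lag bound: wait for the next tick.
    assert!(!has_immediate_work(&pending, 10, Some(9)));
    // The earliest pending checkpoint is in the future: a gap must fill first.
    assert!(!has_immediate_work(&pending, 8, None));
}
```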
This is appropriate if there are a minimum number of + // rows to write, and they are already in the batch, or we can process the next + // checkpoint to extract them. + + if pending_rows < H::MIN_EAGER_ROWS { + continue; + } + + if batch_rows > 0 { + poll.reset_immediately(); + continue; + } + + let Some((next, _)) = pending.first_key_value() else { + continue; + }; + + match (checkpoint_lag, pending.last_key_value()) { + (Some(_), None) => continue, + (Some(lag), Some((last, _))) if last.saturating_sub(lag) <= *next => { + continue; + } + _ => if *next <= next_checkpoint { + poll.reset_immediately(); + } + } + } + } + } + + info!(pipeline = H::NAME, ?watermark, "Stopping committer"); + }) +} diff --git a/crates/sui-indexer-alt/src/pipeline/sequential/mod.rs b/crates/sui-indexer-alt/src/pipeline/sequential/mod.rs new file mode 100644 index 0000000000000..4ad82276a315e --- /dev/null +++ b/crates/sui-indexer-alt/src/pipeline/sequential/mod.rs @@ -0,0 +1,114 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use std::sync::Arc; + +use sui_types::full_checkpoint_content::CheckpointData; +use tokio::{sync::mpsc, task::JoinHandle}; +use tokio_util::sync::CancellationToken; + +use crate::{ + db::{self, Db}, + metrics::IndexerMetrics, + models::watermarks::CommitterWatermark, +}; + +use super::{processor::processor, PipelineConfig, Processor, PIPELINE_BUFFER}; + +use self::committer::committer; + +mod committer; + +/// Handlers implement the logic for a given indexing pipeline: How to process checkpoint data (by +/// implementing [Processor]) into rows for their table, how to combine multiple rows into a single +/// DB operation, and then how to write those rows atomically to the database. +/// +/// The handler is also responsible for tuning the various parameters of the pipeline (provided as +/// associated values). +/// +/// Sequential handlers can only be used in sequential pipelines, where checkpoint data is +/// processed out-of-order, but then gathered and written in order. If multiple checkpoints are +/// available, the pipeline will attempt to combine their writes taking advantage of batching to +/// avoid emitting redundant writes. +/// +/// Back-pressure is handled by setting a high watermark on the ingestion service: The pipeline +/// notifies the ingestion service of the checkpoint it last successfully wrote to the database +/// for, and in turn the ingestion service will only run ahead by its buffer size. This guarantees +/// liveness and limits the amount of memory the pipeline can consume, by bounding the number of +/// checkpoints that can be received before the next checkpoint. +#[async_trait::async_trait] +pub trait Handler: Processor { + /// If at least this many rows are pending, the committer will commit them eagerly. + const MIN_EAGER_ROWS: usize = 50; + + /// Maximum number of checkpoints to try and write in a single batch. The larger this number + /// is, the more chances the pipeline has to merge redundant writes, but the longer each write + /// transaction is likely to be. + const MAX_BATCH_CHECKPOINTS: usize = 5 * 60; + + /// A type to combine multiple `Self::Value`-s into. This can be used to avoid redundant writes + /// by combining multiple rows into one (e.g. if one row supersedes another, the latter can be + /// omitted). + type Batch: Default + Send + Sync + 'static; + + /// Add `values` from processing a checkpoint to the current `batch`. Checkpoints are + /// guaranteed to be presented to the batch in checkpoint order. 
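The `Batch` type gives a sequential handler a place to merge writes before they reach the database. The following self-contained sketch shows the idea independently of the trait itself; the `ObjectUpdate` row and `LatestObjects` batch are hypothetical, and the real handler would plug equivalent logic into `Handler::batch`. Later versions of an object supersede earlier ones, so only the newest row per object survives.

```rust
use std::collections::HashMap;

/// A hypothetical processed row: one update to an object at a given version.
#[derive(Clone, Debug, PartialEq)]
struct ObjectUpdate {
    object_id: [u8; 32],
    version: u64,
    payload: Vec<u8>,
}

/// A hypothetical batch that keeps only the latest update per object, so that
/// merging several checkpoints' worth of rows avoids redundant writes.
#[derive(Default)]
struct LatestObjects {
    latest: HashMap<[u8; 32], ObjectUpdate>,
}

impl LatestObjects {
    /// Fold one checkpoint's rows into the batch. Because checkpoints arrive
    /// in order, a row for an already-seen object supersedes the stored one.
    fn add(&mut self, values: Vec<ObjectUpdate>) {
        for row in values {
            self.latest.insert(row.object_id, row);
        }
    }

    fn rows(&self) -> usize {
        self.latest.len()
    }
}

fn main() {
    let id = [1u8; 32];
    let mut batch = LatestObjects::default();
    batch.add(vec![ObjectUpdate { object_id: id, version: 1, payload: vec![0xaa] }]);
    batch.add(vec![ObjectUpdate { object_id: id, version: 2, payload: vec![0xbb] }]);
    // Two checkpoints touched the same object, but only one row is written.
    assert_eq!(batch.rows(), 1);
    assert_eq!(batch.latest[&id].version, 2);
}
```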
+ fn batch(batch: &mut Self::Batch, values: Vec); + + /// Take a batch of values and commit them to the database, returning the number of rows + /// affected. + async fn commit(batch: &Self::Batch, conn: &mut db::Connection<'_>) -> anyhow::Result; +} + +/// Start a new sequential (in-order) indexing pipeline, served by the handler, `H`. Starting +/// strictly after the `watermark` (or from the beginning if no watermark was provided). +/// +/// Each pipeline consists of a processor which takes checkpoint data and breaks it down into rows, +/// ready for insertion, and a committer which orders the rows and combines them into batches to +/// write to the database. +/// +/// Commits are performed in checkpoint order, potentially involving multiple checkpoints at a +/// time. The call to [Handler::commit] and the associated watermark update are performed in a +/// transaction to ensure atomicity. Unlike in the case of concurrent pipelines, the data passed to +/// [Handler::commit] is not chunked up, so the handler must perform this step itself, if +/// necessary. +/// +/// The pipeline can optionally be configured to lag behind the ingestion service by a fixed number +/// of checkpoints (configured by `checkpoint_lag`). +/// +/// Watermarks are also shared with the ingestion service, which is guaranteed to bound the +/// checkpoint height it pre-fetches to some constant additive factor above the pipeline's +/// watermark. +/// +/// Checkpoint data is fed into the pipeline through the `checkpoint_rx` channel, watermark updates +/// are communicated to the ingestion service through the `watermark_tx` channel and internal +/// channels are created to communicate between its various components. The pipeline can be +/// shutdown using its `cancel` token, and will also shutdown if any of its input or output +/// channels close, or any of its independent tasks fail. +pub(crate) fn pipeline( + initial_watermark: Option>, + config: PipelineConfig, + checkpoint_lag: Option, + db: Db, + checkpoint_rx: mpsc::Receiver>, + watermark_tx: mpsc::UnboundedSender<(&'static str, u64)>, + metrics: Arc, + cancel: CancellationToken, +) -> (JoinHandle<()>, JoinHandle<()>) { + let (processor_tx, committer_rx) = mpsc::channel(H::FANOUT + PIPELINE_BUFFER); + + let processor = processor::(checkpoint_rx, processor_tx, metrics.clone(), cancel.clone()); + + let committer = committer::( + config.clone(), + checkpoint_lag, + initial_watermark, + committer_rx, + watermark_tx, + db.clone(), + metrics.clone(), + cancel.clone(), + ); + + (processor, committer) +} diff --git a/crates/sui-indexer-alt/src/schema.rs b/crates/sui-indexer-alt/src/schema.rs new file mode 100644 index 0000000000000..7b492f46d9753 --- /dev/null +++ b/crates/sui-indexer-alt/src/schema.rs @@ -0,0 +1,152 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 +// @generated automatically by Diesel CLI. + +diesel::table! { + ev_emit_mod (package, module, tx_sequence_number) { + package -> Bytea, + module -> Text, + tx_sequence_number -> Int8, + sender -> Bytea, + } +} + +diesel::table! { + ev_struct_inst (package, module, name, instantiation, tx_sequence_number) { + package -> Bytea, + module -> Text, + name -> Text, + instantiation -> Bytea, + tx_sequence_number -> Int8, + sender -> Bytea, + } +} + +diesel::table! { + kv_checkpoints (sequence_number) { + sequence_number -> Int8, + certified_checkpoint -> Bytea, + checkpoint_contents -> Bytea, + } +} + +diesel::table! 
{ + kv_objects (object_id, object_version) { + object_id -> Bytea, + object_version -> Int8, + serialized_object -> Nullable, + } +} + +diesel::table! { + kv_transactions (tx_digest) { + tx_digest -> Bytea, + cp_sequence_number -> Int8, + timestamp_ms -> Int8, + raw_transaction -> Bytea, + raw_effects -> Bytea, + events -> Bytea, + } +} + +diesel::table! { + obj_versions (object_id, object_version) { + object_id -> Bytea, + object_version -> Int8, + object_digest -> Bytea, + cp_sequence_number -> Int8, + } +} + +diesel::table! { + sum_coin_balances (object_id) { + object_id -> Bytea, + object_version -> Int8, + owner_id -> Bytea, + coin_type -> Bytea, + coin_balance -> Int8, + } +} + +diesel::table! { + sum_obj_types (object_id) { + object_id -> Bytea, + object_version -> Int8, + owner_kind -> Int2, + owner_id -> Nullable, + package -> Nullable, + module -> Nullable, + name -> Nullable, + instantiation -> Nullable, + } +} + +diesel::table! { + tx_affected_objects (affected, tx_sequence_number) { + tx_sequence_number -> Int8, + affected -> Bytea, + sender -> Bytea, + } +} + +diesel::table! { + tx_balance_changes (tx_sequence_number) { + tx_sequence_number -> Int8, + balance_changes -> Bytea, + } +} + +diesel::table! { + wal_coin_balances (object_id, object_version) { + object_id -> Bytea, + object_version -> Int8, + owner_id -> Nullable, + coin_type -> Nullable, + coin_balance -> Nullable, + cp_sequence_number -> Int8, + } +} + +diesel::table! { + wal_obj_types (object_id, object_version) { + object_id -> Bytea, + object_version -> Int8, + owner_kind -> Nullable, + owner_id -> Nullable, + package -> Nullable, + module -> Nullable, + name -> Nullable, + instantiation -> Nullable, + cp_sequence_number -> Int8, + } +} + +diesel::table! { + watermarks (pipeline) { + pipeline -> Text, + epoch_hi_inclusive -> Int8, + checkpoint_hi_inclusive -> Int8, + tx_hi -> Int8, + timestamp_ms_hi_inclusive -> Int8, + epoch_lo -> Int8, + reader_lo -> Int8, + pruner_timestamp_ms -> Int8, + pruner_hi -> Int8, + } +} + +diesel::allow_tables_to_appear_in_same_query!( + ev_emit_mod, + ev_struct_inst, + kv_checkpoints, + kv_objects, + kv_transactions, + obj_versions, + sum_coin_balances, + sum_obj_types, + tx_affected_objects, + tx_balance_changes, + wal_coin_balances, + wal_obj_types, + watermarks, +); diff --git a/crates/sui-indexer-alt/src/task.rs b/crates/sui-indexer-alt/src/task.rs new file mode 100644 index 0000000000000..d027541a78310 --- /dev/null +++ b/crates/sui-indexer-alt/src/task.rs @@ -0,0 +1,43 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use std::iter; + +use futures::future::{self, Either}; +use tokio::{signal, sync::oneshot, task::JoinHandle}; +use tokio_util::sync::CancellationToken; + +/// Manages cleanly exiting the process, either because one of its constituent services has stopped +/// or because an interrupt signal was sent to the process. +pub async fn graceful_shutdown( + services: impl IntoIterator>, + cancel: CancellationToken, +) { + // If the service is naturalling winding down, we don't need to wait for an interrupt signal. + // This channel is used to short-circuit the await in that case. + let (cancel_ctrl_c_tx, cancel_ctrl_c_rx) = oneshot::channel(); + + let interrupt = async { + tokio::select! 
{ + _ = cancel_ctrl_c_rx => {} + _ = cancel.cancelled() => {} + _ = signal::ctrl_c() => cancel.cancel(), + } + + Ok(()) + }; + + tokio::pin!(interrupt); + let futures: Vec<_> = services + .into_iter() + .map(Either::Left) + .chain(iter::once(Either::Right(interrupt))) + .collect(); + + // Wait for the first service to finish, or for an interrupt signal. + let (_, _, rest) = future::select_all(futures).await; + let _ = cancel_ctrl_c_tx.send(()); + + // Wait for the remaining services to finish. + let _ = future::join_all(rest).await; +} diff --git a/crates/sui-indexer-builder/src/indexer_builder.rs b/crates/sui-indexer-builder/src/indexer_builder.rs index b89ff975ae562..30f96ed2bb15c 100644 --- a/crates/sui-indexer-builder/src/indexer_builder.rs +++ b/crates/sui-indexer-builder/src/indexer_builder.rs @@ -310,6 +310,11 @@ impl Indexer { { &self.storage } + + #[cfg(any(feature = "test-utils", test))] + pub fn test_only_name(&self) -> String { + self.name.clone() + } } #[async_trait] diff --git a/crates/sui-indexer/Cargo.toml b/crates/sui-indexer/Cargo.toml index fa4490741b163..0b22f81ff2bba 100644 --- a/crates/sui-indexer/Cargo.toml +++ b/crates/sui-indexer/Cargo.toml @@ -8,7 +8,7 @@ edition = "2021" [dependencies] anyhow.workspace = true -rand = "0.8.5" +rand = "0.8.5" async-trait.workspace = true axum.workspace = true backoff.workspace = true @@ -64,6 +64,7 @@ sui-protocol-config.workspace = true telemetry-subscribers.workspace = true sui-rest-api.workspace = true sui-transaction-builder.workspace = true +sui-synthetic-ingestion.workspace = true move-core-types.workspace = true move-bytecode-utils.workspace = true @@ -77,6 +78,7 @@ dashmap.workspace = true [dev-dependencies] sui-keys.workspace = true sui-move-build.workspace = true +sui-swarm-config.workspace = true sui-test-transaction-builder.workspace = true test-cluster.workspace = true ntest.workspace = true diff --git a/crates/sui-indexer/migrations/pg/2024-10-25-153629_remove_objects_history_full_indices/down.sql b/crates/sui-indexer/migrations/pg/2024-10-25-153629_remove_objects_history_full_indices/down.sql new file mode 100644 index 0000000000000..807c01dca462d --- /dev/null +++ b/crates/sui-indexer/migrations/pg/2024-10-25-153629_remove_objects_history_full_indices/down.sql @@ -0,0 +1,6 @@ +CREATE INDEX IF NOT EXISTS objects_history_owner ON objects_history (checkpoint_sequence_number, owner_type, owner_id) WHERE owner_type BETWEEN 1 AND 2 AND owner_id IS NOT NULL; +CREATE INDEX IF NOT EXISTS objects_history_coin_owner ON objects_history (checkpoint_sequence_number, owner_id, coin_type, object_id) WHERE coin_type IS NOT NULL AND owner_type = 1; +CREATE INDEX IF NOT EXISTS objects_history_coin_only ON objects_history (checkpoint_sequence_number, coin_type, object_id) WHERE coin_type IS NOT NULL; +CREATE INDEX IF NOT EXISTS objects_history_type ON objects_history (checkpoint_sequence_number, object_type); +CREATE INDEX IF NOT EXISTS objects_history_package_module_name_full_type ON objects_history (checkpoint_sequence_number, object_type_package, object_type_module, object_type_name, object_type); +CREATE INDEX IF NOT EXISTS objects_history_owner_package_module_name_full_type ON objects_history (checkpoint_sequence_number, owner_id, object_type_package, object_type_module, object_type_name, object_type); diff --git a/crates/sui-indexer/migrations/pg/2024-10-25-153629_remove_objects_history_full_indices/up.sql b/crates/sui-indexer/migrations/pg/2024-10-25-153629_remove_objects_history_full_indices/up.sql new file mode 100644 index 
0000000000000..754e719819f1e --- /dev/null +++ b/crates/sui-indexer/migrations/pg/2024-10-25-153629_remove_objects_history_full_indices/up.sql @@ -0,0 +1,6 @@ +DROP INDEX IF EXISTS objects_history_owner; +DROP INDEX IF EXISTS objects_history_coin_owner; +DROP INDEX IF EXISTS objects_history_coin_only; +DROP INDEX IF EXISTS objects_history_type; +DROP INDEX IF EXISTS objects_history_package_module_name_full_type; +DROP INDEX IF EXISTS objects_history_owner_package_module_name_full_type; diff --git a/crates/sui-indexer/migrations/pg/2024-10-30-005153_drop_tx_sender_and_recipient/down.sql b/crates/sui-indexer/migrations/pg/2024-10-30-005153_drop_tx_sender_and_recipient/down.sql new file mode 100644 index 0000000000000..b9fcef3e1f439 --- /dev/null +++ b/crates/sui-indexer/migrations/pg/2024-10-30-005153_drop_tx_sender_and_recipient/down.sql @@ -0,0 +1,18 @@ +CREATE TABLE tx_senders ( + tx_sequence_number BIGINT NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(sender, tx_sequence_number) +); + +CREATE INDEX IF NOT EXISTS tx_senders_tx_sequence_number + ON tx_senders (tx_sequence_number); + +CREATE TABLE tx_recipients ( + tx_sequence_number BIGINT NOT NULL, + recipient BYTEA NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(recipient, tx_sequence_number) +); + +CREATE INDEX IF NOT EXISTS tx_recipients_sender + ON tx_recipients (sender, recipient, tx_sequence_number); diff --git a/crates/sui-indexer/migrations/pg/2024-10-30-005153_drop_tx_sender_and_recipient/up.sql b/crates/sui-indexer/migrations/pg/2024-10-30-005153_drop_tx_sender_and_recipient/up.sql new file mode 100644 index 0000000000000..fb259ea615d84 --- /dev/null +++ b/crates/sui-indexer/migrations/pg/2024-10-30-005153_drop_tx_sender_and_recipient/up.sql @@ -0,0 +1,2 @@ +DROP TABLE IF EXISTS tx_senders; +DROP TABLE IF EXISTS tx_recipients; diff --git a/crates/sui-indexer/src/apis/read_api.rs b/crates/sui-indexer/src/apis/read_api.rs index 78b8715e16ce7..3e3de5343869d 100644 --- a/crates/sui-indexer/src/apis/read_api.rs +++ b/crates/sui-indexer/src/apis/read_api.rs @@ -87,7 +87,11 @@ impl ReadApiServer for ReadApi { object_read_to_object_response(&self.inner, object_read, options.clone()).await }); - futures::future::try_join_all(futures).await + let mut objects = futures::future::try_join_all(futures).await?; + // Resort the objects by the order of the object id. + objects.sort_by_key(|obj| obj.data.as_ref().map(|data| data.object_id)); + + Ok(objects) } async fn get_total_transaction_blocks(&self) -> RpcResult> { diff --git a/crates/sui-indexer/src/backfill/backfill_instances/ingestion_backfills/digest_task.rs b/crates/sui-indexer/src/backfill/backfill_instances/ingestion_backfills/digest_task.rs new file mode 100644 index 0000000000000..8273bcdaa3b7b --- /dev/null +++ b/crates/sui-indexer/src/backfill/backfill_instances/ingestion_backfills/digest_task.rs @@ -0,0 +1,26 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::backfill::backfill_instances::ingestion_backfills::IngestionBackfillTrait; +use crate::database::ConnectionPool; +use sui_types::full_checkpoint_content::CheckpointData; +use tracing::info; + +/// Dummy backfill that only prints the sequence number and checkpoint of the digest. Intended to +/// benchmark backfill performance. 
+pub struct DigestBackfill; + +#[async_trait::async_trait] +impl IngestionBackfillTrait for DigestBackfill { + type ProcessedType = (); + + fn process_checkpoint(checkpoint: &CheckpointData) -> Vec { + let cp = checkpoint.checkpoint_summary.sequence_number; + let digest = checkpoint.checkpoint_summary.content_digest; + info!("{cp}: {digest}"); + + vec![] + } + + async fn commit_chunk(_pool: ConnectionPool, _processed_data: Vec) {} +} diff --git a/crates/sui-indexer/src/backfill/backfill_instances/ingestion_backfills/mod.rs b/crates/sui-indexer/src/backfill/backfill_instances/ingestion_backfills/mod.rs index 17bbc29d7dc5c..935ba5562bd9c 100644 --- a/crates/sui-indexer/src/backfill/backfill_instances/ingestion_backfills/mod.rs +++ b/crates/sui-indexer/src/backfill/backfill_instances/ingestion_backfills/mod.rs @@ -1,6 +1,7 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +pub(crate) mod digest_task; pub(crate) mod ingestion_backfill_task; pub(crate) mod raw_checkpoints; pub(crate) mod tx_affected_objects; diff --git a/crates/sui-indexer/src/backfill/backfill_instances/mod.rs b/crates/sui-indexer/src/backfill/backfill_instances/mod.rs index 27c96dd6c9234..304ed4e715e1d 100644 --- a/crates/sui-indexer/src/backfill/backfill_instances/mod.rs +++ b/crates/sui-indexer/src/backfill/backfill_instances/mod.rs @@ -1,6 +1,7 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +use crate::backfill::backfill_instances::ingestion_backfills::digest_task::DigestBackfill; use crate::backfill::backfill_instances::ingestion_backfills::ingestion_backfill_task::IngestionBackfillTask; use crate::backfill::backfill_instances::ingestion_backfills::raw_checkpoints::RawCheckpointsBackFill; use crate::backfill::backfill_instances::ingestion_backfills::tx_affected_objects::TxAffectedObjectsBackfill; @@ -28,6 +29,13 @@ pub async fn get_backfill_task( kind, remote_store_url, } => match kind { + IngestionBackfillKind::Digest => Arc::new( + IngestionBackfillTask::::new( + remote_store_url, + range_start as CheckpointSequenceNumber, + ) + .await, + ), IngestionBackfillKind::RawCheckpoints => Arc::new( IngestionBackfillTask::::new( remote_store_url, diff --git a/crates/sui-indexer/src/backfill/mod.rs b/crates/sui-indexer/src/backfill/mod.rs index 453d11baeeed2..e17ba40628ef1 100644 --- a/crates/sui-indexer/src/backfill/mod.rs +++ b/crates/sui-indexer/src/backfill/mod.rs @@ -29,6 +29,7 @@ pub enum BackfillTaskKind { #[derive(ValueEnum, Clone, Debug)] pub enum IngestionBackfillKind { + Digest, RawCheckpoints, TxAffectedObjects, } diff --git a/crates/sui-indexer/src/benchmark.rs b/crates/sui-indexer/src/benchmark.rs new file mode 100644 index 0000000000000..96df25cba9fa6 --- /dev/null +++ b/crates/sui-indexer/src/benchmark.rs @@ -0,0 +1,130 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use crate::config::{BenchmarkConfig, IngestionConfig, IngestionSources, UploadOptions}; +use crate::database::ConnectionPool; +use crate::db::{reset_database, run_migrations}; +use crate::errors::IndexerError; +use crate::indexer::Indexer; +use crate::metrics::IndexerMetrics; +use crate::store::PgIndexerStore; +use std::path::PathBuf; +use sui_synthetic_ingestion::benchmark::{run_benchmark, BenchmarkableIndexer}; +use sui_synthetic_ingestion::{IndexerProgress, SyntheticIngestionConfig}; +use tokio::sync::watch; +use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; + +pub async fn run_indexer_benchmark( + config: BenchmarkConfig, + pool: ConnectionPool, + metrics: IndexerMetrics, +) { + if config.reset_db { + reset_database(pool.dedicated_connection().await.unwrap()) + .await + .unwrap(); + } else { + run_migrations(pool.dedicated_connection().await.unwrap()) + .await + .unwrap(); + } + let store = PgIndexerStore::new(pool, UploadOptions::default(), metrics.clone()); + let ingestion_dir = config + .workload_dir + .clone() + .unwrap_or_else(|| tempfile::tempdir().unwrap().into_path()); + // If we are using a non-temp directory, we should not delete the ingestion directory. + let gc_checkpoint_files = config.workload_dir.is_none(); + let synthetic_ingestion_config = SyntheticIngestionConfig { + ingestion_dir: ingestion_dir.clone(), + checkpoint_size: config.checkpoint_size, + num_checkpoints: config.num_checkpoints, + starting_checkpoint: config.starting_checkpoint, + }; + let indexer = BenchmarkIndexer::new(store, metrics, ingestion_dir, gc_checkpoint_files); + run_benchmark(synthetic_ingestion_config, indexer).await; +} + +pub struct BenchmarkIndexer { + inner: Option, + cancel: CancellationToken, + committed_checkpoints_rx: watch::Receiver>, + handle: Option>>, +} + +struct BenchmarkIndexerInner { + ingestion_dir: PathBuf, + gc_checkpoint_files: bool, + store: PgIndexerStore, + metrics: IndexerMetrics, + committed_checkpoints_tx: watch::Sender>, +} + +impl BenchmarkIndexer { + pub fn new( + store: PgIndexerStore, + metrics: IndexerMetrics, + ingestion_dir: PathBuf, + gc_checkpoint_files: bool, + ) -> Self { + let cancel = CancellationToken::new(); + let (committed_checkpoints_tx, committed_checkpoints_rx) = watch::channel(None); + Self { + inner: Some(BenchmarkIndexerInner { + ingestion_dir, + gc_checkpoint_files, + store, + metrics, + committed_checkpoints_tx, + }), + cancel, + committed_checkpoints_rx, + handle: None, + } + } +} + +#[async_trait::async_trait] +impl BenchmarkableIndexer for BenchmarkIndexer { + fn subscribe_to_committed_checkpoints(&self) -> watch::Receiver> { + self.committed_checkpoints_rx.clone() + } + + async fn start(&mut self) { + let BenchmarkIndexerInner { + ingestion_dir, + gc_checkpoint_files, + store, + metrics, + committed_checkpoints_tx, + } = self.inner.take().unwrap(); + let ingestion_config = IngestionConfig { + sources: IngestionSources { + data_ingestion_path: Some(ingestion_dir), + ..Default::default() + }, + gc_checkpoint_files, + ..Default::default() + }; + let cancel = self.cancel.clone(); + let handle = tokio::task::spawn(async move { + Indexer::start_writer( + ingestion_config, + store, + metrics, + Default::default(), + None, + cancel, + Some(committed_checkpoints_tx), + ) + .await + }); + self.handle = Some(handle); + } + + async fn stop(mut self) { + self.cancel.cancel(); + self.handle.unwrap().await.unwrap().unwrap(); + } +} diff --git a/crates/sui-indexer/src/config.rs 
b/crates/sui-indexer/src/config.rs index f51d18ab1ff88..6db349aa64747 100644 --- a/crates/sui-indexer/src/config.rs +++ b/crates/sui-indexer/src/config.rs @@ -114,6 +114,16 @@ pub struct IngestionConfig { )] pub checkpoint_download_queue_size: usize, + /// Start checkpoint to ingest from, this is optional and if not provided, the ingestion will + /// start from the next checkpoint after the latest committed checkpoint. + #[arg(long, env = "START_CHECKPOINT")] + pub start_checkpoint: Option, + + /// End checkpoint to ingest until, this is optional and if not provided, the ingestion will + /// continue until u64::MAX. + #[arg(long, env = "END_CHECKPOINT")] + pub end_checkpoint: Option, + #[arg( long, default_value_t = Self::DEFAULT_CHECKPOINT_DOWNLOAD_TIMEOUT, @@ -129,6 +139,11 @@ pub struct IngestionConfig { env = "CHECKPOINT_PROCESSING_BATCH_DATA_LIMIT", )] pub checkpoint_download_queue_size_bytes: usize, + + /// Whether to delete processed checkpoint files from the local directory, + /// when running Fullnode-colocated indexer. + #[arg(long, default_value_t = true)] + pub gc_checkpoint_files: bool, } impl IngestionConfig { @@ -141,10 +156,13 @@ impl Default for IngestionConfig { fn default() -> Self { Self { sources: Default::default(), + start_checkpoint: None, + end_checkpoint: None, checkpoint_download_queue_size: Self::DEFAULT_CHECKPOINT_DOWNLOAD_QUEUE_SIZE, checkpoint_download_timeout: Self::DEFAULT_CHECKPOINT_DOWNLOAD_TIMEOUT, checkpoint_download_queue_size_bytes: Self::DEFAULT_CHECKPOINT_DOWNLOAD_QUEUE_SIZE_BYTES, + gc_checkpoint_files: true, } } } @@ -186,6 +204,10 @@ pub enum Command { ResetDatabase { #[clap(long)] force: bool, + /// If true, only drop all tables but do not run the migrations. + /// That is, no tables will exist in the DB after the reset. + #[clap(long, default_value_t = false)] + skip_migrations: bool, }, /// Run through the migration scripts. RunMigrations, @@ -210,6 +232,7 @@ pub enum Command { }, /// Restore the database from formal snaphots. Restore(RestoreConfig), + Benchmark(BenchmarkConfig), } #[derive(Args, Default, Debug, Clone)] @@ -378,6 +401,41 @@ impl Default for RestoreConfig { } } +#[derive(Args, Debug, Clone)] +pub struct BenchmarkConfig { + #[arg( + long, + default_value_t = 200, + help = "Number of transactions in a checkpoint." + )] + pub checkpoint_size: u64, + #[arg( + long, + default_value_t = 2000, + help = "Total number of synthetic checkpoints to generate." + )] + pub num_checkpoints: u64, + #[arg( + long, + default_value_t = 1, + help = "Customize the first checkpoint sequence number to be committed, must be non-zero." + )] + pub starting_checkpoint: u64, + #[arg( + long, + default_value_t = false, + help = "Whether to reset the database before running." + )] + pub reset_db: bool, + #[arg( + long, + help = "Path to workload directory. If not provided, a temporary directory will be created.\ + If provided, synthetic workload generator will either load data from it if it exists or generate new data.\ + This avoids repeat generation of the same data." 
+ )] + pub workload_dir: Option, +} + #[cfg(test)] mod test { use super::*; diff --git a/crates/sui-indexer/src/db.rs b/crates/sui-indexer/src/db.rs index 9937b61ce2655..4a2893603bb10 100644 --- a/crates/sui-indexer/src/db.rs +++ b/crates/sui-indexer/src/db.rs @@ -196,7 +196,14 @@ pub mod setup_postgres { pub async fn reset_database(mut conn: Connection<'static>) -> Result<(), anyhow::Error> { info!("Resetting PG database ..."); + clear_database(&mut conn).await?; + run_migrations(conn).await?; + info!("Reset database complete."); + Ok(()) + } + pub async fn clear_database(conn: &mut Connection<'static>) -> Result<(), anyhow::Error> { + info!("Clearing the database..."); let drop_all_tables = " DO $$ DECLARE r RECORD; @@ -206,9 +213,7 @@ pub mod setup_postgres { EXECUTE 'DROP TABLE IF EXISTS ' || quote_ident(r.tablename) || ' CASCADE'; END LOOP; END $$;"; - diesel::sql_query(drop_all_tables) - .execute(&mut conn) - .await?; + diesel::sql_query(drop_all_tables).execute(conn).await?; info!("Dropped all tables."); let drop_all_procedures = " @@ -222,9 +227,7 @@ pub mod setup_postgres { EXECUTE 'DROP PROCEDURE IF EXISTS ' || quote_ident(r.proname) || '(' || r.argtypes || ') CASCADE'; END LOOP; END $$;"; - diesel::sql_query(drop_all_procedures) - .execute(&mut conn) - .await?; + diesel::sql_query(drop_all_procedures).execute(conn).await?; info!("Dropped all procedures."); let drop_all_functions = " @@ -238,17 +241,13 @@ pub mod setup_postgres { EXECUTE 'DROP FUNCTION IF EXISTS ' || quote_ident(r.proname) || '(' || r.argtypes || ') CASCADE'; END LOOP; END $$;"; - diesel::sql_query(drop_all_functions) - .execute(&mut conn) - .await?; - info!("Dropped all functions."); - - run_migrations(conn).await?; - info!("Reset database complete."); + diesel::sql_query(drop_all_functions).execute(conn).await?; + info!("Database cleared."); Ok(()) } pub async fn run_migrations(conn: Connection<'static>) -> Result<(), anyhow::Error> { + info!("Running migrations ..."); conn.run_pending_migrations(MIGRATIONS) .await .map_err(|e| anyhow!("Failed to run migrations {e}"))?; diff --git a/crates/sui-indexer/src/handlers/checkpoint_handler.rs b/crates/sui-indexer/src/handlers/checkpoint_handler.rs index be4e0d375a923..170bda5ff6108 100644 --- a/crates/sui-indexer/src/handlers/checkpoint_handler.rs +++ b/crates/sui-indexer/src/handlers/checkpoint_handler.rs @@ -8,12 +8,13 @@ use async_trait::async_trait; use itertools::Itertools; use sui_types::dynamic_field::DynamicFieldInfo; use tokio_util::sync::CancellationToken; -use tracing::info; +use tracing::{info, warn}; use move_core_types::language_storage::{StructTag, TypeTag}; use mysten_metrics::{get_metrics, spawn_monitored_task}; use sui_data_ingestion_core::Worker; use sui_rest_api::{CheckpointData, CheckpointTransaction}; +use sui_synthetic_ingestion::IndexerProgress; use sui_types::dynamic_field::DynamicFieldType; use sui_types::effects::{ObjectChange, TransactionEffectsAPI}; use sui_types::event::SystemEpochInfoEvent; @@ -24,12 +25,13 @@ use sui_types::object::Object; use sui_types::object::Owner; use sui_types::sui_system_state::{get_sui_system_state, SuiSystemStateTrait}; use sui_types::transaction::TransactionDataAPI; +use tokio::sync::watch; use crate::errors::IndexerError; use crate::handlers::committer::start_tx_checkpoint_commit_task; use crate::metrics::IndexerMetrics; use crate::models::display::StoredDisplay; -use crate::models::epoch::{EndOfEpochUpdate, StartOfEpochUpdate}; +use crate::models::epoch::{EndOfEpochUpdate, EpochEndInfo, EpochStartInfo, 
StartOfEpochUpdate}; use crate::models::obj_indices::StoredObjectVersion; use crate::store::{IndexerStore, PgIndexerStore}; use crate::types::{ @@ -48,9 +50,20 @@ const CHECKPOINT_QUEUE_SIZE: usize = 100; pub async fn new_handlers( state: PgIndexerStore, metrics: IndexerMetrics, - next_checkpoint_sequence_number: CheckpointSequenceNumber, cancel: CancellationToken, -) -> Result { + committed_checkpoints_tx: Option>>, + start_checkpoint_opt: Option, + end_checkpoint_opt: Option, +) -> Result<(CheckpointHandler, u64), IndexerError> { + let start_checkpoint = match start_checkpoint_opt { + Some(start_checkpoint) => start_checkpoint, + None => state + .get_latest_checkpoint_sequence_number() + .await? + .map(|seq| seq.saturating_add(1)) + .unwrap_or_default(), + }; + let checkpoint_queue_size = std::env::var("CHECKPOINT_QUEUE_SIZE") .unwrap_or(CHECKPOINT_QUEUE_SIZE.to_string()) .parse::() @@ -70,13 +83,14 @@ pub async fn new_handlers( state_clone, metrics_clone, indexed_checkpoint_receiver, - next_checkpoint_sequence_number, - cancel.clone() + cancel.clone(), + committed_checkpoints_tx, + start_checkpoint, + end_checkpoint_opt, )); - Ok(CheckpointHandler::new( - state, - metrics, - indexed_checkpoint_sender, + Ok(( + CheckpointHandler::new(state, metrics, indexed_checkpoint_sender), + start_checkpoint, )) } @@ -153,12 +167,7 @@ impl CheckpointHandler { get_sui_system_state(&checkpoint_object_store)?.into_sui_system_state_summary(); return Ok(Some(EpochToCommit { last_epoch: None, - new_epoch: StartOfEpochUpdate::new( - system_state_summary, - 0, //first_checkpoint_id - 0, // first_tx_sequence_number - None, - ), + new_epoch: StartOfEpochUpdate::new(system_state_summary, EpochStartInfo::default()), })); } @@ -170,24 +179,34 @@ impl CheckpointHandler { let system_state_summary = get_sui_system_state(&checkpoint_object_store)?.into_sui_system_state_summary(); - let epoch_event = transactions + let epoch_event_opt = transactions .iter() - .flat_map(|t| t.events.as_ref().map(|e| &e.data)) - .flatten() - .find(|ev| ev.is_system_epoch_info_event()) - .unwrap_or_else(|| { - panic!( - "Can't find SystemEpochInfoEvent in epoch end checkpoint {}", - checkpoint_summary.sequence_number() - ) - }); - - let event = bcs::from_bytes::(&epoch_event.contents)?; + .find_map(|t| { + t.events.as_ref()?.data.iter().find_map(|ev| { + if ev.is_system_epoch_info_event() { + Some(bcs::from_bytes::(&ev.contents)) + } else { + None + } + }) + }) + .transpose()?; + if epoch_event_opt.is_none() { + warn!( + "No SystemEpochInfoEvent found at end of epoch {}, some epoch data will be set to default.", + checkpoint_summary.epoch, + ); + assert!( + system_state_summary.safe_mode, + "Sui is not in safe mode but no SystemEpochInfoEvent found at end of epoch {}", + checkpoint_summary.epoch + ); + } // At some point while committing data in epoch X - 1, we will encounter a new epoch X. We // want to retrieve X - 2's network total transactions to calculate the number of // transactions that occurred in epoch X - 1. 
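Because `network_total_transactions` is cumulative, the number of transactions inside a single epoch is the difference between the total recorded at its last checkpoint and the total recorded at the end of the previous epoch. A tiny sketch of that arithmetic, using a hypothetical helper rather than the handler's own code:

```rust
/// Cumulative network totals recorded at the last checkpoint of each epoch:
/// index 0 holds the total at the end of epoch 0, index 1 at the end of epoch 1, ...
fn txs_in_epoch(totals_at_epoch_end: &[u64], epoch: usize) -> u64 {
    let end = totals_at_epoch_end[epoch];
    // The first epoch starts from zero transactions; later epochs subtract the
    // running total captured at the end of the previous epoch.
    let start = if epoch == 0 { 0 } else { totals_at_epoch_end[epoch - 1] };
    end - start
}

fn main() {
    // Totals at the end of epochs 0, 1 and 2.
    let totals = [100, 250, 400];
    assert_eq!(txs_in_epoch(&totals, 0), 100);
    assert_eq!(txs_in_epoch(&totals, 1), 150); // 250 - 100
    assert_eq!(txs_in_epoch(&totals, 2), 150); // 400 - 250
}
```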
- let network_tx_count_prev_epoch = match system_state_summary.epoch { + let first_tx_sequence_number = match system_state_summary.epoch { // If first epoch change, this number is 0 1 => Ok(0), _ => { @@ -204,18 +223,20 @@ impl CheckpointHandler { } }?; + let epoch_end_info = EpochEndInfo::new(epoch_event_opt.as_ref()); + let epoch_start_info = EpochStartInfo::new( + checkpoint_summary.sequence_number.saturating_add(1), + checkpoint_summary.network_total_transactions, + epoch_event_opt.as_ref(), + ); + Ok(Some(EpochToCommit { last_epoch: Some(EndOfEpochUpdate::new( checkpoint_summary, - &event, - network_tx_count_prev_epoch, + first_tx_sequence_number, + epoch_end_info, )), - new_epoch: StartOfEpochUpdate::new( - system_state_summary, - checkpoint_summary.sequence_number + 1, // first_checkpoint_id - checkpoint_summary.network_total_transactions, - Some(&event), - ), + new_epoch: StartOfEpochUpdate::new(system_state_summary, epoch_start_info), })) } diff --git a/crates/sui-indexer/src/handlers/committer.rs b/crates/sui-indexer/src/handlers/committer.rs index e9b06191047e8..b63e8b42a981e 100644 --- a/crates/sui-indexer/src/handlers/committer.rs +++ b/crates/sui-indexer/src/handlers/committer.rs @@ -3,14 +3,16 @@ use std::collections::{BTreeMap, HashMap}; +use sui_synthetic_ingestion::IndexerProgress; +use sui_types::messages_checkpoint::CheckpointSequenceNumber; use tap::tap::TapFallible; +use tokio::sync::watch; use tokio_util::sync::CancellationToken; use tracing::instrument; use tracing::{error, info}; -use sui_types::messages_checkpoint::CheckpointSequenceNumber; - use crate::metrics::IndexerMetrics; +use crate::models::raw_checkpoints::StoredRawCheckpoint; use crate::store::IndexerStore; use crate::types::IndexerResult; @@ -22,8 +24,10 @@ pub async fn start_tx_checkpoint_commit_task( state: S, metrics: IndexerMetrics, tx_indexing_receiver: mysten_metrics::metered_channel::Receiver, - mut next_checkpoint_sequence_number: CheckpointSequenceNumber, cancel: CancellationToken, + mut committed_checkpoints_tx: Option>>, + mut next_checkpoint_sequence_number: CheckpointSequenceNumber, + end_checkpoint_opt: Option, ) -> IndexerResult<()> where S: IndexerStore + Clone + Sync + Send + 'static, @@ -60,7 +64,14 @@ where // The batch will consist of contiguous checkpoints and at most one epoch boundary at // the end. 
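The commit loop flushes a batch either when it reaches the configured size or as soon as an epoch boundary lands in it, and (with this change) stops once the configured end checkpoint has been passed. A compact, standalone sketch of that batching policy under simplifying assumptions: the item type is hypothetical, and `flush` stands in for the real `commit_checkpoints` call.

```rust
/// One checkpoint's worth of indexed data, flagged if it closes an epoch.
struct Indexed {
    checkpoint: u64,
    ends_epoch: bool,
}

/// Drain `stream` into batches, flushing on size or on an epoch boundary, and
/// stopping after `end_checkpoint` (if one is configured).
fn run_committer(
    stream: Vec<Indexed>,
    batch_size: usize,
    end_checkpoint: Option<u64>,
    mut flush: impl FnMut(&[Indexed]),
) {
    let mut batch: Vec<Indexed> = Vec::new();
    for item in stream {
        let checkpoint = item.checkpoint;
        let ends_epoch = item.ends_epoch;
        batch.push(item);
        // A batch holds contiguous checkpoints and at most one epoch boundary,
        // which must be its final element.
        if batch.len() == batch_size || ends_epoch {
            flush(&batch);
            batch.clear();
        }
        if end_checkpoint.is_some_and(|end| checkpoint >= end) {
            break;
        }
    }
    if !batch.is_empty() {
        flush(&batch);
    }
}

fn main() {
    let stream = (0..7u64)
        .map(|cp| Indexed { checkpoint: cp, ends_epoch: cp == 2 })
        .collect();
    let mut flushed = Vec::new();
    run_committer(stream, 3, Some(5), |batch| {
        flushed.push(batch.iter().map(|i| i.checkpoint).collect::<Vec<_>>());
    });
    // The epoch boundary at 2 closes the first batch; the run stops after checkpoint 5.
    assert_eq!(flushed, vec![vec![0, 1, 2], vec![3, 4, 5]]);
}
```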
if batch.len() == checkpoint_commit_batch_size || epoch.is_some() { - commit_checkpoints(&state, batch, epoch, &metrics).await; + commit_checkpoints( + &state, + batch, + epoch, + &metrics, + &mut committed_checkpoints_tx, + ) + .await; batch = vec![]; } if let Some(epoch_number) = epoch_number_option { @@ -72,11 +83,24 @@ where ); })?; } + // stop adding to the commit batch if we've reached the end checkpoint + if let Some(end_checkpoint_sequence_number) = end_checkpoint_opt { + if next_checkpoint_sequence_number > end_checkpoint_sequence_number { + break; + } + } } if !batch.is_empty() { - commit_checkpoints(&state, batch, None, &metrics).await; + commit_checkpoints(&state, batch, None, &metrics, &mut committed_checkpoints_tx).await; batch = vec![]; } + + // stop the commit task if we've reached the end checkpoint + if let Some(end_checkpoint_sequence_number) = end_checkpoint_opt { + if next_checkpoint_sequence_number > end_checkpoint_sequence_number { + break; + } + } } Ok(()) } @@ -95,6 +119,7 @@ async fn commit_checkpoints( indexed_checkpoint_batch: Vec, epoch: Option, metrics: &IndexerMetrics, + committed_checkpoints_tx: &mut Option>>, ) where S: IndexerStore + Clone + Sync + Send + 'static, { @@ -135,8 +160,13 @@ async fn commit_checkpoints( packages_batch.push(packages); } - let first_checkpoint_seq = checkpoint_batch.first().as_ref().unwrap().sequence_number; - let committer_watermark = CommitterWatermark::from(checkpoint_batch.last().unwrap()); + let first_checkpoint_seq = checkpoint_batch.first().unwrap().sequence_number; + let last_checkpoint = checkpoint_batch.last().unwrap(); + let indexer_progress = IndexerProgress { + checkpoint: last_checkpoint.sequence_number, + network_total_transactions: last_checkpoint.network_total_transactions, + }; + let committer_watermark = CommitterWatermark::from(last_checkpoint); let guard = metrics.checkpoint_db_commit_latency.start_timer(); let tx_batch = tx_batch.into_iter().flatten().collect::>(); @@ -156,7 +186,7 @@ async fn commit_checkpoints( let raw_checkpoints_batch = checkpoint_batch .iter() .map(|c| c.into()) - .collect::>(); + .collect::>(); { let _step_1_guard = metrics.checkpoint_db_commit_latency_step_1.start_timer(); @@ -267,4 +297,13 @@ async fn commit_checkpoints( metrics .thousand_transaction_avg_db_commit_latency .observe(elapsed * 1000.0 / tx_count as f64); + + if let Some(committed_checkpoints_tx) = committed_checkpoints_tx.as_mut() { + if let Err(err) = committed_checkpoints_tx.send(Some(indexer_progress)) { + error!( + "Failed to send committed checkpoints to the watch channel: {}", + err + ); + } + } } diff --git a/crates/sui-indexer/src/handlers/mod.rs b/crates/sui-indexer/src/handlers/mod.rs index a6c6412f3a42c..403ee8e22706c 100644 --- a/crates/sui-indexer/src/handlers/mod.rs +++ b/crates/sui-indexer/src/handlers/mod.rs @@ -92,6 +92,8 @@ impl CommonHandler { &self, cp_receiver: mysten_metrics::metered_channel::Receiver<(CommitterWatermark, T)>, cancel: CancellationToken, + start_checkpoint: u64, + end_checkpoint_opt: Option, ) -> IndexerResult<()> { let checkpoint_commit_batch_size = std::env::var("CHECKPOINT_COMMIT_BATCH_SIZE") .unwrap_or(CHECKPOINT_COMMIT_BATCH_SIZE.to_string()) @@ -104,12 +106,7 @@ impl CommonHandler { // just the checkpoint sequence number, and the tuple is (CommitterWatermark, T). let mut unprocessed: BTreeMap = BTreeMap::new(); let mut tuple_batch = vec![]; - let mut next_cp_to_process = self - .handler - .get_watermark_hi() - .await? 
- .map(|n| n.saturating_add(1)) - .unwrap_or_default(); + let mut next_cp_to_process = start_checkpoint; loop { if cancel.is_cancelled() { @@ -140,7 +137,12 @@ impl CommonHandler { // Process unprocessed checkpoints, even no new checkpoints from stream let checkpoint_lag_limiter = self.handler.get_max_committable_checkpoint().await?; - while next_cp_to_process <= checkpoint_lag_limiter { + let max_commitable_cp = std::cmp::min( + checkpoint_lag_limiter, + end_checkpoint_opt.unwrap_or(u64::MAX), + ); + // Stop pushing to tuple_batch if we've reached the end checkpoint. + while next_cp_to_process <= max_commitable_cp { if let Some(data_tuple) = unprocessed.remove(&next_cp_to_process) { tuple_batch.push(data_tuple); next_cp_to_process += 1; @@ -162,6 +164,16 @@ impl CommonHandler { self.handler.set_watermark_hi(committer_watermark).await?; tuple_batch = vec![]; } + + if let Some(end_checkpoint) = end_checkpoint_opt { + if next_cp_to_process > end_checkpoint { + tracing::info!( + "Reached end checkpoint, stopping handler {}...", + self.handler.name() + ); + return Ok(()); + } + } } Err(IndexerError::ChannelClosed(format!( "Checkpoint channel is closed unexpectedly for handler {}", diff --git a/crates/sui-indexer/src/handlers/objects_snapshot_handler.rs b/crates/sui-indexer/src/handlers/objects_snapshot_handler.rs index 816b416fc3743..d37d532827947 100644 --- a/crates/sui-indexer/src/handlers/objects_snapshot_handler.rs +++ b/crates/sui-indexer/src/handlers/objects_snapshot_handler.rs @@ -90,6 +90,8 @@ pub async fn start_objects_snapshot_handler( metrics: IndexerMetrics, snapshot_config: SnapshotLagConfig, cancel: CancellationToken, + start_checkpoint_opt: Option, + end_checkpoint_opt: Option, ) -> IndexerResult<(ObjectsSnapshotHandler, u64)> { info!("Starting object snapshot handler..."); @@ -104,10 +106,20 @@ pub async fn start_objects_snapshot_handler( let objects_snapshot_handler = ObjectsSnapshotHandler::new(store.clone(), sender, metrics.clone(), snapshot_config); - let watermark_hi = objects_snapshot_handler.get_watermark_hi().await?; + let next_cp_from_db = objects_snapshot_handler + .get_watermark_hi() + .await? 
+ .map(|cp| cp.saturating_add(1)) + .unwrap_or_default(); + let start_checkpoint = start_checkpoint_opt.unwrap_or(next_cp_from_db); let common_handler = CommonHandler::new(Box::new(objects_snapshot_handler.clone())); - spawn_monitored_task!(common_handler.start_transform_and_load(receiver, cancel)); - Ok((objects_snapshot_handler, watermark_hi.unwrap_or_default())) + spawn_monitored_task!(common_handler.start_transform_and_load( + receiver, + cancel, + start_checkpoint, + end_checkpoint_opt, + )); + Ok((objects_snapshot_handler, start_checkpoint)) } impl ObjectsSnapshotHandler { diff --git a/crates/sui-indexer/src/indexer.rs b/crates/sui-indexer/src/indexer.rs index 240e295179094..d1819a90a7416 100644 --- a/crates/sui-indexer/src/indexer.rs +++ b/crates/sui-indexer/src/indexer.rs @@ -6,7 +6,7 @@ use std::env; use anyhow::Result; use prometheus::Registry; -use tokio::sync::oneshot; +use tokio::sync::{oneshot, watch}; use tokio_util::sync::CancellationToken; use tracing::info; @@ -16,6 +16,7 @@ use mysten_metrics::spawn_monitored_task; use sui_data_ingestion_core::{ DataIngestionMetrics, IndexerExecutor, ProgressStore, ReaderOptions, WorkerPool, }; +use sui_synthetic_ingestion::IndexerProgress; use sui_types::messages_checkpoint::CheckpointSequenceNumber; use crate::build_json_rpc_server; @@ -33,12 +34,13 @@ pub struct Indexer; impl Indexer { pub async fn start_writer( - config: &IngestionConfig, + config: IngestionConfig, store: PgIndexerStore, metrics: IndexerMetrics, snapshot_config: SnapshotLagConfig, retention_config: Option, cancel: CancellationToken, + committed_checkpoints_tx: Option>>, ) -> Result<(), IndexerError> { info!( "Sui Indexer Writer (version {:?}) started...", @@ -46,17 +48,11 @@ impl Indexer { ); info!("Sui Indexer Writer config: {config:?}",); - let primary_watermark = store - .get_latest_checkpoint_sequence_number() - .await - .expect("Failed to get latest tx checkpoint sequence number from DB") - .map(|seq| seq + 1) - .unwrap_or_default(); - let extra_reader_options = ReaderOptions { batch_size: config.checkpoint_download_queue_size, timeout_secs: config.checkpoint_download_timeout, data_limit: config.checkpoint_download_queue_size_bytes, + gc_checkpoint_files: config.gc_checkpoint_files, ..Default::default() }; @@ -66,6 +62,8 @@ impl Indexer { metrics.clone(), snapshot_config, cancel.clone(), + config.start_checkpoint, + config.end_checkpoint, ) .await?; @@ -87,6 +85,16 @@ impl Indexer { let mut exit_senders = vec![]; let mut executors = vec![]; + + let (worker, primary_watermark) = new_handlers( + store, + metrics, + cancel.clone(), + committed_checkpoints_tx, + config.start_checkpoint, + config.end_checkpoint, + ) + .await?; // Ingestion task watermarks are snapshotted once on indexer startup based on the // corresponding watermark table before being handed off to the ingestion task. 
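Both the primary handler and the snapshot handler now resolve their starting point the same way: an explicit start-checkpoint override wins, otherwise ingestion resumes one past the last committed watermark, or from zero on a fresh database. A small sketch of that resolution as a hypothetical free function:

```rust
/// Resolve where ingestion should start.
///
/// * `configured` -- optional override from the CLI / env (`START_CHECKPOINT`).
/// * `watermark_hi` -- highest checkpoint already committed, if any.
fn resolve_start_checkpoint(configured: Option<u64>, watermark_hi: Option<u64>) -> u64 {
    configured.unwrap_or_else(|| {
        watermark_hi
            .map(|cp| cp.saturating_add(1)) // resume after the last committed checkpoint
            .unwrap_or_default()            // fresh database: start from the beginning
    })
}

fn main() {
    assert_eq!(resolve_start_checkpoint(None, None), 0);
    assert_eq!(resolve_start_checkpoint(None, Some(41)), 42);
    assert_eq!(resolve_start_checkpoint(Some(100), Some(41)), 100);
}
```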
let progress_store = ShimIndexerProgressStore::new(vec![ @@ -98,7 +106,7 @@ impl Indexer { 2, DataIngestionMetrics::new(&Registry::new()), ); - let worker = new_handlers(store, metrics, primary_watermark, cancel.clone()).await?; + let worker_pool = WorkerPool::new( worker, "primary".to_string(), diff --git a/crates/sui-indexer/src/lib.rs b/crates/sui-indexer/src/lib.rs index e759370c72798..f40b0fdfcfb8a 100644 --- a/crates/sui-indexer/src/lib.rs +++ b/crates/sui-indexer/src/lib.rs @@ -27,6 +27,7 @@ use errors::IndexerError; pub mod apis; pub mod backfill; +pub mod benchmark; pub mod config; pub mod database; pub mod db; diff --git a/crates/sui-indexer/src/main.rs b/crates/sui-indexer/src/main.rs index 8978d072d8dea..85782cff9689e 100644 --- a/crates/sui-indexer/src/main.rs +++ b/crates/sui-indexer/src/main.rs @@ -3,8 +3,10 @@ use clap::Parser; use sui_indexer::backfill::backfill_runner::BackfillRunner; +use sui_indexer::benchmark::run_indexer_benchmark; use sui_indexer::config::{Command, UploadOptions}; use sui_indexer::database::ConnectionPool; +use sui_indexer::db::setup_postgres::clear_database; use sui_indexer::db::{ check_db_migration_consistency, check_prunable_tables_valid, reset_database, run_migrations, }; @@ -55,12 +57,13 @@ async fn main() -> anyhow::Result<()> { let store = PgIndexerStore::new(pool, upload_options, indexer_metrics.clone()); Indexer::start_writer( - &ingestion_config, + ingestion_config, store, indexer_metrics, snapshot_config, retention_config, CancellationToken::new(), + None, ) .await?; } @@ -70,14 +73,21 @@ async fn main() -> anyhow::Result<()> { Indexer::start_reader(&json_rpc_config, ®istry, pool, CancellationToken::new()) .await?; } - Command::ResetDatabase { force } => { + Command::ResetDatabase { + force, + skip_migrations, + } => { if !force { return Err(anyhow::anyhow!( "Resetting the DB requires use of the `--force` flag", )); } - reset_database(pool.dedicated_connection().await?).await?; + if skip_migrations { + clear_database(&mut pool.dedicated_connection().await?).await?; + } else { + reset_database(pool.dedicated_connection().await?).await?; + } } Command::RunMigrations => { run_migrations(pool.dedicated_connection().await?).await?; @@ -98,6 +108,9 @@ async fn main() -> anyhow::Result<()> { IndexerFormalSnapshotRestorer::new(store, restore_config).await?; formal_restorer.restore().await?; } + Command::Benchmark(benchmark_config) => { + run_indexer_benchmark(benchmark_config, pool, indexer_metrics).await; + } } Ok(()) diff --git a/crates/sui-indexer/src/models/epoch.rs b/crates/sui-indexer/src/models/epoch.rs index 0918e50c72c35..d8e943f4c245c 100644 --- a/crates/sui-indexer/src/models/epoch.rs +++ b/crates/sui-indexer/src/models/epoch.rs @@ -117,36 +117,81 @@ pub struct QueryableEpochSystemState { pub system_state: Vec, } -impl StartOfEpochUpdate { +#[derive(Default)] +pub struct EpochStartInfo { + pub first_checkpoint_id: u64, + pub first_tx_sequence_number: u64, + pub total_stake: u64, + pub storage_fund_balance: u64, +} + +impl EpochStartInfo { pub fn new( - new_system_state_summary: SuiSystemStateSummary, first_checkpoint_id: u64, first_tx_sequence_number: u64, - event: Option<&SystemEpochInfoEvent>, + epoch_event_opt: Option<&SystemEpochInfoEvent>, + ) -> Self { + Self { + first_checkpoint_id, + first_tx_sequence_number, + total_stake: epoch_event_opt.map(|e| e.total_stake).unwrap_or_default(), + storage_fund_balance: epoch_event_opt + .map(|e| e.storage_fund_balance) + .unwrap_or_default(), + } + } +} + +impl StartOfEpochUpdate { + pub fn 
new( + new_system_state_summary: SuiSystemStateSummary, + epoch_start_info: EpochStartInfo, ) -> Self { Self { epoch: new_system_state_summary.epoch as i64, system_state_summary_json: serde_json::to_value(new_system_state_summary.clone()) .unwrap(), - first_checkpoint_id: first_checkpoint_id as i64, - first_tx_sequence_number: first_tx_sequence_number as i64, + first_checkpoint_id: epoch_start_info.first_checkpoint_id as i64, + first_tx_sequence_number: epoch_start_info.first_tx_sequence_number as i64, epoch_start_timestamp: new_system_state_summary.epoch_start_timestamp_ms as i64, reference_gas_price: new_system_state_summary.reference_gas_price as i64, protocol_version: new_system_state_summary.protocol_version as i64, - // NOTE: total_stake and storage_fund_balance are about new epoch, - // although the event is generated at the end of the previous epoch, - // the event is optional b/c no such event for the first epoch. - total_stake: event.map(|e| e.total_stake as i64).unwrap_or(0), - storage_fund_balance: event.map(|e| e.storage_fund_balance as i64).unwrap_or(0), + total_stake: epoch_start_info.total_stake as i64, + storage_fund_balance: epoch_start_info.storage_fund_balance as i64, } } } +#[derive(Default)] +pub struct EpochEndInfo { + pub storage_fund_reinvestment: u64, + pub storage_charge: u64, + pub storage_rebate: u64, + pub leftover_storage_fund_inflow: u64, + pub stake_subsidy_amount: u64, + pub total_gas_fees: u64, + pub total_stake_rewards_distributed: u64, +} + +impl EpochEndInfo { + pub fn new(epoch_event_opt: Option<&SystemEpochInfoEvent>) -> Self { + epoch_event_opt.map_or_else(Self::default, |epoch_event| Self { + storage_fund_reinvestment: epoch_event.storage_fund_reinvestment, + storage_charge: epoch_event.storage_charge, + storage_rebate: epoch_event.storage_rebate, + leftover_storage_fund_inflow: epoch_event.leftover_storage_fund_inflow, + stake_subsidy_amount: epoch_event.stake_subsidy_amount, + total_gas_fees: epoch_event.total_gas_fees, + total_stake_rewards_distributed: epoch_event.total_stake_rewards_distributed, + }) + } +} + impl EndOfEpochUpdate { pub fn new( last_checkpoint_summary: &CertifiedCheckpointSummary, - event: &SystemEpochInfoEvent, first_tx_sequence_number: u64, + epoch_end_info: EpochEndInfo, ) -> Self { Self { epoch: last_checkpoint_summary.epoch as i64, @@ -154,13 +199,13 @@ impl EndOfEpochUpdate { - first_tx_sequence_number) as i64, last_checkpoint_id: *last_checkpoint_summary.sequence_number() as i64, epoch_end_timestamp: last_checkpoint_summary.timestamp_ms as i64, - storage_fund_reinvestment: event.storage_fund_reinvestment as i64, - storage_charge: event.storage_charge as i64, - storage_rebate: event.storage_rebate as i64, - leftover_storage_fund_inflow: event.leftover_storage_fund_inflow as i64, - stake_subsidy_amount: event.stake_subsidy_amount as i64, - total_gas_fees: event.total_gas_fees as i64, - total_stake_rewards_distributed: event.total_stake_rewards_distributed as i64, + storage_fund_reinvestment: epoch_end_info.storage_fund_reinvestment as i64, + storage_charge: epoch_end_info.storage_charge as i64, + storage_rebate: epoch_end_info.storage_rebate as i64, + leftover_storage_fund_inflow: epoch_end_info.leftover_storage_fund_inflow as i64, + stake_subsidy_amount: epoch_end_info.stake_subsidy_amount as i64, + total_gas_fees: epoch_end_info.total_gas_fees as i64, + total_stake_rewards_distributed: epoch_end_info.total_stake_rewards_distributed as i64, epoch_commitments: bcs::to_bytes( &last_checkpoint_summary .end_of_epoch_data 
diff --git a/crates/sui-indexer/src/schema.rs b/crates/sui-indexer/src/schema.rs index aceb54597c9c5..447b45557922c 100644 --- a/crates/sui-indexer/src/schema.rs +++ b/crates/sui-indexer/src/schema.rs @@ -354,21 +354,6 @@ diesel::table! { } } -diesel::table! { - tx_recipients (recipient, tx_sequence_number) { - tx_sequence_number -> Int8, - recipient -> Bytea, - sender -> Bytea, - } -} - -diesel::table! { - tx_senders (sender, tx_sequence_number) { - tx_sequence_number -> Int8, - sender -> Bytea, - } -} - diesel::table! { watermarks (pipeline) { pipeline -> Text, @@ -415,7 +400,5 @@ diesel::allow_tables_to_appear_in_same_query!( tx_digests, tx_input_objects, tx_kinds, - tx_recipients, - tx_senders, watermarks, ); diff --git a/crates/sui-indexer/src/store/pg_indexer_store.rs b/crates/sui-indexer/src/store/pg_indexer_store.rs index 2f9aaa5d81cb3..b1d1af7b31ed6 100644 --- a/crates/sui-indexer/src/store/pg_indexer_store.rs +++ b/crates/sui-indexer/src/store/pg_indexer_store.rs @@ -298,13 +298,10 @@ impl PgIndexerStore { let mut connection = self.pool.get().await?; - watermarks::table - .select(watermarks::checkpoint_hi_inclusive) - .filter(watermarks::pipeline.eq("objects_snapshot")) - .first::(&mut connection) + objects_snapshot::table + .select(max(objects_snapshot::checkpoint_sequence_number)) + .first::>(&mut connection) .await - // Handle case where the watermark is not set yet - .optional() .map_err(Into::into) .map(|v| v.map(|v| v as u64)) .context( diff --git a/crates/sui-indexer/src/test_utils.rs b/crates/sui-indexer/src/test_utils.rs index 6a208f8e4c6db..431d0dc5854bc 100644 --- a/crates/sui-indexer/src/test_utils.rs +++ b/crates/sui-indexer/src/test_utils.rs @@ -75,6 +75,8 @@ pub async fn start_indexer_writer_for_testing( retention_config: Option, data_ingestion_path: Option, cancel: Option, + start_checkpoint: Option, + end_checkpoint: Option, ) -> ( PgIndexerStore, JoinHandle>, @@ -117,18 +119,23 @@ pub async fn start_indexer_writer_for_testing( crate::db::reset_database(connection).await.unwrap(); let store_clone = store.clone(); - let mut ingestion_config = IngestionConfig::default(); + let mut ingestion_config = IngestionConfig { + start_checkpoint, + end_checkpoint, + ..Default::default() + }; ingestion_config.sources.data_ingestion_path = data_ingestion_path; let token_clone = token.clone(); tokio::spawn(async move { Indexer::start_writer( - &ingestion_config, + ingestion_config, store_clone, indexer_metrics, snapshot_config, retention_config, token_clone, + None, ) .await }) @@ -250,6 +257,42 @@ pub async fn set_up( None, Some(data_ingestion_path), None, /* cancel */ + None, /* start_checkpoint */ + None, /* end_checkpoint */ + ) + .await; + (server_handle, pg_store, pg_handle, database) +} + +pub async fn set_up_with_start_and_end_checkpoints( + sim: Arc, + data_ingestion_path: PathBuf, + start_checkpoint: u64, + end_checkpoint: u64, +) -> ( + JoinHandle<()>, + PgIndexerStore, + JoinHandle>, + TempDb, +) { + let database = TempDb::new().unwrap(); + let server_url: SocketAddr = format!("127.0.0.1:{}", get_available_port()) + .parse() + .unwrap(); + let server_handle = tokio::spawn(async move { + sui_rest_api::RestService::new_without_version(sim) + .start_service(server_url) + .await; + }); + // Starts indexer + let (pg_store, pg_handle, _) = start_indexer_writer_for_testing( + database.database().url().as_str().to_owned(), + None, + None, + Some(data_ingestion_path), + None, /* cancel */ + Some(start_checkpoint), + Some(end_checkpoint), ) .await; (server_handle, pg_store, 
pg_handle, database) diff --git a/crates/sui-indexer/tests/ingestion_tests.rs b/crates/sui-indexer/tests/ingestion_tests.rs index f2207d5091783..2b6a31286b27a 100644 --- a/crates/sui-indexer/tests/ingestion_tests.rs +++ b/crates/sui-indexer/tests/ingestion_tests.rs @@ -14,7 +14,9 @@ use sui_indexer::models::{ }; use sui_indexer::schema::{checkpoints, objects, objects_snapshot, transactions}; use sui_indexer::store::indexer_store::IndexerStore; -use sui_indexer::test_utils::{set_up, wait_for_checkpoint, wait_for_objects_snapshot}; +use sui_indexer::test_utils::{ + set_up, set_up_with_start_and_end_checkpoints, wait_for_checkpoint, wait_for_objects_snapshot, +}; use sui_indexer::types::EventIndex; use sui_indexer::types::IndexedDeletedObject; use sui_indexer::types::IndexedObject; @@ -71,6 +73,72 @@ pub async fn test_transaction_table() -> Result<(), IndexerError> { Ok(()) } +#[tokio::test] +pub async fn test_checkpoint_range_ingestion() -> Result<(), IndexerError> { + let tempdir = tempdir().unwrap(); + let mut sim = Simulacrum::new(); + let data_ingestion_path = tempdir.path().to_path_buf(); + sim.set_data_ingestion_path(data_ingestion_path.clone()); + + // Create multiple checkpoints + for _ in 0..10 { + let transfer_recipient = SuiAddress::random_for_testing_only(); + let (transaction, _) = sim.transfer_txn(transfer_recipient); + let (_, err) = sim.execute_transaction(transaction).unwrap(); + assert!(err.is_none()); + sim.create_checkpoint(); + } + + // Set up indexer with specific start and end checkpoints + let start_checkpoint = 2; + let end_checkpoint = 4; + let (_, pg_store, _, _database) = set_up_with_start_and_end_checkpoints( + Arc::new(sim), + data_ingestion_path, + start_checkpoint, + end_checkpoint, + ) + .await; + + // Wait for the indexer to catch up to the end checkpoint + wait_for_checkpoint(&pg_store, end_checkpoint).await?; + + // Verify that only checkpoints within the specified range were ingested + let mut connection = pg_store.pool().dedicated_connection().await.unwrap(); + let checkpoint_count: i64 = checkpoints::table + .count() + .get_result(&mut connection) + .await + .expect("Failed to count checkpoints"); + assert_eq!(checkpoint_count, 3, "Expected 3 checkpoints to be ingested"); + + // Verify the range of ingested checkpoints + let min_checkpoint = checkpoints::table + .select(diesel::dsl::min(checkpoints::sequence_number)) + .first::>(&mut connection) + .await + .expect("Failed to get min checkpoint") + .expect("Min checkpoint should be Some"); + let max_checkpoint = checkpoints::table + .select(diesel::dsl::max(checkpoints::sequence_number)) + .first::>(&mut connection) + .await + .expect("Failed to get max checkpoint") + .expect("Max checkpoint should be Some"); + assert_eq!( + min_checkpoint, start_checkpoint as i64, + "Minimum ingested checkpoint should be {}", + start_checkpoint + ); + assert_eq!( + max_checkpoint, end_checkpoint as i64, + "Maximum ingested checkpoint should be {}", + end_checkpoint + ); + + Ok(()) +} + #[tokio::test] pub async fn test_object_type() -> Result<(), IndexerError> { let tempdir = tempdir().unwrap(); diff --git a/crates/sui-indexer/tests/json_rpc_tests.rs b/crates/sui-indexer/tests/json_rpc_tests.rs new file mode 100644 index 0000000000000..15e501a5f0aa2 --- /dev/null +++ b/crates/sui-indexer/tests/json_rpc_tests.rs @@ -0,0 +1,243 @@ +// Copyright (c) Mysten Labs, Inc. 
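The new `start_checkpoint`/`end_checkpoint` options threaded through `start_indexer_writer_for_testing` are inclusive bounds, which is why `test_checkpoint_range_ingestion` expects three rows for the range 2..=4. A hedged, self-contained sketch of the same struct-update configuration pattern follows; `IngestionConfig` here is a stand-in, not the real sui-indexer type.

```rust
// Stand-in config: optional inclusive checkpoint bounds layered over defaults
// with struct-update syntax, as in the test setup above.
#[derive(Debug, Default)]
struct IngestionConfig {
    start_checkpoint: Option<u64>,
    end_checkpoint: Option<u64>,
    batch_size: usize,
}

fn bounded(start: Option<u64>, end: Option<u64>) -> IngestionConfig {
    IngestionConfig {
        start_checkpoint: start,
        end_checkpoint: end,
        ..Default::default()
    }
}

fn main() {
    let cfg = bounded(Some(2), Some(4));
    // An inclusive range 2..=4 covers three checkpoints, matching the
    // `assert_eq!(checkpoint_count, 3, ...)` assertion in the ingestion test.
    let count = cfg.end_checkpoint.unwrap() - cfg.start_checkpoint.unwrap() + 1;
    assert_eq!(count, 3);
    println!("batch_size defaults to {}", cfg.batch_size);
}
```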
+// SPDX-License-Identifier: Apache-2.0 + +use std::path::PathBuf; + +use sui_json_rpc_api::{CoinReadApiClient, IndexerApiClient, ReadApiClient}; +use sui_json_rpc_types::{ + CoinPage, EventFilter, SuiObjectDataOptions, SuiObjectResponse, SuiObjectResponseQuery, +}; +use sui_swarm_config::genesis_config::DEFAULT_GAS_AMOUNT; +use sui_test_transaction_builder::publish_package; +use sui_types::{event::EventID, transaction::CallArg}; +use test_cluster::TestClusterBuilder; + +#[tokio::test] +async fn test_get_owned_objects() -> Result<(), anyhow::Error> { + let cluster = TestClusterBuilder::new() + .with_indexer_backed_rpc() + .build() + .await; + + let http_client = cluster.rpc_client(); + let address = cluster.get_address_0(); + + let data_option = SuiObjectDataOptions::new().with_owner(); + let objects = http_client + .get_owned_objects( + address, + Some(SuiObjectResponseQuery::new_with_options( + data_option.clone(), + )), + None, + None, + ) + .await? + .data; + let fullnode_objects = cluster + .fullnode_handle + .rpc_client + .get_owned_objects( + address, + Some(SuiObjectResponseQuery::new_with_options( + data_option.clone(), + )), + None, + None, + ) + .await? + .data; + assert_eq!(5, objects.len()); + // TODO: right now we compare the results from indexer and fullnode, but as we deprecate fullnode rpc, + // we should change this to compare the results with the object id/digest from genesis potentially. + assert_eq!(objects, fullnode_objects); + + for obj in &objects { + let oref = obj.clone().into_object().unwrap(); + let result = http_client + .get_object(oref.object_id, Some(data_option.clone())) + .await?; + assert!( + matches!(result, SuiObjectResponse { data: Some(object), .. } if oref.object_id == object.object_id && object.owner.unwrap().get_owner_address()? == address) + ); + } + + // Multiget objectIDs test + let object_ids: Vec<_> = objects + .iter() + .map(|o| o.object().unwrap().object_id) + .collect(); + + let object_resp = http_client + .multi_get_objects(object_ids.clone(), None) + .await?; + let fullnode_object_resp = cluster + .fullnode_handle + .rpc_client + .multi_get_objects(object_ids, None) + .await?; + assert_eq!(5, object_resp.len()); + // TODO: right now we compare the results from indexer and fullnode, but as we deprecate fullnode rpc, + // we should change this to compare the results with the object id/digest from genesis potentially. + assert_eq!(object_resp, fullnode_object_resp); + Ok(()) +} + +#[tokio::test] +async fn test_get_coins() -> Result<(), anyhow::Error> { + let cluster = TestClusterBuilder::new() + .with_indexer_backed_rpc() + .build() + .await; + let http_client = cluster.rpc_client(); + let address = cluster.get_address_0(); + + let result: CoinPage = http_client.get_coins(address, None, None, None).await?; + assert_eq!(5, result.data.len()); + assert!(!result.has_next_page); + + // We should get 0 coins for a non-existent coin type. + let result: CoinPage = http_client + .get_coins(address, Some("0x2::sui::TestCoin".into()), None, None) + .await?; + assert_eq!(0, result.data.len()); + + // We should get all the 5 coins for SUI with the right balance. + let result: CoinPage = http_client + .get_coins(address, Some("0x2::sui::SUI".into()), None, None) + .await?; + assert_eq!(5, result.data.len()); + assert_eq!(result.data[0].balance, DEFAULT_GAS_AMOUNT); + assert!(!result.has_next_page); + + // When we have more than 3 coins, we should get a next page. 
+ let result: CoinPage = http_client + .get_coins(address, Some("0x2::sui::SUI".into()), None, Some(3)) + .await?; + assert_eq!(3, result.data.len()); + assert!(result.has_next_page); + + // We should get the remaining 2 coins with the next page. + let result: CoinPage = http_client + .get_coins( + address, + Some("0x2::sui::SUI".into()), + result.next_cursor, + Some(3), + ) + .await?; + assert_eq!(2, result.data.len(), "{:?}", result); + assert!(!result.has_next_page); + + // No more coins after the last page. + let result: CoinPage = http_client + .get_coins( + address, + Some("0x2::sui::SUI".into()), + result.next_cursor, + None, + ) + .await?; + assert_eq!(0, result.data.len(), "{:?}", result); + assert!(!result.has_next_page); + + Ok(()) +} + +#[tokio::test] +async fn test_events() -> Result<(), anyhow::Error> { + let cluster = TestClusterBuilder::new() + .with_indexer_backed_rpc() + .build() + .await; + + // publish package + let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + path.push("tests/move_test_code"); + let move_package = publish_package(&cluster.wallet, path).await.0; + + // execute a transaction to generate events + let function = "emit_3"; + let arguments = vec![CallArg::Pure(bcs::to_bytes(&5u64).unwrap())]; + let transaction = cluster + .test_transaction_builder() + .await + .move_call(move_package, "events_queries", function, arguments) + .build(); + let signed_transaction = cluster.wallet.sign_transaction(&transaction); + cluster.execute_transaction(signed_transaction).await; + + // query for events + let http_client = cluster.rpc_client(); + + // start with ascending order + let event_filter = EventFilter::All([]); + let mut cursor: Option = None; + let mut limit = None; + let mut descending_order = Some(false); + let result = http_client + .query_events(event_filter.clone(), cursor, limit, descending_order) + .await?; + assert_eq!(3, result.data.len()); + assert!(!result.has_next_page); + let forward_paginated_events = result.data; + + // Fetch the initial event + limit = Some(1); + let result = http_client + .query_events(event_filter.clone(), cursor, limit, descending_order) + .await?; + assert_eq!(1, result.data.len()); + assert!(result.has_next_page); + assert_eq!(forward_paginated_events[0], result.data[0]); + + // Fetch remaining events + cursor = result.next_cursor; + limit = None; + let result = http_client + .query_events(event_filter.clone(), cursor, limit, descending_order) + .await?; + assert_eq!(2, result.data.len()); + assert_eq!(forward_paginated_events[1..], result.data[..]); + + // now descending order - make sure to reset parameters + cursor = None; + descending_order = Some(true); + limit = None; + let result = http_client + .query_events(event_filter.clone(), cursor, limit, descending_order) + .await?; + assert_eq!(3, result.data.len()); + assert!(!result.has_next_page); + let backward_paginated_events = result.data; + + // Fetch the initial event + limit = Some(1); + let result = http_client + .query_events(event_filter.clone(), cursor, limit, descending_order) + .await?; + assert_eq!(1, result.data.len()); + assert!(result.has_next_page); + assert_eq!(backward_paginated_events[0], result.data[0]); + assert_eq!(forward_paginated_events[2], result.data[0]); + + // Fetch remaining events + cursor = result.next_cursor; + limit = None; + let result = http_client + .query_events(event_filter.clone(), cursor, limit, descending_order) + .await?; + assert_eq!(2, result.data.len()); + assert_eq!(backward_paginated_events[1..], result.data[..]); + 
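Both the coin and event tests above drive the same cursor protocol: request a page, check `has_next_page`, and resume from `next_cursor`. The sketch below is a small, self-contained illustration of that loop; `Page` and `fetch_page` are local stand-ins for the JSON-RPC `CoinPage`/`query_events` responses, not the real API.

```rust
// In-memory stand-in for a paginated RPC: returns up to `limit` items
// starting at `cursor`, plus the cursor/flag the caller needs to continue.
struct Page {
    data: Vec<u64>,
    next_cursor: Option<usize>,
    has_next_page: bool,
}

fn fetch_page(items: &[u64], cursor: Option<usize>, limit: usize) -> Page {
    let start = cursor.unwrap_or(0);
    let end = (start + limit).min(items.len());
    Page {
        data: items[start..end].to_vec(),
        next_cursor: (end < items.len()).then_some(end),
        has_next_page: end < items.len(),
    }
}

fn main() {
    let items: Vec<u64> = (0..5).collect();
    let mut cursor = None;
    let mut collected = Vec::new();
    // Keep requesting pages of 3 until has_next_page is false, exactly like
    // the get_coins / query_events loops in the tests above.
    loop {
        let page = fetch_page(&items, cursor, 3);
        collected.extend(page.data);
        if !page.has_next_page {
            break;
        }
        cursor = page.next_cursor;
    }
    assert_eq!(collected, items);
}
```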
+ // check that the forward and backward paginated events are in reverse order + assert_eq!( + forward_paginated_events + .into_iter() + .rev() + .collect::>(), + backward_paginated_events + ); + + Ok(()) +} diff --git a/crates/sui-indexer/tests/move_test_code/Move.toml b/crates/sui-indexer/tests/move_test_code/Move.toml new file mode 100644 index 0000000000000..09e9e50f000f0 --- /dev/null +++ b/crates/sui-indexer/tests/move_test_code/Move.toml @@ -0,0 +1,10 @@ +[package] +name = "move_test_code" +version = "0.0.1" +edition = "2024.beta" + +[dependencies] +Sui = { local = "../../../sui-framework/packages/sui-framework" } + +[addresses] +move_test_code = "0x0" diff --git a/crates/sui-indexer/tests/move_test_code/sources/events.move b/crates/sui-indexer/tests/move_test_code/sources/events.move new file mode 100644 index 0000000000000..f32cc7fe109f3 --- /dev/null +++ b/crates/sui-indexer/tests/move_test_code/sources/events.move @@ -0,0 +1,26 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + + +module move_test_code::events_queries { + use sui::event; + + public struct EventA has copy, drop { + new_value: u64 + } + + public entry fun emit_1(value: u64) { + event::emit(EventA { new_value: value }) + } + + public entry fun emit_2(value: u64) { + event::emit(EventA { new_value: value }); + event::emit(EventA { new_value: value + 1}) + } + + public entry fun emit_3(value: u64) { + event::emit(EventA { new_value: value }); + event::emit(EventA { new_value: value + 1}); + event::emit(EventA { new_value: value + 2}); + } +} diff --git a/crates/sui-json-rpc-api/src/lib.rs b/crates/sui-json-rpc-api/src/lib.rs index 4e7072c73f07e..3f14c38516083 100644 --- a/crates/sui-json-rpc-api/src/lib.rs +++ b/crates/sui-json-rpc-api/src/lib.rs @@ -337,6 +337,7 @@ pub fn read_size_from_env(var_name: &str) -> Option { .ok() } +pub const CLIENT_REQUEST_METHOD_HEADER: &str = "client-request-method"; pub const CLIENT_SDK_TYPE_HEADER: &str = "client-sdk-type"; /// The version number of the SDK itself. This can be different from the API version. pub const CLIENT_SDK_VERSION_HEADER: &str = "client-sdk-version"; diff --git a/crates/sui-json-rpc-types/src/sui_transaction.rs b/crates/sui-json-rpc-types/src/sui_transaction.rs index 67859f5591354..d67345aa35db1 100644 --- a/crates/sui-json-rpc-types/src/sui_transaction.rs +++ b/crates/sui-json-rpc-types/src/sui_transaction.rs @@ -2345,7 +2345,7 @@ impl From for SuiTransactionBlockEffects { #[serde_as] #[derive(Clone, Debug, JsonSchema, Serialize, Deserialize)] pub enum TransactionFilter { - /// Query by checkpoint. + /// CURRENTLY NOT SUPPORTED. Query by checkpoint. Checkpoint( #[schemars(with = "BigInt")] #[serde_as(as = "Readable, _>")] @@ -2369,7 +2369,7 @@ pub enum TransactionFilter { ToAddress(SuiAddress), /// Query by sender and recipient address. FromAndToAddress { from: SuiAddress, to: SuiAddress }, - /// Query txs that have a given address as sender or recipient. + /// CURRENTLY NOT SUPPORTED. Query txs that have a given address as sender or recipient. 
FromOrToAddress { addr: SuiAddress }, /// Query by transaction kind TransactionKind(String), diff --git a/crates/sui-json-rpc/src/balance_changes.rs b/crates/sui-json-rpc/src/balance_changes.rs index eaf4480832d08..60eb9199e9028 100644 --- a/crates/sui-json-rpc/src/balance_changes.rs +++ b/crates/sui-json-rpc/src/balance_changes.rs @@ -148,7 +148,7 @@ async fn fetch_coins, E>( o.owner, coin_type, // we know this is a coin, safe to unwrap - Coin::extract_balance_if_coin(&o).unwrap().unwrap(), + Coin::extract_balance_if_coin(&o).unwrap().unwrap().1, )) } } diff --git a/crates/sui-json-rpc/src/coin_api.rs b/crates/sui-json-rpc/src/coin_api.rs index aeda2cb6dcb38..43a880f525513 100644 --- a/crates/sui-json-rpc/src/coin_api.rs +++ b/crates/sui-json-rpc/src/coin_api.rs @@ -276,9 +276,7 @@ async fn find_package_object_id( spawn_monitored_task!(async move { let publish_txn_digest = state.find_publish_txn_digest(package_id)?; - let (_, effect) = state - .get_executed_transaction_and_effects(publish_txn_digest, kv_store) - .await?; + let effect = kv_store.get_fx_by_tx_digest(publish_txn_digest).await?; for ((id, _, _), _) in effect.created() { if let Ok(object_read) = state.get_object_read(&id) { @@ -290,7 +288,7 @@ async fn find_package_object_id( } } Err(SuiRpcInputError::GenericNotFound(format!( - "Cannot find object [{}] from [{}] package event.", + "Cannot find object with type [{}] from [{}] package created objects.", object_struct_tag, package_id, )) .into()) @@ -1401,8 +1399,8 @@ mod tests { .expect_find_publish_txn_digest() .return_once(move |_| Ok(transaction_digest)); mock_state - .expect_get_executed_transaction_and_effects() - .return_once(move |_, _| Ok((create_fake_transaction(), transaction_effects))); + .expect_multi_get() + .return_once(move |_, _, _| Ok((vec![], vec![Some(transaction_effects)], vec![]))); let coin_read_api = CoinReadApi::new_for_tests(Arc::new(mock_state), None); let response = coin_read_api.get_total_supply(coin_name.clone()).await; @@ -1410,9 +1408,9 @@ mod tests { assert!(response.is_err()); let error_result = response.unwrap_err(); let error_object: ErrorObjectOwned = error_result.into(); - let expected = expect!["-32602"]; + let expected = expect!["-32000"]; expected.assert_eq(&error_object.code().to_string()); - let expected = expect!["Cannot find object [0x2::coin::TreasuryCap<0xf::test_coin::TEST_COIN>] from [0x000000000000000000000000000000000000000000000000000000000000000f] package event."]; + let expected = expect!["task 1 panicked"]; expected.assert_eq(error_object.message()); } diff --git a/crates/sui-json-rpc/src/lib.rs b/crates/sui-json-rpc/src/lib.rs index 3075471af9d1b..d9704bbcef767 100644 --- a/crates/sui-json-rpc/src/lib.rs +++ b/crates/sui-json-rpc/src/lib.rs @@ -25,7 +25,8 @@ pub use balance_changes::*; pub use object_changes::*; pub use sui_config::node::ServerType; use sui_json_rpc_api::{ - CLIENT_SDK_TYPE_HEADER, CLIENT_SDK_VERSION_HEADER, CLIENT_TARGET_API_VERSION_HEADER, + CLIENT_REQUEST_METHOD_HEADER, CLIENT_SDK_TYPE_HEADER, CLIENT_SDK_VERSION_HEADER, + CLIENT_TARGET_API_VERSION_HEADER, }; use sui_open_rpc::{Module, Project}; @@ -121,6 +122,7 @@ impl JsonRpcServerBuilder { HeaderName::from_static(CLIENT_SDK_VERSION_HEADER), HeaderName::from_static(CLIENT_TARGET_API_VERSION_HEADER), HeaderName::from_static(APP_NAME_HEADER), + HeaderName::from_static(CLIENT_REQUEST_METHOD_HEADER), ]); Ok(cors) } diff --git a/crates/sui-json/Cargo.toml b/crates/sui-json/Cargo.toml index 15ae8d06f647b..c05042ef52717 100644 --- a/crates/sui-json/Cargo.toml 
+++ b/crates/sui-json/Cargo.toml @@ -18,7 +18,7 @@ sui-types.workspace = true move-binary-format.workspace = true move-bytecode-utils.workspace = true move-core-types.workspace = true -fastcrypto = { workspace = true } +fastcrypto.workspace = true [dev-dependencies] test-fuzz.workspace = true diff --git a/crates/sui-kvstore/Cargo.toml b/crates/sui-kvstore/Cargo.toml new file mode 100644 index 0000000000000..a06bacd9ace66 --- /dev/null +++ b/crates/sui-kvstore/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "sui-kvstore" +authors = ["Mysten Labs "] +license = "Apache-2.0" +publish = false +edition = "2021" +version.workspace = true + +[dependencies] +anyhow.workspace = true +async-trait.workspace = true +base64.workspace = true +bcs.workspace = true +http.workspace = true +gcp_auth.workspace = true +prometheus.workspace = true +prost.workspace = true +prost-types.workspace = true +serde.workspace = true +sui-data-ingestion-core.workspace = true +sui-types.workspace = true +telemetry-subscribers.workspace = true +tokio = { workspace = true, features = ["full"] } +tonic = {version = "0.12.2",features = ["tls", "transport"] } +tracing.workspace = true diff --git a/crates/sui-kvstore/src/bigtable/README.md b/crates/sui-kvstore/src/bigtable/README.md new file mode 100644 index 0000000000000..bc1abd9166323 --- /dev/null +++ b/crates/sui-kvstore/src/bigtable/README.md @@ -0,0 +1,16 @@ +## Setup + +### Local development +- install the `cbt` CLI tool +```sh +gcloud components install cbt +``` +- start the emulator +```sh +gcloud beta emulators bigtable start +``` +- set `BIGTABLE_EMULATOR_HOST` environment variable +```sh +$(gcloud beta emulators bigtable env-init) +``` +- Run `./src/bigtable/init.sh` to configure the emulator \ No newline at end of file diff --git a/crates/sui-kvstore/src/bigtable/client.rs b/crates/sui-kvstore/src/bigtable/client.rs new file mode 100644 index 0000000000000..9bbd7b138d023 --- /dev/null +++ b/crates/sui-kvstore/src/bigtable/client.rs @@ -0,0 +1,463 @@ +// Copyright (c) Mysten Labs, Inc. 
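The README above configures the local Bigtable emulator entirely through `BIGTABLE_EMULATOR_HOST`, which is also the only setting `BigTableClient::new_local` (later in this diff) reads before connecting. A tiny sketch of that detection step, independent of the sui-kvstore crate:

```rust
// Mirrors the emulator check in `BigTableClient::new_local`: the env var set
// by `$(gcloud beta emulators bigtable env-init)` tells the client where the
// local Bigtable emulator listens.
fn main() {
    match std::env::var("BIGTABLE_EMULATOR_HOST") {
        Ok(host) => println!("would connect to http://{host}"),
        Err(_) => eprintln!("BIGTABLE_EMULATOR_HOST is not set; run the env-init step first"),
    }
}
```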
+// SPDX-License-Identifier: Apache-2.0 + +use crate::bigtable::proto::bigtable::v2::bigtable_client::BigtableClient as BigtableInternalClient; +use crate::bigtable::proto::bigtable::v2::mutate_rows_request::Entry; +use crate::bigtable::proto::bigtable::v2::mutation::SetCell; +use crate::bigtable::proto::bigtable::v2::read_rows_response::cell_chunk::RowStatus; +use crate::bigtable::proto::bigtable::v2::{ + mutation, MutateRowsRequest, MutateRowsResponse, Mutation, ReadRowsRequest, RowSet, +}; +use crate::{Checkpoint, KeyValueStoreReader, KeyValueStoreWriter, TransactionData}; +use anyhow::{anyhow, Result}; +use async_trait::async_trait; +use gcp_auth::{Token, TokenProvider}; +use http::{HeaderValue, Request, Response}; +use std::future::Future; +use std::pin::Pin; +use std::sync::{Arc, RwLock}; +use std::task::{Context, Poll}; +use std::time::Duration; +use sui_types::base_types::TransactionDigest; +use sui_types::digests::CheckpointDigest; +use sui_types::full_checkpoint_content::CheckpointData; +use sui_types::messages_checkpoint::CheckpointSequenceNumber; +use sui_types::object::Object; +use sui_types::storage::ObjectKey; +use tonic::body::BoxBody; +use tonic::codegen::Service; +use tonic::transport::{Certificate, Channel, ClientTlsConfig}; +use tonic::Streaming; +use tracing::error; + +const OBJECTS_TABLE: &str = "objects"; +const TRANSACTIONS_TABLE: &str = "transactions"; +const CHECKPOINTS_TABLE: &str = "checkpoints"; +const CHECKPOINTS_BY_DIGEST_TABLE: &str = "checkpoints_by_digest"; + +const COLUMN_FAMILY_NAME: &str = "sui"; +const DEFAULT_COLUMN_QUALIFIER: &str = ""; +const CHECKPOINT_SUMMARY_COLUMN_QUALIFIER: &str = "s"; +const CHECKPOINT_SIGNATURES_COLUMN_QUALIFIER: &str = "sg"; +const CHECKPOINT_CONTENTS_COLUMN_QUALIFIER: &str = "c"; +const TRANSACTION_COLUMN_QUALIFIER: &str = "tx"; +const EFFECTS_COLUMN_QUALIFIER: &str = "ef"; +const EVENTS_COLUMN_QUALIFIER: &str = "ev"; +const TIMESTAMP_COLUMN_QUALIFIER: &str = "ts"; +const CHECKPOINT_NUMBER_COLUMN_QUALIFIER: &str = "cn"; + +type Bytes = Vec; + +#[derive(Clone)] +struct AuthChannel { + channel: Channel, + policy: String, + token_provider: Option>, + token: Arc>>>, +} + +#[derive(Clone)] +pub struct BigTableClient { + table_prefix: String, + client: BigtableInternalClient, +} + +#[async_trait] +impl KeyValueStoreWriter for BigTableClient { + async fn save_objects(&mut self, objects: &[&Object]) -> Result<()> { + let mut items = Vec::with_capacity(objects.len()); + for object in objects { + let object_key = ObjectKey(object.id(), object.version()); + items.push(( + Self::raw_object_key(&object_key)?, + vec![(DEFAULT_COLUMN_QUALIFIER, bcs::to_bytes(object)?)], + )); + } + self.multi_set(OBJECTS_TABLE, items).await + } + + async fn save_transactions(&mut self, transactions: &[TransactionData]) -> Result<()> { + let mut items = Vec::with_capacity(transactions.len()); + for transaction in transactions { + let cells = vec![ + ( + TRANSACTION_COLUMN_QUALIFIER, + bcs::to_bytes(&transaction.transaction)?, + ), + ( + EFFECTS_COLUMN_QUALIFIER, + bcs::to_bytes(&transaction.effects)?, + ), + (EVENTS_COLUMN_QUALIFIER, bcs::to_bytes(&transaction.events)?), + ( + TIMESTAMP_COLUMN_QUALIFIER, + bcs::to_bytes(&transaction.timestamp)?, + ), + ( + CHECKPOINT_NUMBER_COLUMN_QUALIFIER, + bcs::to_bytes(&transaction.checkpoint_number)?, + ), + ]; + items.push((transaction.transaction.digest().inner().to_vec(), cells)); + } + self.multi_set(TRANSACTIONS_TABLE, items).await + } + + async fn save_checkpoint(&mut self, checkpoint: &CheckpointData) -> 
Result<()> { + let summary = &checkpoint.checkpoint_summary.data(); + let contents = &checkpoint.checkpoint_contents; + let signatures = &checkpoint.checkpoint_summary.auth_sig(); + let key = summary.sequence_number.to_be_bytes().to_vec(); + let cells = vec![ + (CHECKPOINT_SUMMARY_COLUMN_QUALIFIER, bcs::to_bytes(summary)?), + ( + CHECKPOINT_SIGNATURES_COLUMN_QUALIFIER, + bcs::to_bytes(signatures)?, + ), + ( + CHECKPOINT_CONTENTS_COLUMN_QUALIFIER, + bcs::to_bytes(contents)?, + ), + ]; + self.multi_set(CHECKPOINTS_TABLE, [(key.clone(), cells)]) + .await?; + self.multi_set( + CHECKPOINTS_BY_DIGEST_TABLE, + [( + checkpoint.checkpoint_summary.digest().inner().to_vec(), + vec![(DEFAULT_COLUMN_QUALIFIER, key)], + )], + ) + .await + } +} + +#[async_trait] +impl KeyValueStoreReader for BigTableClient { + async fn get_objects(&mut self, object_keys: &[ObjectKey]) -> Result> { + let keys: Result<_, _> = object_keys.iter().map(Self::raw_object_key).collect(); + let mut objects = vec![]; + for row in self.multi_get(OBJECTS_TABLE, keys?).await? { + for (_, value) in row { + objects.push(bcs::from_bytes(&value)?); + } + } + Ok(objects) + } + + async fn get_transactions( + &mut self, + transactions: &[TransactionDigest], + ) -> Result> { + let keys = transactions.iter().map(|tx| tx.inner().to_vec()).collect(); + let mut result = vec![]; + for row in self.multi_get(TRANSACTIONS_TABLE, keys).await? { + let mut transaction = None; + let mut effects = None; + let mut events = None; + let mut timestamp = 0; + let mut checkpoint_number = 0; + + for (column, value) in row { + match std::str::from_utf8(&column)? { + TRANSACTION_COLUMN_QUALIFIER => transaction = Some(bcs::from_bytes(&value)?), + EFFECTS_COLUMN_QUALIFIER => effects = Some(bcs::from_bytes(&value)?), + EVENTS_COLUMN_QUALIFIER => events = Some(bcs::from_bytes(&value)?), + TIMESTAMP_COLUMN_QUALIFIER => timestamp = bcs::from_bytes(&value)?, + CHECKPOINT_NUMBER_COLUMN_QUALIFIER => { + checkpoint_number = bcs::from_bytes(&value)? + } + _ => error!("unexpected column {:?} in transactions table", column), + } + } + result.push(TransactionData { + transaction: transaction.ok_or_else(|| anyhow!("transaction field is missing"))?, + effects: effects.ok_or_else(|| anyhow!("effects field is missing"))?, + events: events.ok_or_else(|| anyhow!("events field is missing"))?, + timestamp, + checkpoint_number, + }) + } + Ok(result) + } + + async fn get_checkpoints( + &mut self, + sequence_numbers: &[CheckpointSequenceNumber], + ) -> Result> { + let keys = sequence_numbers + .iter() + .map(|sq| sq.to_be_bytes().to_vec()) + .collect(); + let mut checkpoints = vec![]; + for row in self.multi_get(CHECKPOINTS_TABLE, keys).await? { + let mut summary = None; + let mut contents = None; + let mut signatures = None; + for (column, value) in row { + match std::str::from_utf8(&column)? { + CHECKPOINT_SUMMARY_COLUMN_QUALIFIER => summary = Some(bcs::from_bytes(&value)?), + CHECKPOINT_CONTENTS_COLUMN_QUALIFIER => { + contents = Some(bcs::from_bytes(&value)?) + } + CHECKPOINT_SIGNATURES_COLUMN_QUALIFIER => { + signatures = Some(bcs::from_bytes(&value)?) 
+ } + _ => error!("unexpected column {:?} in checkpoints table", column), + } + } + let checkpoint = Checkpoint { + summary: summary.ok_or_else(|| anyhow!("summary field is missing"))?, + contents: contents.ok_or_else(|| anyhow!("contents field is missing"))?, + signatures: signatures.ok_or_else(|| anyhow!("signatures field is missing"))?, + }; + checkpoints.push(checkpoint); + } + Ok(checkpoints) + } + + async fn get_checkpoint_by_digest( + &mut self, + digest: CheckpointDigest, + ) -> Result> { + let key = digest.inner().to_vec(); + let mut response = self + .multi_get(CHECKPOINTS_BY_DIGEST_TABLE, vec![key]) + .await?; + if let Some(row) = response.pop() { + if let Some((_, value)) = row.into_iter().next() { + let sequence_number = u64::from_be_bytes(value.as_slice().try_into()?); + if let Some(chk) = self.get_checkpoints(&[sequence_number]).await?.pop() { + return Ok(Some(chk)); + } + } + } + Ok(None) + } +} + +impl BigTableClient { + pub async fn new_local(instance_id: String) -> Result { + let emulator_host = std::env::var("BIGTABLE_EMULATOR_HOST")?; + let auth_channel = AuthChannel { + channel: Channel::from_shared(format!("http://{emulator_host}"))?.connect_lazy(), + policy: "https://www.googleapis.com/auth/bigtable.data".to_string(), + token_provider: None, + token: Arc::new(RwLock::new(None)), + }; + Ok(Self { + table_prefix: format!("projects/emulator/instances/{}/tables/", instance_id), + client: BigtableInternalClient::new(auth_channel), + }) + } + + pub async fn new_remote( + instance_id: String, + is_read_only: bool, + timeout: Option, + ) -> Result { + let policy = if is_read_only { + "https://www.googleapis.com/auth/bigtable.data.readonly" + } else { + "https://www.googleapis.com/auth/bigtable.data" + }; + let token_provider = gcp_auth::provider().await?; + let tls_config = ClientTlsConfig::new() + .ca_certificate(Certificate::from_pem(include_bytes!("./proto/google.pem"))) + .domain_name("bigtable.googleapis.com"); + let mut endpoint = Channel::from_static("https://bigtable.googleapis.com") + .http2_keep_alive_interval(Duration::from_secs(60)) + .keep_alive_while_idle(true) + .tls_config(tls_config)?; + if let Some(timeout) = timeout { + endpoint = endpoint.timeout(timeout); + } + let table_prefix = format!( + "projects/{}/instances/{}/tables/", + token_provider.project_id().await?, + instance_id + ); + let auth_channel = AuthChannel { + channel: endpoint.connect_lazy(), + policy: policy.to_string(), + token_provider: Some(token_provider), + token: Arc::new(RwLock::new(None)), + }; + Ok(Self { + table_prefix, + client: BigtableInternalClient::new(auth_channel), + }) + } + + pub async fn mutate_rows( + &mut self, + request: MutateRowsRequest, + ) -> Result> { + Ok(self.client.mutate_rows(request).await?.into_inner()) + } + + pub async fn read_rows( + &mut self, + request: ReadRowsRequest, + ) -> Result, Vec<(Vec, Vec)>)>> { + let mut result = vec![]; + let mut response = self.client.read_rows(request).await?.into_inner(); + + let mut row_key = None; + let mut row = vec![]; + let mut cell_value = vec![]; + let mut cell_name = None; + let mut timestamp = 0; + + while let Some(message) = response.message().await? 
{ + for mut chunk in message.chunks.into_iter() { + // new row check + if !chunk.row_key.is_empty() { + row_key = Some(chunk.row_key); + } + match chunk.qualifier { + // new cell started + Some(qualifier) => { + if let Some(cell_name) = cell_name { + row.push((cell_name, cell_value)); + cell_value = vec![]; + } + cell_name = Some(qualifier); + timestamp = chunk.timestamp_micros; + cell_value.append(&mut chunk.value); + } + None => { + if chunk.timestamp_micros == 0 { + cell_value.append(&mut chunk.value); + } else if chunk.timestamp_micros >= timestamp { + // newer version of cell is available + timestamp = chunk.timestamp_micros; + cell_value = chunk.value; + } + } + } + if chunk.row_status.is_some() { + if let Some(RowStatus::CommitRow(_)) = chunk.row_status { + if let Some(cell_name) = cell_name { + row.push((cell_name, cell_value)); + } + if let Some(row_key) = row_key { + result.push((row_key, row)); + } + } + row_key = None; + row = vec![]; + cell_value = vec![]; + cell_name = None; + } + } + } + Ok(result) + } + + async fn multi_set( + &mut self, + table_name: &str, + values: impl IntoIterator)> + std::marker::Send, + ) -> Result<()> { + let mut entries = vec![]; + for (row_key, cells) in values { + let mutations = cells + .into_iter() + .map(|(column_name, value)| Mutation { + mutation: Some(mutation::Mutation::SetCell(SetCell { + family_name: COLUMN_FAMILY_NAME.to_string(), + column_qualifier: column_name.to_owned().into_bytes(), + // The timestamp of the cell into which new data should be written. + // Use -1 for current Bigtable server time. + timestamp_micros: -1, + value, + })), + }) + .collect(); + entries.push(Entry { row_key, mutations }); + } + let request = MutateRowsRequest { + table_name: format!("{}{}", self.table_prefix, table_name), + entries, + ..MutateRowsRequest::default() + }; + self.mutate_rows(request).await?; + Ok(()) + } + + pub async fn multi_get( + &mut self, + table_name: &str, + keys: Vec>, + ) -> Result>> { + let request = ReadRowsRequest { + table_name: format!("{}{}", self.table_prefix, table_name), + rows_limit: keys.len() as i64, + rows: Some(RowSet { + row_keys: keys, + row_ranges: vec![], + }), + ..ReadRowsRequest::default() + }; + let mut result = vec![]; + for (_, cells) in self.read_rows(request).await? 
{ + result.push(cells); + } + Ok(result) + } + + fn raw_object_key(object_key: &ObjectKey) -> Result> { + let mut raw_key = object_key.0.to_vec(); + raw_key.extend(object_key.1.value().to_be_bytes()); + Ok(raw_key) + } +} + +impl Service> for AuthChannel { + type Response = Response; + type Error = Box; + #[allow(clippy::type_complexity)] + type Future = Pin> + Send>>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.channel.poll_ready(cx).map_err(Into::into) + } + + fn call(&mut self, mut request: Request) -> Self::Future { + let cloned_channel = self.channel.clone(); + let cloned_token = self.token.clone(); + let mut inner = std::mem::replace(&mut self.channel, cloned_channel); + let policy = self.policy.clone(); + let token_provider = self.token_provider.clone(); + + let mut auth_token = None; + if token_provider.is_some() { + let guard = self.token.read().expect("failed to acquire a read lock"); + if let Some(token) = &*guard { + if !token.has_expired() { + auth_token = Some(token.clone()); + } + } + } + + Box::pin(async move { + if let Some(ref provider) = token_provider { + let token = match auth_token { + None => { + let new_token = provider.token(&[policy.as_ref()]).await?; + let mut guard = cloned_token.write().unwrap(); + *guard = Some(new_token.clone()); + new_token + } + Some(token) => token, + }; + let token_string = token.as_str().parse::()?; + let header = + HeaderValue::from_str(format!("Bearer {}", token_string.as_str()).as_str())?; + request.headers_mut().insert("authorization", header); + } + Ok(inner.call(request).await?) + }) + } +} diff --git a/crates/sui-kvstore/src/bigtable/init.sh b/crates/sui-kvstore/src/bigtable/init.sh new file mode 100755 index 0000000000000..f96ac5c1e9827 --- /dev/null +++ b/crates/sui-kvstore/src/bigtable/init.sh @@ -0,0 +1,20 @@ +# Copyright (c) Mysten Labs, Inc. +# SPDX-License-Identifier: Apache-2.0 +INSTANCE_ID=${1:-sui} +command=( + cbt + -instance + "$INSTANCE_ID" +) +if [[ -n $BIGTABLE_EMULATOR_HOST ]]; then + command+=(-project emulator) +fi + +for table in objects transactions checkpoints checkpoints_by_digest; do + ( + set -x + "${command[@]}" createtable $table + "${command[@]}" createfamily $table sui + "${command[@]}" setgcpolicy $table sui maxversions=1 + ) +done diff --git a/crates/sui-kvstore/src/bigtable/mod.rs b/crates/sui-kvstore/src/bigtable/mod.rs new file mode 100644 index 0000000000000..9be9541c15ec4 --- /dev/null +++ b/crates/sui-kvstore/src/bigtable/mod.rs @@ -0,0 +1,6 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +pub(crate) mod client; +mod proto; +pub(crate) mod worker; diff --git a/crates/sui-kvstore/src/bigtable/proto.rs b/crates/sui-kvstore/src/bigtable/proto.rs new file mode 100644 index 0000000000000..3d976cad4b54f --- /dev/null +++ b/crates/sui-kvstore/src/bigtable/proto.rs @@ -0,0 +1,14 @@ +// Copyright (c) Mysten Labs, Inc. 
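The client above keys checkpoints by `sequence_number.to_be_bytes()` and builds object keys in `raw_object_key` as the object id followed by the big-endian version. The short, self-contained illustration below shows why big-endian encodings are the right choice here: byte-wise (Bigtable row) ordering then matches numeric ordering, and all versions of one object stay adjacent.

```rust
// Big-endian byte keys sort lexicographically in the same order as the
// numbers they encode; little-endian keys do not.
fn main() {
    let a: u64 = 255;
    let b: u64 = 256;

    let be = (a.to_be_bytes(), b.to_be_bytes());
    let le = (a.to_le_bytes(), b.to_le_bytes());

    // Big-endian keys preserve numeric order...
    assert!(be.0 < be.1);
    // ...while little-endian keys would invert it in this case.
    assert!(le.0 > le.1);

    // Composite object key in the spirit of `raw_object_key`: id bytes
    // followed by the big-endian version.
    let object_id = [0xAB_u8; 32];
    let mut key = object_id.to_vec();
    key.extend(7_u64.to_be_bytes());
    assert_eq!(key.len(), 40);
}
```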
+// SPDX-License-Identifier: Apache-2.0 +#![allow(clippy::all)] +#[path = "proto"] +pub mod bigtable { + #[path = "google.bigtable.v2.rs"] + pub mod v2; +} + +#[path = "proto/google.rpc.rs"] +pub mod rpc; + +#[path = "proto/google.api.rs"] +pub mod api; diff --git a/crates/sui-kvstore/src/bigtable/proto/google.api.rs b/crates/sui-kvstore/src/bigtable/proto/google.api.rs new file mode 100644 index 0000000000000..36a4d5390e66e --- /dev/null +++ b/crates/sui-kvstore/src/bigtable/proto/google.api.rs @@ -0,0 +1,1591 @@ +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Http { + /// A list of HTTP configuration rules that apply to individual API methods. + /// + /// **NOTE:** All service configuration rules follow "last one wins" order. + #[prost(message, repeated, tag = "1")] + pub rules: ::prost::alloc::vec::Vec, + /// When set to true, URL path parameters will be fully URI-decoded except in + /// cases of single segment matches in reserved expansion, where "%2F" will be + /// left encoded. + /// + /// The default behavior is to not decode RFC 6570 reserved characters in multi + /// segment matches. + #[prost(bool, tag = "2")] + pub fully_decode_reserved_expansion: bool, +} +/// ```ignore +/// # gRPC Transcoding +/// +/// gRPC Transcoding is a feature for mapping between a gRPC method and one or +/// more HTTP REST endpoints. It allows developers to build a single API service +/// that supports both gRPC APIs and REST APIs. Many systems, including [Google +/// APIs](), +/// [Cloud Endpoints](), [gRPC +/// Gateway](), +/// and \[Envoy\]() proxy support this feature +/// and use it for large scale production services. +/// +/// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies +/// how different portions of the gRPC request message are mapped to the URL +/// path, URL query parameters, and HTTP request body. It also controls how the +/// gRPC response message is mapped to the HTTP response body. `HttpRule` is +/// typically specified as an `google.api.http` annotation on the gRPC method. +/// +/// Each mapping specifies a URL path template and an HTTP method. The path +/// template may refer to one or more fields in the gRPC request message, as long +/// as each field is a non-repeated field with a primitive (non-message) type. +/// The path template controls how fields of the request message are mapped to +/// the URL path. +/// +/// Example: +/// +/// service Messaging { +/// rpc GetMessage(GetMessageRequest) returns (Message) { +/// option (google.api.http) = { +/// get: "/v1/{name=messages/*}" +/// }; +/// } +/// } +/// message GetMessageRequest { +/// string name = 1; // Mapped to URL path. +/// } +/// message Message { +/// string text = 1; // The resource content. +/// } +/// +/// This enables an HTTP REST to gRPC mapping as below: +/// +/// HTTP | gRPC +/// -----|----- +/// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` +/// +/// Any fields in the request message which are not bound by the path template +/// automatically become HTTP query parameters if there is no HTTP request body. +/// For example: +/// +/// service Messaging { +/// rpc GetMessage(GetMessageRequest) returns (Message) { +/// option (google.api.http) = { +/// get:"/v1/messages/{message_id}" +/// }; +/// } +/// } +/// message GetMessageRequest { +/// message SubMessage { +/// string subfield = 1; +/// } +/// string message_id = 1; // Mapped to URL path. +/// int64 revision = 2; // Mapped to URL query parameter `revision`. 
+/// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. +/// } +/// +/// This enables a HTTP JSON to RPC mapping as below: +/// +/// HTTP | gRPC +/// -----|----- +/// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | +/// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: +/// "foo"))` +/// +/// Note that fields which are mapped to URL query parameters must have a +/// primitive type or a repeated primitive type or a non-repeated message type. +/// In the case of a repeated type, the parameter can be repeated in the URL +/// as `...?param=A¶m=B`. In the case of a message type, each field of the +/// message is mapped to a separate parameter, such as +/// `...?foo.a=A&foo.b=B&foo.c=C`. +/// +/// For HTTP methods that allow a request body, the `body` field +/// specifies the mapping. Consider a REST update method on the +/// message resource collection: +/// +/// service Messaging { +/// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { +/// option (google.api.http) = { +/// patch: "/v1/messages/{message_id}" +/// body: "message" +/// }; +/// } +/// } +/// message UpdateMessageRequest { +/// string message_id = 1; // mapped to the URL +/// Message message = 2; // mapped to the body +/// } +/// +/// The following HTTP JSON to RPC mapping is enabled, where the +/// representation of the JSON in the request body is determined by +/// protos JSON encoding: +/// +/// HTTP | gRPC +/// -----|----- +/// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: +/// "123456" message { text: "Hi!" })` +/// +/// The special name `*` can be used in the body mapping to define that +/// every field not bound by the path template should be mapped to the +/// request body. This enables the following alternative definition of +/// the update method: +/// +/// service Messaging { +/// rpc UpdateMessage(Message) returns (Message) { +/// option (google.api.http) = { +/// patch: "/v1/messages/{message_id}" +/// body: "*" +/// }; +/// } +/// } +/// message Message { +/// string message_id = 1; +/// string text = 2; +/// } +/// +/// +/// The following HTTP JSON to RPC mapping is enabled: +/// +/// HTTP | gRPC +/// -----|----- +/// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: +/// "123456" text: "Hi!")` +/// +/// Note that when using `*` in the body mapping, it is not possible to +/// have HTTP parameters, as all fields not bound by the path end in +/// the body. This makes this option more rarely used in practice when +/// defining REST APIs. The common usage of `*` is in custom methods +/// which don't use the URL at all for transferring data. +/// +/// It is possible to define multiple HTTP methods for one RPC by using +/// the `additional_bindings` option. Example: +/// +/// service Messaging { +/// rpc GetMessage(GetMessageRequest) returns (Message) { +/// option (google.api.http) = { +/// get: "/v1/messages/{message_id}" +/// additional_bindings { +/// get: "/v1/users/{user_id}/messages/{message_id}" +/// } +/// }; +/// } +/// } +/// message GetMessageRequest { +/// string message_id = 1; +/// string user_id = 2; +/// } +/// +/// This enables the following two alternative HTTP JSON to RPC mappings: +/// +/// HTTP | gRPC +/// -----|----- +/// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` +/// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: +/// "123456")` +/// +/// ## Rules for HTTP mapping +/// +/// 1. 
Leaf request fields (recursive expansion nested messages in the request +/// message) are classified into three categories: +/// - Fields referred by the path template. They are passed via the URL path. +/// - Fields referred by the \[HttpRule.body][google.api.HttpRule.body\]. They +/// are passed via the HTTP +/// request body. +/// - All other fields are passed via the URL query parameters, and the +/// parameter name is the field path in the request message. A repeated +/// field can be represented as multiple query parameters under the same +/// name. +/// 2. If \[HttpRule.body][google.api.HttpRule.body\] is "*", there is no URL +/// query parameter, all fields +/// are passed via URL path and HTTP request body. +/// 3. If \[HttpRule.body][google.api.HttpRule.body\] is omitted, there is no HTTP +/// request body, all +/// fields are passed via URL path and URL query parameters. +/// +/// ### Path template syntax +/// +/// Template = "/" Segments [ Verb ] ; +/// Segments = Segment { "/" Segment } ; +/// Segment = "*" | "**" | LITERAL | Variable ; +/// Variable = "{" FieldPath [ "=" Segments ] "}" ; +/// FieldPath = IDENT { "." IDENT } ; +/// Verb = ":" LITERAL ; +/// +/// The syntax `*` matches a single URL path segment. The syntax `**` matches +/// zero or more URL path segments, which must be the last part of the URL path +/// except the `Verb`. +/// +/// The syntax `Variable` matches part of the URL path as specified by its +/// template. A variable template must not contain other variables. If a variable +/// matches a single path segment, its template may be omitted, e.g. `{var}` +/// is equivalent to `{var=*}`. +/// +/// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` +/// contains any reserved character, such characters should be percent-encoded +/// before the matching. +/// +/// If a variable contains exactly one path segment, such as `"{var}"` or +/// `"{var=*}"`, when such a variable is expanded into a URL path on the client +/// side, all characters except `\[-_.~0-9a-zA-Z\]` are percent-encoded. The +/// server side does the reverse decoding. Such variables show up in the +/// [Discovery +/// Document]() as +/// `{var}`. +/// +/// If a variable contains multiple path segments, such as `"{var=foo/*}"` +/// or `"{var=**}"`, when such a variable is expanded into a URL path on the +/// client side, all characters except `\[-_.~/0-9a-zA-Z\]` are percent-encoded. +/// The server side does the reverse decoding, except "%2F" and "%2f" are left +/// unchanged. Such variables show up in the +/// [Discovery +/// Document]() as +/// `{+var}`. +/// +/// ## Using gRPC API Service Configuration +/// +/// gRPC API Service Configuration (service config) is a configuration language +/// for configuring a gRPC service to become a user-facing product. The +/// service config is simply the YAML representation of the `google.api.Service` +/// proto message. +/// +/// As an alternative to annotating your proto file, you can configure gRPC +/// transcoding in your service config YAML files. You do this by specifying a +/// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same +/// effect as the proto annotation. This can be particularly useful if you +/// have a proto that is reused in multiple services. Note that any transcoding +/// specified in the service config will override any matching transcoding +/// configuration in the proto. +/// +/// Example: +/// +/// http: +/// rules: +/// # Selects a gRPC method and applies HttpRule to it. 
+/// - selector: example.v1.Messaging.GetMessage +/// get: /v1/messages/{message_id}/{sub.subfield} +/// +/// ## Special notes +/// +/// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the +/// proto to JSON conversion must follow the [proto3 +/// specification](). +/// +/// While the single segment variable follows the semantics of +/// [RFC 6570]() Section 3.2.2 Simple String +/// Expansion, the multi segment variable **does not** follow RFC 6570 Section +/// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion +/// does not expand special characters like `?` and `#`, which would lead +/// to invalid URLs. As the result, gRPC Transcoding uses a custom encoding +/// for multi segment variables. +/// +/// The path variables **must not** refer to any repeated or mapped field, +/// because client libraries are not capable of handling such variable expansion. +/// +/// The path variables **must not** capture the leading "/" character. The reason +/// is that the most common use case "{var}" does not capture the leading "/" +/// character. For consistency, all path variables must share the same behavior. +/// +/// Repeated message fields must not be mapped to URL query parameters, because +/// no client library can support such complicated mapping. +/// +/// If an API needs to use a JSON array for request or response body, it can map +/// the request or response body to a repeated field. However, some gRPC +/// Transcoding implementations may not support this feature. +/// ``` +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct HttpRule { + /// Selects a method to which this rule applies. + /// + /// Refer to \[selector][google.api.DocumentationRule.selector\] for syntax + /// details. + #[prost(string, tag = "1")] + pub selector: ::prost::alloc::string::String, + /// The name of the request field whose value is mapped to the HTTP request + /// body, or `*` for mapping all request fields not captured by the path + /// pattern to the HTTP body, or omitted for not having any HTTP request body. + /// + /// NOTE: the referred field must be present at the top-level of the request + /// message type. + #[prost(string, tag = "7")] + pub body: ::prost::alloc::string::String, + /// Optional. The name of the response field whose value is mapped to the HTTP + /// response body. When omitted, the entire response message will be used + /// as the HTTP response body. + /// + /// NOTE: The referred field must be present at the top-level of the response + /// message type. + #[prost(string, tag = "12")] + pub response_body: ::prost::alloc::string::String, + /// Additional HTTP bindings for the selector. Nested bindings must + /// not contain an `additional_bindings` field themselves (that is, + /// the nesting may only be one level deep). + #[prost(message, repeated, tag = "11")] + pub additional_bindings: ::prost::alloc::vec::Vec, + /// Determines the URL pattern is matched by this rules. This pattern can be + /// used with any of the {get|put|post|delete|patch} methods. A custom method + /// can be defined using the 'custom' field. + #[prost(oneof = "http_rule::Pattern", tags = "2, 3, 4, 5, 6, 8")] + pub pattern: ::core::option::Option, +} +/// Nested message and enum types in `HttpRule`. +pub mod http_rule { + /// Determines the URL pattern is matched by this rules. This pattern can be + /// used with any of the {get|put|post|delete|patch} methods. A custom method + /// can be defined using the 'custom' field. 
+ #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Pattern { + /// Maps to HTTP GET. Used for listing and getting information about + /// resources. + #[prost(string, tag = "2")] + Get(::prost::alloc::string::String), + /// Maps to HTTP PUT. Used for replacing a resource. + #[prost(string, tag = "3")] + Put(::prost::alloc::string::String), + /// Maps to HTTP POST. Used for creating a resource or performing an action. + #[prost(string, tag = "4")] + Post(::prost::alloc::string::String), + /// Maps to HTTP DELETE. Used for deleting a resource. + #[prost(string, tag = "5")] + Delete(::prost::alloc::string::String), + /// Maps to HTTP PATCH. Used for updating a resource. + #[prost(string, tag = "6")] + Patch(::prost::alloc::string::String), + /// The custom pattern is used for specifying an HTTP method that is not + /// included in the `pattern` field, such as HEAD, or "*" to leave the + /// HTTP method unspecified for this rule. The wild-card rule is useful + /// for services that provide content to Web (HTML) clients. + #[prost(message, tag = "8")] + Custom(super::CustomHttpPattern), + } +} +/// A custom pattern is used for defining custom HTTP verb. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CustomHttpPattern { + /// The name of this custom HTTP verb. + #[prost(string, tag = "1")] + pub kind: ::prost::alloc::string::String, + /// The path matched by this custom verb. + #[prost(string, tag = "2")] + pub path: ::prost::alloc::string::String, +} +/// The launch stage as defined by [Google Cloud Platform +/// Launch Stages](). +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum LaunchStage { + /// Do not use this default value. + Unspecified = 0, + /// The feature is not yet implemented. Users can not use it. + Unimplemented = 6, + /// Prelaunch features are hidden from users and are only visible internally. + Prelaunch = 7, + /// Early Access features are limited to a closed group of testers. To use + /// these features, you must sign up in advance and sign a Trusted Tester + /// agreement (which includes confidentiality provisions). These features may + /// be unstable, changed in backward-incompatible ways, and are not + /// guaranteed to be released. + EarlyAccess = 1, + /// Alpha is a limited availability test for releases before they are cleared + /// for widespread use. By Alpha, all significant design issues are resolved + /// and we are in the process of verifying functionality. Alpha customers + /// need to apply for access, agree to applicable terms, and have their + /// projects allowlisted. Alpha releases don't have to be feature complete, + /// no SLAs are provided, and there are no technical support obligations, but + /// they will be far enough along that customers can actually use them in + /// test environments or for limited-use tests -- just like they would in + /// normal production cases. + Alpha = 2, + /// Beta is the point at which we are ready to open a release for any + /// customer to use. There are no SLA or technical support obligations in a + /// Beta release. Products will be complete from a feature perspective, but + /// may have some open outstanding issues. Beta releases are suitable for + /// limited production use cases. + Beta = 3, + /// GA features are open to all developers and are considered stable and + /// fully qualified for production use. + Ga = 4, + /// Deprecated features are scheduled to be shut down and removed. 
For more + /// information, see the "Deprecation Policy" section of our [Terms of + /// Service]() + /// and the [Google Cloud Platform Subject to the Deprecation + /// Policy]() documentation. + Deprecated = 5, +} +impl LaunchStage { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + LaunchStage::Unspecified => "LAUNCH_STAGE_UNSPECIFIED", + LaunchStage::Unimplemented => "UNIMPLEMENTED", + LaunchStage::Prelaunch => "PRELAUNCH", + LaunchStage::EarlyAccess => "EARLY_ACCESS", + LaunchStage::Alpha => "ALPHA", + LaunchStage::Beta => "BETA", + LaunchStage::Ga => "GA", + LaunchStage::Deprecated => "DEPRECATED", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "LAUNCH_STAGE_UNSPECIFIED" => Some(Self::Unspecified), + "UNIMPLEMENTED" => Some(Self::Unimplemented), + "PRELAUNCH" => Some(Self::Prelaunch), + "EARLY_ACCESS" => Some(Self::EarlyAccess), + "ALPHA" => Some(Self::Alpha), + "BETA" => Some(Self::Beta), + "GA" => Some(Self::Ga), + "DEPRECATED" => Some(Self::Deprecated), + _ => None, + } + } +} +/// Required information for every language. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CommonLanguageSettings { + /// Link to automatically generated reference documentation. Example: + /// + #[deprecated] + #[prost(string, tag = "1")] + pub reference_docs_uri: ::prost::alloc::string::String, + /// The destination where API teams want this client library to be published. + #[prost(enumeration = "ClientLibraryDestination", repeated, tag = "2")] + pub destinations: ::prost::alloc::vec::Vec, +} +/// Details about how and where to publish client libraries. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ClientLibrarySettings { + /// Version of the API to apply these settings to. This is the full protobuf + /// package for the API, ending in the version element. + /// Examples: "google.cloud.speech.v1" and "google.spanner.admin.database.v1". + #[prost(string, tag = "1")] + pub version: ::prost::alloc::string::String, + /// Launch stage of this version of the API. + #[prost(enumeration = "LaunchStage", tag = "2")] + pub launch_stage: i32, + /// When using transport=rest, the client request will encode enums as + /// numbers rather than strings. + #[prost(bool, tag = "3")] + pub rest_numeric_enums: bool, + /// Settings for legacy Java features, supported in the Service YAML. + #[prost(message, optional, tag = "21")] + pub java_settings: ::core::option::Option, + /// Settings for C++ client libraries. + #[prost(message, optional, tag = "22")] + pub cpp_settings: ::core::option::Option, + /// Settings for PHP client libraries. + #[prost(message, optional, tag = "23")] + pub php_settings: ::core::option::Option, + /// Settings for Python client libraries. + #[prost(message, optional, tag = "24")] + pub python_settings: ::core::option::Option, + /// Settings for Node client libraries. + #[prost(message, optional, tag = "25")] + pub node_settings: ::core::option::Option, + /// Settings for .NET client libraries. + #[prost(message, optional, tag = "26")] + pub dotnet_settings: ::core::option::Option, + /// Settings for Ruby client libraries. 
+ #[prost(message, optional, tag = "27")] + pub ruby_settings: ::core::option::Option, + /// Settings for Go client libraries. + #[prost(message, optional, tag = "28")] + pub go_settings: ::core::option::Option, +} +/// This message configures the settings for publishing [Google Cloud Client +/// libraries]() +/// generated from the service config. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Publishing { + /// A list of API method settings, e.g. the behavior for methods that use the + /// long-running operation pattern. + #[prost(message, repeated, tag = "2")] + pub method_settings: ::prost::alloc::vec::Vec, + /// Link to a *public* URI where users can report issues. Example: + /// + #[prost(string, tag = "101")] + pub new_issue_uri: ::prost::alloc::string::String, + /// Link to product home page. Example: + /// + #[prost(string, tag = "102")] + pub documentation_uri: ::prost::alloc::string::String, + /// Used as a tracking tag when collecting data about the APIs developer + /// relations artifacts like docs, packages delivered to package managers, + /// etc. Example: "speech". + #[prost(string, tag = "103")] + pub api_short_name: ::prost::alloc::string::String, + /// GitHub label to apply to issues and pull requests opened for this API. + #[prost(string, tag = "104")] + pub github_label: ::prost::alloc::string::String, + /// GitHub teams to be added to CODEOWNERS in the directory in GitHub + /// containing source code for the client libraries for this API. + #[prost(string, repeated, tag = "105")] + pub codeowner_github_teams: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// A prefix used in sample code when demarking regions to be included in + /// documentation. + #[prost(string, tag = "106")] + pub doc_tag_prefix: ::prost::alloc::string::String, + /// For whom the client library is being published. + #[prost(enumeration = "ClientLibraryOrganization", tag = "107")] + pub organization: i32, + /// Client library settings. If the same version string appears multiple + /// times in this list, then the last one wins. Settings from earlier + /// settings with the same version string are discarded. + #[prost(message, repeated, tag = "109")] + pub library_settings: ::prost::alloc::vec::Vec, + /// Optional link to proto reference documentation. Example: + /// + #[prost(string, tag = "110")] + pub proto_reference_documentation_uri: ::prost::alloc::string::String, +} +/// Settings for Java client libraries. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct JavaSettings { + /// The package name to use in Java. Clobbers the java_package option + /// set in the protobuf. This should be used **only** by APIs + /// who have already set the language_settings.java.package_name" field + /// in gapic.yaml. API teams should use the protobuf java_package option + /// where possible. + /// + /// Example of a YAML configuration:: + /// + /// publishing: + /// java_settings: + /// library_package: com.google.cloud.pubsub.v1 + #[prost(string, tag = "1")] + pub library_package: ::prost::alloc::string::String, + /// Configure the Java class name to use instead of the service's for its + /// corresponding generated GAPIC client. Keys are fully-qualified + /// service names as they appear in the protobuf (including the full + /// the language_settings.java.interface_names" field in gapic.yaml. API + /// teams should otherwise use the service name as it appears in the + /// protobuf. 
+ /// + /// Example of a YAML configuration:: + /// + /// publishing: + /// java_settings: + /// service_class_names: + /// - google.pubsub.v1.Publisher: TopicAdmin + /// - google.pubsub.v1.Subscriber: SubscriptionAdmin + #[prost(map = "string, string", tag = "2")] + pub service_class_names: + ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, + /// Some settings. + #[prost(message, optional, tag = "3")] + pub common: ::core::option::Option, +} +/// Settings for C++ client libraries. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CppSettings { + /// Some settings. + #[prost(message, optional, tag = "1")] + pub common: ::core::option::Option, +} +/// Settings for Php client libraries. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PhpSettings { + /// Some settings. + #[prost(message, optional, tag = "1")] + pub common: ::core::option::Option, +} +/// Settings for Python client libraries. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PythonSettings { + /// Some settings. + #[prost(message, optional, tag = "1")] + pub common: ::core::option::Option, +} +/// Settings for Node client libraries. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NodeSettings { + /// Some settings. + #[prost(message, optional, tag = "1")] + pub common: ::core::option::Option, +} +/// Settings for Dotnet client libraries. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DotnetSettings { + /// Some settings. + #[prost(message, optional, tag = "1")] + pub common: ::core::option::Option, + /// Map from original service names to renamed versions. + /// This is used when the default generated types + /// would cause a naming conflict. (Neither name is + /// fully-qualified.) + /// Example: Subscriber to SubscriberServiceApi. + #[prost(map = "string, string", tag = "2")] + pub renamed_services: + ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, + /// Map from full resource types to the effective short name + /// for the resource. This is used when otherwise resource + /// named from different services would cause naming collisions. + /// Example entry: + /// "datalabeling.googleapis.com/Dataset": "DataLabelingDataset" + #[prost(map = "string, string", tag = "3")] + pub renamed_resources: + ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, + /// List of full resource types to ignore during generation. + /// This is typically used for API-specific Location resources, + /// which should be handled by the generator as if they were actually + /// the common Location resources. + /// Example entry: "documentai.googleapis.com/Location" + #[prost(string, repeated, tag = "4")] + pub ignored_resources: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// Namespaces which must be aliased in snippets due to + /// a known (but non-generator-predictable) naming collision + #[prost(string, repeated, tag = "5")] + pub forced_namespace_aliases: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// Method signatures (in the form "service.method(signature)") + /// which are provided separately, so shouldn't be generated. + /// Snippets *calling* these methods are still generated, however. + #[prost(string, repeated, tag = "6")] + pub handwritten_signatures: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +/// Settings for Ruby client libraries. 
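A minimal sketch (assuming the generated module above is in scope; the function name is illustrative) of how the `DotnetSettings` rename maps described above might be populated, reusing the example entries from the field comments:

```rust
use std::collections::HashMap;

fn dotnet_settings_example() -> DotnetSettings {
    DotnetSettings {
        // Example entries taken from the field comments above.
        renamed_services: HashMap::from([(
            "Subscriber".to_string(),
            "SubscriberServiceApi".to_string(),
        )]),
        renamed_resources: HashMap::from([(
            "datalabeling.googleapis.com/Dataset".to_string(),
            "DataLabelingDataset".to_string(),
        )]),
        ignored_resources: vec!["documentai.googleapis.com/Location".to_string()],
        ..Default::default()
    }
}
```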
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RubySettings { + /// Some settings. + #[prost(message, optional, tag = "1")] + pub common: ::core::option::Option, +} +/// Settings for Go client libraries. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GoSettings { + /// Some settings. + #[prost(message, optional, tag = "1")] + pub common: ::core::option::Option, +} +/// Describes the generator configuration for a method. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MethodSettings { + /// The fully qualified name of the method, for which the options below apply. + /// This is used to find the method to apply the options. + #[prost(string, tag = "1")] + pub selector: ::prost::alloc::string::String, + /// Describes settings to use for long-running operations when generating + /// API methods for RPCs. Complements RPCs that use the annotations in + /// google/longrunning/operations.proto. + /// + /// Example of a YAML configuration:: + /// + /// publishing: + /// method_settings: + /// - selector: google.cloud.speech.v2.Speech.BatchRecognize + /// long_running: + /// initial_poll_delay: + /// seconds: 60 # 1 minute + /// poll_delay_multiplier: 1.5 + /// max_poll_delay: + /// seconds: 360 # 6 minutes + /// total_poll_timeout: + /// seconds: 54000 # 90 minutes + #[prost(message, optional, tag = "2")] + pub long_running: ::core::option::Option, + /// List of top-level fields of the request message, that should be + /// automatically populated by the client libraries based on their + /// (google.api.field_info).format. Currently supported format: UUID4. + /// + /// Example of a YAML configuration: + /// + /// publishing: + /// method_settings: + /// - selector: google.example.v1.ExampleService.CreateExample + /// auto_populated_fields: + /// - request_id + #[prost(string, repeated, tag = "3")] + pub auto_populated_fields: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +/// Nested message and enum types in `MethodSettings`. +pub mod method_settings { + /// Describes settings to use when generating API methods that use the + /// long-running operation pattern. + /// All default values below are from those used in the client library + /// generators (e.g. + /// \[Java\]()). + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct LongRunning { + /// Initial delay after which the first poll request will be made. + /// Default value: 5 seconds. + #[prost(message, optional, tag = "1")] + pub initial_poll_delay: ::core::option::Option<::prost_types::Duration>, + /// Multiplier to gradually increase delay between subsequent polls until it + /// reaches max_poll_delay. + /// Default value: 1.5. + #[prost(float, tag = "2")] + pub poll_delay_multiplier: f32, + /// Maximum time between two subsequent poll requests. + /// Default value: 45 seconds. + #[prost(message, optional, tag = "3")] + pub max_poll_delay: ::core::option::Option<::prost_types::Duration>, + /// Total polling timeout. + /// Default value: 5 minutes. + #[prost(message, optional, tag = "4")] + pub total_poll_timeout: ::core::option::Option<::prost_types::Duration>, + } +} +/// The organization for which the client libraries are being published. +/// Affects the url where generated docs are published, etc. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum ClientLibraryOrganization { + /// Not useful. + Unspecified = 0, + /// Google Cloud Platform Org. + Cloud = 1, + /// Ads (Advertising) Org. + Ads = 2, + /// Photos Org. 
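The `LongRunning` defaults above describe a capped exponential polling schedule. The following is a rough, illustrative sketch of how a client might expand those four values into a sequence of waits; it is not the generated code and not any generator's actual algorithm:

```rust
use std::time::Duration;

/// Expand (initial delay, multiplier, max delay, total timeout) into the
/// sequence of waits between polls: each wait grows by `multiplier` until it
/// is capped at `max`, and polling stops once `total` would be exceeded.
fn poll_schedule(initial: Duration, multiplier: f32, max: Duration, total: Duration) -> Vec<Duration> {
    let mut waits = Vec::new();
    let mut wait = initial;
    let mut elapsed = Duration::ZERO;
    while elapsed + wait <= total {
        elapsed += wait;
        waits.push(wait);
        wait = max.min(wait.mul_f32(multiplier));
    }
    waits
}

// With the documented defaults (5s, 1.5, 45s, 5 minutes) the first waits are
// 5s, 7.5s, 11.25s, ... eventually capped at 45s.
```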
+ Photos = 3, + /// Street View Org. + StreetView = 4, + /// Shopping Org. + Shopping = 5, + /// Geo Org. + Geo = 6, + /// Generative AI - + GenerativeAi = 7, +} +impl ClientLibraryOrganization { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + ClientLibraryOrganization::Unspecified => "CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED", + ClientLibraryOrganization::Cloud => "CLOUD", + ClientLibraryOrganization::Ads => "ADS", + ClientLibraryOrganization::Photos => "PHOTOS", + ClientLibraryOrganization::StreetView => "STREET_VIEW", + ClientLibraryOrganization::Shopping => "SHOPPING", + ClientLibraryOrganization::Geo => "GEO", + ClientLibraryOrganization::GenerativeAi => "GENERATIVE_AI", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED" => Some(Self::Unspecified), + "CLOUD" => Some(Self::Cloud), + "ADS" => Some(Self::Ads), + "PHOTOS" => Some(Self::Photos), + "STREET_VIEW" => Some(Self::StreetView), + "SHOPPING" => Some(Self::Shopping), + "GEO" => Some(Self::Geo), + "GENERATIVE_AI" => Some(Self::GenerativeAi), + _ => None, + } + } +} +/// To where should client libraries be published? +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum ClientLibraryDestination { + /// Client libraries will neither be generated nor published to package + /// managers. + Unspecified = 0, + /// Generate the client library in a repo under github.com/googleapis, + /// but don't publish it to package managers. + Github = 10, + /// Publish the library to package managers like nuget.org and npmjs.com. + PackageManager = 20, +} +impl ClientLibraryDestination { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + ClientLibraryDestination::Unspecified => "CLIENT_LIBRARY_DESTINATION_UNSPECIFIED", + ClientLibraryDestination::Github => "GITHUB", + ClientLibraryDestination::PackageManager => "PACKAGE_MANAGER", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "CLIENT_LIBRARY_DESTINATION_UNSPECIFIED" => Some(Self::Unspecified), + "GITHUB" => Some(Self::Github), + "PACKAGE_MANAGER" => Some(Self::PackageManager), + _ => None, + } + } +} +/// An indicator of the behavior of a given field (for example, that a field +/// is required in requests, or given as output but ignored as input). +/// This **does not** change the behavior in protocol buffers itself; it only +/// denotes the behavior and may affect how API tooling handles the field. +/// +/// Note: This enum **may** receive new values in the future. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum FieldBehavior { + /// Conventional default for enums. Do not use this. + Unspecified = 0, + /// Specifically denotes a field as optional. 
+ /// While all fields in protocol buffers are optional, this may be specified + /// for emphasis if appropriate. + Optional = 1, + /// Denotes a field as required. + /// This indicates that the field **must** be provided as part of the request, + /// and failure to do so will cause an error (usually `INVALID_ARGUMENT`). + Required = 2, + /// Denotes a field as output only. + /// This indicates that the field is provided in responses, but including the + /// field in a request does nothing (the server *must* ignore it and + /// *must not* throw an error as a result of the field's presence). + OutputOnly = 3, + /// Denotes a field as input only. + /// This indicates that the field is provided in requests, and the + /// corresponding field is not included in output. + InputOnly = 4, + /// Denotes a field as immutable. + /// This indicates that the field may be set once in a request to create a + /// resource, but may not be changed thereafter. + Immutable = 5, + /// Denotes that a (repeated) field is an unordered list. + /// This indicates that the service may provide the elements of the list + /// in any arbitrary order, rather than the order the user originally + /// provided. Additionally, the list's order may or may not be stable. + UnorderedList = 6, + /// Denotes that this field returns a non-empty default value if not set. + /// This indicates that if the user provides the empty value in a request, + /// a non-empty value will be returned. The user will not be aware of what + /// non-empty value to expect. + NonEmptyDefault = 7, + /// Denotes that the field in a resource (a message annotated with + /// google.api.resource) is used in the resource name to uniquely identify the + /// resource. For AIP-compliant APIs, this should only be applied to the + /// `name` field on the resource. + /// + /// This behavior should not be applied to references to other resources within + /// the message. + /// + /// The identifier field of resources often have different field behavior + /// depending on the request it is embedded in (e.g. for Create methods name + /// is optional and unused, while for Update methods it is required). Instead + /// of method-specific annotations, only `IDENTIFIER` is required. + Identifier = 8, +} +impl FieldBehavior { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + FieldBehavior::Unspecified => "FIELD_BEHAVIOR_UNSPECIFIED", + FieldBehavior::Optional => "OPTIONAL", + FieldBehavior::Required => "REQUIRED", + FieldBehavior::OutputOnly => "OUTPUT_ONLY", + FieldBehavior::InputOnly => "INPUT_ONLY", + FieldBehavior::Immutable => "IMMUTABLE", + FieldBehavior::UnorderedList => "UNORDERED_LIST", + FieldBehavior::NonEmptyDefault => "NON_EMPTY_DEFAULT", + FieldBehavior::Identifier => "IDENTIFIER", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "FIELD_BEHAVIOR_UNSPECIFIED" => Some(Self::Unspecified), + "OPTIONAL" => Some(Self::Optional), + "REQUIRED" => Some(Self::Required), + "OUTPUT_ONLY" => Some(Self::OutputOnly), + "INPUT_ONLY" => Some(Self::InputOnly), + "IMMUTABLE" => Some(Self::Immutable), + "UNORDERED_LIST" => Some(Self::UnorderedList), + "NON_EMPTY_DEFAULT" => Some(Self::NonEmptyDefault), + "IDENTIFIER" => Some(Self::Identifier), + _ => None, + } + } +} +/// ```ignore +/// A simple descriptor of a resource type. +/// +/// ResourceDescriptor annotates a resource message (either by means of a +/// protobuf annotation or use in the service config), and associates the +/// resource's schema, the resource type, and the pattern of the resource name. +/// +/// Example: +/// +/// message Topic { +/// // Indicates this message defines a resource schema. +/// // Declares the resource type in the format of {service}/{kind}. +/// // For Kubernetes resources, the format is {api group}/{kind}. +/// option (google.api.resource) = { +/// type: "pubsub.googleapis.com/Topic" +/// pattern: "projects/{project}/topics/{topic}" +/// }; +/// } +/// +/// The ResourceDescriptor Yaml config will look like: +/// +/// resources: +/// - type: "pubsub.googleapis.com/Topic" +/// pattern: "projects/{project}/topics/{topic}" +/// +/// Sometimes, resources have multiple patterns, typically because they can +/// live under multiple parents. +/// +/// Example: +/// +/// message LogEntry { +/// option (google.api.resource) = { +/// type: "logging.googleapis.com/LogEntry" +/// pattern: "projects/{project}/logs/{log}" +/// pattern: "folders/{folder}/logs/{log}" +/// pattern: "organizations/{organization}/logs/{log}" +/// pattern: "billingAccounts/{billing_account}/logs/{log}" +/// }; +/// } +/// +/// The ResourceDescriptor Yaml config will look like: +/// +/// resources: +/// - type: 'logging.googleapis.com/LogEntry' +/// pattern: "projects/{project}/logs/{log}" +/// pattern: "folders/{folder}/logs/{log}" +/// pattern: "organizations/{organization}/logs/{log}" +/// pattern: "billingAccounts/{billing_account}/logs/{log}" +/// ``` +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ResourceDescriptor { + /// The resource type. It must be in the format of + /// {service_name}/{resource_type_kind}. The `resource_type_kind` must be + /// singular and must not include version numbers. + /// + /// Example: `storage.googleapis.com/Bucket` + /// + /// The value of the resource_type_kind must follow the regular expression + /// /\[A-Za-z][a-zA-Z0-9\]+/. It should start with an upper case character and + /// should use PascalCase (UpperCamelCase). The maximum number of + /// characters allowed for the `resource_type_kind` is 100. + #[prost(string, tag = "1")] + pub r#type: ::prost::alloc::string::String, + /// ```ignore + /// Optional. The relative resource name pattern associated with this resource + /// type. The DNS prefix of the full resource name shouldn't be specified here. + /// + /// The path pattern must follow the syntax, which aligns with HTTP binding + /// syntax: + /// + /// Template = Segment { "/" Segment } ; + /// Segment = LITERAL | Variable ; + /// Variable = "{" LITERAL "}" ; + /// + /// Examples: + /// + /// - "projects/{project}/topics/{topic}" + /// - "projects/{project}/knowledgeBases/{knowledge_base}" + /// + /// The components in braces correspond to the IDs for each resource in the + /// hierarchy. 
It is expected that, if multiple patterns are provided, + /// the same component name (e.g. "project") refers to IDs of the same + /// type of resource. + /// ``` + #[prost(string, repeated, tag = "2")] + pub pattern: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// Optional. The field on the resource that designates the resource name + /// field. If omitted, this is assumed to be "name". + #[prost(string, tag = "3")] + pub name_field: ::prost::alloc::string::String, + /// ```ignore + /// Optional. The historical or future-looking state of the resource pattern. + /// + /// Example: + /// + /// // The InspectTemplate message originally only supported resource + /// // names with organization, and project was added later. + /// message InspectTemplate { + /// option (google.api.resource) = { + /// type: "dlp.googleapis.com/InspectTemplate" + /// pattern: + /// "organizations/{organization}/inspectTemplates/{inspect_template}" + /// pattern: "projects/{project}/inspectTemplates/{inspect_template}" + /// history: ORIGINALLY_SINGLE_PATTERN + /// }; + /// } + /// ``` + #[prost(enumeration = "resource_descriptor::History", tag = "4")] + pub history: i32, + /// The plural name used in the resource name and permission names, such as + /// 'projects' for the resource name of 'projects/{project}' and the permission + /// name of 'cloudresourcemanager.googleapis.com/projects.get'. It is the same + /// concept of the `plural` field in k8s CRD spec + /// + /// + /// Note: The plural form is required even for singleton resources. See + /// + #[prost(string, tag = "5")] + pub plural: ::prost::alloc::string::String, + /// The same concept of the `singular` field in k8s CRD spec + /// + /// Such as "project" for the `resourcemanager.googleapis.com/Project` type. + #[prost(string, tag = "6")] + pub singular: ::prost::alloc::string::String, + /// Style flag(s) for this resource. + /// These indicate that a resource is expected to conform to a given + /// style. See the specific style flags for additional information. + #[prost(enumeration = "resource_descriptor::Style", repeated, tag = "10")] + pub style: ::prost::alloc::vec::Vec, +} +/// Nested message and enum types in `ResourceDescriptor`. +pub mod resource_descriptor { + /// A description of the historical or future-looking state of the + /// resource pattern. + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum History { + /// The "unset" value. + Unspecified = 0, + /// The resource originally had one pattern and launched as such, and + /// additional patterns were added later. + OriginallySinglePattern = 1, + /// The resource has one pattern, but the API owner expects to add more + /// later. (This is the inverse of ORIGINALLY_SINGLE_PATTERN, and prevents + /// that from being necessary once there are multiple patterns.) + FutureMultiPattern = 2, + } + impl History { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + History::Unspecified => "HISTORY_UNSPECIFIED", + History::OriginallySinglePattern => "ORIGINALLY_SINGLE_PATTERN", + History::FutureMultiPattern => "FUTURE_MULTI_PATTERN", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
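Expressed with the generated types (a sketch only, assuming this module is in scope; the function name is illustrative), the pubsub `Topic` example from the comments above becomes:

```rust
fn topic_resource_descriptor() -> ResourceDescriptor {
    ResourceDescriptor {
        // Values taken from the pubsub Topic example in the comments above.
        r#type: "pubsub.googleapis.com/Topic".to_string(),
        pattern: vec!["projects/{project}/topics/{topic}".to_string()],
        // `name_field` defaults to "name" when left empty.
        ..Default::default()
    }
}
```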
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "HISTORY_UNSPECIFIED" => Some(Self::Unspecified), + "ORIGINALLY_SINGLE_PATTERN" => Some(Self::OriginallySinglePattern), + "FUTURE_MULTI_PATTERN" => Some(Self::FutureMultiPattern), + _ => None, + } + } + } + /// A flag representing a specific style that a resource claims to conform to. + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum Style { + /// The unspecified value. Do not use. + Unspecified = 0, + /// This resource is intended to be "declarative-friendly". + /// + /// Declarative-friendly resources must be more strictly consistent, and + /// setting this to true communicates to tools that this resource should + /// adhere to declarative-friendly expectations. + /// + /// Note: This is used by the API linter (linter.aip.dev) to enable + /// additional checks. + DeclarativeFriendly = 1, + } + impl Style { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Style::Unspecified => "STYLE_UNSPECIFIED", + Style::DeclarativeFriendly => "DECLARATIVE_FRIENDLY", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "STYLE_UNSPECIFIED" => Some(Self::Unspecified), + "DECLARATIVE_FRIENDLY" => Some(Self::DeclarativeFriendly), + _ => None, + } + } + } +} +/// Defines a proto annotation that describes a string field that refers to +/// an API resource. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ResourceReference { + /// ```ignore + /// The resource type that the annotated field references. + /// + /// Example: + /// + /// message Subscription { + /// string topic = 2 [(google.api.resource_reference) = { + /// type: "pubsub.googleapis.com/Topic" + /// }]; + /// } + /// + /// Occasionally, a field may reference an arbitrary resource. In this case, + /// APIs use the special value * in their resource reference. + /// + /// Example: + /// + /// message GetIamPolicyRequest { + /// string resource = 2 [(google.api.resource_reference) = { + /// type: "*" + /// }]; + /// } + /// ``` + #[prost(string, tag = "1")] + pub r#type: ::prost::alloc::string::String, + /// ```ignore + /// The resource type of a child collection that the annotated field + /// references. This is useful for annotating the `parent` field that + /// doesn't have a fixed resource type. + /// + /// Example: + /// + /// message ListLogEntriesRequest { + /// string parent = 1 [(google.api.resource_reference) = { + /// child_type: "logging.googleapis.com/LogEntry" + /// }; + /// } + /// ``` + #[prost(string, tag = "2")] + pub child_type: ::prost::alloc::string::String, +} +/// ```ignore +/// Specifies the routing information that should be sent along with the request +/// in the form of routing header. +/// **NOTE:** All service configuration rules follow the "last one wins" order. +/// +/// The examples below will apply to an RPC which has the following request type: +/// +/// Message Definition: +/// +/// message Request { +/// // The name of the Table +/// // Values can be of the following formats: +/// // - `projects//tables/` +/// // - `projects//instances//tables/
` +/// - `region/<region>/zones/<zone>/tables/<table>
` +/// string table_name = 1; +/// +/// // This value specifies routing for replication. +/// // It can be in the following formats: +/// // - `profiles/` +/// // - a legacy `profile_id` that can be any string +/// string app_profile_id = 2; +/// } +/// +/// Example message: +/// +/// { +/// table_name: projects/proj_foo/instances/instance_bar/table/table_baz, +/// app_profile_id: profiles/prof_qux +/// } +/// +/// The routing header consists of one or multiple key-value pairs. Every key +/// and value must be percent-encoded, and joined together in the format of +/// `key1=value1&key2=value2`. +/// In the examples below I am skipping the percent-encoding for readablity. +/// +/// Example 1 +/// +/// Extracting a field from the request to put into the routing header +/// unchanged, with the key equal to the field name. +/// +/// annotation: +/// +/// option (google.api.routing) = { +/// // Take the `app_profile_id`. +/// routing_parameters { +/// field: "app_profile_id" +/// } +/// }; +/// +/// result: +/// +/// x-goog-request-params: app_profile_id=profiles/prof_qux +/// +/// Example 2 +/// +/// Extracting a field from the request to put into the routing header +/// unchanged, with the key different from the field name. +/// +/// annotation: +/// +/// option (google.api.routing) = { +/// // Take the `app_profile_id`, but name it `routing_id` in the header. +/// routing_parameters { +/// field: "app_profile_id" +/// path_template: "{routing_id=**}" +/// } +/// }; +/// +/// result: +/// +/// x-goog-request-params: routing_id=profiles/prof_qux +/// +/// Example 3 +/// +/// Extracting a field from the request to put into the routing +/// header, while matching a path template syntax on the field's value. +/// +/// NB: it is more useful to send nothing than to send garbage for the purpose +/// of dynamic routing, since garbage pollutes cache. Thus the matching. +/// +/// Sub-example 3a +/// +/// The field matches the template. +/// +/// annotation: +/// +/// option (google.api.routing) = { +/// // Take the `table_name`, if it's well-formed (with project-based +/// // syntax). +/// routing_parameters { +/// field: "table_name" +/// path_template: "{table_name=projects/*/instances/*/**}" +/// } +/// }; +/// +/// result: +/// +/// x-goog-request-params: +/// table_name=projects/proj_foo/instances/instance_bar/table/table_baz +/// +/// Sub-example 3b +/// +/// The field does not match the template. +/// +/// annotation: +/// +/// option (google.api.routing) = { +/// // Take the `table_name`, if it's well-formed (with region-based +/// // syntax). +/// routing_parameters { +/// field: "table_name" +/// path_template: "{table_name=regions/*/zones/*/**}" +/// } +/// }; +/// +/// result: +/// +/// +/// +/// Sub-example 3c +/// +/// Multiple alternative conflictingly named path templates are +/// specified. The one that matches is used to construct the header. +/// +/// annotation: +/// +/// option (google.api.routing) = { +/// // Take the `table_name`, if it's well-formed, whether +/// // using the region- or projects-based syntax. 
+/// +/// routing_parameters { +/// field: "table_name" +/// path_template: "{table_name=regions/*/zones/*/**}" +/// } +/// routing_parameters { +/// field: "table_name" +/// path_template: "{table_name=projects/*/instances/*/**}" +/// } +/// }; +/// +/// result: +/// +/// x-goog-request-params: +/// table_name=projects/proj_foo/instances/instance_bar/table/table_baz +/// +/// Example 4 +/// +/// Extracting a single routing header key-value pair by matching a +/// template syntax on (a part of) a single request field. +/// +/// annotation: +/// +/// option (google.api.routing) = { +/// // Take just the project id from the `table_name` field. +/// routing_parameters { +/// field: "table_name" +/// path_template: "{routing_id=projects/*}/**" +/// } +/// }; +/// +/// result: +/// +/// x-goog-request-params: routing_id=projects/proj_foo +/// +/// Example 5 +/// +/// Extracting a single routing header key-value pair by matching +/// several conflictingly named path templates on (parts of) a single request +/// field. The last template to match "wins" the conflict. +/// +/// annotation: +/// +/// option (google.api.routing) = { +/// // If the `table_name` does not have instances information, +/// // take just the project id for routing. +/// // Otherwise take project + instance. +/// +/// routing_parameters { +/// field: "table_name" +/// path_template: "{routing_id=projects/*}/**" +/// } +/// routing_parameters { +/// field: "table_name" +/// path_template: "{routing_id=projects/*/instances/*}/**" +/// } +/// }; +/// +/// result: +/// +/// x-goog-request-params: +/// routing_id=projects/proj_foo/instances/instance_bar +/// +/// Example 6 +/// +/// Extracting multiple routing header key-value pairs by matching +/// several non-conflicting path templates on (parts of) a single request field. +/// +/// Sub-example 6a +/// +/// Make the templates strict, so that if the `table_name` does not +/// have an instance information, nothing is sent. +/// +/// annotation: +/// +/// option (google.api.routing) = { +/// // The routing code needs two keys instead of one composite +/// // but works only for the tables with the "project-instance" name +/// // syntax. +/// +/// routing_parameters { +/// field: "table_name" +/// path_template: "{project_id=projects/*}/instances/*/**" +/// } +/// routing_parameters { +/// field: "table_name" +/// path_template: "projects/*/{instance_id=instances/*}/**" +/// } +/// }; +/// +/// result: +/// +/// x-goog-request-params: +/// project_id=projects/proj_foo&instance_id=instances/instance_bar +/// +/// Sub-example 6b +/// +/// Make the templates loose, so that if the `table_name` does not +/// have an instance information, just the project id part is sent. +/// +/// annotation: +/// +/// option (google.api.routing) = { +/// // The routing code wants two keys instead of one composite +/// // but will work with just the `project_id` for tables without +/// // an instance in the `table_name`. 
+/// +/// routing_parameters { +/// field: "table_name" +/// path_template: "{project_id=projects/*}/**" +/// } +/// routing_parameters { +/// field: "table_name" +/// path_template: "projects/*/{instance_id=instances/*}/**" +/// } +/// }; +/// +/// result (is the same as 6a for our example message because it has the instance +/// information): +/// +/// x-goog-request-params: +/// project_id=projects/proj_foo&instance_id=instances/instance_bar +/// +/// Example 7 +/// +/// Extracting multiple routing header key-value pairs by matching +/// several path templates on multiple request fields. +/// +/// NB: note that here there is no way to specify sending nothing if one of the +/// fields does not match its template. E.g. if the `table_name` is in the wrong +/// format, the `project_id` will not be sent, but the `routing_id` will be. +/// The backend routing code has to be aware of that and be prepared to not +/// receive a full complement of keys if it expects multiple. +/// +/// annotation: +/// +/// option (google.api.routing) = { +/// // The routing needs both `project_id` and `routing_id` +/// // (from the `app_profile_id` field) for routing. +/// +/// routing_parameters { +/// field: "table_name" +/// path_template: "{project_id=projects/*}/**" +/// } +/// routing_parameters { +/// field: "app_profile_id" +/// path_template: "{routing_id=**}" +/// } +/// }; +/// +/// result: +/// +/// x-goog-request-params: +/// project_id=projects/proj_foo&routing_id=profiles/prof_qux +/// +/// Example 8 +/// +/// Extracting a single routing header key-value pair by matching +/// several conflictingly named path templates on several request fields. The +/// last template to match "wins" the conflict. +/// +/// annotation: +/// +/// option (google.api.routing) = { +/// // The `routing_id` can be a project id or a region id depending on +/// // the table name format, but only if the `app_profile_id` is not set. +/// // If `app_profile_id` is set it should be used instead. +/// +/// routing_parameters { +/// field: "table_name" +/// path_template: "{routing_id=projects/*}/**" +/// } +/// routing_parameters { +/// field: "table_name" +/// path_template: "{routing_id=regions/*}/**" +/// } +/// routing_parameters { +/// field: "app_profile_id" +/// path_template: "{routing_id=**}" +/// } +/// }; +/// +/// result: +/// +/// x-goog-request-params: routing_id=profiles/prof_qux +/// +/// Example 9 +/// +/// Bringing it all together. +/// +/// annotation: +/// +/// option (google.api.routing) = { +/// // For routing both `table_location` and a `routing_id` are needed. +/// // +/// // table_location can be either an instance id or a region+zone id. +/// // +/// // For `routing_id`, take the value of `app_profile_id` +/// // - If it's in the format `profiles/`, send +/// // just the `` part. +/// // - If it's any other literal, send it as is. +/// // If the `app_profile_id` is empty, and the `table_name` starts with +/// // the project_id, send that instead. 
+/// +/// routing_parameters { +/// field: "table_name" +/// path_template: "projects/*/{table_location=instances/*}/tables/*" +/// } +/// routing_parameters { +/// field: "table_name" +/// path_template: "{table_location=regions/*/zones/*}/tables/*" +/// } +/// routing_parameters { +/// field: "table_name" +/// path_template: "{routing_id=projects/*}/**" +/// } +/// routing_parameters { +/// field: "app_profile_id" +/// path_template: "{routing_id=**}" +/// } +/// routing_parameters { +/// field: "app_profile_id" +/// path_template: "profiles/{routing_id=*}" +/// } +/// }; +/// +/// result: +/// +/// x-goog-request-params: +/// table_location=instances/instance_bar&routing_id=prof_qux +/// ``` +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RoutingRule { + /// A collection of Routing Parameter specifications. + /// **NOTE:** If multiple Routing Parameters describe the same key + /// (via the `path_template` field or via the `field` field when + /// `path_template` is not provided), "last one wins" rule + /// determines which Parameter gets used. + /// See the examples for more details. + #[prost(message, repeated, tag = "2")] + pub routing_parameters: ::prost::alloc::vec::Vec, +} +/// A projection from an input message to the GRPC or REST header. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RoutingParameter { + /// A request field to extract the header key-value pair from. + #[prost(string, tag = "1")] + pub field: ::prost::alloc::string::String, + /// ```ignore + /// A pattern matching the key-value field. Optional. + /// If not specified, the whole field specified in the `field` field will be + /// taken as value, and its name used as key. If specified, it MUST contain + /// exactly one named segment (along with any number of unnamed segments) The + /// pattern will be matched over the field specified in the `field` field, then + /// if the match is successful: + /// - the name of the single named segment will be used as a header name, + /// - the match value of the segment will be used as a header value; + /// if the match is NOT successful, nothing will be sent. + /// + /// Example: + /// + /// -- This is a field in the request message + /// | that the header value will be extracted from. + /// | + /// | -- This is the key name in the + /// | | routing header. + /// V | + /// field: "table_name" v + /// path_template: "projects/*/{table_location=instances/*}/tables/*" + /// ^ ^ + /// | | + /// In the {} brackets is the pattern that -- | + /// specifies what to extract from the | + /// field as a value to be sent. | + /// | + /// The string in the field must match the whole pattern -- + /// before brackets, inside brackets, after brackets. + /// + /// When looking at this specific example, we can see that: + /// - A key-value pair with the key `table_location` + /// and the value matching `instances/*` should be added + /// to the x-goog-request-params routing header. + /// - The value is extracted from the request message's `table_name` field + /// if it matches the full pattern specified: + /// `projects/*/instances/*/tables/*`. + /// + /// **NB:** If the `path_template` field is not provided, the key name is + /// equal to the field name, and the whole field should be sent as a value. 
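For reference, Example 4 above ("take just the project id from the `table_name` field") written with the generated types — a sketch only, assuming this module is in scope; the function name is illustrative:

```rust
fn example_4_routing_rule() -> RoutingRule {
    RoutingRule {
        routing_parameters: vec![RoutingParameter {
            field: "table_name".to_string(),
            path_template: "{routing_id=projects/*}/**".to_string(),
        }],
    }
}

// For table_name = "projects/proj_foo/instances/instance_bar/table/table_baz"
// the documented result is:
//   x-goog-request-params: routing_id=projects/proj_foo
```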
+ /// This makes the pattern for the field and the value functionally equivalent + /// to `**`, and the configuration + /// + /// { + /// field: "table_name" + /// } + /// + /// is a functionally equivalent shorthand to: + /// + /// { + /// field: "table_name" + /// path_template: "{table_name=**}" + /// } + /// + /// See Example 1 for more details. + /// ``` + #[prost(string, tag = "2")] + pub path_template: ::prost::alloc::string::String, +} diff --git a/crates/sui-kvstore/src/bigtable/proto/google.bigtable.v2.rs b/crates/sui-kvstore/src/bigtable/proto/google.bigtable.v2.rs new file mode 100644 index 0000000000000..90571abf5c5ea --- /dev/null +++ b/crates/sui-kvstore/src/bigtable/proto/google.bigtable.v2.rs @@ -0,0 +1,1734 @@ +/// Specifies the complete (requested) contents of a single row of a table. +/// Rows which exceed 256MiB in size cannot be read in full. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Row { + /// The unique key which identifies this row within its table. This is the same + /// key that's used to identify the row in, for example, a MutateRowRequest. + /// May contain any non-empty byte string up to 4KiB in length. + #[prost(bytes = "vec", tag = "1")] + pub key: ::prost::alloc::vec::Vec, + /// May be empty, but only if the entire row is empty. + /// The mutual ordering of column families is not specified. + #[prost(message, repeated, tag = "2")] + pub families: ::prost::alloc::vec::Vec, +} +/// Specifies (some of) the contents of a single row/column family intersection +/// of a table. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Family { + /// The unique key which identifies this family within its row. This is the + /// same key that's used to identify the family in, for example, a RowFilter + /// which sets its "family_name_regex_filter" field. + /// Must match `\[-_.a-zA-Z0-9\]+`, except that AggregatingRowProcessors may + /// produce cells in a sentinel family with an empty name. + /// Must be no greater than 64 characters in length. + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + /// Must not be empty. Sorted in order of increasing "qualifier". + #[prost(message, repeated, tag = "2")] + pub columns: ::prost::alloc::vec::Vec, +} +/// Specifies (some of) the contents of a single row/column intersection of a +/// table. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Column { + /// The unique key which identifies this column within its family. This is the + /// same key that's used to identify the column in, for example, a RowFilter + /// which sets its `column_qualifier_regex_filter` field. + /// May contain any byte string, including the empty string, up to 16kiB in + /// length. + #[prost(bytes = "vec", tag = "1")] + pub qualifier: ::prost::alloc::vec::Vec, + /// Must not be empty. Sorted in order of decreasing "timestamp_micros". + #[prost(message, repeated, tag = "2")] + pub cells: ::prost::alloc::vec::Vec, +} +/// Specifies (some of) the contents of a single row/column/timestamp of a table. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Cell { + /// The cell's stored timestamp, which also uniquely identifies it within + /// its column. + /// Values are always expressed in microseconds, but individual tables may set + /// a coarser granularity to further restrict the allowed values. For + /// example, a table which specifies millisecond granularity will only allow + /// values of `timestamp_micros` which are multiples of 1000. 
+ #[prost(int64, tag = "1")] + pub timestamp_micros: i64, + /// The value stored in the cell. + /// May contain any byte string, including the empty string, up to 100MiB in + /// length. + #[prost(bytes = "vec", tag = "2")] + pub value: ::prost::alloc::vec::Vec, + /// Labels applied to the cell by a \[RowFilter][google.bigtable.v2.RowFilter\]. + #[prost(string, repeated, tag = "3")] + pub labels: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +/// Specifies a contiguous range of rows. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RowRange { + /// The row key at which to start the range. + /// If neither field is set, interpreted as the empty string, inclusive. + #[prost(oneof = "row_range::StartKey", tags = "1, 2")] + pub start_key: ::core::option::Option, + /// The row key at which to end the range. + /// If neither field is set, interpreted as the infinite row key, exclusive. + #[prost(oneof = "row_range::EndKey", tags = "3, 4")] + pub end_key: ::core::option::Option, +} +/// Nested message and enum types in `RowRange`. +pub mod row_range { + /// The row key at which to start the range. + /// If neither field is set, interpreted as the empty string, inclusive. + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum StartKey { + /// Used when giving an inclusive lower bound for the range. + #[prost(bytes, tag = "1")] + StartKeyClosed(::prost::alloc::vec::Vec), + /// Used when giving an exclusive lower bound for the range. + #[prost(bytes, tag = "2")] + StartKeyOpen(::prost::alloc::vec::Vec), + } + /// The row key at which to end the range. + /// If neither field is set, interpreted as the infinite row key, exclusive. + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum EndKey { + /// Used when giving an exclusive upper bound for the range. + #[prost(bytes, tag = "3")] + EndKeyOpen(::prost::alloc::vec::Vec), + /// Used when giving an inclusive upper bound for the range. + #[prost(bytes, tag = "4")] + EndKeyClosed(::prost::alloc::vec::Vec), + } +} +/// Specifies a non-contiguous set of rows. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RowSet { + /// Single rows included in the set. + #[prost(bytes = "vec", repeated, tag = "1")] + pub row_keys: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, + /// Contiguous row ranges included in the set. + #[prost(message, repeated, tag = "2")] + pub row_ranges: ::prost::alloc::vec::Vec, +} +/// Specifies a contiguous range of columns within a single column family. +/// The range spans from <column_family>:<start_qualifier> to +/// <column_family>:<end_qualifier>, where both bounds can be either +/// inclusive or exclusive. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ColumnRange { + /// The name of the column family within which this range falls. + #[prost(string, tag = "1")] + pub family_name: ::prost::alloc::string::String, + /// The column qualifier at which to start the range (within `column_family`). + /// If neither field is set, interpreted as the empty string, inclusive. + #[prost(oneof = "column_range::StartQualifier", tags = "2, 3")] + pub start_qualifier: ::core::option::Option, + /// The column qualifier at which to end the range (within `column_family`). + /// If neither field is set, interpreted as the infinite string, exclusive. + #[prost(oneof = "column_range::EndQualifier", tags = "4, 5")] + pub end_qualifier: ::core::option::Option, +} +/// Nested message and enum types in `ColumnRange`. 
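A minimal sketch (assuming the generated `google.bigtable.v2` module is in scope; the function name is illustrative) of the row-selection types above: a half-open key range `["a", "b")` plus one explicit key in a `RowSet`:

```rust
fn example_row_set() -> RowSet {
    let range = RowRange {
        // [ "a", "b" ): closed lower bound, open upper bound.
        start_key: Some(row_range::StartKey::StartKeyClosed(b"a".to_vec())),
        end_key: Some(row_range::EndKey::EndKeyOpen(b"b".to_vec())),
    };
    RowSet {
        row_keys: vec![b"exact_row".to_vec()],
        row_ranges: vec![range],
    }
}
```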
+pub mod column_range { + /// The column qualifier at which to start the range (within `column_family`). + /// If neither field is set, interpreted as the empty string, inclusive. + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum StartQualifier { + /// Used when giving an inclusive lower bound for the range. + #[prost(bytes, tag = "2")] + StartQualifierClosed(::prost::alloc::vec::Vec), + /// Used when giving an exclusive lower bound for the range. + #[prost(bytes, tag = "3")] + StartQualifierOpen(::prost::alloc::vec::Vec), + } + /// The column qualifier at which to end the range (within `column_family`). + /// If neither field is set, interpreted as the infinite string, exclusive. + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum EndQualifier { + /// Used when giving an inclusive upper bound for the range. + #[prost(bytes, tag = "4")] + EndQualifierClosed(::prost::alloc::vec::Vec), + /// Used when giving an exclusive upper bound for the range. + #[prost(bytes, tag = "5")] + EndQualifierOpen(::prost::alloc::vec::Vec), + } +} +/// Specified a contiguous range of microsecond timestamps. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TimestampRange { + /// Inclusive lower bound. If left empty, interpreted as 0. + #[prost(int64, tag = "1")] + pub start_timestamp_micros: i64, + /// Exclusive upper bound. If left empty, interpreted as infinity. + #[prost(int64, tag = "2")] + pub end_timestamp_micros: i64, +} +/// Specifies a contiguous range of raw byte values. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ValueRange { + /// The value at which to start the range. + /// If neither field is set, interpreted as the empty string, inclusive. + #[prost(oneof = "value_range::StartValue", tags = "1, 2")] + pub start_value: ::core::option::Option, + /// The value at which to end the range. + /// If neither field is set, interpreted as the infinite string, exclusive. + #[prost(oneof = "value_range::EndValue", tags = "3, 4")] + pub end_value: ::core::option::Option, +} +/// Nested message and enum types in `ValueRange`. +pub mod value_range { + /// The value at which to start the range. + /// If neither field is set, interpreted as the empty string, inclusive. + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum StartValue { + /// Used when giving an inclusive lower bound for the range. + #[prost(bytes, tag = "1")] + StartValueClosed(::prost::alloc::vec::Vec), + /// Used when giving an exclusive lower bound for the range. + #[prost(bytes, tag = "2")] + StartValueOpen(::prost::alloc::vec::Vec), + } + /// The value at which to end the range. + /// If neither field is set, interpreted as the infinite string, exclusive. + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum EndValue { + /// Used when giving an inclusive upper bound for the range. + #[prost(bytes, tag = "3")] + EndValueClosed(::prost::alloc::vec::Vec), + /// Used when giving an exclusive upper bound for the range. + #[prost(bytes, tag = "4")] + EndValueOpen(::prost::alloc::vec::Vec), + } +} +/// Takes a row as input and produces an alternate view of the row based on +/// specified rules. For example, a RowFilter might trim down a row to include +/// just the cells from columns matching a given regular expression, or might +/// return all the cells of a row but not their values. More complicated filters +/// can be composed out of these components to express requests such as, "within +/// every column of a particular family, give just the two most recent cells +/// which are older than timestamp X." 
+/// +/// There are two broad categories of RowFilters (true filters and transformers), +/// as well as two ways to compose simple filters into more complex ones +/// (chains and interleaves). They work as follows: +/// +/// * True filters alter the input row by excluding some of its cells wholesale +/// from the output row. An example of a true filter is the `value_regex_filter`, +/// which excludes cells whose values don't match the specified pattern. All +/// regex true filters use RE2 syntax () +/// in raw byte mode (RE2::Latin1), and are evaluated as full matches. An +/// important point to keep in mind is that `RE2(.)` is equivalent by default to +/// `RE2(\[^\n\])`, meaning that it does not match newlines. When attempting to +/// match an arbitrary byte, you should therefore use the escape sequence `\C`, +/// which may need to be further escaped as `\\C` in your client language. +/// +/// * Transformers alter the input row by changing the values of some of its +/// cells in the output, without excluding them completely. Currently, the only +/// supported transformer is the `strip_value_transformer`, which replaces every +/// cell's value with the empty string. +/// +/// * Chains and interleaves are described in more detail in the +/// RowFilter.Chain and RowFilter.Interleave documentation. +/// +/// The total serialized size of a RowFilter message must not +/// exceed 20480 bytes, and RowFilters may not be nested within each other +/// (in Chains or Interleaves) to a depth of more than 20. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RowFilter { + /// Which of the possible RowFilter types to apply. If none are set, this + /// RowFilter returns all cells in the input row. + #[prost( + oneof = "row_filter::Filter", + tags = "1, 2, 3, 16, 17, 18, 4, 14, 5, 6, 7, 8, 9, 15, 10, 11, 12, 13, 19" + )] + pub filter: ::core::option::Option, +} +/// Nested message and enum types in `RowFilter`. +pub mod row_filter { + /// A RowFilter which sends rows through several RowFilters in sequence. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Chain { + /// The elements of "filters" are chained together to process the input row: + /// in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row + /// The full chain is executed atomically. + #[prost(message, repeated, tag = "1")] + pub filters: ::prost::alloc::vec::Vec, + } + /// A RowFilter which sends each row to each of several component + /// RowFilters and interleaves the results. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Interleave { + /// The elements of "filters" all process a copy of the input row, and the + /// results are pooled, sorted, and combined into a single output row. + /// If multiple cells are produced with the same column and timestamp, + /// they will all appear in the output row in an unspecified mutual order. + /// Consider the following example, with three filters: + ///```ignore + /// input row + /// | + /// ----------------------------------------------------- + /// | | | + /// f(0) f(1) f(2) + /// | | | + /// 1: foo,bar,10,x foo,bar,10,z far,bar,7,a + /// 2: foo,blah,11,z far,blah,5,x far,blah,5,x + /// | | | + /// ----------------------------------------------------- + /// | + /// 1: foo,bar,10,z // could have switched with #2 + /// 2: foo,bar,10,x // could have switched with #1 + /// 3: foo,blah,11,z + /// 4: far,bar,7,a + /// 5: far,blah,5,x // identical to #6 + /// 6: far,blah,5,x // identical to #5 + /// + /// All interleaved filters are executed atomically. 
+ #[prost(message, repeated, tag = "1")] + pub filters: ::prost::alloc::vec::Vec, + } + /// A RowFilter which evaluates one of two possible RowFilters, depending on + /// whether or not a predicate RowFilter outputs any cells from the input row. + /// + /// IMPORTANT NOTE: The predicate filter does not execute atomically with the + /// true and false filters, which may lead to inconsistent or unexpected + /// results. Additionally, Condition filters have poor performance, especially + /// when filters are set for the false condition. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Condition { + /// If `predicate_filter` outputs any cells, then `true_filter` will be + /// evaluated on the input row. Otherwise, `false_filter` will be evaluated. + #[prost(message, optional, boxed, tag = "1")] + pub predicate_filter: ::core::option::Option<::prost::alloc::boxed::Box>, + /// The filter to apply to the input row if `predicate_filter` returns any + /// results. If not provided, no results will be returned in the true case. + #[prost(message, optional, boxed, tag = "2")] + pub true_filter: ::core::option::Option<::prost::alloc::boxed::Box>, + /// The filter to apply to the input row if `predicate_filter` does not + /// return any results. If not provided, no results will be returned in the + /// false case. + #[prost(message, optional, boxed, tag = "3")] + pub false_filter: ::core::option::Option<::prost::alloc::boxed::Box>, + } + /// Which of the possible RowFilter types to apply. If none are set, this + /// RowFilter returns all cells in the input row. + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Filter { + /// Applies several RowFilters to the data in sequence, progressively + /// narrowing the results. + #[prost(message, tag = "1")] + Chain(Chain), + /// Applies several RowFilters to the data in parallel and combines the + /// results. + #[prost(message, tag = "2")] + Interleave(Interleave), + /// Applies one of two possible RowFilters to the data based on the output of + /// a predicate RowFilter. + #[prost(message, tag = "3")] + Condition(::prost::alloc::boxed::Box), + /// ADVANCED USE ONLY. + /// Hook for introspection into the RowFilter. Outputs all cells directly to + /// the output of the read rather than to any parent filter. Consider the + /// following example: + ///```ignore + /// Chain( + /// FamilyRegex("A"), + /// Interleave( + /// All(), + /// Chain(Label("foo"), Sink()) + /// ), + /// QualifierRegex("B") + /// ) + /// + /// A,A,1,w + /// A,B,2,x + /// B,B,4,z + /// | + /// FamilyRegex("A") + /// | + /// A,A,1,w + /// A,B,2,x + /// | + /// +------------+-------------+ + /// | | + /// All() Label(foo) + /// | | + /// A,A,1,w A,A,1,w,labels:\[foo\] + /// A,B,2,x A,B,2,x,labels:\[foo\] + /// | | + /// | Sink() --------------+ + /// | | | + /// +------------+ x------+ A,A,1,w,labels:\[foo\] + /// | A,B,2,x,labels:\[foo\] + /// A,A,1,w | + /// A,B,2,x | + /// | | + /// QualifierRegex("B") | + /// | | + /// A,B,2,x | + /// | | + /// +--------------------------------+ + /// | + /// A,A,1,w,labels:\[foo\] + /// A,B,2,x,labels:\[foo\] // could be switched + /// A,B,2,x // could be switched + /// + /// Despite being excluded by the qualifier filter, a copy of every cell + /// that reaches the sink is present in the final result. + /// + /// As with an \[Interleave][google.bigtable.v2.RowFilter.Interleave\], + /// duplicate cells are possible, and appear in an unspecified mutual order. 
+ /// In this case we have a duplicate with column "A:B" and timestamp 2, + /// because one copy passed through the all filter while the other was + /// passed through the label and sink. Note that one copy has label "foo", + /// while the other does not. + /// + /// Cannot be used within the `predicate_filter`, `true_filter`, or + /// `false_filter` of a \[Condition][google.bigtable.v2.RowFilter.Condition\]. + #[prost(bool, tag = "16")] + Sink(bool), + /// Matches all cells, regardless of input. Functionally equivalent to + /// leaving `filter` unset, but included for completeness. + #[prost(bool, tag = "17")] + PassAllFilter(bool), + /// Does not match any cells, regardless of input. Useful for temporarily + /// disabling just part of a filter. + #[prost(bool, tag = "18")] + BlockAllFilter(bool), + /// Matches only cells from rows whose keys satisfy the given RE2 regex. In + /// other words, passes through the entire row when the key matches, and + /// otherwise produces an empty row. + /// Note that, since row keys can contain arbitrary bytes, the `\C` escape + /// sequence must be used if a true wildcard is desired. The `.` character + /// will not match the new line character `\n`, which may be present in a + /// binary key. + #[prost(bytes, tag = "4")] + RowKeyRegexFilter(::prost::alloc::vec::Vec), + /// Matches all cells from a row with probability p, and matches no cells + /// from the row with probability 1-p. + #[prost(double, tag = "14")] + RowSampleFilter(f64), + /// Matches only cells from columns whose families satisfy the given RE2 + /// regex. For technical reasons, the regex must not contain the `:` + /// character, even if it is not being used as a literal. + /// Note that, since column families cannot contain the new line character + /// `\n`, it is sufficient to use `.` as a full wildcard when matching + /// column family names. + #[prost(string, tag = "5")] + FamilyNameRegexFilter(::prost::alloc::string::String), + /// Matches only cells from columns whose qualifiers satisfy the given RE2 + /// regex. + /// Note that, since column qualifiers can contain arbitrary bytes, the `\C` + /// escape sequence must be used if a true wildcard is desired. The `.` + /// character will not match the new line character `\n`, which may be + /// present in a binary qualifier. + #[prost(bytes, tag = "6")] + ColumnQualifierRegexFilter(::prost::alloc::vec::Vec), + /// Matches only cells from columns within the given range. + #[prost(message, tag = "7")] + ColumnRangeFilter(super::ColumnRange), + /// Matches only cells with timestamps within the given range. + #[prost(message, tag = "8")] + TimestampRangeFilter(super::TimestampRange), + /// Matches only cells with values that satisfy the given regular expression. + /// Note that, since cell values can contain arbitrary bytes, the `\C` escape + /// sequence must be used if a true wildcard is desired. The `.` character + /// will not match the new line character `\n`, which may be present in a + /// binary value. + #[prost(bytes, tag = "9")] + ValueRegexFilter(::prost::alloc::vec::Vec), + /// Matches only cells with values that fall within the given range. + #[prost(message, tag = "15")] + ValueRangeFilter(super::ValueRange), + /// Skips the first N cells of each row, matching all subsequent cells. + /// If duplicate cells are present, as is possible when using an Interleave, + /// each copy of the cell is counted separately. + #[prost(int32, tag = "10")] + CellsPerRowOffsetFilter(i32), + /// Matches only the first N cells of each row. 
+ /// If duplicate cells are present, as is possible when using an Interleave, + /// each copy of the cell is counted separately. + #[prost(int32, tag = "11")] + CellsPerRowLimitFilter(i32), + /// Matches only the most recent N cells within each column. For example, + /// if N=2, this filter would match column `foo:bar` at timestamps 10 and 9, + /// skip all earlier cells in `foo:bar`, and then begin matching again in + /// column `foo:bar2`. + /// If duplicate cells are present, as is possible when using an Interleave, + /// each copy of the cell is counted separately. + #[prost(int32, tag = "12")] + CellsPerColumnLimitFilter(i32), + /// Replaces each cell's value with the empty string. + #[prost(bool, tag = "13")] + StripValueTransformer(bool), + /// Applies the given label to all cells in the output row. This allows + /// the client to determine which results were produced from which part of + /// the filter. + /// + /// Values must be at most 15 characters in length, and match the RE2 + /// pattern `\[a-z0-9\\-\]+` + /// + /// Due to a technical limitation, it is not currently possible to apply + /// multiple labels to a cell. As a result, a Chain may have no more than + /// one sub-filter which contains a `apply_label_transformer`. It is okay for + /// an Interleave to contain multiple `apply_label_transformers`, as they + /// will be applied to separate copies of the input. This may be relaxed in + /// the future. + #[prost(string, tag = "19")] + ApplyLabelTransformer(::prost::alloc::string::String), + } +} +/// Specifies a particular change to be made to the contents of a row. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Mutation { + /// Which of the possible Mutation types to apply. + #[prost(oneof = "mutation::Mutation", tags = "1, 2, 3, 4")] + pub mutation: ::core::option::Option, +} +/// Nested message and enum types in `Mutation`. +pub mod mutation { + /// A Mutation which sets the value of the specified cell. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct SetCell { + /// The name of the family into which new data should be written. + /// Must match `\[-_.a-zA-Z0-9\]+` + #[prost(string, tag = "1")] + pub family_name: ::prost::alloc::string::String, + /// The qualifier of the column into which new data should be written. + /// Can be any byte string, including the empty string. + #[prost(bytes = "vec", tag = "2")] + pub column_qualifier: ::prost::alloc::vec::Vec, + /// The timestamp of the cell into which new data should be written. + /// Use -1 for current Bigtable server time. + /// Otherwise, the client should set this value itself, noting that the + /// default value is a timestamp of zero if the field is left unspecified. + /// Values must match the granularity of the table (e.g. micros, millis). + #[prost(int64, tag = "3")] + pub timestamp_micros: i64, + /// The value to be written into the specified cell. + #[prost(bytes = "vec", tag = "4")] + pub value: ::prost::alloc::vec::Vec, + } + /// A Mutation which deletes cells from the specified column, optionally + /// restricting the deletions to a given timestamp range. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct DeleteFromColumn { + /// The name of the family from which cells should be deleted. + /// Must match `\[-_.a-zA-Z0-9\]+` + #[prost(string, tag = "1")] + pub family_name: ::prost::alloc::string::String, + /// The qualifier of the column from which cells should be deleted. + /// Can be any byte string, including the empty string. 
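// A minimal sketch of the SetCell mutation described above, wrapped in the outer
// `Mutation` message so it can be attached to a MutateRowRequest. Passing -1 as
// the timestamp asks the server to use its current time; the `bigtable` alias
// and the family/qualifier names are assumptions.
fn set_cell_example(value: &[u8]) -> bigtable::Mutation {
    use bigtable::mutation::{Mutation as MutationKind, SetCell};
    bigtable::Mutation {
        mutation: Some(MutationKind::SetCell(SetCell {
            family_name: "cf".to_string(),
            column_qualifier: b"greeting".to_vec(),
            timestamp_micros: -1, // server-assigned timestamp
            value: value.to_vec(),
        })),
    }
}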
+ #[prost(bytes = "vec", tag = "2")] + pub column_qualifier: ::prost::alloc::vec::Vec, + /// The range of timestamps within which cells should be deleted. + #[prost(message, optional, tag = "3")] + pub time_range: ::core::option::Option, + } + /// A Mutation which deletes all cells from the specified column family. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct DeleteFromFamily { + /// The name of the family from which cells should be deleted. + /// Must match `\[-_.a-zA-Z0-9\]+` + #[prost(string, tag = "1")] + pub family_name: ::prost::alloc::string::String, + } + /// A Mutation which deletes all cells from the containing row. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct DeleteFromRow {} + /// Which of the possible Mutation types to apply. + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Mutation { + /// Set a cell's value. + #[prost(message, tag = "1")] + SetCell(SetCell), + /// Deletes cells from a column. + #[prost(message, tag = "2")] + DeleteFromColumn(DeleteFromColumn), + /// Deletes cells from a column family. + #[prost(message, tag = "3")] + DeleteFromFamily(DeleteFromFamily), + /// Deletes cells from the entire row. + #[prost(message, tag = "4")] + DeleteFromRow(DeleteFromRow), + } +} +/// Specifies an atomic read/modify/write operation on the latest value of the +/// specified column. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReadModifyWriteRule { + /// The name of the family to which the read/modify/write should be applied. + /// Must match `\[-_.a-zA-Z0-9\]+` + #[prost(string, tag = "1")] + pub family_name: ::prost::alloc::string::String, + /// The qualifier of the column to which the read/modify/write should be + /// applied. + /// Can be any byte string, including the empty string. + #[prost(bytes = "vec", tag = "2")] + pub column_qualifier: ::prost::alloc::vec::Vec, + /// The rule used to determine the column's new latest value from its current + /// latest value. + #[prost(oneof = "read_modify_write_rule::Rule", tags = "3, 4")] + pub rule: ::core::option::Option, +} +/// Nested message and enum types in `ReadModifyWriteRule`. +pub mod read_modify_write_rule { + /// The rule used to determine the column's new latest value from its current + /// latest value. + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Rule { + /// Rule specifying that `append_value` be appended to the existing value. + /// If the targeted cell is unset, it will be treated as containing the + /// empty string. + #[prost(bytes, tag = "3")] + AppendValue(::prost::alloc::vec::Vec), + /// Rule specifying that `increment_amount` be added to the existing value. + /// If the targeted cell is unset, it will be treated as containing a zero. + /// Otherwise, the targeted cell must contain an 8-byte value (interpreted + /// as a 64-bit big-endian signed integer), or the entire request will fail. + #[prost(int64, tag = "4")] + IncrementAmount(i64), + } +} +/// NOTE: This API is intended to be used by Apache Beam BigtableIO. +/// A partition of a change stream. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct StreamPartition { + /// The row range covered by this partition and is specified by + /// [`start_key_closed`, `end_key_open`). + #[prost(message, optional, tag = "1")] + pub row_range: ::core::option::Option, +} +/// NOTE: This API is intended to be used by Apache Beam BigtableIO. +/// The information required to continue reading the data from multiple +/// `StreamPartitions` from where a previous read left off. 
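// A minimal sketch of a ReadModifyWriteRule that atomically increments a counter
// column via the IncrementAmount rule above; per its contract, any existing value
// must be an 8-byte big-endian signed integer. The `bigtable` alias and column
// names are assumptions.
fn increment_rule_example(delta: i64) -> bigtable::ReadModifyWriteRule {
    bigtable::ReadModifyWriteRule {
        family_name: "counters".to_string(),
        column_qualifier: b"page_views".to_vec(),
        rule: Some(bigtable::read_modify_write_rule::Rule::IncrementAmount(delta)),
    }
}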
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct StreamContinuationTokens { + /// List of continuation tokens. + #[prost(message, repeated, tag = "1")] + pub tokens: ::prost::alloc::vec::Vec, +} +/// NOTE: This API is intended to be used by Apache Beam BigtableIO. +/// The information required to continue reading the data from a +/// `StreamPartition` from where a previous read left off. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct StreamContinuationToken { + /// The partition that this token applies to. + #[prost(message, optional, tag = "1")] + pub partition: ::core::option::Option, + /// An encoded position in the stream to restart reading from. + #[prost(string, tag = "2")] + pub token: ::prost::alloc::string::String, +} +/// ReadIterationStats captures information about the iteration of rows or cells +/// over the course of a read, e.g. how many results were scanned in a read +/// operation versus the results returned. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReadIterationStats { + /// The rows seen (scanned) as part of the request. This includes the count of + /// rows returned, as captured below. + #[prost(int64, tag = "1")] + pub rows_seen_count: i64, + /// The rows returned as part of the request. + #[prost(int64, tag = "2")] + pub rows_returned_count: i64, + /// The cells seen (scanned) as part of the request. This includes the count of + /// cells returned, as captured below. + #[prost(int64, tag = "3")] + pub cells_seen_count: i64, + /// The cells returned as part of the request. + #[prost(int64, tag = "4")] + pub cells_returned_count: i64, +} +/// RequestLatencyStats provides a measurement of the latency of the request as +/// it interacts with different systems over its lifetime, e.g. how long the +/// request took to execute within a frontend server. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RequestLatencyStats { + /// The latency measured by the frontend server handling this request, from + /// when the request was received, to when this value is sent back in the + /// response. For more context on the component that is measuring this latency, + /// see: + /// + /// Note: This value may be slightly shorter than the value reported into + /// aggregate latency metrics in Monitoring for this request + /// () as this value + /// needs to be sent in the response before the latency measurement including + /// that transmission is finalized. + /// + /// Note: This value includes the end-to-end latency of contacting nodes in + /// the targeted cluster, e.g. measuring from when the first byte arrives at + /// the frontend server, to when this value is sent back as the last value in + /// the response, including any latency incurred by contacting nodes, waiting + /// for results from nodes, and finally sending results from nodes back to the + /// caller. + #[prost(message, optional, tag = "1")] + pub frontend_server_latency: ::core::option::Option<::prost_types::Duration>, +} +/// FullReadStatsView captures all known information about a read. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct FullReadStatsView { + /// Iteration stats describe how efficient the read is, e.g. comparing + /// rows seen vs. rows returned or cells seen vs cells returned can provide an + /// indication of read efficiency (the higher the ratio of seen to retuned the + /// better). 
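// A minimal sketch of interpreting ReadIterationStats: comparing cells returned
// to cells seen gives a rough read-efficiency ratio, in the spirit of the
// FullReadStatsView comment above. Purely illustrative; `bigtable` is an assumed
// module alias.
fn read_efficiency(stats: &bigtable::ReadIterationStats) -> Option<f64> {
    if stats.cells_seen_count == 0 {
        return None;
    }
    Some(stats.cells_returned_count as f64 / stats.cells_seen_count as f64)
}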
+ #[prost(message, optional, tag = "1")] + pub read_iteration_stats: ::core::option::Option, + /// Request latency stats describe the time taken to complete a request, from + /// the server side. + #[prost(message, optional, tag = "2")] + pub request_latency_stats: ::core::option::Option, +} +/// RequestStats is the container for additional information pertaining to a +/// single request, helpful for evaluating the performance of the sent request. +/// Currently, there are the following supported methods: +/// * google.bigtable.v2.ReadRows +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RequestStats { + /// Information pertaining to each request type received. The type is chosen + /// based on the requested view. + /// + /// See the messages above for additional context. + #[prost(oneof = "request_stats::StatsView", tags = "1")] + pub stats_view: ::core::option::Option, +} +/// Nested message and enum types in `RequestStats`. +pub mod request_stats { + /// Information pertaining to each request type received. The type is chosen + /// based on the requested view. + /// + /// See the messages above for additional context. + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum StatsView { + /// Available with the ReadRowsRequest.RequestStatsView.REQUEST_STATS_FULL + /// view, see package google.bigtable.v2. + #[prost(message, tag = "1")] + FullReadStatsView(super::FullReadStatsView), + } +} +/// Request message for Bigtable.ReadRows. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReadRowsRequest { + /// Required. The unique name of the table from which to read. + /// Values are of the form + /// `projects//instances//tables/
`. + #[prost(string, tag = "1")] + pub table_name: ::prost::alloc::string::String, + /// This value specifies routing for replication. If not specified, the + /// "default" application profile will be used. + #[prost(string, tag = "5")] + pub app_profile_id: ::prost::alloc::string::String, + /// The row keys and/or ranges to read sequentially. If not specified, reads + /// from all rows. + #[prost(message, optional, tag = "2")] + pub rows: ::core::option::Option, + /// The filter to apply to the contents of the specified row(s). If unset, + /// reads the entirety of each row. + #[prost(message, optional, tag = "3")] + pub filter: ::core::option::Option, + /// The read will stop after committing to N rows' worth of results. The + /// default (zero) is to return all results. + #[prost(int64, tag = "4")] + pub rows_limit: i64, + /// The view into RequestStats, as described above. + #[prost(enumeration = "read_rows_request::RequestStatsView", tag = "6")] + pub request_stats_view: i32, + /// Experimental API - Please note that this API is currently experimental + /// and can change in the future. + /// + /// Return rows in lexiographical descending order of the row keys. The row + /// contents will not be affected by this flag. + /// + /// Example result set: + ///```ignore + /// [ + /// {key: "k2", "f:col1": "v1", "f:col2": "v1"}, + /// {key: "k1", "f:col1": "v2", "f:col2": "v2"} + /// ] + #[prost(bool, tag = "7")] + pub reversed: bool, +} +/// Nested message and enum types in `ReadRowsRequest`. +pub mod read_rows_request { + /// The desired view into RequestStats that should be returned in the response. + /// + /// See also: RequestStats message. + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum RequestStatsView { + /// The default / unset value. The API will default to the NONE option below. + Unspecified = 0, + /// Do not include any RequestStats in the response. This will leave the + /// RequestStats embedded message unset in the response. + RequestStatsNone = 1, + /// Include the full set of available RequestStats in the response, + /// applicable to this read. + RequestStatsFull = 2, + } + impl RequestStatsView { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + RequestStatsView::Unspecified => "REQUEST_STATS_VIEW_UNSPECIFIED", + RequestStatsView::RequestStatsNone => "REQUEST_STATS_NONE", + RequestStatsView::RequestStatsFull => "REQUEST_STATS_FULL", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "REQUEST_STATS_VIEW_UNSPECIFIED" => Some(Self::Unspecified), + "REQUEST_STATS_NONE" => Some(Self::RequestStatsNone), + "REQUEST_STATS_FULL" => Some(Self::RequestStatsFull), + _ => None, + } + } + } +} +/// Response message for Bigtable.ReadRows. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReadRowsResponse { + /// A collection of a row's contents as part of the read request. + #[prost(message, repeated, tag = "1")] + pub chunks: ::prost::alloc::vec::Vec, + /// Optionally the server might return the row key of the last row it + /// has scanned. 
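// A minimal sketch of a ReadRowsRequest built from the fields above: read at most
// 100 rows with full request stats. Leaving `rows` and `filter` at their defaults
// means "all rows, unfiltered"; the caller-supplied `table_name` and the
// `bigtable` alias are assumptions.
fn read_rows_request_example(table_name: String) -> bigtable::ReadRowsRequest {
    bigtable::ReadRowsRequest {
        table_name,
        rows_limit: 100,
        request_stats_view: bigtable::read_rows_request::RequestStatsView::RequestStatsFull
            as i32,
        ..Default::default()
    }
}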
The client can use this to construct a more + /// efficient retry request if needed: any row keys or portions of + /// ranges less than this row key can be dropped from the request. + /// This is primarily useful for cases where the server has read a + /// lot of data that was filtered out since the last committed row + /// key, allowing the client to skip that work on a retry. + #[prost(bytes = "vec", tag = "2")] + pub last_scanned_row_key: ::prost::alloc::vec::Vec, + /// + /// If requested, provide enhanced query performance statistics. The semantics + /// dictate: + /// * request_stats is empty on every (streamed) response, except + /// * request_stats has non-empty information after all chunks have been + /// streamed, where the ReadRowsResponse message only contains + /// request_stats. + /// * For example, if a read request would have returned an empty + /// response instead a single ReadRowsResponse is streamed with empty + /// chunks and request_stats filled. + /// + /// Visually, response messages will stream as follows: + /// ... -> {chunks: \[...\]} -> {chunks: [], request_stats: {...}} + /// \______________________/ \________________________________/ + /// Primary response Trailer of RequestStats info + /// + /// Or if the read did not return any values: + /// {chunks: [], request_stats: {...}} + /// \________________________________/ + /// Trailer of RequestStats info + #[prost(message, optional, tag = "3")] + pub request_stats: ::core::option::Option, +} +/// Nested message and enum types in `ReadRowsResponse`. +pub mod read_rows_response { + /// Specifies a piece of a row's contents returned as part of the read + /// response stream. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct CellChunk { + /// The row key for this chunk of data. If the row key is empty, + /// this CellChunk is a continuation of the same row as the previous + /// CellChunk in the response stream, even if that CellChunk was in a + /// previous ReadRowsResponse message. + #[prost(bytes = "vec", tag = "1")] + pub row_key: ::prost::alloc::vec::Vec, + /// The column family name for this chunk of data. If this message + /// is not present this CellChunk is a continuation of the same column + /// family as the previous CellChunk. The empty string can occur as a + /// column family name in a response so clients must check + /// explicitly for the presence of this message, not just for + /// `family_name.value` being non-empty. + #[prost(message, optional, tag = "2")] + pub family_name: ::core::option::Option<::prost::alloc::string::String>, + /// The column qualifier for this chunk of data. If this message + /// is not present, this CellChunk is a continuation of the same column + /// as the previous CellChunk. Column qualifiers may be empty so + /// clients must check for the presence of this message, not just + /// for `qualifier.value` being non-empty. + #[prost(message, optional, tag = "3")] + pub qualifier: ::core::option::Option<::prost::alloc::vec::Vec>, + /// The cell's stored timestamp, which also uniquely identifies it + /// within its column. Values are always expressed in + /// microseconds, but individual tables may set a coarser + /// granularity to further restrict the allowed values. For + /// example, a table which specifies millisecond granularity will + /// only allow values of `timestamp_micros` which are multiples of + /// 1000. Timestamps are only set in the first CellChunk per cell + /// (for cells split into multiple chunks). 
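// A minimal sketch of the streamed-trailer behaviour described above: while
// draining ReadRowsResponse messages, RequestStats (if requested) arrives in a
// final message whose `chunks` list is empty. Chunk handling is reduced to a
// count here; the `bigtable` alias is an assumption.
async fn drain_read_rows(
    mut stream: tonic::Streaming<bigtable::ReadRowsResponse>,
) -> Result<(usize, Option<bigtable::RequestStats>), tonic::Status> {
    let mut total_chunks = 0usize;
    let mut stats = None;
    while let Some(resp) = stream.message().await? {
        total_chunks += resp.chunks.len();
        if let Some(s) = resp.request_stats {
            // Trailer: only populated once all chunks have been streamed.
            stats = Some(s);
        }
    }
    Ok((total_chunks, stats))
}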
+ #[prost(int64, tag = "4")] + pub timestamp_micros: i64, + /// Labels applied to the cell by a + /// \[RowFilter][google.bigtable.v2.RowFilter\]. Labels are only set + /// on the first CellChunk per cell. + #[prost(string, repeated, tag = "5")] + pub labels: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// The value stored in the cell. Cell values can be split across + /// multiple CellChunks. In that case only the value field will be + /// set in CellChunks after the first: the timestamp and labels + /// will only be present in the first CellChunk, even if the first + /// CellChunk came in a previous ReadRowsResponse. + #[prost(bytes = "vec", tag = "6")] + pub value: ::prost::alloc::vec::Vec, + /// If this CellChunk is part of a chunked cell value and this is + /// not the final chunk of that cell, value_size will be set to the + /// total length of the cell value. The client can use this size + /// to pre-allocate memory to hold the full cell value. + #[prost(int32, tag = "7")] + pub value_size: i32, + /// Signals to the client concerning previous CellChunks received. + #[prost(oneof = "cell_chunk::RowStatus", tags = "8, 9")] + pub row_status: ::core::option::Option, + } + /// Nested message and enum types in `CellChunk`. + pub mod cell_chunk { + /// Signals to the client concerning previous CellChunks received. + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum RowStatus { + /// Indicates that the client should drop all previous chunks for + /// `row_key`, as it will be re-read from the beginning. + #[prost(bool, tag = "8")] + ResetRow(bool), + /// Indicates that the client can safely process all previous chunks for + /// `row_key`, as its data has been fully read. + #[prost(bool, tag = "9")] + CommitRow(bool), + } + } +} +/// Request message for Bigtable.SampleRowKeys. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SampleRowKeysRequest { + /// Required. The unique name of the table from which to sample row keys. + /// Values are of the form + /// `projects//instances//tables/
`. + #[prost(string, tag = "1")] + pub table_name: ::prost::alloc::string::String, + /// This value specifies routing for replication. If not specified, the + /// "default" application profile will be used. + #[prost(string, tag = "2")] + pub app_profile_id: ::prost::alloc::string::String, +} +/// Response message for Bigtable.SampleRowKeys. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SampleRowKeysResponse { + /// Sorted streamed sequence of sample row keys in the table. The table might + /// have contents before the first row key in the list and after the last one, + /// but a key containing the empty string indicates "end of table" and will be + /// the last response given, if present. + /// Note that row keys in this list may not have ever been written to or read + /// from, and users should therefore not make any assumptions about the row key + /// structure that are specific to their use case. + #[prost(bytes = "vec", tag = "1")] + pub row_key: ::prost::alloc::vec::Vec, + /// Approximate total storage space used by all rows in the table which precede + /// `row_key`. Buffering the contents of all rows between two subsequent + /// samples would require space roughly equal to the difference in their + /// `offset_bytes` fields. + #[prost(int64, tag = "2")] + pub offset_bytes: i64, +} +/// Request message for Bigtable.MutateRow. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MutateRowRequest { + /// Required. The unique name of the table to which the mutation should be + /// applied. Values are of the form + /// `projects//instances//tables/
`. + #[prost(string, tag = "1")] + pub table_name: ::prost::alloc::string::String, + /// This value specifies routing for replication. If not specified, the + /// "default" application profile will be used. + #[prost(string, tag = "4")] + pub app_profile_id: ::prost::alloc::string::String, + /// Required. The key of the row to which the mutation should be applied. + #[prost(bytes = "vec", tag = "2")] + pub row_key: ::prost::alloc::vec::Vec, + /// Required. Changes to be atomically applied to the specified row. Entries + /// are applied in order, meaning that earlier mutations can be masked by later + /// ones. Must contain at least one entry and at most 100000. + #[prost(message, repeated, tag = "3")] + pub mutations: ::prost::alloc::vec::Vec, +} +/// Response message for Bigtable.MutateRow. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MutateRowResponse {} +/// Request message for BigtableService.MutateRows. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MutateRowsRequest { + /// Required. The unique name of the table to which the mutations should be + /// applied. + #[prost(string, tag = "1")] + pub table_name: ::prost::alloc::string::String, + /// This value specifies routing for replication. If not specified, the + /// "default" application profile will be used. + #[prost(string, tag = "3")] + pub app_profile_id: ::prost::alloc::string::String, + /// Required. The row keys and corresponding mutations to be applied in bulk. + /// Each entry is applied as an atomic mutation, but the entries may be + /// applied in arbitrary order (even between entries for the same row). + /// At least one entry must be specified, and in total the entries can + /// contain at most 100000 mutations. + #[prost(message, repeated, tag = "2")] + pub entries: ::prost::alloc::vec::Vec, +} +/// Nested message and enum types in `MutateRowsRequest`. +pub mod mutate_rows_request { + /// A mutation for a given row. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Entry { + /// The key of the row to which the `mutations` should be applied. + #[prost(bytes = "vec", tag = "1")] + pub row_key: ::prost::alloc::vec::Vec, + /// Required. Changes to be atomically applied to the specified row. + /// Mutations are applied in order, meaning that earlier mutations can be + /// masked by later ones. You must specify at least one mutation. + #[prost(message, repeated, tag = "2")] + pub mutations: ::prost::alloc::vec::Vec, + } +} +/// Response message for BigtableService.MutateRows. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MutateRowsResponse { + /// One or more results for Entries from the batch request. + #[prost(message, repeated, tag = "1")] + pub entries: ::prost::alloc::vec::Vec, + /// Information about how client should limit the rate (QPS). Primirily used by + /// supported official Cloud Bigtable clients. If unset, the rate limit info is + /// not provided by the server. + #[prost(message, optional, tag = "3")] + pub rate_limit_info: ::core::option::Option, +} +/// Nested message and enum types in `MutateRowsResponse`. +pub mod mutate_rows_response { + /// The result of applying a passed mutation in the original request. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Entry { + /// The index into the original request's `entries` list of the Entry + /// for which a result is being reported. + #[prost(int64, tag = "1")] + pub index: i64, + /// The result of the request Entry identified by `index`. 
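// A minimal sketch of checking per-entry results in a MutateRowsResponse, per the
// Entry semantics above: `index` points back into the request's `entries`, and a
// status code of 0 (OK) marks success. It assumes the generated `google.rpc.Status`
// type exposes a `code: i32` field, and that `bigtable` aliases the generated module.
fn failed_entry_indexes(resp: &bigtable::MutateRowsResponse) -> Vec<i64> {
    resp.entries
        .iter()
        .filter(|e| e.status.as_ref().map(|s| s.code != 0).unwrap_or(false))
        .map(|e| e.index)
        .collect()
}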
+ /// Depending on how requests are batched during execution, it is possible + /// for one Entry to fail due to an error with another Entry. In the event + /// that this occurs, the same error will be reported for both entries. + #[prost(message, optional, tag = "2")] + pub status: ::core::option::Option, + } +} +/// Information about how client should adjust the load to Bigtable. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RateLimitInfo { + /// Time that clients should wait before adjusting the target rate again. + /// If clients adjust rate too frequently, the impact of the previous + /// adjustment may not have been taken into account and may + /// over-throttle or under-throttle. If clients adjust rate too slowly, they + /// will not be responsive to load changes on server side, and may + /// over-throttle or under-throttle. + #[prost(message, optional, tag = "1")] + pub period: ::core::option::Option<::prost_types::Duration>, + /// If it has been at least one `period` since the last load adjustment, the + /// client should multiply the current load by this value to get the new target + /// load. For example, if the current load is 100 and `factor` is 0.8, the new + /// target load should be 80. After adjusting, the client should ignore + /// `factor` until another `period` has passed. + /// + /// The client can measure its load using any unit that's comparable over time + /// For example, QPS can be used as long as each request involves a similar + /// amount of work. + #[prost(double, tag = "2")] + pub factor: f64, +} +/// Request message for Bigtable.CheckAndMutateRow. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CheckAndMutateRowRequest { + /// Required. The unique name of the table to which the conditional mutation + /// should be applied. Values are of the form + /// `projects//instances//tables/
`. + #[prost(string, tag = "1")] + pub table_name: ::prost::alloc::string::String, + /// This value specifies routing for replication. If not specified, the + /// "default" application profile will be used. + #[prost(string, tag = "7")] + pub app_profile_id: ::prost::alloc::string::String, + /// Required. The key of the row to which the conditional mutation should be + /// applied. + #[prost(bytes = "vec", tag = "2")] + pub row_key: ::prost::alloc::vec::Vec, + /// The filter to be applied to the contents of the specified row. Depending + /// on whether or not any results are yielded, either `true_mutations` or + /// `false_mutations` will be executed. If unset, checks that the row contains + /// any values at all. + #[prost(message, optional, tag = "6")] + pub predicate_filter: ::core::option::Option, + /// Changes to be atomically applied to the specified row if `predicate_filter` + /// yields at least one cell when applied to `row_key`. Entries are applied in + /// order, meaning that earlier mutations can be masked by later ones. + /// Must contain at least one entry if `false_mutations` is empty, and at most + /// 100000. + #[prost(message, repeated, tag = "4")] + pub true_mutations: ::prost::alloc::vec::Vec, + /// Changes to be atomically applied to the specified row if `predicate_filter` + /// does not yield any cells when applied to `row_key`. Entries are applied in + /// order, meaning that earlier mutations can be masked by later ones. + /// Must contain at least one entry if `true_mutations` is empty, and at most + /// 100000. + #[prost(message, repeated, tag = "5")] + pub false_mutations: ::prost::alloc::vec::Vec, +} +/// Response message for Bigtable.CheckAndMutateRow. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CheckAndMutateRowResponse { + /// Whether or not the request's `predicate_filter` yielded any results for + /// the specified row. + #[prost(bool, tag = "1")] + pub predicate_matched: bool, +} +/// Request message for client connection keep-alive and warming. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PingAndWarmRequest { + /// Required. The unique name of the instance to check permissions for as well + /// as respond. Values are of the form + /// `projects//instances/`. + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + /// This value specifies routing for replication. If not specified, the + /// "default" application profile will be used. + #[prost(string, tag = "2")] + pub app_profile_id: ::prost::alloc::string::String, +} +/// Response message for Bigtable.PingAndWarm connection keepalive and warming. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PingAndWarmResponse {} +/// Request message for Bigtable.ReadModifyWriteRow. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReadModifyWriteRowRequest { + /// Required. The unique name of the table to which the read/modify/write rules + /// should be applied. Values are of the form + /// `projects//instances//tables/
`. + #[prost(string, tag = "1")] + pub table_name: ::prost::alloc::string::String, + /// This value specifies routing for replication. If not specified, the + /// "default" application profile will be used. + #[prost(string, tag = "4")] + pub app_profile_id: ::prost::alloc::string::String, + /// Required. The key of the row to which the read/modify/write rules should be + /// applied. + #[prost(bytes = "vec", tag = "2")] + pub row_key: ::prost::alloc::vec::Vec, + /// Required. Rules specifying how the specified row's contents are to be + /// transformed into writes. Entries are applied in order, meaning that earlier + /// rules will affect the results of later ones. + #[prost(message, repeated, tag = "3")] + pub rules: ::prost::alloc::vec::Vec, +} +/// Response message for Bigtable.ReadModifyWriteRow. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReadModifyWriteRowResponse { + /// A Row containing the new contents of all cells modified by the request. + #[prost(message, optional, tag = "1")] + pub row: ::core::option::Option, +} +/// NOTE: This API is intended to be used by Apache Beam BigtableIO. +/// Request message for Bigtable.GenerateInitialChangeStreamPartitions. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GenerateInitialChangeStreamPartitionsRequest { + /// Required. The unique name of the table from which to get change stream + /// partitions. Values are of the form + /// `projects//instances//tables/
`. + /// Change streaming must be enabled on the table. + #[prost(string, tag = "1")] + pub table_name: ::prost::alloc::string::String, + /// This value specifies routing for replication. If not specified, the + /// "default" application profile will be used. + /// Single cluster routing must be configured on the profile. + #[prost(string, tag = "2")] + pub app_profile_id: ::prost::alloc::string::String, +} +/// NOTE: This API is intended to be used by Apache Beam BigtableIO. +/// Response message for Bigtable.GenerateInitialChangeStreamPartitions. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GenerateInitialChangeStreamPartitionsResponse { + /// A partition of the change stream. + #[prost(message, optional, tag = "1")] + pub partition: ::core::option::Option, +} +/// NOTE: This API is intended to be used by Apache Beam BigtableIO. +/// Request message for Bigtable.ReadChangeStream. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReadChangeStreamRequest { + /// Required. The unique name of the table from which to read a change stream. + /// Values are of the form + /// `projects//instances//tables/
`. + /// Change streaming must be enabled on the table. + #[prost(string, tag = "1")] + pub table_name: ::prost::alloc::string::String, + /// This value specifies routing for replication. If not specified, the + /// "default" application profile will be used. + /// Single cluster routing must be configured on the profile. + #[prost(string, tag = "2")] + pub app_profile_id: ::prost::alloc::string::String, + /// The partition to read changes from. + #[prost(message, optional, tag = "3")] + pub partition: ::core::option::Option, + /// If specified, OK will be returned when the stream advances beyond + /// this time. Otherwise, changes will be continuously delivered on the stream. + /// This value is inclusive and will be truncated to microsecond granularity. + #[prost(message, optional, tag = "5")] + pub end_time: ::core::option::Option<::prost_types::Timestamp>, + /// If specified, the duration between `Heartbeat` messages on the stream. + /// Otherwise, defaults to 5 seconds. + #[prost(message, optional, tag = "7")] + pub heartbeat_duration: ::core::option::Option<::prost_types::Duration>, + /// Options for describing where we want to start reading from the stream. + #[prost(oneof = "read_change_stream_request::StartFrom", tags = "4, 6")] + pub start_from: ::core::option::Option, +} +/// Nested message and enum types in `ReadChangeStreamRequest`. +pub mod read_change_stream_request { + /// Options for describing where we want to start reading from the stream. + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum StartFrom { + /// Start reading the stream at the specified timestamp. This timestamp must + /// be within the change stream retention period, less than or equal to the + /// current time, and after change stream creation, whichever is greater. + /// This value is inclusive and will be truncated to microsecond granularity. + #[prost(message, tag = "4")] + StartTime(::prost_types::Timestamp), + /// Tokens that describe how to resume reading a stream where reading + /// previously left off. If specified, changes will be read starting at the + /// the position. Tokens are delivered on the stream as part of `Heartbeat` + /// and `CloseStream` messages. + /// + /// If a single token is provided, the token’s partition must exactly match + /// the request’s partition. If multiple tokens are provided, as in the case + /// of a partition merge, the union of the token partitions must exactly + /// cover the request’s partition. Otherwise, INVALID_ARGUMENT will be + /// returned. + #[prost(message, tag = "6")] + ContinuationTokens(super::StreamContinuationTokens), + } +} +/// NOTE: This API is intended to be used by Apache Beam BigtableIO. +/// Response message for Bigtable.ReadChangeStream. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReadChangeStreamResponse { + /// The data or control message on the stream. + #[prost(oneof = "read_change_stream_response::StreamRecord", tags = "1, 2, 3")] + pub stream_record: ::core::option::Option, +} +/// Nested message and enum types in `ReadChangeStreamResponse`. +pub mod read_change_stream_response { + /// A partial or complete mutation. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct MutationChunk { + /// If set, then the mutation is a `SetCell` with a chunked value across + /// multiple messages. 
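// A minimal sketch of a ReadChangeStreamRequest that starts reading one partition
// from a given timestamp via the StartFrom oneof above. Unset fields fall back to
// defaults (e.g. the 5 second heartbeat); the partition, timestamp, and `bigtable`
// alias are all caller-supplied assumptions.
fn read_change_stream_request_example(
    table_name: String,
    partition: bigtable::StreamPartition,
    start: prost_types::Timestamp,
) -> bigtable::ReadChangeStreamRequest {
    bigtable::ReadChangeStreamRequest {
        table_name,
        partition: Some(partition),
        start_from: Some(bigtable::read_change_stream_request::StartFrom::StartTime(start)),
        ..Default::default()
    }
}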
+ #[prost(message, optional, tag = "1")] + pub chunk_info: ::core::option::Option, + /// If this is a continuation of a chunked message (`chunked_value_offset` > + /// 0), ignore all fields except the `SetCell`'s value and merge it with + /// the previous message by concatenating the value fields. + #[prost(message, optional, tag = "2")] + pub mutation: ::core::option::Option, + } + /// Nested message and enum types in `MutationChunk`. + pub mod mutation_chunk { + /// Information about the chunking of this mutation. + /// Only `SetCell` mutations can be chunked, and all chunks for a `SetCell` + /// will be delivered contiguously with no other mutation types interleaved. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct ChunkInfo { + /// The total value size of all the chunks that make up the `SetCell`. + #[prost(int32, tag = "1")] + pub chunked_value_size: i32, + /// The byte offset of this chunk into the total value size of the + /// mutation. + #[prost(int32, tag = "2")] + pub chunked_value_offset: i32, + /// When true, this is the last chunk of a chunked `SetCell`. + #[prost(bool, tag = "3")] + pub last_chunk: bool, + } + } + /// A message corresponding to one or more mutations to the partition + /// being streamed. A single logical `DataChange` message may also be split + /// across a sequence of multiple individual messages. Messages other than + /// the first in a sequence will only have the `type` and `chunks` fields + /// populated, with the final message in the sequence also containing `done` + /// set to true. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct DataChange { + /// The type of the mutation. + #[prost(enumeration = "data_change::Type", tag = "1")] + pub r#type: i32, + /// The cluster where the mutation was applied. + /// Not set when `type` is `GARBAGE_COLLECTION`. + #[prost(string, tag = "2")] + pub source_cluster_id: ::prost::alloc::string::String, + /// The row key for all mutations that are part of this `DataChange`. + /// If the `DataChange` is chunked across multiple messages, then this field + /// will only be set for the first message. + #[prost(bytes = "vec", tag = "3")] + pub row_key: ::prost::alloc::vec::Vec, + /// The timestamp at which the mutation was applied on the Bigtable server. + #[prost(message, optional, tag = "4")] + pub commit_timestamp: ::core::option::Option<::prost_types::Timestamp>, + /// A value that lets stream consumers reconstruct Bigtable's + /// conflict resolution semantics. + /// + /// In the event that the same row key, column family, column qualifier, + /// timestamp are modified on different clusters at the same + /// `commit_timestamp`, the mutation with the larger `tiebreaker` will be the + /// one chosen for the eventually consistent state of the system. + #[prost(int32, tag = "5")] + pub tiebreaker: i32, + /// The mutations associated with this change to the partition. + /// May contain complete mutations or chunks of a multi-message chunked + /// `DataChange` record. + #[prost(message, repeated, tag = "6")] + pub chunks: ::prost::alloc::vec::Vec, + /// When true, indicates that the entire `DataChange` has been read + /// and the client can safely process the message. + #[prost(bool, tag = "8")] + pub done: bool, + /// An encoded position for this stream's partition to restart reading from. + /// This token is for the StreamPartition from the request. 
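// A minimal sketch of reassembling chunked SetCell values within a single
// DataChange, following the ChunkInfo comments above: continuation chunks carry
// only the value, which is concatenated until `last_chunk` is set. Illustrative
// only; real code would also buffer across split DataChange messages until `done`
// is true. `bigtable` is an assumed module alias.
fn reassemble_set_cell_values(
    change: &bigtable::read_change_stream_response::DataChange,
) -> Vec<Vec<u8>> {
    let mut values = Vec::new();
    let mut pending: Option<Vec<u8>> = None;
    for chunk in &change.chunks {
        let value = match &chunk.mutation {
            Some(m) => match &m.mutation {
                Some(bigtable::mutation::Mutation::SetCell(sc)) => sc.value.clone(),
                _ => continue, // only SetCell mutations are ever chunked
            },
            None => continue,
        };
        match &chunk.chunk_info {
            None => values.push(value), // unchunked SetCell
            Some(info) => {
                pending.get_or_insert_with(Vec::new).extend_from_slice(&value);
                if info.last_chunk {
                    values.push(pending.take().unwrap());
                }
            }
        }
    }
    values
}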
+ #[prost(string, tag = "9")] + pub token: ::prost::alloc::string::String, + /// An estimate of the commit timestamp that is usually lower than or equal + /// to any timestamp for a record that will be delivered in the future on the + /// stream. It is possible that, under particular circumstances that a future + /// record has a timestamp is is lower than a previously seen timestamp. For + /// an example usage see + /// + #[prost(message, optional, tag = "10")] + pub estimated_low_watermark: ::core::option::Option<::prost_types::Timestamp>, + } + /// Nested message and enum types in `DataChange`. + pub mod data_change { + /// The type of mutation. + #[derive( + Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration, + )] + #[repr(i32)] + pub enum Type { + /// The type is unspecified. + Unspecified = 0, + /// A user-initiated mutation. + User = 1, + /// A system-initiated mutation as part of garbage collection. + /// + GarbageCollection = 2, + /// This is a continuation of a multi-message change. + Continuation = 3, + } + impl Type { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Type::Unspecified => "TYPE_UNSPECIFIED", + Type::User => "USER", + Type::GarbageCollection => "GARBAGE_COLLECTION", + Type::Continuation => "CONTINUATION", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "TYPE_UNSPECIFIED" => Some(Self::Unspecified), + "USER" => Some(Self::User), + "GARBAGE_COLLECTION" => Some(Self::GarbageCollection), + "CONTINUATION" => Some(Self::Continuation), + _ => None, + } + } + } + } + /// A periodic message with information that can be used to checkpoint + /// the state of a stream. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Heartbeat { + /// A token that can be provided to a subsequent `ReadChangeStream` call + /// to pick up reading at the current stream position. + #[prost(message, optional, tag = "1")] + pub continuation_token: ::core::option::Option, + /// An estimate of the commit timestamp that is usually lower than or equal + /// to any timestamp for a record that will be delivered in the future on the + /// stream. It is possible that, under particular circumstances that a future + /// record has a timestamp is is lower than a previously seen timestamp. For + /// an example usage see + /// + #[prost(message, optional, tag = "2")] + pub estimated_low_watermark: ::core::option::Option<::prost_types::Timestamp>, + } + /// A message indicating that the client should stop reading from the stream. + /// If status is OK and `continuation_tokens` & `new_partitions` are empty, the + /// stream has finished (for example if there was an `end_time` specified). + /// If `continuation_tokens` & `new_partitions` are present, then a change in + /// partitioning requires the client to open a new stream for each token to + /// resume reading. 
Example: + /// [B, D) ends + /// | + /// v + /// new_partitions: [A, C) [C, E) + /// continuation_tokens.partitions: [B,C) [C,D) + /// ^---^ ^---^ + /// ^ ^ + /// | | + /// | StreamContinuationToken 2 + /// | + /// StreamContinuationToken 1 + /// To read the new partition [A,C), supply the continuation tokens whose + /// ranges cover the new partition, for example ContinuationToken[A,B) & + /// ContinuationToken[B,C). + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct CloseStream { + /// The status of the stream. + #[prost(message, optional, tag = "1")] + pub status: ::core::option::Option, + /// If non-empty, contains the information needed to resume reading their + /// associated partitions. + #[prost(message, repeated, tag = "2")] + pub continuation_tokens: ::prost::alloc::vec::Vec, + /// If non-empty, contains the new partitions to start reading from, which + /// are related to but not necessarily identical to the partitions for the + /// above `continuation_tokens`. + #[prost(message, repeated, tag = "3")] + pub new_partitions: ::prost::alloc::vec::Vec, + } + /// The data or control message on the stream. + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum StreamRecord { + /// A mutation to the partition. + #[prost(message, tag = "1")] + DataChange(DataChange), + /// A periodic heartbeat message. + #[prost(message, tag = "2")] + Heartbeat(Heartbeat), + /// An indication that the stream should be closed. + #[prost(message, tag = "3")] + CloseStream(CloseStream), + } +} +/// Generated client implementations. +pub mod bigtable_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::http::Uri; + use tonic::codegen::*; + /// Service for reading from and writing to existing Bigtable tables. + #[derive(Debug, Clone)] + pub struct BigtableClient { + inner: tonic::client::Grpc, + } + impl BigtableClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl BigtableClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> BigtableClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + >>::Error: + Into + Send + Sync, + { + BigtableClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. 
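// A minimal sketch of constructing the generated client above and applying the
// compression/size knobs it documents. Real Bigtable traffic additionally needs
// TLS and OAuth credentials on the channel (not shown); the endpoint URL, the
// gzip feature, and the `bigtable` alias are assumptions.
async fn connect_example() -> Result<
    bigtable::bigtable_client::BigtableClient<tonic::transport::Channel>,
    tonic::transport::Error,
> {
    let client = bigtable::bigtable_client::BigtableClient::connect(
        "https://bigtable.googleapis.com",
    )
    .await?
    .send_compressed(tonic::codec::CompressionEncoding::Gzip)
    .accept_compressed(tonic::codec::CompressionEncoding::Gzip)
    .max_decoding_message_size(256 * 1024 * 1024);
    Ok(client)
}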
+ /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// Streams back the contents of all requested rows in key order, optionally + /// applying the same Reader filter to each. Depending on their size, + /// rows and cells may be broken up across multiple responses, but + /// atomicity of each row will still be preserved. See the + /// ReadRowsResponse documentation for details. + pub async fn read_rows( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = + http::uri::PathAndQuery::from_static("/google.bigtable.v2.Bigtable/ReadRows"); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("google.bigtable.v2.Bigtable", "ReadRows")); + self.inner.server_streaming(req, path, codec).await + } + /// Returns a sample of row keys in the table. The returned row keys will + /// delimit contiguous sections of the table of approximately equal size, + /// which can be used to break up the data for distributed tasks like + /// mapreduces. + pub async fn sample_row_keys( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = + http::uri::PathAndQuery::from_static("/google.bigtable.v2.Bigtable/SampleRowKeys"); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new( + "google.bigtable.v2.Bigtable", + "SampleRowKeys", + )); + self.inner.server_streaming(req, path, codec).await + } + /// Mutates a row atomically. Cells already present in the row are left + /// unchanged unless explicitly changed by `mutation`. + pub async fn mutate_row( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = + http::uri::PathAndQuery::from_static("/google.bigtable.v2.Bigtable/MutateRow"); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("google.bigtable.v2.Bigtable", "MutateRow")); + self.inner.unary(req, path, codec).await + } + /// Mutates multiple rows in a batch. Each individual row is mutated + /// atomically as in MutateRow, but the entire batch is not executed + /// atomically. 
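// A minimal sketch of calling `read_rows` above and draining the server stream
// with tonic's `Streaming::message`. Row handling is reduced to a chunk count;
// `table_name` is an assumed fully-qualified table path and `bigtable` an assumed
// module alias.
async fn read_all_chunks(
    client: &mut bigtable::bigtable_client::BigtableClient<tonic::transport::Channel>,
    table_name: String,
) -> Result<usize, tonic::Status> {
    let request = bigtable::ReadRowsRequest {
        table_name,
        ..Default::default() // all rows, no filter, no limit
    };
    let mut stream = client.read_rows(request).await?.into_inner();
    let mut chunks = 0usize;
    while let Some(resp) = stream.message().await? {
        chunks += resp.chunks.len();
    }
    Ok(chunks)
}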
+ pub async fn mutate_rows( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = + http::uri::PathAndQuery::from_static("/google.bigtable.v2.Bigtable/MutateRows"); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("google.bigtable.v2.Bigtable", "MutateRows")); + self.inner.server_streaming(req, path, codec).await + } + /// Mutates a row atomically based on the output of a predicate Reader filter. + pub async fn check_and_mutate_row( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> + { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.bigtable.v2.Bigtable/CheckAndMutateRow", + ); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new( + "google.bigtable.v2.Bigtable", + "CheckAndMutateRow", + )); + self.inner.unary(req, path, codec).await + } + /// Warm up associated instance metadata for this connection. + /// This call is not required but may be useful for connection keep-alive. + pub async fn ping_and_warm( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> + { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = + http::uri::PathAndQuery::from_static("/google.bigtable.v2.Bigtable/PingAndWarm"); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new( + "google.bigtable.v2.Bigtable", + "PingAndWarm", + )); + self.inner.unary(req, path, codec).await + } + /// Modifies a row atomically on the server. The method reads the latest + /// existing timestamp and value from the specified columns and writes a new + /// entry based on pre-defined read/modify/write rules. The new value for the + /// timestamp is the greater of the existing timestamp or the current server + /// time. The method returns the new contents of all modified cells. + pub async fn read_modify_write_row( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> + { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.bigtable.v2.Bigtable/ReadModifyWriteRow", + ); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new( + "google.bigtable.v2.Bigtable", + "ReadModifyWriteRow", + )); + self.inner.unary(req, path, codec).await + } + /// NOTE: This API is intended to be used by Apache Beam BigtableIO. + /// Returns the current list of partitions that make up the table's + /// change stream. The union of partitions will cover the entire keyspace. + /// Partitions can be read with `ReadChangeStream`. 
+ pub async fn generate_initial_change_stream_partitions( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response< + tonic::codec::Streaming, + >, + tonic::Status, + > { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.bigtable.v2.Bigtable/GenerateInitialChangeStreamPartitions", + ); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new( + "google.bigtable.v2.Bigtable", + "GenerateInitialChangeStreamPartitions", + )); + self.inner.server_streaming(req, path, codec).await + } + /// NOTE: This API is intended to be used by Apache Beam BigtableIO. + /// Reads changes from a table's change stream. Changes will + /// reflect both user-initiated mutations and mutations that are caused by + /// garbage collection. + pub async fn read_change_stream( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response>, + tonic::Status, + > { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/google.bigtable.v2.Bigtable/ReadChangeStream", + ); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new( + "google.bigtable.v2.Bigtable", + "ReadChangeStream", + )); + self.inner.server_streaming(req, path, codec).await + } + } +} diff --git a/crates/sui-kvstore/src/bigtable/proto/google.pem b/crates/sui-kvstore/src/bigtable/proto/google.pem new file mode 100644 index 0000000000000..bf10a673253bb --- /dev/null +++ b/crates/sui-kvstore/src/bigtable/proto/google.pem @@ -0,0 +1,1128 @@ +# Operating CA: DigiCert +# Issuer: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust +# Subject: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust +# Label: "Baltimore CyberTrust Root" +# Serial: 33554617 +# MD5 Fingerprint: ac:b6:94:a5:9c:17:e0:d7:91:52:9b:b1:97:06:a6:e4 +# SHA1 Fingerprint: d4:de:20:d0:5e:66:fc:53:fe:1a:50:88:2c:78:db:28:52:ca:e4:74 +# SHA256 Fingerprint: 16:af:57:a9:f6:76:b0:ab:12:60:95:aa:5e:ba:de:f2:2a:b3:11:19:d6:44:ac:95:cd:4b:93:db:f3:f2:6a:eb +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJ +RTESMBAGA1UEChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYD +VQQDExlCYWx0aW1vcmUgQ3liZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoX +DTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMCSUUxEjAQBgNVBAoTCUJhbHRpbW9y +ZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFsdGltb3JlIEN5YmVy +VHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKMEuyKr +mD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjr +IZ3AQSsBUnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeK +mpYcqWe4PwzV9/lSEy/CG9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSu +XmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9XbIGevOF6uvUA65ehD5f/xXtabz5OTZy +dc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjprl3RjM71oGDHweI12v/ye +jl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoIVDaGezq1 +BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3 +DQEBBQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT92 +9hkTI7gQCvlYpNRhcL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3Wgx +jkzSswF07r51XgdIGn9w/xZchMB5hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0 
+Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsaY71k5h+3zvDyny67G7fyUIhz +ksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9HRCwBXbsdtTLS +R9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp +-----END CERTIFICATE----- + +# Operating CA: DigiCert +# Issuer: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Assured ID Root CA" +# Serial: 17154717934120587862167794914071425081 +# MD5 Fingerprint: 87:ce:0b:7b:2a:0e:49:00:e1:58:71:9b:37:a8:93:72 +# SHA1 Fingerprint: 05:63:b8:63:0d:62:d7:5a:bb:c8:ab:1e:4b:df:b5:a8:99:b2:4d:43 +# SHA256 Fingerprint: 3e:90:99:b5:01:5e:8f:48:6c:00:bc:ea:9d:11:1e:e7:21:fa:ba:35:5a:89:bc:f1:df:69:56:1e:3d:c6:32:5c +-----BEGIN CERTIFICATE----- +MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBl +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv +b3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzExMTEwMDAwMDAwWjBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl +cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7c +JpSIqvTO9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYP +mDI2dsze3Tyoou9q+yHyUmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+ +wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4 +VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpyoeb6pNnVFzF1roV9Iq4/ +AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whfGHdPAgMB +AAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBRF66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYun +pyGd823IDzANBgkqhkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRC +dWKuh+vy1dneVrOfzM4UKLkNl2BcEkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTf +fwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38FnSbNd67IJKusm7Xi+fT8r87cm +NW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i8b5QZ7dsvfPx +H2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe ++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g== +-----END CERTIFICATE----- + +# Operating CA: DigiCert +# Issuer: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Assured ID Root G2" +# Serial: 15385348160840213938643033620894905419 +# MD5 Fingerprint: 92:38:b9:f8:63:24:82:65:2c:57:33:e6:fe:81:8f:9d +# SHA1 Fingerprint: a1:4b:48:d9:43:ee:0a:0e:40:90:4f:3c:e0:a4:c0:91:93:51:5d:3f +# SHA256 Fingerprint: 7d:05:eb:b6:82:33:9f:8c:94:51:ee:09:4e:eb:fe:fa:79:53:a1:14:ed:b2:f4:49:49:45:2f:ab:7d:2f:c1:85 +-----BEGIN CERTIFICATE----- +MIIDljCCAn6gAwIBAgIQC5McOtY5Z+pnI7/Dr5r0SzANBgkqhkiG9w0BAQsFADBl +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv +b3QgRzIwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl +cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ5ygvUj82ckmIkzTz+GoeMVSA +n61UQbVH35ao1K+ALbkKz3X9iaV9JPrjIgwrvJUXCzO/GU1BBpAAvQxNEP4Htecc +biJVMWWXvdMX0h5i89vqbFCMP4QMls+3ywPgym2hFEwbid3tALBSfK+RbLE4E9Hp +EgjAALAcKxHad3A2m67OeYfcgnDmCXRwVWmvo2ifv922ebPynXApVfSr/5Vh88lA +bx3RvpO704gqu52/clpWcTs/1PPRCv4o76Pu2ZmvA9OPYLfykqGxvYmJHzDNw6Yu +YjOuFgJ3RFrngQo8p0Quebg/BLxcoIfhG69Rjs3sLPr4/m3wOnyqi+RnlTGNAgMB +AAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQW 
+BBTOw0q5mVXyuNtgv6l+vVa1lzan1jANBgkqhkiG9w0BAQsFAAOCAQEAyqVVjOPI +QW5pJ6d1Ee88hjZv0p3GeDgdaZaikmkuOGybfQTUiaWxMTeKySHMq2zNixya1r9I +0jJmwYrA8y8678Dj1JGG0VDjA9tzd29KOVPt3ibHtX2vK0LRdWLjSisCx1BL4Gni +lmwORGYQRI+tBev4eaymG+g3NJ1TyWGqolKvSnAWhsI6yLETcDbYz+70CjTVW0z9 +B5yiutkBclzzTcHdDrEcDcRjvq30FPuJ7KJBDkzMyFdA0G4Dqs0MjomZmWzwPDCv +ON9vvKO+KSAnq3T/EyJ43pdSVR6DtVQgA+6uwE9W3jfMw3+qBCe703e4YtsXfJwo +IhNzbM8m9Yop5w== +-----END CERTIFICATE----- + +# Operating CA: DigiCert +# Issuer: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Assured ID Root G3" +# Serial: 15459312981008553731928384953135426796 +# MD5 Fingerprint: 7c:7f:65:31:0c:81:df:8d:ba:3e:99:e2:5c:ad:6e:fb +# SHA1 Fingerprint: f5:17:a2:4f:9a:48:c6:c9:f8:a2:00:26:9f:dc:0f:48:2c:ab:30:89 +# SHA256 Fingerprint: 7e:37:cb:8b:4c:47:09:0c:ab:36:55:1b:a6:f4:5d:b8:40:68:0f:ba:16:6a:95:2d:b1:00:71:7f:43:05:3f:c2 +-----BEGIN CERTIFICATE----- +MIICRjCCAc2gAwIBAgIQC6Fa+h3foLVJRK/NJKBs7DAKBggqhkjOPQQDAzBlMQsw +CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu +ZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3Qg +RzMwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu +Y29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwdjAQBgcq +hkjOPQIBBgUrgQQAIgNiAAQZ57ysRGXtzbg/WPuNsVepRC0FFfLvC/8QdJ+1YlJf +Zn4f5dwbRXkLzMZTCp2NXQLZqVneAlr2lSoOjThKiknGvMYDOAdfVdp+CW7if17Q +RSAPWXYQ1qAk8C3eNvJsKTmjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ +BAQDAgGGMB0GA1UdDgQWBBTL0L2p4ZgFUaFNN6KDec6NHSrkhDAKBggqhkjOPQQD +AwNnADBkAjAlpIFFAmsSS3V0T8gj43DydXLefInwz5FyYZ5eEJJZVrmDxxDnOOlY +JjZ91eQ0hjkCMHw2U/Aw5WJjOpnitqM7mzT6HtoQknFekROn3aRukswy1vUhZscv +6pZjamVFkpUBtA== +-----END CERTIFICATE----- + +# Operating CA: DigiCert +# Issuer: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Global Root CA" +# Serial: 10944719598952040374951832963794454346 +# MD5 Fingerprint: 79:e4:a9:84:0d:7d:3a:96:d7:c0:4f:e2:43:4c:89:2e +# SHA1 Fingerprint: a8:98:5d:3a:65:e5:e5:c4:b2:d7:d6:6d:40:c6:dd:2f:b1:9c:54:36 +# SHA256 Fingerprint: 43:48:a0:e9:44:4c:78:cb:26:5e:05:8d:5e:89:44:b4:d8:4f:96:62:bd:26:db:25:7f:89:34:a4:43:c7:01:61 +-----BEGIN CERTIFICATE----- +MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBh +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD +QTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAwMDAwMDBaMGExCzAJBgNVBAYTAlVT +MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j +b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsB +CSDMAZOnTjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97 +nh6Vfe63SKMI2tavegw5BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt +43C/dxC//AH2hdmoRBBYMql1GNXRor5H4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7P +T19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y7vrTC0LUq7dBMtoM1O/4 +gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQABo2MwYTAO +BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbR +TLtm8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUw +DQYJKoZIhvcNAQEFBQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/Esr +hMAtudXH/vTBH1jLuG2cenTnmCmrEbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg +06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIttep3Sp+dWOIrWcBAI+0tKIJF 
+PnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886UAb3LujEV0ls +YSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk +CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4= +-----END CERTIFICATE----- + +# Operating CA: DigiCert +# Issuer: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Global Root G2" +# Serial: 4293743540046975378534879503202253541 +# MD5 Fingerprint: e4:a6:8a:c8:54:ac:52:42:46:0a:fd:72:48:1b:2a:44 +# SHA1 Fingerprint: df:3c:24:f9:bf:d6:66:76:1b:26:80:73:fe:06:d1:cc:8d:4f:82:a4 +# SHA256 Fingerprint: cb:3c:cb:b7:60:31:e5:e0:13:8f:8d:d3:9a:23:f9:de:47:ff:c3:5e:43:c1:14:4c:ea:27:d4:6a:5a:b1:cb:5f +-----BEGIN CERTIFICATE----- +MIIDjjCCAnagAwIBAgIQAzrx5qcRqaC7KGSxHQn65TANBgkqhkiG9w0BAQsFADBh +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBH +MjAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVT +MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j +b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEcyMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzfNNNx7a8myaJCtSnX/RrohCgiN9RlUyfuI +2/Ou8jqJkTx65qsGGmvPrC3oXgkkRLpimn7Wo6h+4FR1IAWsULecYxpsMNzaHxmx +1x7e/dfgy5SDN67sH0NO3Xss0r0upS/kqbitOtSZpLYl6ZtrAGCSYP9PIUkY92eQ +q2EGnI/yuum06ZIya7XzV+hdG82MHauVBJVJ8zUtluNJbd134/tJS7SsVQepj5Wz +tCO7TG1F8PapspUwtP1MVYwnSlcUfIKdzXOS0xZKBgyMUNGPHgm+F6HmIcr9g+UQ +vIOlCsRnKPZzFBQ9RnbDhxSJITRNrw9FDKZJobq7nMWxM4MphQIDAQABo0IwQDAP +BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUTiJUIBiV +5uNu5g/6+rkS7QYXjzkwDQYJKoZIhvcNAQELBQADggEBAGBnKJRvDkhj6zHd6mcY +1Yl9PMWLSn/pvtsrF9+wX3N3KjITOYFnQoQj8kVnNeyIv/iPsGEMNKSuIEyExtv4 +NeF22d+mQrvHRAiGfzZ0JFrabA0UWTW98kndth/Jsw1HKj2ZL7tcu7XUIOGZX1NG +Fdtom/DzMNU+MeKNhJ7jitralj41E6Vf8PlwUHBHQRFXGU7Aj64GxJUTFy8bJZ91 +8rGOmaFvE7FBcf6IKshPECBV1/MUReXgRPTqh5Uykw7+U0b6LJ3/iyK5S9kJRaTe +pLiaWN0bfVKfjllDiIGknibVb63dDcY3fe0Dkhvld1927jyNxF1WW6LZZm6zNTfl +MrY= +-----END CERTIFICATE----- + +# Operating CA: DigiCert +# Issuer: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Global Root G3" +# Serial: 7089244469030293291760083333884364146 +# MD5 Fingerprint: f5:5d:a4:50:a5:fb:28:7e:1e:0f:0d:cc:96:57:56:ca +# SHA1 Fingerprint: 7e:04:de:89:6a:3e:66:6d:00:e6:87:d3:3f:fa:d9:3b:e8:3d:34:9e +# SHA256 Fingerprint: 31:ad:66:48:f8:10:41:38:c7:38:f3:9e:a4:32:01:33:39:3e:3a:18:cc:02:29:6e:f9:7c:2a:c9:ef:67:31:d0 +-----BEGIN CERTIFICATE----- +MIICPzCCAcWgAwIBAgIQBVVWvPJepDU1w6QP1atFcjAKBggqhkjOPQQDAzBhMQsw +CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu +ZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMzAe +Fw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVTMRUw +EwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20x +IDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEczMHYwEAYHKoZIzj0CAQYF +K4EEACIDYgAE3afZu4q4C/sLfyHS8L6+c/MzXRq8NOrexpu80JX28MzQC7phW1FG +fp4tn+6OYwwX7Adw9c+ELkCDnOg/QW07rdOkFFk2eJ0DQ+4QE2xy3q6Ip6FrtUPO +Z9wj/wMco+I+o0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAd +BgNVHQ4EFgQUs9tIpPmhxdiuNkHMEWNpYim8S8YwCgYIKoZIzj0EAwMDaAAwZQIx +AK288mw/EkrRLTnDCgmXc/SINoyIJ7vmiI1Qhadj+Z4y3maTD/HMsQmP3Wyr+mt/ +oAIwOWZbwmSNuJ5Q3KjVSaLtx9zRSX8XAbjIho9OjIgrqJqpisXRAL34VOKa5Vt8 +sycX +-----END CERTIFICATE----- + +# Operating CA: DigiCert +# Issuer: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com +# 
Subject: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert High Assurance EV Root CA" +# Serial: 3553400076410547919724730734378100087 +# MD5 Fingerprint: d4:74:de:57:5c:39:b2:d3:9c:85:83:c5:c0:65:49:8a +# SHA1 Fingerprint: 5f:b7:ee:06:33:e2:59:db:ad:0c:4c:9a:e6:d3:8f:1a:61:c7:dc:25 +# SHA256 Fingerprint: 74:31:e5:f4:c3:c1:ce:46:90:77:4f:0b:61:e0:54:40:88:3b:a9:a0:1e:d0:0b:a6:ab:d7:80:6e:d3:b1:18:cf +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j +ZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAwMFoXDTMxMTExMDAwMDAwMFowbDEL +MAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3 +LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug +RVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm ++9S75S0tMqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTW +PNt0OKRKzE0lgvdKpVMSOO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEM +xChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFB +Ik5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQNAQTXKFx01p8VdteZOE3 +hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUeh10aUAsg +EsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaA +FLE+w2kD+L9HAdSYJhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3Nec +nzyIZgYIVyHbIUf4KmeqvxgydkAQV8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6z +eM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFpmyPInngiK3BD41VHMWEZ71jF +hS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkKmNEVX58Svnw2 +Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe +vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep ++OkuE6N36B9K +-----END CERTIFICATE----- + +# Operating CA: DigiCert +# Issuer: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com +# Subject: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com +# Label: "DigiCert Trusted Root G4" +# Serial: 7451500558977370777930084869016614236 +# MD5 Fingerprint: 78:f2:fc:aa:60:1f:2f:b4:eb:c9:37:ba:53:2e:75:49 +# SHA1 Fingerprint: dd:fb:16:cd:49:31:c9:73:a2:03:7d:3f:c8:3a:4d:7d:77:5d:05:e4 +# SHA256 Fingerprint: 55:2f:7b:dc:f1:a7:af:9e:6c:e6:72:01:7f:4f:12:ab:f7:72:40:c7:8e:76:1a:c2:03:d1:d9:d2:0a:c8:99:88 +-----BEGIN CERTIFICATE----- +MIIFkDCCA3igAwIBAgIQBZsbV56OITLiOQe9p3d1XDANBgkqhkiG9w0BAQwFADBi +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3Qg +RzQwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBiMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu +Y29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQC/5pBzaN675F1KPDAiMGkz7MKnJS7JIT3y +ithZwuEppz1Yq3aaza57G4QNxDAf8xukOBbrVsaXbR2rsnnyyhHS5F/WBTxSD1If +xp4VpX6+n6lXFllVcq9ok3DCsrp1mWpzMpTREEQQLt+C8weE5nQ7bXHiLQwb7iDV +ySAdYyktzuxeTsiT+CFhmzTrBcZe7FsavOvJz82sNEBfsXpm7nfISKhmV1efVFiO +DCu3T6cw2Vbuyntd463JT17lNecxy9qTXtyOj4DatpGYQJB5w3jHtrHEtWoYOAMQ +jdjUN6QuBX2I9YI+EJFwq1WCQTLX2wRzKm6RAXwhTNS8rhsDdV14Ztk6MUSaM0C/ +CNdaSaTC5qmgZ92kJ7yhTzm1EVgX9yRcRo9k98FpiHaYdj1ZXUJ2h4mXaXpI8OCi +EhtmmnTK3kse5w5jrubU75KSOp493ADkRSWJtppEGSt+wJS00mFt6zPZxd9LBADM +fRyVw4/3IbKyEbe7f/LVjHAsQWCqsWMYRJUadmJ+9oCw++hkpjPRiQfhvbfmQ6QY +uKZ3AeEPlAwhHbJUKSWJbOUOUlFHdL4mrLZBdd56rF+NP8m800ERElvlEFDrMcXK +chYiCd98THU/Y+whX8QgUWtvsauGi0/C1kVfnSD8oR7FwI+isX4KJpn15GkvmB0t 
+9dmpsh3lGwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +hjAdBgNVHQ4EFgQU7NfjgtJxXWRM3y5nP+e6mK4cD08wDQYJKoZIhvcNAQEMBQAD +ggIBALth2X2pbL4XxJEbw6GiAI3jZGgPVs93rnD5/ZpKmbnJeFwMDF/k5hQpVgs2 +SV1EY+CtnJYYZhsjDT156W1r1lT40jzBQ0CuHVD1UvyQO7uYmWlrx8GnqGikJ9yd ++SeuMIW59mdNOj6PWTkiU0TryF0Dyu1Qen1iIQqAyHNm0aAFYF/opbSnr6j3bTWc +fFqK1qI4mfN4i/RN0iAL3gTujJtHgXINwBQy7zBZLq7gcfJW5GqXb5JQbZaNaHqa +sjYUegbyJLkJEVDXCLG4iXqEI2FCKeWjzaIgQdfRnGTZ6iahixTXTBmyUEFxPT9N +cCOGDErcgdLMMpSEDQgJlxxPwO5rIHQw0uA5NBCFIRUBCOhVMt5xSdkoF1BN5r5N +0XWs0Mr7QbhDparTwwVETyw2m+L64kW4I1NsBm9nVX9GtUw/bihaeSbSpKhil9Ie +4u1Ki7wb/UdKDd9nZn6yW0HQO+T0O/QEY+nvwlQAUaCKKsnOeMzV6ocEGLPOr0mI +r/OSmbaz5mEP0oUA51Aa5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1 +/YldvIViHTLSoCtU7ZpXwdv6EM8Zt4tKG48BtieVU+i2iW1bvGjUI+iLUaJW+fCm +gKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP82Z+ +-----END CERTIFICATE----- + +# Operating CA: Entrust Datacard +# Issuer: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc. +# Subject: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc. +# Label: "Entrust Root Certification Authority" +# Serial: 1164660820 +# MD5 Fingerprint: d6:a5:c3:ed:5d:dd:3e:00:c1:3d:87:92:1f:1d:3f:e4 +# SHA1 Fingerprint: b3:1e:b1:b7:40:e3:6c:84:02:da:dc:37:d4:4d:f5:d4:67:49:52:f9 +# SHA256 Fingerprint: 73:c1:76:43:4f:1b:c6:d5:ad:f4:5b:0e:76:e7:27:28:7c:8d:e5:76:16:c1:e6:e6:14:1a:2b:2c:bc:7d:8e:4c +-----BEGIN CERTIFICATE----- +MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0 +Lm5ldC9DUFMgaXMgaW5jb3Jwb3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMW +KGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsGA1UEAxMkRW50cnVzdCBSb290IENl +cnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0MloXDTI2MTEyNzIw +NTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMTkw +NwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSBy +ZWZlcmVuY2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNV +BAMTJEVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBALaVtkNC+sZtKm9I35RMOVcF7sN5EUFo +Nu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYszA9u3g3s+IIRe7bJWKKf4 +4LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOwwCj0Yzfv9 +KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGI +rb68j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi +94DkZfs0Nw4pgHBNrziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOB +sDCBrTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAi +gA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1MzQyWjAfBgNVHSMEGDAWgBRo +kORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DHhmak8fdLQ/uE +vW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA +A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9t +O1KzKtvn1ISMY/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6Zua +AGAT/3B+XxFNSRuzFVJ7yVTav52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP +9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTSW3iDVuycNsMm4hH2Z0kdkquM++v/ +eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0tHuu2guQOHXvgR1m +0vdXcDazv/wor3ElhVsT/h5/WrQ8 +-----END CERTIFICATE----- + +# Operating CA: Entrust Datacard +# Issuer: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only +# Subject: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. 
- for authorized use only +# Label: "Entrust Root Certification Authority - EC1" +# Serial: 51543124481930649114116133369 +# MD5 Fingerprint: b6:7e:1d:f0:58:c5:49:6c:24:3b:3d:ed:98:18:ed:bc +# SHA1 Fingerprint: 20:d8:06:40:df:9b:25:f5:12:25:3a:11:ea:f7:59:8a:eb:14:b5:47 +# SHA256 Fingerprint: 02:ed:0e:b2:8c:14:da:45:16:5c:56:67:91:70:0d:64:51:d7:fb:56:f0:b2:ab:1d:3b:8e:b0:70:e5:6e:df:f5 +-----BEGIN CERTIFICATE----- +MIIC+TCCAoCgAwIBAgINAKaLeSkAAAAAUNCR+TAKBggqhkjOPQQDAzCBvzELMAkG +A1UEBhMCVVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3 +d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDEyIEVu +dHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEzMDEGA1UEAxMq +RW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRUMxMB4XDTEy +MTIxODE1MjUzNloXDTM3MTIxODE1NTUzNlowgb8xCzAJBgNVBAYTAlVTMRYwFAYD +VQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3QubmV0 +L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxMiBFbnRydXN0LCBJbmMuIC0g +Zm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMzAxBgNVBAMTKkVudHJ1c3QgUm9vdCBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEVDMTB2MBAGByqGSM49AgEGBSuBBAAi +A2IABIQTydC6bUF74mzQ61VfZgIaJPRbiWlH47jCffHyAsWfoPZb1YsGGYZPUxBt +ByQnoaD41UcZYUx9ypMn6nQM72+WCf5j7HBdNq1nd67JnXxVRDqiY1Ef9eNi1KlH +Bz7MIKNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O +BBYEFLdj5xrdjekIplWDpOBqUEFlEUJJMAoGCCqGSM49BAMDA2cAMGQCMGF52OVC +R98crlOZF7ZvHH3hvxGU0QOIdeSNiaSKd0bebWHvAvX7td/M/k7//qnmpwIwW5nX +hTcGtXsI/esni0qU+eH6p44mCOh8kmhtc9hvJqwhAriZtyZBWyVgrtBIGu4G +-----END CERTIFICATE----- + +# Operating CA: Entrust Datacard +# Issuer: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only +# Subject: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. 
- for authorized use only +# Label: "Entrust Root Certification Authority - G2" +# Serial: 1246989352 +# MD5 Fingerprint: 4b:e2:c9:91:96:65:0c:f4:0e:5a:93:92:a0:0a:fe:b2 +# SHA1 Fingerprint: 8c:f4:27:fd:79:0c:3a:d1:66:06:8d:e8:1e:57:ef:bb:93:22:72:d4 +# SHA256 Fingerprint: 43:df:57:74:b0:3e:7f:ef:5f:e4:0d:93:1a:7b:ed:f1:bb:2e:6b:42:73:8c:4e:6d:38:41:10:3d:3a:a7:f3:39 +-----BEGIN CERTIFICATE----- +MIIEPjCCAyagAwIBAgIESlOMKDANBgkqhkiG9w0BAQsFADCBvjELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50 +cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3Qs +IEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVz +dCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIwHhcNMDkwNzA3MTcy +NTU0WhcNMzAxMjA3MTc1NTU0WjCBvjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUVu +dHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwt +dGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0 +aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5IC0gRzIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQC6hLZy254Ma+KZ6TABp3bqMriVQRrJ2mFOWHLP/vaCeb9zYQYKpSfYs1/T +RU4cctZOMvJyig/3gxnQaoCAAEUesMfnmr8SVycco2gvCoe9amsOXmXzHHfV1IWN +cCG0szLni6LVhjkCsbjSR87kyUnEO6fe+1R9V77w6G7CebI6C1XiUJgWMhNcL3hW +wcKUs/Ja5CeanyTXxuzQmyWC48zCxEXFjJd6BmsqEZ+pCm5IO2/b1BEZQvePB7/1 +U1+cPvQXLOZprE4yTGJ36rfo5bs0vBmLrpxR57d+tVOxMyLlbc9wPBr64ptntoP0 +jaWvYkxN4FisZDQSA/i2jZRjJKRxAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqciZ60B7vfec7aVHUbI2fkBJmqzAN +BgkqhkiG9w0BAQsFAAOCAQEAeZ8dlsa2eT8ijYfThwMEYGprmi5ZiXMRrEPR9RP/ +jTkrwPK9T3CMqS/qF8QLVJ7UG5aYMzyorWKiAHarWWluBh1+xLlEjZivEtRh2woZ +Rkfz6/djwUAFQKXSt/S1mja/qYh2iARVBCuch38aNzx+LaUa2NSJXsq9rD1s2G2v +1fN2D807iDginWyTmsQ9v4IbZT+mD12q/OWyFcq1rca8PdCE6OoGcrBNOTJ4vz4R +nAuknZoh8/CbCzB428Hch0P+vGOaysXCHMnHjf87ElgI5rY97HosTvuDls4MPGmH +VHOkc8KT/1EQrBVUAdj8BbGJoX90g5pJ19xOe4pIb4tF9g== +-----END CERTIFICATE----- + +# Operating CA: Entrust Datacard +# Issuer: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited +# Subject: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. 
(limits liab.)/(c) 1999 Entrust.net Limited +# Label: "Entrust.net Premium 2048 Secure Server CA" +# Serial: 946069240 +# MD5 Fingerprint: ee:29:31:bc:32:7e:9a:e6:e8:b5:f7:51:b4:34:71:90 +# SHA1 Fingerprint: 50:30:06:09:1d:97:d4:f5:ae:39:f7:cb:e7:92:7d:7d:65:2d:34:31 +# SHA256 Fingerprint: 6d:c4:71:72:e0:1c:bc:b0:bf:62:58:0d:89:5f:e2:b8:ac:9a:d4:f8:73:80:1e:0c:10:b9:c8:37:d2:1e:b1:77 +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIEOGPe+DANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChML +RW50cnVzdC5uZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBp +bmNvcnAuIGJ5IHJlZi4gKGxpbWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5 +IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNVBAMTKkVudHJ1c3QubmV0IENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQxNzUwNTFaFw0yOTA3 +MjQxNDE1MTJaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3d3d3 +LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxp +YWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEG +A1UEAxMqRW50cnVzdC5uZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgp +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArU1LqRKGsuqjIAcVFmQq +K0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOLGp18EzoOH1u3Hs/lJBQe +sYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSrhRSGlVuX +MlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVT +XTzWnLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/ +HoZdenoVve8AjhUiVBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH +4QIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV +HQ4EFgQUVeSB0RGAvtiJuQijMfmhJAkWuXAwDQYJKoZIhvcNAQEFBQADggEBADub +j1abMOdTmXx6eadNl9cZlZD7Bh/KM3xGY4+WZiT6QBshJ8rmcnPyT/4xmf3IDExo +U8aAghOY+rat2l098c5u9hURlIIM7j+VrxGrD9cv3h8Dj1csHsm7mhpElesYT6Yf +zX1XEC+bBAlahLVu2B064dae0Wx5XnkcFMXj0EyTO2U87d89vqbllRrDtRnDvV5b +u/8j72gZyxKTJ1wDLW8w0B62GqzeWvfRqqgnpv55gcR5mTNXuhKwqeBCbJPKVt7+ +bYQLCIt+jerXmCHG8+c8eS9enNFMFY3h7CI3zJpDC5fcgJCNs2ebb0gIFVbPv/Er +fF6adulZkMV8gzURZVE= +-----END CERTIFICATE----- + +# Operating CA: Entrust Datacard +# Issuer: CN=AffirmTrust Commercial O=AffirmTrust +# Subject: CN=AffirmTrust Commercial O=AffirmTrust +# Label: "AffirmTrust Commercial" +# Serial: 8608355977964138876 +# MD5 Fingerprint: 82:92:ba:5b:ef:cd:8a:6f:a6:3d:55:f9:84:f6:d6:b7 +# SHA1 Fingerprint: f9:b5:b6:32:45:5f:9c:be:ec:57:5f:80:dc:e9:6e:2c:c7:b2:78:b7 +# SHA256 Fingerprint: 03:76:ab:1d:54:c5:f9:80:3c:e4:b2:e2:01:a0:ee:7e:ef:7b:57:b6:36:e8:a9:3c:9b:8d:48:60:c9:6f:5f:a7 +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz +dCBDb21tZXJjaWFsMB4XDTEwMDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDEL +MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp +cm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6EqdbDuKP +Hx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yr +ba0F8PrVC8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPAL +MeIrJmqbTFeurCA+ukV6BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1 +yHp52UKqK39c/s4mT6NmgTWvRLpUHhwwMmWd5jyTXlBOeuM61G7MGvv50jeuJCqr +VwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNVHQ4EFgQUnZPGU4teyq8/ +nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ +KoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYG +XUPGhi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNj +vbz4YYCanrHOQnDiqX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivt +Z8SOyUOyXGsViQK8YvxO8rUzqrJv0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9g +N53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0khsUlHRUe072o0EclNmsxZt9YC 
+nlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8= +-----END CERTIFICATE----- + +# Operating CA: Entrust Datacard +# Issuer: CN=AffirmTrust Networking O=AffirmTrust +# Subject: CN=AffirmTrust Networking O=AffirmTrust +# Label: "AffirmTrust Networking" +# Serial: 8957382827206547757 +# MD5 Fingerprint: 42:65:ca:be:01:9a:9a:4c:a9:8c:41:49:cd:c0:d5:7f +# SHA1 Fingerprint: 29:36:21:02:8b:20:ed:02:f5:66:c5:32:d1:d6:ed:90:9f:45:00:2f +# SHA256 Fingerprint: 0a:81:ec:5a:92:97:77:f1:45:90:4a:f3:8d:5d:50:9f:66:b5:e2:c5:8f:cd:b5:31:05:8b:0e:17:f3:f0:b4:1b +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz +dCBOZXR3b3JraW5nMB4XDTEwMDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDEL +MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp +cm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SEHi3y +YJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbua +kCNrmreIdIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRL +QESxG9fhwoXA3hA/Pe24/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp +6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gbh+0t+nvujArjqWaJGctB+d1ENmHP4ndG +yH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNVHQ4EFgQUBx/S55zawm6i +QLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ +KoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfO +tDIuUFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzu +QY0x2+c06lkh1QF612S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZ +Lgo/bNjR9eUJtGxUAArgFU2HdW23WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4u +olu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9/ZFvgrG+CJPbFEfxojfHRZ48 +x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s= +-----END CERTIFICATE----- + +# Operating CA: Entrust Datacard +# Issuer: CN=AffirmTrust Premium O=AffirmTrust +# Subject: CN=AffirmTrust Premium O=AffirmTrust +# Label: "AffirmTrust Premium" +# Serial: 7893706540734352110 +# MD5 Fingerprint: c4:5d:0e:48:b6:ac:28:30:4e:0a:bc:f9:38:16:87:57 +# SHA1 Fingerprint: d8:a6:33:2c:e0:03:6f:b1:85:f6:63:4f:7d:6a:06:65:26:32:28:27 +# SHA256 Fingerprint: 70:a7:3f:7f:37:6b:60:07:42:48:90:45:34:b1:14:82:d5:bf:0e:69:8e:cc:49:8d:f5:25:77:eb:f2:e9:3b:9a +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVz +dCBQcmVtaXVtMB4XDTEwMDEyOTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkG +A1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1U +cnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxBLf +qV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtnBKAQ +JG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ ++jjeRFcV5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrS +s8PhaJyJ+HoAVt70VZVs+7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5 +HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmdGPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d7 +70O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5Rp9EixAqnOEhss/n/fauG +V+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NIS+LI+H+S +qHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S +5u046uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4Ia +C1nEWTJ3s7xgaVY5/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TX +OwF0lkLgAOIua+rF7nKsu7/+6qqo+Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYE +FJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ +BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByvMiPIs0laUZx2 +KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg 
+Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B +8OWycvpEgjNC6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQ +MKSOyARiqcTtNd56l+0OOF6SL5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc +0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK+4w1IX2COPKpVJEZNZOUbWo6xbLQ +u4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmVBtWVyuEklut89pMF +u+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFgIxpH +YoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8 +GKa1qF60g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaO +RtGdFNrHF+QFlozEJLUbzxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6e +KeC2uAloGRwYQw== +-----END CERTIFICATE----- + +# Operating CA: Entrust Datacard +# Issuer: CN=AffirmTrust Premium ECC O=AffirmTrust +# Subject: CN=AffirmTrust Premium ECC O=AffirmTrust +# Label: "AffirmTrust Premium ECC" +# Serial: 8401224907861490260 +# MD5 Fingerprint: 64:b0:09:55:cf:b1:d5:99:e2:be:13:ab:a6:5d:ea:4d +# SHA1 Fingerprint: b8:23:6b:00:2f:1d:16:86:53:01:55:6c:11:a4:37:ca:eb:ff:c3:bb +# SHA256 Fingerprint: bd:71:fd:f6:da:97:e4:cf:62:d1:64:7a:dd:25:81:b0:7d:79:ad:f8:39:7e:b4:ec:ba:9c:5e:84:88:82:14:23 +-----BEGIN CERTIFICATE----- +MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMC +VVMxFDASBgNVBAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQ +cmVtaXVtIEVDQzAeFw0xMDAxMjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJ +BgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1UcnVzdDEgMB4GA1UEAwwXQWZmaXJt +VHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQNMF4bFZ0D +0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQN8O9 +ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0G +A1UdDgQWBBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4G +A1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/Vs +aobgxCd05DhT1wV/GzTjxi+zygk8N53X57hG8f2h4nECMEJZh0PUUd+60wkyWs6I +flc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKMeQ== +-----END CERTIFICATE----- + +# Operating CA: GlobalSign +# Issuer: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA +# Subject: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA +# Label: "GlobalSign Root CA" +# Serial: 4835703278459707669005204 +# MD5 Fingerprint: 3e:45:52:15:09:51:92:e1:b7:5d:37:9f:b1:87:29:8a +# SHA1 Fingerprint: b1:bc:96:8b:d4:f4:9d:62:2a:a8:9a:81:f2:15:01:52:a4:1d:82:9c +# SHA256 Fingerprint: eb:d4:10:40:e4:bb:3e:c7:42:c9:e3:81:d3:1e:f2:a4:1a:48:b6:68:5c:96:e7:ce:f3:c1:df:6c:d4:33:1c:99 +-----BEGIN CERTIFICATE----- +MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkG +A1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jv +b3QgQ0ExGzAZBgNVBAMTEkdsb2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAw +MDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9i +YWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYDVQQDExJHbG9iYWxT +aWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDaDuaZ +jc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavp +xy0Sy6scTHAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp +1Wrjsok6Vjk4bwY8iGlbKk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdG +snUOhugZitVtbNV4FpWi6cgKOOvyJBNPc1STE4U6G7weNLWLBYy5d4ux2x8gkasJ +U26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrXgzT/LCrBbBlDSgeF59N8 +9iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E +BTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0B +AQUFAAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOz +yj1hTdNGCbM+w6DjY1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE +38NflNUVyRRBnMRddWQVDf9VMOyGj/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymP +AbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhHhm4qxFYxldBniYUr+WymXUad 
+DKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveCX4XSQRjbgbME +HMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A== +-----END CERTIFICATE----- + +# Operating CA: GlobalSign +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3 +# Label: "GlobalSign Root CA - R3" +# Serial: 4835703278459759426209954 +# MD5 Fingerprint: c5:df:b8:49:ca:05:13:55:ee:2d:ba:1a:c3:3e:b0:28 +# SHA1 Fingerprint: d6:9b:56:11:48:f0:1c:77:c5:45:78:c1:09:26:df:5b:85:69:76:ad +# SHA256 Fingerprint: cb:b5:22:d7:b7:f1:27:ad:6a:01:13:86:5b:df:1c:d4:10:2e:7d:07:59:af:63:5a:7c:f4:72:0d:c9:63:c5:3b +-----BEGIN CERTIFICATE----- +MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4G +A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNp +Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4 +MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMzETMBEG +A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWtiHL8 +RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsT +gHeMCOFJ0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmm +KPZpO/bLyCiR5Z2KYVc3rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zd +QQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjlOCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZ +XriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2xmmFghcCAwEAAaNCMEAw +DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI/wS3+o +LkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZU +RUm7lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMp +jjM5RcOO5LlXbKr8EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK +6fBdRoyV3XpYKBovHd7NADdBj+1EbddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQX +mcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18YIvDQVETI53O9zJrlAGomecs +Mx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7rkpeDMdmztcpH +WD9f +-----END CERTIFICATE----- + +# Operating CA: GlobalSign +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5 +# Label: "GlobalSign ECC Root CA - R5" +# Serial: 32785792099990507226680698011560947931244 +# MD5 Fingerprint: 9f:ad:3b:1c:02:1e:8a:ba:17:74:38:81:0c:a2:bc:08 +# SHA1 Fingerprint: 1f:24:c6:30:cd:a4:18:ef:20:69:ff:ad:4f:dd:5f:46:3a:1b:69:aa +# SHA256 Fingerprint: 17:9f:bc:14:8a:3d:d0:0f:d2:4e:a1:34:58:cc:43:bf:a7:f5:9c:81:82:d7:83:a5:13:f6:eb:ec:10:0c:89:24 +-----BEGIN CERTIFICATE----- +MIICHjCCAaSgAwIBAgIRYFlJ4CYuu1X5CneKcflK2GwwCgYIKoZIzj0EAwMwUDEk +MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpH +bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX +DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD +QSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu +MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAER0UOlvt9Xb/pOdEh+J8LttV7HpI6SFkc +8GIxLcB6KP4ap1yztsyX50XUWPrRd21DosCHZTQKH3rd6zwzocWdTaRvQZU4f8ke +hOvRnkmSh5SHDDqFSmafnVmTTZdhBoZKo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUPeYpSJvqB8ohREom3m7e0oPQn1kwCgYI +KoZIzj0EAwMDaAAwZQIxAOVpEslu28YxuglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg +515dTguDnFt2KaAJJiFqYgIwcdK1j1zqO+F4CYWodZI7yFz9SO8NdCKoCOJuxUnO +xwy8p2Fp8fc74SrL+SvzZpA3 +-----END CERTIFICATE----- + +# Operating CA: GlobalSign +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R6 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R6 +# Label: "GlobalSign Root CA - R6" +# Serial: 1417766617973444989252670301619537 +# MD5 Fingerprint: 4f:dd:07:e4:d4:22:64:39:1e:0c:37:42:ea:d1:c6:ae +# SHA1 Fingerprint: 
80:94:64:0e:b5:a7:a1:ca:11:9c:1f:dd:d5:9f:81:02:63:a7:fb:d1 +# SHA256 Fingerprint: 2c:ab:ea:fe:37:d0:6c:a2:2a:ba:73:91:c0:03:3d:25:98:29:52:c4:53:64:73:49:76:3a:3a:b5:ad:6c:cf:69 +-----BEGIN CERTIFICATE----- +MIIFgzCCA2ugAwIBAgIORea7A4Mzw4VlSOb/RVEwDQYJKoZIhvcNAQEMBQAwTDEg +MB4GA1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjYxEzARBgNVBAoTCkdsb2Jh +bFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMTQxMjEwMDAwMDAwWhcNMzQx +MjEwMDAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSNjET +MBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCAiIwDQYJ +KoZIhvcNAQEBBQADggIPADCCAgoCggIBAJUH6HPKZvnsFMp7PPcNCPG0RQssgrRI +xutbPK6DuEGSMxSkb3/pKszGsIhrxbaJ0cay/xTOURQh7ErdG1rG1ofuTToVBu1k +ZguSgMpE3nOUTvOniX9PeGMIyBJQbUJmL025eShNUhqKGoC3GYEOfsSKvGRMIRxD +aNc9PIrFsmbVkJq3MQbFvuJtMgamHvm566qjuL++gmNQ0PAYid/kD3n16qIfKtJw +LnvnvJO7bVPiSHyMEAc4/2ayd2F+4OqMPKq0pPbzlUoSB239jLKJz9CgYXfIWHSw +1CM69106yqLbnQneXUQtkPGBzVeS+n68UARjNN9rkxi+azayOeSsJDa38O+2HBNX +k7besvjihbdzorg1qkXy4J02oW9UivFyVm4uiMVRQkQVlO6jxTiWm05OWgtH8wY2 +SXcwvHE35absIQh1/OZhFj931dmRl4QKbNQCTXTAFO39OfuD8l4UoQSwC+n+7o/h +bguyCLNhZglqsQY6ZZZZwPA1/cnaKI0aEYdwgQqomnUdnjqGBQCe24DWJfncBZ4n +WUx2OVvq+aWh2IMP0f/fMBH5hc8zSPXKbWQULHpYT9NLCEnFlWQaYw55PfWzjMpY +rZxCRXluDocZXFSxZba/jJvcE+kNb7gu3GduyYsRtYQUigAZcIN5kZeR1Bonvzce +MgfYFGM8KEyvAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBSubAWjkxPioufi1xzWx/B/yGdToDAfBgNVHSMEGDAWgBSu +bAWjkxPioufi1xzWx/B/yGdToDANBgkqhkiG9w0BAQwFAAOCAgEAgyXt6NH9lVLN +nsAEoJFp5lzQhN7craJP6Ed41mWYqVuoPId8AorRbrcWc+ZfwFSY1XS+wc3iEZGt +Ixg93eFyRJa0lV7Ae46ZeBZDE1ZXs6KzO7V33EByrKPrmzU+sQghoefEQzd5Mr61 +55wsTLxDKZmOMNOsIeDjHfrYBzN2VAAiKrlNIC5waNrlU/yDXNOd8v9EDERm8tLj +vUYAGm0CuiVdjaExUd1URhxN25mW7xocBFymFe944Hn+Xds+qkxV/ZoVqW/hpvvf +cDDpw+5CRu3CkwWJ+n1jez/QcYF8AOiYrg54NMMl+68KnyBr3TsTjxKM4kEaSHpz +oHdpx7Zcf4LIHv5YGygrqGytXm3ABdJ7t+uA/iU3/gKbaKxCXcPu9czc8FB10jZp +nOZ7BN9uBmm23goJSFmH63sUYHpkqmlD75HHTOwY3WzvUy2MmeFe8nI+z1TIvWfs +pA9MRf/TuTAjB0yPEL+GltmZWrSZVxykzLsViVO6LAUP5MSeGbEYNNVMnbrt9x+v +JJUEeKgDu+6B5dpffItKoZB0JaezPkvILFa9x8jvOOJckvB595yEunQtYQEgfn7R +8k8HWV+LLUNS60YMlOH1Zkd5d9VUWx+tJDfLRVpOoERIyNiwmcUVhAn21klJwGW4 +5hpxbqCo8YLoRT5s1gLXCmeDBVrJpBA= +-----END CERTIFICATE----- + +# Note: "GlobalSign Root CA - R7" not added on purpose. It is P-521. + +# Operating CA: GoDaddy +# Issuer: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc. +# Subject: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc. 
+# Label: "Go Daddy Root Certificate Authority - G2" +# Serial: 0 +# MD5 Fingerprint: 80:3a:bc:22:c1:e6:fb:8d:9b:3b:27:4a:32:1b:9a:01 +# SHA1 Fingerprint: 47:be:ab:c9:22:ea:e8:0e:78:78:34:62:a7:9f:45:c2:54:fd:e6:8b +# SHA256 Fingerprint: 45:14:0b:32:47:eb:9c:c8:c5:b4:f0:d7:b5:30:91:f7:32:92:08:9e:6e:5a:63:e2:74:9d:d3:ac:a9:19:8e:da +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT +EUdvRGFkZHkuY29tLCBJbmMuMTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRp +ZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIz +NTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQH +EwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8GA1UE +AxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKD +E6bFIEMBO4Tx5oVJnyfq9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH +/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD+qK+ihVqf94Lw7YZFAXK6sOoBJQ7Rnwy +DfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutdfMh8+7ArU6SSYmlRJQVh +GkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMlNAJWJwGR +tDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEA +AaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE +FDqahQcQZyi27/a9BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmX +WWcDYfF+OwYxdS2hII5PZYe096acvNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu +9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r5N9ss4UXnT3ZJE95kTXWXwTr +gIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYVN8Gb5DKj7Tjo +2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO +LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI +4uJEvlz36hz1 +-----END CERTIFICATE----- + +# Operating CA: GoDaddy +# Issuer: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc. +# Subject: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc. 
+# Label: "Starfield Root Certificate Authority - G2" +# Serial: 0 +# MD5 Fingerprint: d6:39:81:c6:52:7e:96:69:fc:fc:ca:66:ed:05:f2:96 +# SHA1 Fingerprint: b5:1c:06:7c:ee:2b:0c:3d:f8:55:ab:2d:92:f4:fe:39:d4:e7:0f:0e +# SHA256 Fingerprint: 2c:e1:cb:0b:f9:d2:f9:e1:02:99:3f:be:21:51:52:c3:b2:dd:0c:ab:de:1c:68:e5:31:9b:83:91:54:db:b7:f5 +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT +HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVs +ZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAw +MFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6 +b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQgVGVj +aG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZp +Y2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAL3twQP89o/8ArFvW59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMg +nLRJdzIpVv257IzdIvpy3Cdhl+72WoTsbhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1 +HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNkN3mSwOxGXn/hbVNMYq/N +Hwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7NfZTD4p7dN +dloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0 +HZbUJtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO +BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0G +CSqGSIb3DQEBCwUAA4IBAQARWfolTwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjU +sHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx4mcujJUDJi5DnUox9g61DLu3 +4jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUwF5okxBDgBPfg +8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K +pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1 +mMpYjn0q7pBZc2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0 +-----END CERTIFICATE----- + +# Operating CA: GoDaddy +# Issuer: O=Starfield Technologies, Inc. OU=Starfield Class 2 Certification Authority +# Subject: O=Starfield Technologies, Inc. 
OU=Starfield Class 2 Certification Authority +# Label: "Starfield Class 2 CA" +# Serial: 0 +# MD5 Fingerprint: 32:4a:4b:bb:c8:63:69:9b:be:74:9a:c6:dd:1d:46:24 +# SHA1 Fingerprint: ad:7e:1c:28:b0:64:ef:8f:60:03:40:20:14:c3:d0:e3:37:0e:b5:8a +# SHA256 Fingerprint: 14:65:fa:20:53:97:b8:76:fa:a6:f0:a9:95:8e:55:90:e4:0f:cc:7f:aa:4f:b7:c2:c8:67:75:21:fb:5f:b6:58 +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzEl +MCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMp +U3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQw +NjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBoMQswCQYDVQQGEwJVUzElMCMGA1UE +ChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZp +ZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqGSIb3 +DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf +8MOh2tTYbitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN ++lq2cwQlZut3f+dZxkqZJRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0 +X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVmepsZGD3/cVE8MC5fvj13c7JdBmzDI1aa +K4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSNF4Azbl5KXZnJHoe0nRrA +1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HFMIHCMB0G +A1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fR +zt0fhvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0 +YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBD +bGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8w +DQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGsafPzWdqbAYcaT1epoXkJKtv3 +L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLMPUxA2IGvd56D +eruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl +xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynp +VSJYACPq4xJDKVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEY +WQPJIrSPnNVeKtelttQKbfi3QBFGmh95DmK/D5fs4C8fF5Q= +-----END CERTIFICATE----- + +# Operating CA: GoDaddy +# Issuer: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority +# Subject: O=The Go Daddy Group, Inc. 
OU=Go Daddy Class 2 Certification Authority +# Label: "Go Daddy Class 2 CA" +# Serial: 0 +# MD5 Fingerprint: 91:de:06:25:ab:da:fd:32:17:0c:bb:25:17:2a:84:67 +# SHA1 Fingerprint: 27:96:ba:e6:3f:18:01:e2:77:26:1b:a0:d7:77:70:02:8f:20:ee:e4 +# SHA256 Fingerprint: c3:84:6b:f2:4b:9e:93:ca:64:27:4c:0e:c6:7c:1e:cc:5e:02:4f:fc:ac:d2:d7:40:19:35:0e:81:fe:54:6a:e4 +-----BEGIN CERTIFICATE----- +MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEh +MB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBE +YWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3 +MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkGA1UEBhMCVVMxITAfBgNVBAoTGFRo +ZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28gRGFkZHkgQ2xhc3Mg +MiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQADggEN +ADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCA +PVYYYwhv2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6w +wdhFJ2+qN1j3hybX2C32qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXi +EqITLdiOr18SPaAIBQi2XKVlOARFmR6jYGB0xUGlcmIbYsUfb18aQr4CUWWoriMY +avx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmYvLEHZ6IVDd2gWMZEewo+ +YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0OBBYEFNLE +sNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h +/t2oatTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5 +IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQAD +ggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wimPQoZ+YeAEW5p5JYXMP80kWNy +OO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKtI3lpjbi2Tc7P +TMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ +HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mER +dEr/VxqHD3VILs9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5Cuf +ReYNnyicsbkqWletNw+vHX/bvZ8= +-----END CERTIFICATE----- + +# Operating CA: Sectigo +# Issuer: CN=AAA Certificate Services O=Comodo CA Limited +# Subject: CN=AAA Certificate Services O=Comodo CA Limited +# Label: "Comodo AAA Services root" +# Serial: 1 +# MD5 Fingerprint: 49:79:04:b0:eb:87:19:ac:47:b0:bc:11:51:9b:74:d0 +# SHA1 Fingerprint: d1:eb:23:a4:6d:17:d6:8f:d9:25:64:c2:f1:f1:60:17:64:d8:e3:49 +# SHA256 Fingerprint: d7:a7:a0:fb:5d:7e:27:31:d7:71:e9:48:4e:bc:de:f7:1d:5f:0c:3e:0a:29:48:78:2b:c8:3e:e0:ea:69:9e:f4 +-----BEGIN CERTIFICATE----- +MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEb +MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow +GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmlj +YXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAwMFoXDTI4MTIzMTIzNTk1OVowezEL +MAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE +BwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNVBAMM +GEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQua +BtDFcCLNSS1UY8y2bmhGC1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe +3M/vg4aijJRPn2jymJBGhCfHdr/jzDUsi14HZGWCwEiwqJH5YZ92IFCokcdmtet4 +YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszWY19zjNoFmag4qMsXeDZR +rOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjHYpy+g8cm +ez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQU +oBEKIz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF +MAMBAf8wewYDVR0fBHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20v +QUFBQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29t +b2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2VzLmNybDANBgkqhkiG9w0BAQUF +AAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm7l3sAg9g1o1Q 
+GE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz +Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2 +G9w84FoVxp7Z8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsi +l2D4kF501KKaU73yqWjgom7C12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3 +smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg== +-----END CERTIFICATE----- + +# Operating CA: Sectigo +# Issuer: CN=COMODO Certification Authority O=COMODO CA Limited +# Subject: CN=COMODO Certification Authority O=COMODO CA Limited +# Label: "COMODO Certification Authority" +# Serial: 43390818032842818540635488309124489234 +# MD5 Fingerprint: 20:E7:4F:82:C2:7E:94:80:34:82:8A:13:A9:17:1D:97 +# SHA1 Fingerprint EE:86:93:87:FF:FD:83:49:AB:5A:D1:43:22:58:87:89:A4:57:B0:12 +# SHA256 Fingerprint: 1A:0D:20:44:5D:E5:BA:18:62:D1:9E:F8:80:85:8C:BC:E5:01:02:B3:6E:8F:0A:04:0C:3C:69:E7:45:22:FE:6E +-----BEGIN CERTIFICATE----- +MIID0DCCArigAwIBAgIQIKTEf93f4cdTYwcTiHdgEjANBgkqhkiG9w0BAQUFADCB +gTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G +A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNV +BAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0xMTAxMDEwMDAw +MDBaFw0zMDEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEbMBkGA1UECBMSR3Jl +YXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFDT01P +RE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3 +UcEbVASY06m/weaKXTuH+7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI +2GqGd0S7WWaXUF601CxwRM/aN5VCaTwwxHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8 +Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV4EajcNxo2f8ESIl33rXp ++2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA1KGzqSX+ +DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5O +nKVIrLsm9wIDAQABo0IwQDAdBgNVHQ4EFgQUC1jli8ZMFTekQKkwqSG+RzZaVv8w +DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQAD +ggEBAC/JxBwHO89hAgCx2SFRdXIDMLDEFh9sAIsQrK/xR9SuEDwMGvjUk2ysEDd8 +t6aDZK3N3w6HM503sMZ7OHKx8xoOo/lVem0DZgMXlUrxsXrfViEGQo+x06iF3u6X +HWLrp+cxEmbDD6ZLLkGC9/3JG6gbr+48zuOcrigHoSybJMIPIyaDMouGDx8rEkYl +Fo92kANr3ryqImhrjKGsKxE5pttwwn1y6TPn/CbxdFqR5p2ErPioBhlG5qfpqjQi +pKGfeq23sqSaM4hxAjwu1nqyH6LKwN0vEJT9s4yEIHlG1QXUEOTS22RPuFvuG8Ug +R1uUq27UlTMdphVx8fiUylQ5PsE= +-----END CERTIFICATE----- + +# Operating CA: Sectigo +# Issuer: CN=COMODO ECC Certification Authority O=COMODO CA Limited +# Subject: CN=COMODO ECC Certification Authority O=COMODO CA Limited +# Label: "COMODO ECC Certification Authority" +# Serial: 41578283867086692638256921589707938090 +# MD5 Fingerprint: 7c:62:ff:74:9d:31:53:5e:68:4a:d5:78:aa:1e:bf:23 +# SHA1 Fingerprint: 9f:74:4e:9f:2b:4d:ba:ec:0f:31:2c:50:b6:56:3b:8e:2d:93:c3:11 +# SHA256 Fingerprint: 17:93:92:7a:06:14:54:97:89:ad:ce:2f:8f:34:f7:f0:b6:6d:0f:3a:e3:a3:b8:4d:21:ec:15:db:ba:4f:ad:c7 +-----BEGIN CERTIFICATE----- +MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTEL +MAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE +BxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMT +IkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwMzA2MDAw +MDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdy +ZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09N +T0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSR +FtSrYpn1PlILBs5BAH+X4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0J +cfRK9ChQtP6IHG4/bC8vCVlbpVsLM5niwz2J+Wos77LTBumjQjBAMB0GA1UdDgQW +BBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ 
+BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VGFAkK+qDm +fQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdv +GDeAU/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY= +-----END CERTIFICATE----- + +# Operating CA: Sectigo +# Issuer: CN=COMODO RSA Certification Authority O=COMODO CA Limited +# Subject: CN=COMODO RSA Certification Authority O=COMODO CA Limited +# Label: "COMODO RSA Certification Authority" +# Serial: 101909084537582093308941363524873193117 +# MD5 Fingerprint: 1b:31:b0:71:40:36:cc:14:36:91:ad:c4:3e:fd:ec:18 +# SHA1 Fingerprint: af:e5:d2:44:a8:d1:19:42:30:ff:47:9f:e2:f8:97:bb:cd:7a:8c:b4 +# SHA256 Fingerprint: 52:f0:e1:c4:e5:8e:c6:29:29:1b:60:31:7f:07:46:71:b8:5d:7e:a8:0d:5b:07:27:34:63:53:4b:32:b4:02:34 +-----BEGIN CERTIFICATE----- +MIIF2DCCA8CgAwIBAgIQTKr5yttjb+Af907YWwOGnTANBgkqhkiG9w0BAQwFADCB +hTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G +A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNV +BAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMTE5 +MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgT +EkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR +Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCR +6FSS0gpWsawNJN3Fz0RndJkrN6N9I3AAcbxT38T6KhKPS38QVr2fcHK3YX/JSw8X +pz3jsARh7v8Rl8f0hj4K+j5c+ZPmNHrZFGvnnLOFoIJ6dq9xkNfs/Q36nGz637CC +9BR++b7Epi9Pf5l/tfxnQ3K9DADWietrLNPtj5gcFKt+5eNu/Nio5JIk2kNrYrhV +/erBvGy2i/MOjZrkm2xpmfh4SDBF1a3hDTxFYPwyllEnvGfDyi62a+pGx8cgoLEf +Zd5ICLqkTqnyg0Y3hOvozIFIQ2dOciqbXL1MGyiKXCJ7tKuY2e7gUYPDCUZObT6Z ++pUX2nwzV0E8jVHtC7ZcryxjGt9XyD+86V3Em69FmeKjWiS0uqlWPc9vqv9JWL7w +qP/0uK3pN/u6uPQLOvnoQ0IeidiEyxPx2bvhiWC4jChWrBQdnArncevPDt09qZah +SL0896+1DSJMwBGB7FY79tOi4lu3sgQiUpWAk2nojkxl8ZEDLXB0AuqLZxUpaVIC +u9ffUGpVRr+goyhhf3DQw6KqLCGqR84onAZFdr+CGCe01a60y1Dma/RMhnEw6abf +Fobg2P9A3fvQQoh/ozM6LlweQRGBY84YcWsr7KaKtzFcOmpH4MN5WdYgGq/yapiq +crxXStJLnbsQ/LBMQeXtHT1eKJ2czL+zUdqnR+WEUwIDAQABo0IwQDAdBgNVHQ4E +FgQUu69+Aj36pvE8hI6t7jiY7NkyMtQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB +/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAArx1UaEt65Ru2yyTUEUAJNMnMvl +wFTPoCWOAvn9sKIN9SCYPBMtrFaisNZ+EZLpLrqeLppysb0ZRGxhNaKatBYSaVqM +4dc+pBroLwP0rmEdEBsqpIt6xf4FpuHA1sj+nq6PK7o9mfjYcwlYRm6mnPTXJ9OV +2jeDchzTc+CiR5kDOF3VSXkAKRzH7JsgHAckaVd4sjn8OoSgtZx8jb8uk2Intzna +FxiuvTwJaP+EmzzV1gsD41eeFPfR60/IvYcjt7ZJQ3mFXLrrkguhxuhoqEwWsRqZ +CuhTLJK7oQkYdQxlqHvLI7cawiiFwxv/0Cti76R7CZGYZ4wUAc1oBmpjIXUDgIiK +boHGhfKppC3n9KUkEEeDys30jXlYsQab5xoq2Z0B15R97QNKyvDb6KkBPvVWmcke +jkk9u+UJueBPSZI9FoJAzMxZxuY67RIuaTxslbH9qh17f4a+Hg4yRvv7E491f0yL +S0Zj/gA0QHDBw7mh3aZw4gSzQbzpgJHqZJx64SIDqZxubw5lT2yHh17zbqD5daWb +QOhTsiedSrnAdyGN/4fy3ryM7xfft0kL0fJuMAsaDk527RH89elWsn2/x20Kk4yl +0MC2Hb46TpSi125sC8KKfPog88Tk5c0NqMuRkrF8hey1FGlmDoLnzc7ILaZRfyHB +NVOFBkpdn627G190 +-----END CERTIFICATE----- + +# Operating CA: Sectigo +# Issuer: CN=USERTrust ECC Certification Authority O=The USERTRUST Network +# Subject: CN=USERTrust ECC Certification Authority O=The USERTRUST Network +# Label: "USERTrust ECC Certification Authority" +# Serial: 123013823720199481456569720443997572134 +# MD5 Fingerprint: fa:68:bc:d9:b5:7f:ad:fd:c9:1d:06:83:28:cc:24:c1 +# SHA1 Fingerprint: d1:cb:ca:5d:b2:d5:2a:7f:69:3b:67:4d:e5:f0:5a:1d:0c:95:7d:f0 +# SHA256 Fingerprint: 4f:f4:60:d5:4b:9c:86:da:bf:bc:fc:57:12:e0:40:0d:2b:ed:3f:bc:4d:4f:bd:aa:86:e0:6a:dc:d2:a9:ad:7a +-----BEGIN CERTIFICATE----- +MIICjzCCAhWgAwIBAgIQXIuZxVqUxdJxVt7NiYDMJjAKBggqhkjOPQQDAzCBiDEL 
+MAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNl +eSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMT +JVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMjAx +MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNVBAgT +Ck5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVUaGUg +VVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlm +aWNhdGlvbiBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQarFRaqflo +I+d61SRvU8Za2EurxtW20eZzca7dnNYMYf3boIkDuAUU7FfO7l0/4iGzzvfUinng +o4N+LZfQYcTxmdwlkWOrfzCjtHDix6EznPO/LlxTsV+zfTJ/ijTjeXmjQjBAMB0G +A1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1xmNjmjAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjA2Z6EWCNzklwBBHU6+4WMB +zzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbW +RNZu9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg= +-----END CERTIFICATE----- + +# Operating CA: Sectigo +# Issuer: CN=USERTrust RSA Certification Authority O=The USERTRUST Network +# Subject: CN=USERTrust RSA Certification Authority O=The USERTRUST Network +# Label: "USERTrust RSA Certification Authority" +# Serial: 2645093764781058787591871645665788717 +# MD5 Fingerprint: 1b:fe:69:d1:91:b7:19:33:a3:72:a8:0f:e1:55:e5:b5 +# SHA1 Fingerprint: 2b:8f:1b:57:33:0d:bb:a2:d0:7a:6c:51:f7:0e:e9:0d:da:b9:ad:8e +# SHA256 Fingerprint: e7:93:c9:b0:2f:d8:aa:13:e2:1c:31:22:8a:cc:b0:81:19:64:3b:74:9c:89:89:64:b1:74:6d:46:c3:d4:cb:d2 +-----BEGIN CERTIFICATE----- +MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCB +iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl +cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV +BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAw +MjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNV +BAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU +aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCAEmUXNg7D2wiz0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B +3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2jY0K2dvKpOyuR+OJv0OwWIJAJPuLodMkY +tJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFnRghRy4YUVD+8M/5+bJz/ +Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O+T23LLb2 +VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT +79uq/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6 +c0Plfg6lZrEpfDKEY1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmT +Yo61Zs8liM2EuLE/pDkP2QKe6xJMlXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97l +c6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8yexDJtC/QV9AqURE9JnnV4ee +UB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+eLf8ZxXhyVeE +Hg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd +BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8G +A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPF +Up/L+M+ZBn8b2kMVn54CVVeWFPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KO +VWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ7l8wXEskEVX/JJpuXior7gtNn3/3 +ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQEg9zKC7F4iRO/Fjs +8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM8WcR +iQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYze +Sf7dNXGiFSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZ +XHlKYC6SQK5MNyosycdiyA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/ +qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9cJ2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRB +VXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGwsAvgnEzDHNb842m1R0aB +L6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gxQ+6IHdfG +jjxDah2nGN59PRbxYvnKkKj9 +-----END CERTIFICATE----- + +# 
Operating CA: Google Trust Services LLC +# Subject: C = US, O = Google Trust Services LLC, CN = GTS Root R1 +# Issuer: C = US, O = Google Trust Services LLC, CN = GTS Root R1 +# Label: "GTS Root R1" +# Serial: 0203E5936F31B01349886BA217 +# MD5 Fingerprint: 05:FE:D0:BF:71:A8:A3:76:63:DA:01:E0:D8:52:DC:40 +# SHA1 Fingerprint: E5:8C:1C:C4:91:3B:38:63:4B:E9:10:6E:E3:AD:8E:6B:9D:D9:81:4A +# SHA256 Fingerprint: D9:47:43:2A:BD:E7:B7:FA:90:FC:2E:6B:59:10:1B:12:80:E0:E1:C7:E4:E4:0F:A3:C6:88:7F:FF:57:A7:F4:CF +-----BEGIN CERTIFICATE----- +MIIFVzCCAz+gAwIBAgINAgPlk28xsBNJiGuiFzANBgkqhkiG9w0BAQwFADBHMQsw +CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU +MBIGA1UEAxMLR1RTIFJvb3QgUjEwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw +MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp +Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQC2EQKLHuOhd5s73L+UPreVp0A8of2C+X0yBoJx9vaMf/vo +27xqLpeXo4xL+Sv2sfnOhB2x+cWX3u+58qPpvBKJXqeqUqv4IyfLpLGcY9vXmX7w +Cl7raKb0xlpHDU0QM+NOsROjyBhsS+z8CZDfnWQpJSMHobTSPS5g4M/SCYe7zUjw +TcLCeoiKu7rPWRnWr4+wB7CeMfGCwcDfLqZtbBkOtdh+JhpFAz2weaSUKK0Pfybl +qAj+lug8aJRT7oM6iCsVlgmy4HqMLnXWnOunVmSPlk9orj2XwoSPwLxAwAtcvfaH +szVsrBhQf4TgTM2S0yDpM7xSma8ytSmzJSq0SPly4cpk9+aCEI3oncKKiPo4Zor8 +Y/kB+Xj9e1x3+naH+uzfsQ55lVe0vSbv1gHR6xYKu44LtcXFilWr06zqkUspzBmk +MiVOKvFlRNACzqrOSbTqn3yDsEB750Orp2yjj32JgfpMpf/VjsPOS+C12LOORc92 +wO1AK/1TD7Cn1TsNsYqiA94xrcx36m97PtbfkSIS5r762DL8EGMUUXLeXdYWk70p +aDPvOmbsB4om3xPXV2V4J95eSRQAogB/mqghtqmxlbCluQ0WEdrHbEg8QOB+DVrN +VjzRlwW5y0vtOUucxD/SVRNuJLDWcfr0wbrM7Rv1/oFB2ACYPTrIrnqYNxgFlQID +AQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQU5K8rJnEaK0gnhS9SZizv8IkTcT4wDQYJKoZIhvcNAQEMBQADggIBAJ+qQibb +C5u+/x6Wki4+omVKapi6Ist9wTrYggoGxval3sBOh2Z5ofmmWJyq+bXmYOfg6LEe +QkEzCzc9zolwFcq1JKjPa7XSQCGYzyI0zzvFIoTgxQ6KfF2I5DUkzps+GlQebtuy +h6f88/qBVRRiClmpIgUxPoLW7ttXNLwzldMXG+gnoot7TiYaelpkttGsN/H9oPM4 +7HLwEXWdyzRSjeZ2axfG34arJ45JK3VmgRAhpuo+9K4l/3wV3s6MJT/KYnAK9y8J +ZgfIPxz88NtFMN9iiMG1D53Dn0reWVlHxYciNuaCp+0KueIHoI17eko8cdLiA6Ef +MgfdG+RCzgwARWGAtQsgWSl4vflVy2PFPEz0tv/bal8xa5meLMFrUKTX5hgUvYU/ +Z6tGn6D/Qqc6f1zLXbBwHSs09dR2CQzreExZBfMzQsNhFRAbd03OIozUhfJFfbdT +6u9AWpQKXCBfTkBdYiJ23//OYb2MI3jSNwLgjt7RETeJ9r/tSQdirpLsQBqvFAnZ +0E6yove+7u7Y/9waLd64NnHi/Hm3lCXRSHNboTXns5lndcEZOitHTtNCjv0xyBZm +2tIMPNuzjsmhDYAPexZ3FL//2wmUspO8IFgV6dtxQ/PeEMMA3KgqlbbC1j+Qa3bb +bP6MvPJwNQzcmRk13NfIRmPVNnGuV/u3gm3c +-----END CERTIFICATE----- + +# Operating CA: Google Trust Services LLC +# Subject: C = US, O = Google Trust Services LLC, CN = GTS Root R2 +# Issuer: C = US, O = Google Trust Services LLC, CN = GTS Root R2 +# Label: "GTS Root R2" +# Serial: 0203E5AEC58D04251AAB1125AA +# MD5 Fingerprint=1E:39:C0:53:E6:1E:29:82:0B:CA:52:55:36:5D:57:DC +# SHA1 Fingerprint=9A:44:49:76:32:DB:DE:FA:D0:BC:FB:5A:7B:17:BD:9E:56:09:24:94 +# SHA256 Fingerprint=8D:25:CD:97:22:9D:BF:70:35:6B:DA:4E:B3:CC:73:40:31:E2:4C:F0:0F:AF:CF:D3:2D:C7:6E:B5:84:1C:7E:A8 +-----BEGIN CERTIFICATE----- +MIIFVzCCAz+gAwIBAgINAgPlrsWNBCUaqxElqjANBgkqhkiG9w0BAQwFADBHMQsw +CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU +MBIGA1UEAxMLR1RTIFJvb3QgUjIwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw +MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp +Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjIwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQDO3v2m++zsFDQ8BwZabFn3GTXd98GdVarTzTukk3LvCvpt +nfbwhYBboUhSnznFt+4orO/LdmgUud+tAWyZH8QiHZ/+cnfgLFuv5AS/T3KgGjSY +6Dlo7JUle3ah5mm5hRm9iYz+re026nO8/4Piy33B0s5Ks40FnotJk9/BW9BuXvAu 
+MC6C/Pq8tBcKSOWIm8Wba96wyrQD8Nr0kLhlZPdcTK3ofmZemde4wj7I0BOdre7k +RXuJVfeKH2JShBKzwkCX44ofR5GmdFrS+LFjKBC4swm4VndAoiaYecb+3yXuPuWg +f9RhD1FLPD+M2uFwdNjCaKH5wQzpoeJ/u1U8dgbuak7MkogwTZq9TwtImoS1mKPV ++3PBV2HdKFZ1E66HjucMUQkQdYhMvI35ezzUIkgfKtzra7tEscszcTJGr61K8Yzo +dDqs5xoic4DSMPclQsciOzsSrZYuxsN2B6ogtzVJV+mSSeh2FnIxZyuWfoqjx5RW +Ir9qS34BIbIjMt/kmkRtWVtd9QCgHJvGeJeNkP+byKq0rxFROV7Z+2et1VsRnTKa +G73VululycslaVNVJ1zgyjbLiGH7HrfQy+4W+9OmTN6SpdTi3/UGVN4unUu0kzCq +gc7dGtxRcw1PcOnlthYhGXmy5okLdWTK1au8CcEYof/UVKGFPP0UJAOyh9OktwID +AQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQUu//KjiOfT5nK2+JopqUVJxce2Q4wDQYJKoZIhvcNAQEMBQADggIBAB/Kzt3H +vqGf2SdMC9wXmBFqiN495nFWcrKeGk6c1SuYJF2ba3uwM4IJvd8lRuqYnrYb/oM8 +0mJhwQTtzuDFycgTE1XnqGOtjHsB/ncw4c5omwX4Eu55MaBBRTUoCnGkJE+M3DyC +B19m3H0Q/gxhswWV7uGugQ+o+MePTagjAiZrHYNSVc61LwDKgEDg4XSsYPWHgJ2u +NmSRXbBoGOqKYcl3qJfEycel/FVL8/B/uWU9J2jQzGv6U53hkRrJXRqWbTKH7QMg +yALOWr7Z6v2yTcQvG99fevX4i8buMTolUVVnjWQye+mew4K6Ki3pHrTgSAai/Gev +HyICc/sgCq+dVEuhzf9gR7A/Xe8bVr2XIZYtCtFenTgCR2y59PYjJbigapordwj6 +xLEokCZYCDzifqrXPW+6MYgKBesntaFJ7qBFVHvmJ2WZICGoo7z7GJa7Um8M7YNR +TOlZ4iBgxcJlkoKM8xAfDoqXvneCbT+PHV28SSe9zE8P4c52hgQjxcCMElv924Sg +JPFI/2R80L5cFtHvma3AH/vLrrw4IgYmZNralw4/KBVEqE8AyvCazM90arQ+POuV +7LXTWtiBmelDGDfrs7vRWGJB82bSj6p4lVQgw1oudCvV0b4YacCs1aTPObpRhANl +6WLAYv7YTVWW4tAR+kg0Eeye7QUd5MjWHYbL +-----END CERTIFICATE----- + +# Operating CA: Google Trust Services LLC +# Subject: C = US, O = Google Trust Services LLC, CN = GTS Root R3 +# Issuer: C = US, O = Google Trust Services LLC, CN = GTS Root R3 +# Label: "GTS Root R3" +# Serial: 0203E5B882EB20F825276D3D66 +# MD5 Fingerprint: 3E:E7:9D:58:02:94:46:51:94:E5:E0:22:4A:8B:E7:73 +# SHA1 Fingerprint: ED:E5:71:80:2B:C8:92:B9:5B:83:3C:D2:32:68:3F:09:CD:A0:1E:46 +# SHA256 Fingerprint: 34:D8:A7:3E:E2:08:D9:BC:DB:0D:95:65:20:93:4B:4E:40:E6:94:82:59:6E:8B:6F:73:C8:42:6B:01:0A:6F:48 +-----BEGIN CERTIFICATE----- +MIICCTCCAY6gAwIBAgINAgPluILrIPglJ209ZjAKBggqhkjOPQQDAzBHMQswCQYD +VQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIG +A1UEAxMLR1RTIFJvb3QgUjMwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAw +WjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2Vz +IExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjMwdjAQBgcqhkjOPQIBBgUrgQQAIgNi +AAQfTzOHMymKoYTey8chWEGJ6ladK0uFxh1MJ7x/JlFyb+Kf1qPKzEUURout736G +jOyxfi//qXGdGIRFBEFVbivqJn+7kAHjSxm65FSWRQmx1WyRRK2EE46ajA2ADDL2 +4CejQjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBTB8Sa6oC2uhYHP0/EqEr24Cmf9vDAKBggqhkjOPQQDAwNpADBmAjEA9uEglRR7 +VKOQFhG/hMjqb2sXnh5GmCCbn9MN2azTL818+FsuVbu/3ZL3pAzcMeGiAjEA/Jdm +ZuVDFhOD3cffL74UOO0BzrEXGhF16b0DjyZ+hOXJYKaV11RZt+cRLInUue4X +-----END CERTIFICATE----- + +# Operating CA: Google Trust Services LLC +# Subject: C = US, O = Google Trust Services LLC, CN = GTS Root R4 +# Issuer: C = US, O = Google Trust Services LLC, CN = GTS Root R4 +# Label: "GTS Root R4" +# Serial: 0203E5C068EF631A9C72905052 +# MD5 Fingerprint=43:96:83:77:19:4D:76:B3:9D:65:52:E4:1D:22:A5:E8 +# SHA1 Fingerprint=77:D3:03:67:B5:E0:0C:15:F6:0C:38:61:DF:7C:E1:3B:92:46:4D:47 +# SHA256 Fingerprint=34:9D:FA:40:58:C5:E2:63:12:3B:39:8A:E7:95:57:3C:4E:13:13:C8:3F:E6:8F:93:55:6C:D5:E8:03:1B:3C:7D +-----BEGIN CERTIFICATE----- +MIICCTCCAY6gAwIBAgINAgPlwGjvYxqccpBQUjAKBggqhkjOPQQDAzBHMQswCQYD +VQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIG +A1UEAxMLR1RTIFJvb3QgUjQwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAw +WjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2Vz +IExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjQwdjAQBgcqhkjOPQIBBgUrgQQAIgNi 
+AATzdHOnaItgrkO4NcWBMHtLSZ37wWHO5t5GvWvVYRg1rkDdc/eJkTBa6zzuhXyi +QHY7qca4R9gq55KRanPpsXI5nymfopjTX15YhmUPoYRlBtHci8nHc8iMai/lxKvR +HYqjQjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBSATNbrdP9JNqPV2Py1PsVq8JQdjDAKBggqhkjOPQQDAwNpADBmAjEA6ED/g94D +9J+uHXqnLrmvT/aDHQ4thQEd0dlq7A/Cr8deVl5c1RxYIigL9zC2L7F8AjEA8GE8 +p/SgguMh1YQdc4acLa/KNJvxn7kjNuK8YAOdgLOaVsjh4rsUecrNIdSUtUlD +-----END CERTIFICATE----- + +# Operating CA: Google Trust Services LLC +# Subject: OU = GlobalSign ECC Root CA - R4, O = GlobalSign, CN = GlobalSign +# Issuer: OU = GlobalSign ECC Root CA - R4, O = GlobalSign, CN = GlobalSign +# Label: "GlobalSign R4" +# Serial: 0203E57EF53F93FDA50921B2A6 +# MD5 Fingerprint: 26:29:F8:6D:E1:88:BF:A2:65:7F:AA:C4:CD:0F:7F:FC +# SHA1 Fingerprint: 6B:A0:B0:98:E1:71:EF:5A:AD:FE:48:15:80:77:10:F4:BD:6F:0B:28 +# SHA256 Fingerprint: B0:85:D7:0B:96:4F:19:1A:73:E4:AF:0D:54:AE:7A:0E:07:AA:FD:AF:9B:71:DD:08:62:13:8A:B7:32:5A:24:A2 +-----BEGIN CERTIFICATE----- +MIIB3DCCAYOgAwIBAgINAgPlfvU/k/2lCSGypjAKBggqhkjOPQQDAjBQMSQwIgYD +VQQLExtHbG9iYWxTaWduIEVDQyBSb290IENBIC0gUjQxEzARBgNVBAoTCkdsb2Jh +bFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMTIxMTEzMDAwMDAwWhcNMzgw +MTE5MDMxNDA3WjBQMSQwIgYDVQQLExtHbG9iYWxTaWduIEVDQyBSb290IENBIC0g +UjQxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wWTAT +BgcqhkjOPQIBBggqhkjOPQMBBwNCAAS4xnnTj2wlDp8uORkcA6SumuU5BwkWymOx +uYb4ilfBV85C+nOh92VC/x7BALJucw7/xyHlGKSq2XE/qNS5zowdo0IwQDAOBgNV +HQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUVLB7rUW44kB/ ++wpu+74zyTyjhNUwCgYIKoZIzj0EAwIDRwAwRAIgIk90crlgr/HmnKAWBVBfw147 +bmF0774BxL4YSFlhgjICICadVGNA3jdgUM/I2O2dgq43mLyjj0xMqTQrbO/7lZsm +-----END CERTIFICATE----- diff --git a/crates/sui-kvstore/src/bigtable/proto/google.rpc.rs b/crates/sui-kvstore/src/bigtable/proto/google.rpc.rs new file mode 100644 index 0000000000000..22c01090fc8bb --- /dev/null +++ b/crates/sui-kvstore/src/bigtable/proto/google.rpc.rs @@ -0,0 +1,24 @@ +/// The `Status` type defines a logical error model that is suitable for +/// different programming environments, including REST APIs and RPC APIs. It is +/// used by \[gRPC\](). Each `Status` message contains +/// three pieces of data: error code, error message, and error details. +/// +/// You can find out more about this error model and how to work with it in the +/// [API Design Guide](). +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Status { + /// The status code, which should be an enum value of + /// \[google.rpc.Code][google.rpc.Code\]. + #[prost(int32, tag = "1")] + pub code: i32, + /// A developer-facing error message, which should be in English. Any + /// user-facing error message should be localized and sent in the + /// \[google.rpc.Status.details][google.rpc.Status.details\] field, or localized + /// by the client. + #[prost(string, tag = "2")] + pub message: ::prost::alloc::string::String, + /// A list of messages that carry the error details. There is a common set of + /// message types for APIs to use. + #[prost(message, repeated, tag = "3")] + pub details: ::prost::alloc::vec::Vec<::prost_types::Any>, +} diff --git a/crates/sui-kvstore/src/bigtable/worker.rs b/crates/sui-kvstore/src/bigtable/worker.rs new file mode 100644 index 0000000000000..4698bab17de40 --- /dev/null +++ b/crates/sui-kvstore/src/bigtable/worker.rs @@ -0,0 +1,39 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use crate::{BigTableClient, KeyValueStoreWriter, TransactionData}; +use async_trait::async_trait; +use sui_data_ingestion_core::Worker; +use sui_types::full_checkpoint_content::CheckpointData; + +pub struct KvWorker { + pub client: BigTableClient, +} + +#[async_trait] +impl Worker for KvWorker { + type Result = (); + + async fn process_checkpoint(&self, checkpoint: &CheckpointData) -> anyhow::Result<()> { + let mut client = self.client.clone(); + let mut objects = vec![]; + let mut transactions = vec![]; + for transaction in &checkpoint.transactions { + let full_transaction = TransactionData { + transaction: transaction.transaction.clone(), + effects: transaction.effects.clone(), + events: transaction.events.clone(), + checkpoint_number: checkpoint.checkpoint_summary.sequence_number, + timestamp: checkpoint.checkpoint_summary.timestamp_ms, + }; + for object in &transaction.output_objects { + objects.push(object); + } + transactions.push(full_transaction); + } + client.save_objects(&objects).await?; + client.save_transactions(&transactions).await?; + client.save_checkpoint(checkpoint).await?; + Ok(()) + } +} diff --git a/crates/sui-kvstore/src/lib.rs b/crates/sui-kvstore/src/lib.rs new file mode 100644 index 0000000000000..caee844739fb9 --- /dev/null +++ b/crates/sui-kvstore/src/lib.rs @@ -0,0 +1,57 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 +mod bigtable; +use anyhow::Result; +use async_trait::async_trait; +pub use bigtable::client::BigTableClient; +pub use bigtable::worker::KvWorker; +use sui_types::crypto::AuthorityStrongQuorumSignInfo; +use sui_types::digests::{CheckpointDigest, TransactionDigest}; +use sui_types::effects::{TransactionEffects, TransactionEvents}; +use sui_types::full_checkpoint_content::CheckpointData; +use sui_types::messages_checkpoint::{ + CheckpointContents, CheckpointSequenceNumber, CheckpointSummary, +}; +use sui_types::object::Object; +use sui_types::storage::ObjectKey; +use sui_types::transaction::Transaction; + +#[async_trait] +pub trait KeyValueStoreReader { + async fn get_objects(&mut self, objects: &[ObjectKey]) -> Result<Vec<Object>>; + async fn get_transactions( + &mut self, + transactions: &[TransactionDigest], + ) -> Result<Vec<TransactionData>>; + async fn get_checkpoints( + &mut self, + sequence_numbers: &[CheckpointSequenceNumber], + ) -> Result<Vec<Checkpoint>>; + async fn get_checkpoint_by_digest( + &mut self, + digest: CheckpointDigest, + ) -> Result<Option<Checkpoint>>; +} + +#[async_trait] +pub trait KeyValueStoreWriter { + async fn save_objects(&mut self, objects: &[&Object]) -> Result<()>; + async fn save_transactions(&mut self, transactions: &[TransactionData]) -> Result<()>; + async fn save_checkpoint(&mut self, checkpoint: &CheckpointData) -> Result<()>; +} + +#[derive(Clone, Debug)] +pub struct Checkpoint { + pub summary: CheckpointSummary, + pub contents: CheckpointContents, + pub signatures: AuthorityStrongQuorumSignInfo, +} + +#[derive(Clone, Debug)] +pub struct TransactionData { + pub transaction: Transaction, + pub effects: TransactionEffects, + pub events: Option<TransactionEvents>, + pub checkpoint_number: CheckpointSequenceNumber, + pub timestamp: u64, +} diff --git a/crates/sui-kvstore/src/main.rs b/crates/sui-kvstore/src/main.rs new file mode 100644 index 0000000000000..82f9ceac61315 --- /dev/null +++ b/crates/sui-kvstore/src/main.rs @@ -0,0 +1,35 @@ +// Copyright (c) Mysten Labs, Inc.
+// SPDX-License-Identifier: Apache-2.0 +use anyhow::Result; +use sui_data_ingestion_core::setup_single_workflow; +use sui_kvstore::BigTableClient; +use sui_kvstore::KvWorker; +use telemetry_subscribers::TelemetryConfig; + +#[tokio::main] +async fn main() -> Result<()> { + let _guard = TelemetryConfig::new().with_env().init(); + let args: Vec<String> = std::env::args().collect(); + if args.len() < 3 { + eprintln!("Please provide BigTable instance id and network name"); + std::process::exit(1); + } + let instance_id = args[1].to_string(); + let network = args[2].to_string(); + assert!( + network == "mainnet" || network == "testnet", + "Invalid network name" + ); + + let client = BigTableClient::new_remote(instance_id, false, None).await?; + let (executor, _term_sender) = setup_single_workflow( + KvWorker { client }, + format!("https://checkpoints.{}.sui.io", network), + 0, + 1, + None, + ) + .await?; + executor.await?; + Ok(()) +} diff --git a/crates/sui-move/src/build.rs b/crates/sui-move/src/build.rs index 1d765e49e8120..42789e2ef9b35 100644 --- a/crates/sui-move/src/build.rs +++ b/crates/sui-move/src/build.rs @@ -88,11 +88,12 @@ impl Build { if generate_struct_layouts { let layout_str = serde_yaml::to_string(&pkg.generate_struct_layouts()).unwrap(); // store under /build//layouts/struct_layouts.yaml - let layout_filename = rerooted_path + let dir_name = rerooted_path .join("build") .join(pkg.package.compiled_package_info.package_name.as_str()) - .join(LAYOUTS_DIR) - .join(STRUCT_LAYOUTS_FILENAME); + .join(LAYOUTS_DIR); + let layout_filename = dir_name.join(STRUCT_LAYOUTS_FILENAME); + fs::create_dir_all(dir_name)?; fs::write(layout_filename, layout_str)? } diff --git a/crates/sui-move/src/main.rs b/crates/sui-move/src/main.rs index 49676c20b498a..4d763b8182a33 100644 --- a/crates/sui-move/src/main.rs +++ b/crates/sui-move/src/main.rs @@ -26,10 +26,10 @@ struct Args { #[clap(long = "path", short = 'p', global = true)] pub package_path: Option<PathBuf>, /// If true, run the Move bytecode verifier on the bytecode from a successful build - #[clap(long = "path", short = 'p', global = true)] + #[clap(long, global = true)] pub run_bytecode_verifier: bool, /// If true, print build diagnostics to stderr--no printing if false - #[clap(long = "path", short = 'p', global = true)] + #[clap(long, global = true)] pub print_diags_to_stderr: bool, /// Package build options #[clap(flatten)] diff --git a/crates/sui-mvr-indexer/Cargo.toml b/crates/sui-mvr-indexer/Cargo.toml new file mode 100644 index 0000000000000..fd869ae522ddf --- /dev/null +++ b/crates/sui-mvr-indexer/Cargo.toml @@ -0,0 +1,89 @@ +[package] +name = "sui-mvr-indexer" +version.workspace = true +authors = ["Mysten Labs <build@mystenlabs.com>"] +license = "Apache-2.0" +publish = false +edition = "2021" + +[dependencies] +anyhow.workspace = true +rand = "0.8.5" +async-trait.workspace = true +axum.workspace = true +backoff.workspace = true +bb8 = "0.8.5" +bcs.workspace = true +bytes.workspace = true +chrono.workspace = true +clap = { workspace = true, features = ["env"] } +csv.workspace = true +diesel = { workspace = true, features = ["chrono", "serde_json"] } +diesel-async = { workspace = true, features = ["bb8", "postgres", "async-connection-wrapper"] } +futures.workspace = true +hex.workspace = true +indicatif.workspace = true +itertools.workspace = true +jsonrpsee.workspace = true +object_store.workspace = true +prometheus.workspace = true +rayon.workspace = true +regex.workspace = true +serde.workspace = true +serde_json.workspace = true +serde_with.workspace = true
+strum.workspace = true +strum_macros.workspace = true +tap.workspace = true +tempfile.workspace = true +thiserror.workspace = true +tokio = { workspace = true, features = ["full"] } +tokio-util = { workspace = true, features = ["rt"] } +toml.workspace = true +tracing.workspace = true +url.workspace = true + +fastcrypto = { workspace = true, features = ["copy_key"] } +mysten-metrics.workspace = true +simulacrum.workspace = true +sui-config.workspace = true +sui-archival.workspace = true +sui-core.workspace = true +sui-data-ingestion-core.workspace = true +sui-json.workspace = true +sui-json-rpc.workspace = true +sui-json-rpc-api.workspace = true +sui-json-rpc-types.workspace = true +sui-open-rpc.workspace = true +sui-sdk.workspace = true +sui-snapshot.workspace = true +sui-storage.workspace = true +sui-types.workspace = true +sui-package-resolver.workspace = true +sui-protocol-config.workspace = true +telemetry-subscribers.workspace = true +sui-rest-api.workspace = true +sui-transaction-builder.workspace = true +sui-synthetic-ingestion.workspace = true + +move-core-types.workspace = true +move-bytecode-utils.workspace = true +move-binary-format.workspace = true + +diesel_migrations.workspace = true +cached.workspace = true +tokio-stream.workspace = true +dashmap.workspace = true + +[dev-dependencies] +sui-keys.workspace = true +sui-move-build.workspace = true +sui-swarm-config.workspace = true +sui-test-transaction-builder.workspace = true +test-cluster.workspace = true +ntest.workspace = true +criterion.workspace = true + +[[bin]] +name = "sui-mvr-indexer" +path = "src/main.rs" diff --git a/crates/sui-mvr-indexer/README.md b/crates/sui-mvr-indexer/README.md new file mode 100644 index 0000000000000..e579bc76ac3ad --- /dev/null +++ b/crates/sui-mvr-indexer/README.md @@ -0,0 +1,27 @@ +The MVR indexer is a spin-off of the Sui indexer. It has a subset of the full indexer schema, limited to just the tables needed to support MVR. The required tables are `epochs`, `checkpoints`, `packages`, `objects_snapshot`, and `objects_history`. This enables the custom indexer to support the `package_by_name` and `type_by_name` GraphQL queries. + +# Running this indexer +## Start the Postgres Service + +Postgres must be running as a background service so that other tools can communicate with it. If it was installed using Homebrew, it can be started as a service with: + +```sh +brew services start postgresql@version +``` + +## DB reset +When making db-related changes, you may find yourself having to run migrations and reset the database often. The command below resets the database and reapplies all migrations: +```sh +cargo run --bin sui-mvr-indexer -- --database-url "" reset-database --force +``` + +## Start the indexer +```sh +cargo run --bin sui-mvr-indexer -- --db-url "" indexer --rpc-client-url "https://fullnode.devnet.sui.io:443" --remote-store-url http://lax-suifn-t99eb.devnet.sui.io:9000/rest +``` + +## Migrations + +To add a new table, run `diesel migration generate your_table_name`, then modify the newly created `up.sql` and `down.sql` files. + +Apply the migration with `diesel migration run`, then run the script `./scripts/generate_indexer_schema.sh` to update the `schema.rs` file.
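As a rough illustration of what such a Diesel migration pair looks like, here is a minimal sketch; the `example_table` name and the `<timestamp>` placeholder are hypothetical and not part of the MVR schema:

```sql
-- migrations/pg/<timestamp>_example_table/up.sql (hypothetical)
CREATE TABLE example_table
(
    id BIGINT PRIMARY KEY,
    payload BYTEA NOT NULL
);

-- migrations/pg/<timestamp>_example_table/down.sql (hypothetical)
-- Undo everything done in `up.sql`.
DROP TABLE IF EXISTS example_table;
```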
diff --git a/crates/sui-mvr-indexer/diesel.toml b/crates/sui-mvr-indexer/diesel.toml new file mode 100644 index 0000000000000..4430344705b42 --- /dev/null +++ b/crates/sui-mvr-indexer/diesel.toml @@ -0,0 +1,8 @@ +# For documentation on how to configure this file, +# see https://diesel.rs/guides/configuring-diesel-cli + +[print_schema] +file = "src/schema/pg.rs" + +[migrations_directory] +dir = "migrations/pg" diff --git a/crates/sui-mvr-indexer/migrations/pg/00000000000000_diesel_initial_setup/down.sql b/crates/sui-mvr-indexer/migrations/pg/00000000000000_diesel_initial_setup/down.sql new file mode 100644 index 0000000000000..a9f526091194b --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/00000000000000_diesel_initial_setup/down.sql @@ -0,0 +1,6 @@ +-- This file was automatically created by Diesel to setup helper functions +-- and other internal bookkeeping. This file is safe to edit, any future +-- changes will be added to existing projects as new migrations. + +DROP FUNCTION IF EXISTS diesel_manage_updated_at(_tbl regclass); +DROP FUNCTION IF EXISTS diesel_set_updated_at(); diff --git a/crates/sui-mvr-indexer/migrations/pg/00000000000000_diesel_initial_setup/up.sql b/crates/sui-mvr-indexer/migrations/pg/00000000000000_diesel_initial_setup/up.sql new file mode 100644 index 0000000000000..d68895b1a7b7d --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/00000000000000_diesel_initial_setup/up.sql @@ -0,0 +1,36 @@ +-- This file was automatically created by Diesel to setup helper functions +-- and other internal bookkeeping. This file is safe to edit, any future +-- changes will be added to existing projects as new migrations. + + + + +-- Sets up a trigger for the given table to automatically set a column called +-- `updated_at` whenever the row is modified (unless `updated_at` was included +-- in the modified columns) +-- +-- # Example +-- +-- ```sql +-- CREATE TABLE users (id SERIAL PRIMARY KEY, updated_at TIMESTAMP NOT NULL DEFAULT NOW()); +-- +-- SELECT diesel_manage_updated_at('users'); +-- ``` +CREATE OR REPLACE FUNCTION diesel_manage_updated_at(_tbl regclass) RETURNS VOID AS $$ +BEGIN + EXECUTE format('CREATE TRIGGER set_updated_at BEFORE UPDATE ON %s + FOR EACH ROW EXECUTE PROCEDURE diesel_set_updated_at()', _tbl); +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION diesel_set_updated_at() RETURNS trigger AS $$ +BEGIN + IF ( + NEW IS DISTINCT FROM OLD AND + NEW.updated_at IS NOT DISTINCT FROM OLD.updated_at + ) THEN + NEW.updated_at := current_timestamp; + END IF; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; diff --git a/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044020_events/down.sql b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044020_events/down.sql new file mode 100644 index 0000000000000..57f1de973b1d2 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044020_events/down.sql @@ -0,0 +1,2 @@ +-- This file should undo anything in `up.sql` +DROP TABLE IF EXISTS chain_identifier; diff --git a/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044020_events/up.sql b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044020_events/up.sql new file mode 100644 index 0000000000000..14aa6a098161f --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044020_events/up.sql @@ -0,0 +1,26 @@ +-- TODO: modify queries in indexer reader to take advantage of the new indices +CREATE TABLE events +( + tx_sequence_number BIGINT NOT NULL, + event_sequence_number BIGINT NOT NULL, + transaction_digest bytea NOT NULL, + -- array of SuiAddress in bytes. 
All signers of the transaction. + senders bytea[] NOT NULL, + -- bytes of the entry package ID. Notice that the package and module here + -- are the package and module of the function that emitted the event, different + -- from the package and module of the event type. + package bytea NOT NULL, + -- entry module name + module text NOT NULL, + -- StructTag in Display format, fully qualified including type parameters + event_type text NOT NULL, + -- timestamp of the checkpoint when the event was emitted + timestamp_ms BIGINT NOT NULL, + -- bcs of the Event contents (Event.contents) + bcs BYTEA NOT NULL, + PRIMARY KEY(tx_sequence_number, event_sequence_number) +) PARTITION BY RANGE (tx_sequence_number); +CREATE TABLE events_partition_0 PARTITION OF events FOR VALUES FROM (0) TO (MAXVALUE); +CREATE INDEX events_package ON events (package, tx_sequence_number, event_sequence_number); +CREATE INDEX events_package_module ON events (package, module, tx_sequence_number, event_sequence_number); +CREATE INDEX events_event_type ON events (event_type text_pattern_ops, tx_sequence_number, event_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044023_objects/down.sql b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044023_objects/down.sql new file mode 100644 index 0000000000000..edea7960b79d7 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044023_objects/down.sql @@ -0,0 +1,3 @@ +DROP TABLE IF EXISTS objects; +DROP TABLE IF EXISTS objects_history; +DROP TABLE IF EXISTS objects_snapshot; diff --git a/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044023_objects/up.sql b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044023_objects/up.sql new file mode 100644 index 0000000000000..54854fabf4359 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044023_objects/up.sql @@ -0,0 +1,95 @@ +CREATE TABLE objects ( + object_id bytea PRIMARY KEY, + object_version bigint NOT NULL, + object_digest bytea NOT NULL, + checkpoint_sequence_number bigint NOT NULL, + -- Immutable/Address/Object/Shared, see types.rs + owner_type smallint NOT NULL, + -- bytes of SuiAddress/ObjectID of the owner ID. + -- Non-null for objects with an owner: Address or Object + owner_id bytea, + -- Object type + object_type text, + -- Components of the StructTag: package, module, name (name of the struct, without type parameters) + object_type_package bytea, + object_type_module text, + object_type_name text, + -- bcs serialized Object + serialized_object bytea NOT NULL, + -- Non-null when the object is a coin. + -- e.g. `0x2::sui::SUI` + coin_type text, + -- Non-null when the object is a coin. + coin_balance bigint, + -- DynamicField/DynamicObject, see types.rs + -- Non-null when the object is a dynamic field + df_kind smallint, + -- bcs serialized DynamicFieldName + -- Non-null when the object is a dynamic field + df_name bytea, + -- object_type in DynamicFieldInfo. + df_object_type text, + -- object_id in DynamicFieldInfo.
+ df_object_id bytea +); + +-- OwnerType: 1: Address, 2: Object, see types.rs +CREATE INDEX objects_owner ON objects (owner_type, owner_id) WHERE owner_type BETWEEN 1 AND 2 AND owner_id IS NOT NULL; +CREATE INDEX objects_coin ON objects (owner_id, coin_type) WHERE coin_type IS NOT NULL AND owner_type = 1; +CREATE INDEX objects_checkpoint_sequence_number ON objects (checkpoint_sequence_number); +CREATE INDEX objects_package_module_name_full_type ON objects (object_type_package, object_type_module, object_type_name, object_type); +CREATE INDEX objects_owner_package_module_name_full_type ON objects (owner_id, object_type_package, object_type_module, object_type_name, object_type); + +-- similar to objects table, except that +-- 1. the primary key to store multiple object versions and partitions by checkpoint_sequence_number +-- 2. allow null values in some columns for deleted / wrapped objects +-- 3. object_status to mark the status of the object, which is either Active or WrappedOrDeleted +CREATE TABLE objects_history ( + object_id bytea NOT NULL, + object_version bigint NOT NULL, + object_status smallint NOT NULL, + object_digest bytea, + checkpoint_sequence_number bigint NOT NULL, + owner_type smallint, + owner_id bytea, + object_type text, + object_type_package bytea, + object_type_module text, + object_type_name text, + serialized_object bytea, + coin_type text, + coin_balance bigint, + df_kind smallint, + df_name bytea, + df_object_type text, + df_object_id bytea, + CONSTRAINT objects_history_pk PRIMARY KEY (checkpoint_sequence_number, object_id, object_version) +) PARTITION BY RANGE (checkpoint_sequence_number); +CREATE INDEX objects_history_id_version ON objects_history (object_id, object_version, checkpoint_sequence_number); +-- init with first partition of the history table +CREATE TABLE objects_history_partition_0 PARTITION OF objects_history FOR VALUES FROM (0) TO (MAXVALUE); + +-- snapshot table by folding objects_history table until certain checkpoint, +-- effectively the snapshot of objects at the same checkpoint, +-- except that it also includes deleted or wrapped objects with the corresponding object_status. 
+CREATE TABLE objects_snapshot ( + object_id bytea PRIMARY KEY, + object_version bigint NOT NULL, + object_status smallint NOT NULL, + object_digest bytea, + checkpoint_sequence_number bigint NOT NULL, + owner_type smallint, + owner_id bytea, + object_type text, + object_type_package bytea, + object_type_module text, + object_type_name text, + serialized_object bytea, + coin_type text, + coin_balance bigint, + df_kind smallint, + df_name bytea, + df_object_type text, + df_object_id bytea +); +CREATE INDEX objects_snapshot_checkpoint_sequence_number ON objects_snapshot (checkpoint_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044026_transactions/down.sql b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044026_transactions/down.sql new file mode 100644 index 0000000000000..15e9dc9f1cb82 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044026_transactions/down.sql @@ -0,0 +1,3 @@ +-- This file should undo anything in `up.sql` +DROP TABLE IF EXISTS transactions; +DROP TABLE IF EXISTS transactions_partition_0; diff --git a/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044026_transactions/up.sql b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044026_transactions/up.sql new file mode 100644 index 0000000000000..f5404e3610751 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044026_transactions/up.sql @@ -0,0 +1,23 @@ +CREATE TABLE transactions ( + tx_sequence_number BIGINT NOT NULL, + transaction_digest bytea NOT NULL, + -- bcs serialized SenderSignedData bytes + raw_transaction bytea NOT NULL, + -- bcs serialized TransactionEffects bytes + raw_effects bytea NOT NULL, + checkpoint_sequence_number BIGINT NOT NULL, + timestamp_ms BIGINT NOT NULL, + -- array of bcs serialized IndexedObjectChange bytes + object_changes bytea[] NOT NULL, + -- array of bcs serialized BalanceChange bytes + balance_changes bytea[] NOT NULL, + -- array of bcs serialized StoredEvent bytes + events bytea[] NOT NULL, + -- SystemTransaction/ProgrammableTransaction. See types.rs + transaction_kind smallint NOT NULL, + -- number of successful commands in this transaction, bounded by the number of commands + -- in a programmable transaction.
+ success_command_count smallint NOT NULL, + PRIMARY KEY (tx_sequence_number) +) PARTITION BY RANGE (tx_sequence_number); +CREATE TABLE transactions_partition_0 PARTITION OF transactions FOR VALUES FROM (0) TO (MAXVALUE); diff --git a/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044044_checkpoints/down.sql b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044044_checkpoints/down.sql new file mode 100644 index 0000000000000..fba5a8b5468c6 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044044_checkpoints/down.sql @@ -0,0 +1,3 @@ +-- This file should undo anything in `up.sql` +DROP TABLE IF EXISTS checkpoints; +DROP TABLE IF EXISTS pruner_cp_watermark; diff --git a/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044044_checkpoints/up.sql b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044044_checkpoints/up.sql new file mode 100644 index 0000000000000..ddb63b020de70 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044044_checkpoints/up.sql @@ -0,0 +1,36 @@ +CREATE TABLE checkpoints +( + sequence_number BIGINT PRIMARY KEY, + checkpoint_digest BYTEA NOT NULL, + epoch BIGINT NOT NULL, + -- total transactions in the network at the end of this checkpoint (including itself) + network_total_transactions BIGINT NOT NULL, + previous_checkpoint_digest BYTEA, + -- if this checkpoint is the last checkpoint of an epoch + end_of_epoch boolean NOT NULL, + -- array of TransactionDigest in bytes included in this checkpoint + tx_digests BYTEA[] NOT NULL, + timestamp_ms BIGINT NOT NULL, + total_gas_cost BIGINT NOT NULL, + computation_cost BIGINT NOT NULL, + storage_cost BIGINT NOT NULL, + storage_rebate BIGINT NOT NULL, + non_refundable_storage_fee BIGINT NOT NULL, + -- bcs serialized Vec bytes + checkpoint_commitments BYTEA NOT NULL, + -- bcs serialized AggregateAuthoritySignature bytes + validator_signature BYTEA NOT NULL, + -- bcs serialized EndOfEpochData bytes, if the checkpoint marks the end of an epoch + end_of_epoch_data BYTEA, + min_tx_sequence_number BIGINT, + max_tx_sequence_number BIGINT +); + +CREATE INDEX checkpoints_epoch ON checkpoints (epoch, sequence_number); +CREATE INDEX checkpoints_digest ON checkpoints USING HASH (checkpoint_digest); + +CREATE TABLE pruner_cp_watermark ( + checkpoint_sequence_number BIGINT PRIMARY KEY, + min_tx_sequence_number BIGINT NOT NULL, + max_tx_sequence_number BIGINT NOT NULL +) diff --git a/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044052_epochs/down.sql b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044052_epochs/down.sql new file mode 100644 index 0000000000000..ddb05ac2ebe8b --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044052_epochs/down.sql @@ -0,0 +1,2 @@ +-- This file should undo anything in `up.sql` +DROP TABLE IF EXISTS epochs; diff --git a/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044052_epochs/up.sql b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044052_epochs/up.sql new file mode 100644 index 0000000000000..5b540121cb849 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-044052_epochs/up.sql @@ -0,0 +1,47 @@ +CREATE TABLE epochs +( + epoch BIGINT PRIMARY KEY, + first_checkpoint_id BIGINT NOT NULL, + epoch_start_timestamp BIGINT NOT NULL, + reference_gas_price BIGINT NOT NULL, + protocol_version BIGINT NOT NULL, + total_stake BIGINT NOT NULL, + storage_fund_balance BIGINT NOT NULL, + system_state bytea NOT NULL, + -- The following fields are nullable because they are filled in + -- only at the end of an epoch.
+ epoch_total_transactions BIGINT, + last_checkpoint_id BIGINT, + epoch_end_timestamp BIGINT, + -- The following fields are from SystemEpochInfoEvent emitted + -- **after** advancing to the next epoch + storage_fund_reinvestment BIGINT, + storage_charge BIGINT, + storage_rebate BIGINT, + stake_subsidy_amount BIGINT, + total_gas_fees BIGINT, + total_stake_rewards_distributed BIGINT, + leftover_storage_fund_inflow BIGINT, + -- bcs serialized Vec bytes, found in last CheckpointSummary + -- of the epoch + epoch_commitments bytea +); + +-- Table storing the protocol configs for each protocol version. +-- Examples include gas schedule, transaction limits, etc. +CREATE TABLE protocol_configs +( + protocol_version BIGINT NOT NULL, + config_name TEXT NOT NULL, + config_value TEXT, + PRIMARY KEY(protocol_version, config_name) +); + +-- Table storing the feature flags for each protocol version. +CREATE TABLE feature_flags +( + protocol_version BIGINT NOT NULL, + flag_name TEXT NOT NULL, + flag_value BOOLEAN NOT NULL, + PRIMARY KEY(protocol_version, flag_name) +); diff --git a/crates/sui-mvr-indexer/migrations/pg/2023-08-19-060729_packages/down.sql b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-060729_packages/down.sql new file mode 100644 index 0000000000000..6b473dc06f4a2 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-060729_packages/down.sql @@ -0,0 +1,2 @@ +-- This file should undo anything in `up.sql` +DROP TABLE IF EXISTS packages; diff --git a/crates/sui-mvr-indexer/migrations/pg/2023-08-19-060729_packages/up.sql b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-060729_packages/up.sql new file mode 100644 index 0000000000000..f08a5549608eb --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2023-08-19-060729_packages/up.sql @@ -0,0 +1,14 @@ +CREATE TABLE packages +( + package_id bytea NOT NULL, + original_id bytea NOT NULL, + package_version bigint NOT NULL, + -- bcs serialized MovePackage + move_package bytea NOT NULL, + checkpoint_sequence_number bigint NOT NULL, + CONSTRAINT packages_pkey PRIMARY KEY (package_id, original_id, package_version), + CONSTRAINT packages_unique_package_id UNIQUE (package_id) +); + +CREATE INDEX packages_cp_id_version ON packages (checkpoint_sequence_number, original_id, package_version); +CREATE INDEX packages_id_version_cp ON packages (original_id, package_version, checkpoint_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2023-10-06-204335_tx_indices/down.sql b/crates/sui-mvr-indexer/migrations/pg/2023-10-06-204335_tx_indices/down.sql new file mode 100644 index 0000000000000..f5604c0db5357 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2023-10-06-204335_tx_indices/down.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS tx_senders; +DROP TABLE IF EXISTS tx_recipients; +DROP TABLE IF EXISTS tx_input_objects; +DROP TABLE IF EXISTS tx_changed_objects; +DROP TABLE IF EXISTS tx_calls_pkg; +DROP TABLE IF EXISTS tx_calls_mod; +DROP TABLE IF EXISTS tx_calls_fun; +DROP TABLE IF EXISTS tx_digests; +DROP TABLE IF EXISTS tx_kinds; diff --git a/crates/sui-mvr-indexer/migrations/pg/2023-10-06-204335_tx_indices/up.sql b/crates/sui-mvr-indexer/migrations/pg/2023-10-06-204335_tx_indices/up.sql new file mode 100644 index 0000000000000..563df854b97ef --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2023-10-06-204335_tx_indices/up.sql @@ -0,0 +1,67 @@ +CREATE TABLE tx_senders ( + tx_sequence_number BIGINT NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(sender, tx_sequence_number) +); + +CREATE TABLE tx_recipients ( + 
tx_sequence_number BIGINT NOT NULL, + recipient BYTEA NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(recipient, tx_sequence_number) +); +CREATE INDEX tx_recipients_sender ON tx_recipients (sender, recipient, tx_sequence_number); + +CREATE TABLE tx_input_objects ( + tx_sequence_number BIGINT NOT NULL, + object_id BYTEA NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(object_id, tx_sequence_number) +); +CREATE INDEX tx_input_objects_sender ON tx_input_objects (sender, object_id, tx_sequence_number); + +CREATE TABLE tx_changed_objects ( + tx_sequence_number BIGINT NOT NULL, + object_id BYTEA NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(object_id, tx_sequence_number) +); +CREATE INDEX tx_changed_objects_sender ON tx_changed_objects (sender, object_id, tx_sequence_number); + +CREATE TABLE tx_calls_pkg ( + tx_sequence_number BIGINT NOT NULL, + package BYTEA NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(package, tx_sequence_number) +); +CREATE INDEX tx_calls_pkg_sender ON tx_calls_pkg (sender, package, tx_sequence_number); + +CREATE TABLE tx_calls_mod ( + tx_sequence_number BIGINT NOT NULL, + package BYTEA NOT NULL, + module TEXT NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(package, module, tx_sequence_number) +); +CREATE INDEX tx_calls_mod_sender ON tx_calls_mod (sender, package, module, tx_sequence_number); + +CREATE TABLE tx_calls_fun ( + tx_sequence_number BIGINT NOT NULL, + package BYTEA NOT NULL, + module TEXT NOT NULL, + func TEXT NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(package, module, func, tx_sequence_number) +); +CREATE INDEX tx_calls_fun_sender ON tx_calls_fun (sender, package, module, func, tx_sequence_number); + +CREATE TABLE tx_digests ( + tx_digest BYTEA PRIMARY KEY, + tx_sequence_number BIGINT NOT NULL +); + +CREATE TABLE tx_kinds ( + tx_sequence_number BIGINT NOT NULL, + tx_kind SMALLINT NOT NULL, + PRIMARY KEY(tx_kind, tx_sequence_number) +); diff --git a/crates/sui-mvr-indexer/migrations/pg/2023-10-07-160139_display/down.sql b/crates/sui-mvr-indexer/migrations/pg/2023-10-07-160139_display/down.sql new file mode 100644 index 0000000000000..f73e497c406d3 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2023-10-07-160139_display/down.sql @@ -0,0 +1,2 @@ +-- This file should undo anything in `up.sql` +DROP TABLE IF EXISTS display; diff --git a/crates/sui-mvr-indexer/migrations/pg/2023-10-07-160139_display/up.sql b/crates/sui-mvr-indexer/migrations/pg/2023-10-07-160139_display/up.sql new file mode 100644 index 0000000000000..c82918e253c8c --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2023-10-07-160139_display/up.sql @@ -0,0 +1,7 @@ +CREATE TABLE display +( + object_type text PRIMARY KEY, + id BYTEA NOT NULL, + version SMALLINT NOT NULL, + bcs BYTEA NOT NULL +); diff --git a/crates/sui-mvr-indexer/migrations/pg/2023-11-29-193859_advance_partition/down.sql b/crates/sui-mvr-indexer/migrations/pg/2023-11-29-193859_advance_partition/down.sql new file mode 100644 index 0000000000000..bab0311186e1d --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2023-11-29-193859_advance_partition/down.sql @@ -0,0 +1,2 @@ +DROP PROCEDURE IF EXISTS advance_partition; +DROP PROCEDURE IF EXISTS drop_partition; diff --git a/crates/sui-mvr-indexer/migrations/pg/2023-11-29-193859_advance_partition/up.sql b/crates/sui-mvr-indexer/migrations/pg/2023-11-29-193859_advance_partition/up.sql new file mode 100644 index 0000000000000..8ca64b86a7081 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2023-11-29-193859_advance_partition/up.sql @@ -0,0 +1,17 @@ +CREATE 
OR REPLACE PROCEDURE advance_partition(table_name TEXT, last_epoch BIGINT, new_epoch BIGINT, last_epoch_start BIGINT, new_epoch_start BIGINT) +LANGUAGE plpgsql +AS $$ +BEGIN + EXECUTE format('ALTER TABLE %I DETACH PARTITION %I_partition_%s', table_name, table_name, last_epoch); + EXECUTE format('ALTER TABLE %I ATTACH PARTITION %I_partition_%s FOR VALUES FROM (%L) TO (%L)', table_name, table_name, last_epoch, last_epoch_start, new_epoch_start); + EXECUTE format('CREATE TABLE IF NOT EXISTS %I_partition_%s PARTITION OF %I FOR VALUES FROM (%L) TO (MAXVALUE)', table_name, new_epoch, table_name, new_epoch_start); +END; +$$; + +CREATE OR REPLACE PROCEDURE drop_partition(table_name TEXT, epoch BIGINT) +LANGUAGE plpgsql +AS $$ +BEGIN + EXECUTE format('DROP TABLE IF EXISTS %I_partition_%s', table_name, epoch); +END; +$$; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-05-05-155158_obj_indices/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-05-05-155158_obj_indices/down.sql new file mode 100644 index 0000000000000..7a3a7670f24c2 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-05-05-155158_obj_indices/down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS objects_version; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-05-05-155158_obj_indices/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-05-05-155158_obj_indices/up.sql new file mode 100644 index 0000000000000..666e5a2423319 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-05-05-155158_obj_indices/up.sql @@ -0,0 +1,31 @@ +-- Indexing table mapping an object's ID and version to its checkpoint +-- sequence number, partitioned by the first byte of its Object ID. +CREATE TABLE objects_version ( + object_id bytea NOT NULL, + object_version bigint NOT NULL, + cp_sequence_number bigint NOT NULL, + PRIMARY KEY (object_id, object_version) +) PARTITION BY RANGE (object_id); + +-- Create a partition for each first byte value. +DO $$ +DECLARE + lo text; + hi text; +BEGIN + FOR i IN 0..254 LOOP + lo := LPAD(TO_HEX(i), 2, '0'); + hi := LPAD(TO_HEX(i + 1), 2, '0'); + EXECUTE FORMAT($F$ + CREATE TABLE objects_version_%1$s PARTITION OF objects_version FOR VALUES + FROM (E'\\x%1$s00000000000000000000000000000000000000000000000000000000000000') + TO (E'\\x%2$s00000000000000000000000000000000000000000000000000000000000000'); + $F$, lo, hi); + END LOOP; +END; +$$ LANGUAGE plpgsql; + +-- Special case for the last partition, because of the upper bound. 
+CREATE TABLE objects_version_ff PARTITION OF objects_version FOR VALUES +FROM (E'\\xff00000000000000000000000000000000000000000000000000000000000000') +TO (MAXVALUE); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-06-14-045801_event_indices/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-06-14-045801_event_indices/down.sql new file mode 100644 index 0000000000000..3583887435168 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-06-14-045801_event_indices/down.sql @@ -0,0 +1,7 @@ +DROP TABLE IF EXISTS event_emit_package; +DROP TABLE IF EXISTS event_emit_module; +DROP TABLE IF EXISTS event_struct_package; +DROP TABLE IF EXISTS event_struct_module; +DROP TABLE IF EXISTS event_struct_name; +DROP TABLE IF EXISTS event_struct_instantiation; +DROP TABLE IF EXISTS event_senders; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-06-14-045801_event_indices/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-06-14-045801_event_indices/up.sql new file mode 100644 index 0000000000000..a89625146a9fd --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-06-14-045801_event_indices/up.sql @@ -0,0 +1,74 @@ +CREATE TABLE event_emit_package +( + package BYTEA NOT NULL, + tx_sequence_number BIGINT NOT NULL, + event_sequence_number BIGINT NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(package, tx_sequence_number, event_sequence_number) +); +CREATE INDEX event_emit_package_sender ON event_emit_package (sender, package, tx_sequence_number, event_sequence_number); + +CREATE TABLE event_emit_module +( + package BYTEA NOT NULL, + module TEXT NOT NULL, + tx_sequence_number BIGINT NOT NULL, + event_sequence_number BIGINT NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(package, module, tx_sequence_number, event_sequence_number) +); +CREATE INDEX event_emit_module_sender ON event_emit_module (sender, package, module, tx_sequence_number, event_sequence_number); + +CREATE TABLE event_struct_package +( + package BYTEA NOT NULL, + tx_sequence_number BIGINT NOT NULL, + event_sequence_number BIGINT NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(package, tx_sequence_number, event_sequence_number) +); +CREATE INDEX event_struct_package_sender ON event_struct_package (sender, package, tx_sequence_number, event_sequence_number); + + +CREATE TABLE event_struct_module +( + package BYTEA NOT NULL, + module TEXT NOT NULL, + tx_sequence_number BIGINT NOT NULL, + event_sequence_number BIGINT NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(package, module, tx_sequence_number, event_sequence_number) +); +CREATE INDEX event_struct_module_sender ON event_struct_module (sender, package, module, tx_sequence_number, event_sequence_number); + +CREATE TABLE event_struct_name +( + package BYTEA NOT NULL, + module TEXT NOT NULL, + type_name TEXT NOT NULL, + tx_sequence_number BIGINT NOT NULL, + event_sequence_number BIGINT NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(package, module, type_name, tx_sequence_number, event_sequence_number) +); +CREATE INDEX event_struct_name_sender ON event_struct_name (sender, package, module, type_name, tx_sequence_number, event_sequence_number); + +CREATE TABLE event_struct_instantiation +( + package BYTEA NOT NULL, + module TEXT NOT NULL, + type_instantiation TEXT NOT NULL, + tx_sequence_number BIGINT NOT NULL, + event_sequence_number BIGINT NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(package, module, type_instantiation, tx_sequence_number, event_sequence_number) +); +CREATE INDEX event_struct_instantiation_sender ON event_struct_instantiation (sender, package, 
module, type_instantiation, tx_sequence_number, event_sequence_number); + +CREATE TABLE event_senders +( + sender BYTEA NOT NULL, + tx_sequence_number BIGINT NOT NULL, + event_sequence_number BIGINT NOT NULL, + PRIMARY KEY(sender, tx_sequence_number, event_sequence_number) +); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-07-13-003534_chain_identifier/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-07-13-003534_chain_identifier/down.sql new file mode 100644 index 0000000000000..57f1de973b1d2 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-07-13-003534_chain_identifier/down.sql @@ -0,0 +1,2 @@ +-- This file should undo anything in `up.sql` +DROP TABLE IF EXISTS chain_identifier; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-07-13-003534_chain_identifier/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-07-13-003534_chain_identifier/up.sql new file mode 100644 index 0000000000000..205e3a89f63e5 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-07-13-003534_chain_identifier/up.sql @@ -0,0 +1,6 @@ +-- Your SQL goes here +CREATE TABLE chain_identifier +( + checkpoint_digest BYTEA NOT NULL, + PRIMARY KEY(checkpoint_digest) +); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-05-164455_full_objects_history/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-05-164455_full_objects_history/down.sql new file mode 100644 index 0000000000000..619fc41782e68 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-05-164455_full_objects_history/down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS full_objects_history; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-05-164455_full_objects_history/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-05-164455_full_objects_history/up.sql new file mode 100644 index 0000000000000..1504a21e51658 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-05-164455_full_objects_history/up.sql @@ -0,0 +1,10 @@ +-- This table will store every history version of each object, and never get pruned. +-- Since it can grow indefinitely, we keep minimum amount of information in this table for the purpose +-- of point lookups. 
+CREATE TABLE full_objects_history +( + object_id bytea NOT NULL, + object_version bigint NOT NULL, + serialized_object bytea, + PRIMARY KEY (object_id, object_version) +); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-10-195655_drop-df-columns/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-10-195655_drop-df-columns/down.sql new file mode 100644 index 0000000000000..8490a091b30f4 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-10-195655_drop-df-columns/down.sql @@ -0,0 +1,15 @@ +ALTER TABLE objects +ADD COLUMN df_name bytea, +ADD COLUMN df_object_type text, +ADD COLUMN df_object_id bytea, +ADD COLUMN checkpoint_sequence_number bigint; + +ALTER TABLE objects_snapshot +ADD COLUMN df_name bytea, +ADD COLUMN df_object_type text, +ADD COLUMN df_object_id bytea; + +ALTER TABLE objects_history +ADD COLUMN df_name bytea, +ADD COLUMN df_object_type text, +ADD COLUMN df_object_id bytea; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-10-195655_drop-df-columns/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-10-195655_drop-df-columns/up.sql new file mode 100644 index 0000000000000..4782193c4edc9 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-10-195655_drop-df-columns/up.sql @@ -0,0 +1,15 @@ +ALTER TABLE objects +DROP COLUMN df_name, +DROP COLUMN df_object_type, +DROP COLUMN df_object_id, +DROP COLUMN checkpoint_sequence_number; + +ALTER TABLE objects_snapshot +DROP COLUMN df_name, +DROP COLUMN df_object_type, +DROP COLUMN df_object_id; + +ALTER TABLE objects_history +DROP COLUMN df_name, +DROP COLUMN df_object_type, +DROP COLUMN df_object_id; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-12-150939_tx_affected/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-12-150939_tx_affected/down.sql new file mode 100644 index 0000000000000..98cc9c0a36ce9 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-12-150939_tx_affected/down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS tx_affected_addresses; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-12-150939_tx_affected/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-12-150939_tx_affected/up.sql new file mode 100644 index 0000000000000..4f71554f1394a --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-12-150939_tx_affected/up.sql @@ -0,0 +1,9 @@ +CREATE TABLE tx_affected_addresses ( + tx_sequence_number BIGINT NOT NULL, + affected BYTEA NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(affected, tx_sequence_number) +); + +CREATE INDEX tx_affected_addresses_tx_sequence_number_index ON tx_affected_addresses (tx_sequence_number); +CREATE INDEX tx_affected_addresses_sender ON tx_affected_addresses (sender, affected, tx_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-12-213234_watermarks/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-12-213234_watermarks/down.sql new file mode 100644 index 0000000000000..e9de336153f62 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-12-213234_watermarks/down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS watermarks; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-12-213234_watermarks/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-12-213234_watermarks/up.sql new file mode 100644 index 0000000000000..73bdc70055246 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-12-213234_watermarks/up.sql @@ -0,0 +1,34 @@ +CREATE TABLE IF NOT EXISTS watermarks +( + -- The pipeline governed by this watermark, i.e `epochs`, `checkpoints`, + -- 
`transactions`. + pipeline TEXT PRIMARY KEY, + -- Inclusive upper epoch bound for this entity's data. Committer updates + -- this field. Pruner uses this to determine if pruning is necessary based + -- on the retention policy. + epoch_hi_inclusive BIGINT NOT NULL, + -- Inclusive upper checkpoint bound for this entity's data. Committer + -- updates this field. All data of this entity in the checkpoint must be + -- persisted before advancing this watermark. The committer refers to this + -- on disaster recovery to resume writing. + checkpoint_hi_inclusive BIGINT NOT NULL, + -- Exclusive upper transaction sequence number bound for this entity's + -- data. Committer updates this field. + tx_hi BIGINT NOT NULL, + -- Inclusive lower epoch bound for this entity's data. Pruner updates this + -- field when the epoch range exceeds the retention policy. + epoch_lo BIGINT NOT NULL, + -- Inclusive low watermark that the pruner advances. Corresponds to the + -- epoch id, checkpoint sequence number, or tx sequence number depending on + -- the entity. Data before this watermark is considered pruned by a reader. + -- The underlying data may still exist in the db instance. + reader_lo BIGINT NOT NULL, + -- Updated using the database's current timestamp when the pruner sees that + -- some data needs to be dropped. The pruner uses this column to determine + -- whether to prune or wait long enough that all in-flight reads complete + -- or timeout before it acts on an updated watermark. + timestamp_ms BIGINT NOT NULL, + -- Column used by the pruner to track its true progress. Data below this + -- watermark can be immediately pruned. + pruner_hi BIGINT NOT NULL +); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-18-003318_epochs_add_json_system_state/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-18-003318_epochs_add_json_system_state/down.sql new file mode 100644 index 0000000000000..9de241cfe20dc --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-18-003318_epochs_add_json_system_state/down.sql @@ -0,0 +1 @@ +ALTER TABLE epochs DROP COLUMN system_state_summary_json; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-18-003318_epochs_add_json_system_state/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-18-003318_epochs_add_json_system_state/up.sql new file mode 100644 index 0000000000000..4dce2a5a9ecfd --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-18-003318_epochs_add_json_system_state/up.sql @@ -0,0 +1 @@ +ALTER TABLE epochs ADD COLUMN system_state_summary_json JSONB; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-19-011238_raw_checkpoints/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-19-011238_raw_checkpoints/down.sql new file mode 100644 index 0000000000000..9cfef48c9b5f6 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-19-011238_raw_checkpoints/down.sql @@ -0,0 +1 @@ +DROP TABLE raw_checkpoints; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-19-011238_raw_checkpoints/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-19-011238_raw_checkpoints/up.sql new file mode 100644 index 0000000000000..26791856ff4c9 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-19-011238_raw_checkpoints/up.sql @@ -0,0 +1,6 @@ +CREATE TABLE raw_checkpoints +( + sequence_number BIGINT PRIMARY KEY, + certified_checkpoint BYTEA NOT NULL, + checkpoint_contents BYTEA NOT NULL +); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-19-121113_tx_affected_objects/down.sql 
b/crates/sui-mvr-indexer/migrations/pg/2024-09-19-121113_tx_affected_objects/down.sql new file mode 100644 index 0000000000000..b0868da73b0f2 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-19-121113_tx_affected_objects/down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS tx_affected_objects; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-19-121113_tx_affected_objects/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-19-121113_tx_affected_objects/up.sql new file mode 100644 index 0000000000000..146f78b2f5063 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-19-121113_tx_affected_objects/up.sql @@ -0,0 +1,9 @@ +CREATE TABLE tx_affected_objects ( + tx_sequence_number BIGINT NOT NULL, + affected BYTEA NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(affected, tx_sequence_number) +); + +CREATE INDEX tx_affected_objects_tx_sequence_number_index ON tx_affected_objects (tx_sequence_number); +CREATE INDEX tx_affected_objects_sender ON tx_affected_objects (sender, affected, tx_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-24-213054_epochs_system_state_nullable/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-24-213054_epochs_system_state_nullable/down.sql new file mode 100644 index 0000000000000..e6697b4849a4e --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-24-213054_epochs_system_state_nullable/down.sql @@ -0,0 +1 @@ +ALTER TABLE epochs ALTER COLUMN system_state SET NOT NULL; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-24-213054_epochs_system_state_nullable/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-24-213054_epochs_system_state_nullable/up.sql new file mode 100644 index 0000000000000..a6e7f167c48cc --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-24-213054_epochs_system_state_nullable/up.sql @@ -0,0 +1 @@ +ALTER TABLE epochs ALTER COLUMN system_state DROP NOT NULL; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135801_event_emit_module_pruning_index/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135801_event_emit_module_pruning_index/down.sql new file mode 100644 index 0000000000000..825855f3d700b --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135801_event_emit_module_pruning_index/down.sql @@ -0,0 +1 @@ +DROP INDEX CONCURRENTLY IF EXISTS event_emit_module_tx_sequence_number; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135801_event_emit_module_pruning_index/metadata.toml b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135801_event_emit_module_pruning_index/metadata.toml new file mode 100644 index 0000000000000..79e9221c1f2a4 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135801_event_emit_module_pruning_index/metadata.toml @@ -0,0 +1 @@ +run_in_transaction = false diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135801_event_emit_module_pruning_index/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135801_event_emit_module_pruning_index/up.sql new file mode 100644 index 0000000000000..ac69bc8488758 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135801_event_emit_module_pruning_index/up.sql @@ -0,0 +1,3 @@ +CREATE INDEX CONCURRENTLY IF NOT EXISTS + event_emit_module_tx_sequence_number +ON event_emit_module (tx_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135802_event_emit_package_pruning_index/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135802_event_emit_package_pruning_index/down.sql new 
file mode 100644 index 0000000000000..30b5fdb6cead6 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135802_event_emit_package_pruning_index/down.sql @@ -0,0 +1 @@ +DROP INDEX CONCURRENTLY IF EXISTS event_emit_package_tx_sequence_number; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135802_event_emit_package_pruning_index/metadata.toml b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135802_event_emit_package_pruning_index/metadata.toml new file mode 100644 index 0000000000000..79e9221c1f2a4 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135802_event_emit_package_pruning_index/metadata.toml @@ -0,0 +1 @@ +run_in_transaction = false diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135802_event_emit_package_pruning_index/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135802_event_emit_package_pruning_index/up.sql new file mode 100644 index 0000000000000..231ab598b4766 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135802_event_emit_package_pruning_index/up.sql @@ -0,0 +1,3 @@ +CREATE INDEX CONCURRENTLY IF NOT EXISTS + event_emit_package_tx_sequence_number +ON event_emit_package (tx_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135803_event_senders_pruning_index/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135803_event_senders_pruning_index/down.sql new file mode 100644 index 0000000000000..e9b5b0b903ed5 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135803_event_senders_pruning_index/down.sql @@ -0,0 +1 @@ +DROP INDEX CONCURRENTLY IF EXISTS event_senders_tx_sequence_number; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135803_event_senders_pruning_index/metadata.toml b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135803_event_senders_pruning_index/metadata.toml new file mode 100644 index 0000000000000..79e9221c1f2a4 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135803_event_senders_pruning_index/metadata.toml @@ -0,0 +1 @@ +run_in_transaction = false diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135803_event_senders_pruning_index/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135803_event_senders_pruning_index/up.sql new file mode 100644 index 0000000000000..b5883b8a3a4ce --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135803_event_senders_pruning_index/up.sql @@ -0,0 +1,3 @@ +CREATE INDEX CONCURRENTLY IF NOT EXISTS + event_senders_tx_sequence_number +ON event_senders (tx_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135804_event_struct_instantiation_pruning_index/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135804_event_struct_instantiation_pruning_index/down.sql new file mode 100644 index 0000000000000..43b1d27d9ed2e --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135804_event_struct_instantiation_pruning_index/down.sql @@ -0,0 +1 @@ +DROP INDEX CONCURRENTLY IF EXISTS event_struct_instantiation_tx_sequence_number; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135804_event_struct_instantiation_pruning_index/metadata.toml b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135804_event_struct_instantiation_pruning_index/metadata.toml new file mode 100644 index 0000000000000..79e9221c1f2a4 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135804_event_struct_instantiation_pruning_index/metadata.toml @@ -0,0 +1 @@ +run_in_transaction = false diff 
--git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135804_event_struct_instantiation_pruning_index/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135804_event_struct_instantiation_pruning_index/up.sql new file mode 100644 index 0000000000000..7847620e936f3 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135804_event_struct_instantiation_pruning_index/up.sql @@ -0,0 +1,3 @@ +CREATE INDEX CONCURRENTLY IF NOT EXISTS + event_struct_instantiation_tx_sequence_number +ON event_struct_instantiation (tx_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135805_event_struct_module_pruning_index/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135805_event_struct_module_pruning_index/down.sql new file mode 100644 index 0000000000000..76606ab0400a6 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135805_event_struct_module_pruning_index/down.sql @@ -0,0 +1 @@ +DROP INDEX CONCURRENTLY IF EXISTS event_struct_module_tx_sequence_number; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135805_event_struct_module_pruning_index/metadata.toml b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135805_event_struct_module_pruning_index/metadata.toml new file mode 100644 index 0000000000000..79e9221c1f2a4 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135805_event_struct_module_pruning_index/metadata.toml @@ -0,0 +1 @@ +run_in_transaction = false diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135805_event_struct_module_pruning_index/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135805_event_struct_module_pruning_index/up.sql new file mode 100644 index 0000000000000..748a4095da169 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135805_event_struct_module_pruning_index/up.sql @@ -0,0 +1,3 @@ +CREATE INDEX CONCURRENTLY IF NOT EXISTS + event_struct_module_tx_sequence_number +ON event_struct_module (tx_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135806_event_struct_name_pruning_index/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135806_event_struct_name_pruning_index/down.sql new file mode 100644 index 0000000000000..944405cf172e3 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135806_event_struct_name_pruning_index/down.sql @@ -0,0 +1 @@ +DROP INDEX CONCURRENTLY IF EXISTS event_struct_name_tx_sequence_number; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135806_event_struct_name_pruning_index/metadata.toml b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135806_event_struct_name_pruning_index/metadata.toml new file mode 100644 index 0000000000000..79e9221c1f2a4 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135806_event_struct_name_pruning_index/metadata.toml @@ -0,0 +1 @@ +run_in_transaction = false diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135806_event_struct_name_pruning_index/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135806_event_struct_name_pruning_index/up.sql new file mode 100644 index 0000000000000..2ca251c139af9 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135806_event_struct_name_pruning_index/up.sql @@ -0,0 +1,3 @@ +CREATE INDEX CONCURRENTLY IF NOT EXISTS + event_struct_name_tx_sequence_number +ON event_struct_name (tx_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135807_event_struct_package_pruning_index/down.sql 
b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135807_event_struct_package_pruning_index/down.sql new file mode 100644 index 0000000000000..40fde7e4578b6 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135807_event_struct_package_pruning_index/down.sql @@ -0,0 +1 @@ +DROP INDEX CONCURRENTLY IF EXISTS event_struct_package_tx_sequence_number; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135807_event_struct_package_pruning_index/metadata.toml b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135807_event_struct_package_pruning_index/metadata.toml new file mode 100644 index 0000000000000..79e9221c1f2a4 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135807_event_struct_package_pruning_index/metadata.toml @@ -0,0 +1 @@ +run_in_transaction = false diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135807_event_struct_package_pruning_index/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135807_event_struct_package_pruning_index/up.sql new file mode 100644 index 0000000000000..00e88fcfb5273 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135807_event_struct_package_pruning_index/up.sql @@ -0,0 +1,3 @@ +CREATE INDEX CONCURRENTLY IF NOT EXISTS + event_struct_package_tx_sequence_number +ON event_struct_package (tx_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135808_tx_calls_fun_pruning_index/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135808_tx_calls_fun_pruning_index/down.sql new file mode 100644 index 0000000000000..da1519d208f7a --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135808_tx_calls_fun_pruning_index/down.sql @@ -0,0 +1 @@ +DROP INDEX CONCURRENTLY IF EXISTS tx_calls_fun_tx_sequence_number; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135808_tx_calls_fun_pruning_index/metadata.toml b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135808_tx_calls_fun_pruning_index/metadata.toml new file mode 100644 index 0000000000000..79e9221c1f2a4 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135808_tx_calls_fun_pruning_index/metadata.toml @@ -0,0 +1 @@ +run_in_transaction = false diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135808_tx_calls_fun_pruning_index/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135808_tx_calls_fun_pruning_index/up.sql new file mode 100644 index 0000000000000..c868f6e55a66f --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135808_tx_calls_fun_pruning_index/up.sql @@ -0,0 +1,3 @@ +CREATE INDEX CONCURRENTLY IF NOT EXISTS + tx_calls_fun_tx_sequence_number +ON tx_calls_fun (tx_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135809_tx_calls_mod_pruning_index/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135809_tx_calls_mod_pruning_index/down.sql new file mode 100644 index 0000000000000..16bf8eb87dbef --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135809_tx_calls_mod_pruning_index/down.sql @@ -0,0 +1 @@ +DROP INDEX CONCURRENTLY IF EXISTS tx_calls_mod_tx_sequence_number; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135809_tx_calls_mod_pruning_index/metadata.toml b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135809_tx_calls_mod_pruning_index/metadata.toml new file mode 100644 index 0000000000000..79e9221c1f2a4 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135809_tx_calls_mod_pruning_index/metadata.toml @@ -0,0 +1 @@ +run_in_transaction = 
false diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135809_tx_calls_mod_pruning_index/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135809_tx_calls_mod_pruning_index/up.sql new file mode 100644 index 0000000000000..debc152d98f2d --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135809_tx_calls_mod_pruning_index/up.sql @@ -0,0 +1,3 @@ +CREATE INDEX CONCURRENTLY IF NOT EXISTS + tx_calls_mod_tx_sequence_number +ON tx_calls_mod (tx_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135810_tx_calls_pkg_pruning_index/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135810_tx_calls_pkg_pruning_index/down.sql new file mode 100644 index 0000000000000..f6ef795109c61 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135810_tx_calls_pkg_pruning_index/down.sql @@ -0,0 +1 @@ +DROP INDEX CONCURRENTLY IF EXISTS tx_calls_pkg_tx_sequence_number; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135810_tx_calls_pkg_pruning_index/metadata.toml b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135810_tx_calls_pkg_pruning_index/metadata.toml new file mode 100644 index 0000000000000..79e9221c1f2a4 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135810_tx_calls_pkg_pruning_index/metadata.toml @@ -0,0 +1 @@ +run_in_transaction = false diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135810_tx_calls_pkg_pruning_index/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135810_tx_calls_pkg_pruning_index/up.sql new file mode 100644 index 0000000000000..0e6c1f1bf7d30 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135810_tx_calls_pkg_pruning_index/up.sql @@ -0,0 +1,3 @@ +CREATE INDEX CONCURRENTLY IF NOT EXISTS + tx_calls_pkg_tx_sequence_number +ON tx_calls_pkg (tx_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135811_tx_changed_objects_pruning_index/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135811_tx_changed_objects_pruning_index/down.sql new file mode 100644 index 0000000000000..1dfcf480b9e86 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135811_tx_changed_objects_pruning_index/down.sql @@ -0,0 +1 @@ +DROP INDEX CONCURRENTLY IF EXISTS tx_changed_objects_tx_sequence_number; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135811_tx_changed_objects_pruning_index/metadata.toml b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135811_tx_changed_objects_pruning_index/metadata.toml new file mode 100644 index 0000000000000..79e9221c1f2a4 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135811_tx_changed_objects_pruning_index/metadata.toml @@ -0,0 +1 @@ +run_in_transaction = false diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135811_tx_changed_objects_pruning_index/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135811_tx_changed_objects_pruning_index/up.sql new file mode 100644 index 0000000000000..4ef5b459dbf05 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135811_tx_changed_objects_pruning_index/up.sql @@ -0,0 +1,3 @@ +CREATE INDEX CONCURRENTLY IF NOT EXISTS + tx_changed_objects_tx_sequence_number +ON tx_changed_objects (tx_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135812_tx_digests_pruning_index/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135812_tx_digests_pruning_index/down.sql new file mode 100644 index 0000000000000..d0bd714bc60b2 --- /dev/null +++ 
b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135812_tx_digests_pruning_index/down.sql @@ -0,0 +1 @@ +DROP INDEX CONCURRENTLY IF EXISTS tx_digests_tx_sequence_number; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135812_tx_digests_pruning_index/metadata.toml b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135812_tx_digests_pruning_index/metadata.toml new file mode 100644 index 0000000000000..79e9221c1f2a4 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135812_tx_digests_pruning_index/metadata.toml @@ -0,0 +1 @@ +run_in_transaction = false diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135812_tx_digests_pruning_index/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135812_tx_digests_pruning_index/up.sql new file mode 100644 index 0000000000000..efdff9cbe7a56 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135812_tx_digests_pruning_index/up.sql @@ -0,0 +1,3 @@ +CREATE INDEX CONCURRENTLY IF NOT EXISTS + tx_digests_tx_sequence_number +ON tx_digests (tx_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135813_tx_input_objects_pruning_index/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135813_tx_input_objects_pruning_index/down.sql new file mode 100644 index 0000000000000..5061884270f6b --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135813_tx_input_objects_pruning_index/down.sql @@ -0,0 +1 @@ +DROP INDEX CONCURRENTLY IF EXISTS tx_input_objects_tx_sequence_number; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135813_tx_input_objects_pruning_index/metadata.toml b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135813_tx_input_objects_pruning_index/metadata.toml new file mode 100644 index 0000000000000..79e9221c1f2a4 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135813_tx_input_objects_pruning_index/metadata.toml @@ -0,0 +1 @@ +run_in_transaction = false diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135813_tx_input_objects_pruning_index/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135813_tx_input_objects_pruning_index/up.sql new file mode 100644 index 0000000000000..39d01c598a4a5 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135813_tx_input_objects_pruning_index/up.sql @@ -0,0 +1,3 @@ +CREATE INDEX CONCURRENTLY IF NOT EXISTS + tx_input_objects_tx_sequence_number +ON tx_input_objects (tx_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135814_tx_kinds_pruning_index/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135814_tx_kinds_pruning_index/down.sql new file mode 100644 index 0000000000000..10f5e96b1ce17 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135814_tx_kinds_pruning_index/down.sql @@ -0,0 +1 @@ +DROP INDEX CONCURRENTLY IF EXISTS tx_kinds_tx_sequence_number; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135814_tx_kinds_pruning_index/metadata.toml b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135814_tx_kinds_pruning_index/metadata.toml new file mode 100644 index 0000000000000..79e9221c1f2a4 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135814_tx_kinds_pruning_index/metadata.toml @@ -0,0 +1 @@ +run_in_transaction = false diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135814_tx_kinds_pruning_index/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135814_tx_kinds_pruning_index/up.sql new file mode 100644 index 0000000000000..6227a18f8eb46 --- 
/dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135814_tx_kinds_pruning_index/up.sql @@ -0,0 +1,3 @@ +CREATE INDEX CONCURRENTLY IF NOT EXISTS + tx_kinds_tx_sequence_number +ON tx_kinds (tx_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135815_tx_recipients_pruning_index/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135815_tx_recipients_pruning_index/down.sql new file mode 100644 index 0000000000000..138ba02741229 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135815_tx_recipients_pruning_index/down.sql @@ -0,0 +1 @@ +DROP INDEX CONCURRENTLY IF EXISTS tx_recipients_tx_sequence_number; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135815_tx_recipients_pruning_index/metadata.toml b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135815_tx_recipients_pruning_index/metadata.toml new file mode 100644 index 0000000000000..79e9221c1f2a4 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135815_tx_recipients_pruning_index/metadata.toml @@ -0,0 +1 @@ +run_in_transaction = false diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135815_tx_recipients_pruning_index/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135815_tx_recipients_pruning_index/up.sql new file mode 100644 index 0000000000000..d2294ac2561ed --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135815_tx_recipients_pruning_index/up.sql @@ -0,0 +1,3 @@ +CREATE INDEX CONCURRENTLY IF NOT EXISTS + tx_recipients_tx_sequence_number +ON tx_recipients (tx_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135816_tx_senders_pruning_index/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135816_tx_senders_pruning_index/down.sql new file mode 100644 index 0000000000000..c09d44eb4660d --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135816_tx_senders_pruning_index/down.sql @@ -0,0 +1 @@ +DROP INDEX CONCURRENTLY IF EXISTS tx_senders_tx_sequence_number; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135816_tx_senders_pruning_index/metadata.toml b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135816_tx_senders_pruning_index/metadata.toml new file mode 100644 index 0000000000000..79e9221c1f2a4 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135816_tx_senders_pruning_index/metadata.toml @@ -0,0 +1 @@ +run_in_transaction = false diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135816_tx_senders_pruning_index/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135816_tx_senders_pruning_index/up.sql new file mode 100644 index 0000000000000..f22a06e2bb548 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-25-135816_tx_senders_pruning_index/up.sql @@ -0,0 +1,3 @@ +CREATE INDEX CONCURRENTLY IF NOT EXISTS + tx_senders_tx_sequence_number +ON tx_senders (tx_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-30-153705_add_event_sender/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-30-153705_add_event_sender/down.sql new file mode 100644 index 0000000000000..89e6710d92f1b --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-30-153705_add_event_sender/down.sql @@ -0,0 +1 @@ +ALTER TABLE events DROP COLUMN sender; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-09-30-153705_add_event_sender/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-09-30-153705_add_event_sender/up.sql new file mode 100644 index 0000000000000..7ea312c09453c --- /dev/null 
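
Each of the migrations above follows the same pattern: a secondary index on tx_sequence_number, built with CREATE INDEX CONCURRENTLY, which is why every one of them carries run_in_transaction = false (a concurrent index build cannot run inside a transaction block). The indexes exist so the pruner can delete old rows by sequence-number range without scanning the whole table. A sketch of the kind of delete they are meant to support, with a hypothetical cutoff that would in practice come from the watermarks table:

    -- Illustrative pruning delete against one of the indexed tables.
    DELETE FROM tx_calls_fun
    WHERE tx_sequence_number < 7000000;
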
+++ b/crates/sui-mvr-indexer/migrations/pg/2024-09-30-153705_add_event_sender/up.sql @@ -0,0 +1 @@ +ALTER TABLE events ADD COLUMN sender BYTEA; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-10-08-025030_partial_index_instead/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-10-08-025030_partial_index_instead/down.sql new file mode 100644 index 0000000000000..82659b80658c0 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-10-08-025030_partial_index_instead/down.sql @@ -0,0 +1,7 @@ +-- Drop the new partial indices +DROP INDEX IF EXISTS objects_history_owner_partial; +DROP INDEX IF EXISTS objects_history_coin_owner_partial; +DROP INDEX IF EXISTS objects_history_coin_only_partial; +DROP INDEX IF EXISTS objects_history_type_partial; +DROP INDEX IF EXISTS objects_history_package_module_name_full_type_partial; +DROP INDEX IF EXISTS objects_history_owner_package_module_name_full_type_partial; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-10-08-025030_partial_index_instead/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-10-08-025030_partial_index_instead/up.sql new file mode 100644 index 0000000000000..800f77b3f540b --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-10-08-025030_partial_index_instead/up.sql @@ -0,0 +1,18 @@ +-- Create new partial indices with object_status = 0 condition +CREATE INDEX IF NOT EXISTS objects_history_owner_partial ON objects_history (checkpoint_sequence_number, owner_type, owner_id) +WHERE owner_type BETWEEN 1 AND 2 AND owner_id IS NOT NULL AND object_status = 0; + +CREATE INDEX IF NOT EXISTS objects_history_coin_owner_partial ON objects_history (checkpoint_sequence_number, owner_id, coin_type, object_id) +WHERE coin_type IS NOT NULL AND owner_type = 1 AND object_status = 0; + +CREATE INDEX IF NOT EXISTS objects_history_coin_only_partial ON objects_history (checkpoint_sequence_number, coin_type, object_id) +WHERE coin_type IS NOT NULL AND object_status = 0; + +CREATE INDEX IF NOT EXISTS objects_history_type_partial ON objects_history (checkpoint_sequence_number, object_type) +WHERE object_status = 0; + +CREATE INDEX IF NOT EXISTS objects_history_package_module_name_full_type_partial ON objects_history (checkpoint_sequence_number, object_type_package, object_type_module, object_type_name, object_type) +WHERE object_status = 0; + +CREATE INDEX IF NOT EXISTS objects_history_owner_package_module_name_full_type_partial ON objects_history (checkpoint_sequence_number, owner_id, object_type_package, object_type_module, object_type_name, object_type) +WHERE object_status = 0; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-10-09-180628_add_network_total_transactions_to_epochs/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-10-09-180628_add_network_total_transactions_to_epochs/down.sql new file mode 100644 index 0000000000000..e088120452e58 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-10-09-180628_add_network_total_transactions_to_epochs/down.sql @@ -0,0 +1 @@ +ALTER TABLE epochs DROP COLUMN first_tx_sequence_number; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-10-09-180628_add_network_total_transactions_to_epochs/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-10-09-180628_add_network_total_transactions_to_epochs/up.sql new file mode 100644 index 0000000000000..becdb61fe5e83 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-10-09-180628_add_network_total_transactions_to_epochs/up.sql @@ -0,0 +1 @@ +ALTER TABLE epochs ADD COLUMN first_tx_sequence_number bigint; diff --git 
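
The replacement objects_history indices above are partial: they only cover rows with object_status = 0, which keeps them far smaller than full-table indices. For the planner to use them, queries must repeat the object_status = 0 predicate. A query sketch of the shape such an index can serve, with placeholder values and an assumed owner_type convention (1 for address-owned):

    -- Owned-object lookup at a checkpoint; only rows matching the partial predicate qualify.
    SELECT object_id, object_version
    FROM objects_history
    WHERE checkpoint_sequence_number = 123456
      AND owner_type = 1                 -- address owner (assumed convention)
      AND owner_id = '\x1234'::bytea     -- placeholder address
      AND object_status = 0;
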
a/crates/sui-mvr-indexer/migrations/pg/2024-10-25-153629_remove_objects_history_full_indices/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-10-25-153629_remove_objects_history_full_indices/down.sql new file mode 100644 index 0000000000000..807c01dca462d --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-10-25-153629_remove_objects_history_full_indices/down.sql @@ -0,0 +1,6 @@ +CREATE INDEX IF NOT EXISTS objects_history_owner ON objects_history (checkpoint_sequence_number, owner_type, owner_id) WHERE owner_type BETWEEN 1 AND 2 AND owner_id IS NOT NULL; +CREATE INDEX IF NOT EXISTS objects_history_coin_owner ON objects_history (checkpoint_sequence_number, owner_id, coin_type, object_id) WHERE coin_type IS NOT NULL AND owner_type = 1; +CREATE INDEX IF NOT EXISTS objects_history_coin_only ON objects_history (checkpoint_sequence_number, coin_type, object_id) WHERE coin_type IS NOT NULL; +CREATE INDEX IF NOT EXISTS objects_history_type ON objects_history (checkpoint_sequence_number, object_type); +CREATE INDEX IF NOT EXISTS objects_history_package_module_name_full_type ON objects_history (checkpoint_sequence_number, object_type_package, object_type_module, object_type_name, object_type); +CREATE INDEX IF NOT EXISTS objects_history_owner_package_module_name_full_type ON objects_history (checkpoint_sequence_number, owner_id, object_type_package, object_type_module, object_type_name, object_type); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-10-25-153629_remove_objects_history_full_indices/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-10-25-153629_remove_objects_history_full_indices/up.sql new file mode 100644 index 0000000000000..754e719819f1e --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-10-25-153629_remove_objects_history_full_indices/up.sql @@ -0,0 +1,6 @@ +DROP INDEX IF EXISTS objects_history_owner; +DROP INDEX IF EXISTS objects_history_coin_owner; +DROP INDEX IF EXISTS objects_history_coin_only; +DROP INDEX IF EXISTS objects_history_type; +DROP INDEX IF EXISTS objects_history_package_module_name_full_type; +DROP INDEX IF EXISTS objects_history_owner_package_module_name_full_type; diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-10-30-005153_drop_tx_sender_and_recipient/down.sql b/crates/sui-mvr-indexer/migrations/pg/2024-10-30-005153_drop_tx_sender_and_recipient/down.sql new file mode 100644 index 0000000000000..b9fcef3e1f439 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-10-30-005153_drop_tx_sender_and_recipient/down.sql @@ -0,0 +1,18 @@ +CREATE TABLE tx_senders ( + tx_sequence_number BIGINT NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(sender, tx_sequence_number) +); + +CREATE INDEX IF NOT EXISTS tx_senders_tx_sequence_number + ON tx_senders (tx_sequence_number); + +CREATE TABLE tx_recipients ( + tx_sequence_number BIGINT NOT NULL, + recipient BYTEA NOT NULL, + sender BYTEA NOT NULL, + PRIMARY KEY(recipient, tx_sequence_number) +); + +CREATE INDEX IF NOT EXISTS tx_recipients_sender + ON tx_recipients (sender, recipient, tx_sequence_number); diff --git a/crates/sui-mvr-indexer/migrations/pg/2024-10-30-005153_drop_tx_sender_and_recipient/up.sql b/crates/sui-mvr-indexer/migrations/pg/2024-10-30-005153_drop_tx_sender_and_recipient/up.sql new file mode 100644 index 0000000000000..fb259ea615d84 --- /dev/null +++ b/crates/sui-mvr-indexer/migrations/pg/2024-10-30-005153_drop_tx_sender_and_recipient/up.sql @@ -0,0 +1,2 @@ +DROP TABLE IF EXISTS tx_senders; +DROP TABLE IF EXISTS tx_recipients; diff --git 
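
With tx_senders and tx_recipients dropped here, sender- and recipient-style lookups presumably go through tx_affected_addresses (created earlier in this migration series), whose primary key and (sender, affected, tx_sequence_number) index cover both shapes. A hedged sketch of the two lookups, with placeholder addresses:

    -- Transactions that affected an address (the old tx_recipients use case).
    SELECT tx_sequence_number
    FROM tx_affected_addresses
    WHERE affected = '\xabcd'::bytea
    ORDER BY tx_sequence_number
    LIMIT 50;

    -- Transactions sent by one address that affected another.
    SELECT tx_sequence_number
    FROM tx_affected_addresses
    WHERE sender = '\x1234'::bytea
      AND affected = '\xabcd'::bytea
    ORDER BY tx_sequence_number
    LIMIT 50;
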
a/crates/sui-mvr-indexer/src/apis/coin_api.rs b/crates/sui-mvr-indexer/src/apis/coin_api.rs new file mode 100644 index 0000000000000..13a5a2c55a819 --- /dev/null +++ b/crates/sui-mvr-indexer/src/apis/coin_api.rs @@ -0,0 +1,153 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::indexer_reader::IndexerReader; +use async_trait::async_trait; +use jsonrpsee::core::RpcResult; +use jsonrpsee::RpcModule; +use sui_json_rpc::coin_api::{parse_to_struct_tag, parse_to_type_tag}; +use sui_json_rpc::SuiRpcModule; +use sui_json_rpc_api::{cap_page_limit, CoinReadApiServer}; +use sui_json_rpc_types::{Balance, CoinPage, Page, SuiCoinMetadata}; +use sui_open_rpc::Module; +use sui_types::balance::Supply; +use sui_types::base_types::{ObjectID, SuiAddress}; +use sui_types::gas_coin::{GAS, TOTAL_SUPPLY_MIST}; + +pub(crate) struct CoinReadApi { + inner: IndexerReader, +} + +impl CoinReadApi { + pub fn new(inner: IndexerReader) -> Self { + Self { inner } + } +} + +#[async_trait] +impl CoinReadApiServer for CoinReadApi { + async fn get_coins( + &self, + owner: SuiAddress, + coin_type: Option, + cursor: Option, + limit: Option, + ) -> RpcResult { + let limit = cap_page_limit(limit); + if limit == 0 { + return Ok(CoinPage::empty()); + } + + // Normalize coin type tag and default to Gas + let coin_type = + parse_to_type_tag(coin_type)?.to_canonical_string(/* with_prefix */ true); + + let cursor = match cursor { + Some(c) => c, + // If cursor is not specified, we need to start from the beginning of the coin type, which is the minimal possible ObjectID. + None => ObjectID::ZERO, + }; + let mut results = self + .inner + .get_owned_coins(owner, Some(coin_type), cursor, limit + 1) + .await?; + + let has_next_page = results.len() > limit; + results.truncate(limit); + let next_cursor = results.last().map(|o| o.coin_object_id); + Ok(Page { + data: results, + next_cursor, + has_next_page, + }) + } + + async fn get_all_coins( + &self, + owner: SuiAddress, + cursor: Option, + limit: Option, + ) -> RpcResult { + let limit = cap_page_limit(limit); + if limit == 0 { + return Ok(CoinPage::empty()); + } + + let cursor = match cursor { + Some(c) => c, + // If cursor is not specified, we need to start from the beginning of the coin type, which is the minimal possible ObjectID. 
+ None => ObjectID::ZERO, + }; + let mut results = self + .inner + .get_owned_coins(owner, None, cursor, limit + 1) + .await?; + + let has_next_page = results.len() > limit; + results.truncate(limit); + let next_cursor = results.last().map(|o| o.coin_object_id); + Ok(Page { + data: results, + next_cursor, + has_next_page, + }) + } + + async fn get_balance( + &self, + owner: SuiAddress, + coin_type: Option, + ) -> RpcResult { + // Normalize coin type tag and default to Gas + let coin_type = + parse_to_type_tag(coin_type)?.to_canonical_string(/* with_prefix */ true); + + let mut results = self + .inner + .get_coin_balances(owner, Some(coin_type.clone())) + .await?; + if results.is_empty() { + return Ok(Balance::zero(coin_type)); + } + Ok(results.swap_remove(0)) + } + + async fn get_all_balances(&self, owner: SuiAddress) -> RpcResult> { + self.inner + .get_coin_balances(owner, None) + .await + .map_err(Into::into) + } + + async fn get_coin_metadata(&self, coin_type: String) -> RpcResult> { + let coin_struct = parse_to_struct_tag(&coin_type)?; + self.inner + .get_coin_metadata(coin_struct) + .await + .map_err(Into::into) + } + + async fn get_total_supply(&self, coin_type: String) -> RpcResult { + let coin_struct = parse_to_struct_tag(&coin_type)?; + if GAS::is_gas(&coin_struct) { + Ok(Supply { + value: TOTAL_SUPPLY_MIST, + }) + } else { + self.inner + .get_total_supply(coin_struct) + .await + .map_err(Into::into) + } + } +} + +impl SuiRpcModule for CoinReadApi { + fn rpc(self) -> RpcModule { + self.into_rpc() + } + + fn rpc_doc_module() -> Module { + sui_json_rpc_api::CoinReadApiOpenRpc::module_doc() + } +} diff --git a/crates/sui-mvr-indexer/src/apis/extended_api.rs b/crates/sui-mvr-indexer/src/apis/extended_api.rs new file mode 100644 index 0000000000000..9b9827ea2bbe1 --- /dev/null +++ b/crates/sui-mvr-indexer/src/apis/extended_api.rs @@ -0,0 +1,83 @@ +// Copyright (c) Mysten Labs, Inc. 
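
get_coins and get_all_coins use the cursor pattern common to this crate: fetch limit + 1 rows starting at the cursor (ObjectID::ZERO when none is given), use the extra row only to compute has_next_page, then truncate and take the last returned coin's id as next_cursor. The reader's underlying query is not part of this diff; the following SQL is only a rough sketch of its shape, assuming the live objects table carries the same owner and coin columns that the objects_history indices earlier in this diff reference, and leaving exact cursor comparison semantics aside:

    -- Hypothetical owned-coins page keyed by object_id as the cursor.
    SELECT object_id, object_version, coin_type
    FROM objects
    WHERE owner_type = 1                      -- address owner (assumed convention)
      AND owner_id = '\x1234'::bytea          -- placeholder owner
      AND coin_type = '0x2::sui::SUI'         -- canonical coin type string in practice
      AND object_id > '\x0000'::bytea         -- cursor; ObjectID::ZERO when unset
    ORDER BY object_id
    LIMIT 51;                                 -- limit + 1 (here limit = 50) to detect a next page
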
+// SPDX-License-Identifier: Apache-2.0 + +use crate::indexer_reader::IndexerReader; +use jsonrpsee::{core::RpcResult, RpcModule}; +use sui_json_rpc::SuiRpcModule; +use sui_json_rpc_api::{validate_limit, ExtendedApiServer, QUERY_MAX_RESULT_LIMIT_CHECKPOINTS}; +use sui_json_rpc_types::{ + CheckpointedObjectID, EpochInfo, EpochPage, Page, QueryObjectsPage, SuiObjectResponseQuery, +}; +use sui_open_rpc::Module; +use sui_types::sui_serde::BigInt; + +pub(crate) struct ExtendedApi { + inner: IndexerReader, +} + +impl ExtendedApi { + pub fn new(inner: IndexerReader) -> Self { + Self { inner } + } +} + +#[async_trait::async_trait] +impl ExtendedApiServer for ExtendedApi { + async fn get_epochs( + &self, + cursor: Option>, + limit: Option, + descending_order: Option, + ) -> RpcResult { + let limit = validate_limit(limit, QUERY_MAX_RESULT_LIMIT_CHECKPOINTS)?; + let mut epochs = self + .inner + .get_epochs( + cursor.map(|x| *x), + limit + 1, + descending_order.unwrap_or(false), + ) + .await?; + + let has_next_page = epochs.len() > limit; + epochs.truncate(limit); + let next_cursor = epochs.last().map(|e| e.epoch); + Ok(Page { + data: epochs, + next_cursor: next_cursor.map(|id| id.into()), + has_next_page, + }) + } + + async fn get_current_epoch(&self) -> RpcResult { + let stored_epoch = self.inner.get_latest_epoch_info_from_db().await?; + EpochInfo::try_from(stored_epoch).map_err(Into::into) + } + + async fn query_objects( + &self, + _query: SuiObjectResponseQuery, + _cursor: Option, + _limit: Option, + ) -> RpcResult { + Err(jsonrpsee::types::error::CallError::Custom( + jsonrpsee::types::error::ErrorCode::MethodNotFound.into(), + ) + .into()) + } + + async fn get_total_transactions(&self) -> RpcResult> { + let latest_checkpoint = self.inner.get_latest_checkpoint().await?; + Ok(latest_checkpoint.network_total_transactions.into()) + } +} + +impl SuiRpcModule for ExtendedApi { + fn rpc(self) -> RpcModule { + self.into_rpc() + } + + fn rpc_doc_module() -> Module { + sui_json_rpc_api::ExtendedApiOpenRpc::module_doc() + } +} diff --git a/crates/sui-mvr-indexer/src/apis/governance_api.rs b/crates/sui-mvr-indexer/src/apis/governance_api.rs new file mode 100644 index 0000000000000..0cb52dc8e3a11 --- /dev/null +++ b/crates/sui-mvr-indexer/src/apis/governance_api.rs @@ -0,0 +1,295 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use std::collections::BTreeMap; + +use crate::{errors::IndexerError, indexer_reader::IndexerReader}; +use async_trait::async_trait; +use jsonrpsee::{core::RpcResult, RpcModule}; + +use cached::{proc_macro::cached, SizedCache}; +use sui_json_rpc::{governance_api::ValidatorExchangeRates, SuiRpcModule}; +use sui_json_rpc_api::GovernanceReadApiServer; +use sui_json_rpc_types::{ + DelegatedStake, EpochInfo, StakeStatus, SuiCommittee, SuiObjectDataFilter, ValidatorApys, +}; +use sui_open_rpc::Module; +use sui_types::{ + base_types::{MoveObjectType, ObjectID, SuiAddress}, + committee::EpochId, + governance::StakedSui, + sui_serde::BigInt, + sui_system_state::{sui_system_state_summary::SuiSystemStateSummary, PoolTokenExchangeRate}, +}; + +#[derive(Clone)] +pub struct GovernanceReadApi { + inner: IndexerReader, +} + +impl GovernanceReadApi { + pub fn new(inner: IndexerReader) -> Self { + Self { inner } + } + + pub async fn get_epoch_info(&self, epoch: Option) -> Result { + match self.inner.get_epoch_info(epoch).await { + Ok(Some(epoch_info)) => Ok(epoch_info), + Ok(None) => Err(IndexerError::InvalidArgumentError(format!( + "Missing epoch {epoch:?}" + ))), + Err(e) => Err(e), + } + } + + async fn get_latest_sui_system_state(&self) -> Result { + self.inner.get_latest_sui_system_state().await + } + + async fn get_stakes_by_ids( + &self, + ids: Vec, + ) -> Result, IndexerError> { + let mut stakes = vec![]; + for stored_object in self.inner.multi_get_objects(ids).await? { + let object = sui_types::object::Object::try_from(stored_object)?; + let stake_object = StakedSui::try_from(&object)?; + stakes.push(stake_object); + } + + self.get_delegated_stakes(stakes).await + } + + async fn get_staked_by_owner( + &self, + owner: SuiAddress, + ) -> Result, IndexerError> { + let mut stakes = vec![]; + for stored_object in self + .inner + .get_owned_objects( + owner, + Some(SuiObjectDataFilter::StructType( + MoveObjectType::staked_sui().into(), + )), + None, + // Allow querying for up to 1000 staked objects + 1000, + ) + .await? + { + let object = sui_types::object::Object::try_from(stored_object)?; + let stake_object = StakedSui::try_from(&object)?; + stakes.push(stake_object); + } + + self.get_delegated_stakes(stakes).await + } + + pub async fn get_delegated_stakes( + &self, + stakes: Vec, + ) -> Result, IndexerError> { + let pools = stakes + .into_iter() + .fold(BTreeMap::<_, Vec<_>>::new(), |mut pools, stake| { + pools.entry(stake.pool_id()).or_default().push(stake); + pools + }); + + let system_state_summary = self.get_latest_sui_system_state().await?; + let epoch = system_state_summary.epoch; + + let rates = exchange_rates(self, &system_state_summary) + .await? 
+ .into_iter() + .map(|rates| (rates.pool_id, rates)) + .collect::>(); + + let mut delegated_stakes = vec![]; + for (pool_id, stakes) in pools { + // Rate table and rate can be null when the pool is not active + let rate_table = rates.get(&pool_id).ok_or_else(|| { + IndexerError::InvalidArgumentError( + "Cannot find rates for staking pool {pool_id}".to_string(), + ) + })?; + let current_rate = rate_table.rates.first().map(|(_, rate)| rate); + + let mut delegations = vec![]; + for stake in stakes { + let status = if epoch >= stake.activation_epoch() { + let estimated_reward = if let Some(current_rate) = current_rate { + let stake_rate = rate_table + .rates + .iter() + .find_map(|(epoch, rate)| { + if *epoch == stake.activation_epoch() { + Some(rate.clone()) + } else { + None + } + }) + .unwrap_or_default(); + let estimated_reward = ((stake_rate.rate() / current_rate.rate()) - 1.0) + * stake.principal() as f64; + std::cmp::max(0, estimated_reward.round() as u64) + } else { + 0 + }; + StakeStatus::Active { estimated_reward } + } else { + StakeStatus::Pending + }; + delegations.push(sui_json_rpc_types::Stake { + staked_sui_id: stake.id(), + // TODO: this might change when we implement warm up period. + stake_request_epoch: stake.activation_epoch().saturating_sub(1), + stake_active_epoch: stake.activation_epoch(), + principal: stake.principal(), + status, + }) + } + delegated_stakes.push(DelegatedStake { + validator_address: rate_table.address, + staking_pool: pool_id, + stakes: delegations, + }) + } + Ok(delegated_stakes) + } +} + +/// Cached exchange rates for validators for the given epoch, the cache size is 1, it will be cleared when the epoch changes. +/// rates are in descending order by epoch. +#[cached( + type = "SizedCache>", + create = "{ SizedCache::with_size(1) }", + convert = " { system_state_summary.epoch } ", + result = true +)] +pub async fn exchange_rates( + state: &GovernanceReadApi, + system_state_summary: &SuiSystemStateSummary, +) -> Result, IndexerError> { + // Get validator rate tables + let mut tables = vec![]; + + for validator in &system_state_summary.active_validators { + tables.push(( + validator.sui_address, + validator.staking_pool_id, + validator.exchange_rates_id, + validator.exchange_rates_size, + true, + )); + } + + // Get inactive validator rate tables + for df in state + .inner + .get_dynamic_fields( + system_state_summary.inactive_pools_id, + None, + system_state_summary.inactive_pools_size as usize, + ) + .await? + { + let pool_id: sui_types::id::ID = bcs::from_bytes(&df.bcs_name).map_err(|e| { + sui_types::error::SuiError::ObjectDeserializationError { + error: e.to_string(), + } + })?; + let inactive_pools_id = system_state_summary.inactive_pools_id; + let validator = state + .inner + .get_validator_from_table(inactive_pools_id, pool_id) + .await?; + tables.push(( + validator.sui_address, + validator.staking_pool_id, + validator.exchange_rates_id, + validator.exchange_rates_size, + false, + )); + } + + let mut exchange_rates = vec![]; + // Get exchange rates for each validator + for (address, pool_id, exchange_rates_id, exchange_rates_size, active) in tables { + let mut rates = vec![]; + for df in state + .inner + .get_dynamic_fields_raw(exchange_rates_id, None, exchange_rates_size as usize) + .await? 
+ { + let dynamic_field = df + .to_dynamic_field::() + .ok_or_else(|| sui_types::error::SuiError::ObjectDeserializationError { + error: "dynamic field malformed".to_owned(), + })?; + + rates.push((dynamic_field.name, dynamic_field.value)); + } + + rates.sort_by(|(a, _), (b, _)| a.cmp(b).reverse()); + + exchange_rates.push(ValidatorExchangeRates { + address, + pool_id, + active, + rates, + }); + } + Ok(exchange_rates) +} + +#[async_trait] +impl GovernanceReadApiServer for GovernanceReadApi { + async fn get_stakes_by_ids( + &self, + staked_sui_ids: Vec, + ) -> RpcResult> { + self.get_stakes_by_ids(staked_sui_ids) + .await + .map_err(Into::into) + } + + async fn get_stakes(&self, owner: SuiAddress) -> RpcResult> { + self.get_staked_by_owner(owner).await.map_err(Into::into) + } + + async fn get_committee_info(&self, epoch: Option>) -> RpcResult { + let epoch = self.get_epoch_info(epoch.as_deref().copied()).await?; + Ok(epoch.committee().map_err(IndexerError::from)?.into()) + } + + async fn get_latest_sui_system_state(&self) -> RpcResult { + self.get_latest_sui_system_state().await.map_err(Into::into) + } + + async fn get_reference_gas_price(&self) -> RpcResult> { + let epoch = self.get_epoch_info(None).await?; + Ok(BigInt::from(epoch.reference_gas_price.ok_or_else( + || { + IndexerError::PersistentStorageDataCorruptionError( + "missing latest reference gas price".to_owned(), + ) + }, + )?)) + } + + async fn get_validators_apy(&self) -> RpcResult { + Ok(self.get_validators_apy().await?) + } +} + +impl SuiRpcModule for GovernanceReadApi { + fn rpc(self) -> RpcModule { + self.into_rpc() + } + + fn rpc_doc_module() -> Module { + sui_json_rpc_api::GovernanceReadApiOpenRpc::module_doc() + } +} diff --git a/crates/sui-mvr-indexer/src/apis/indexer_api.rs b/crates/sui-mvr-indexer/src/apis/indexer_api.rs new file mode 100644 index 0000000000000..7c3dbf0308f16 --- /dev/null +++ b/crates/sui-mvr-indexer/src/apis/indexer_api.rs @@ -0,0 +1,428 @@ +// Copyright (c) Mysten Labs, Inc. 
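
In get_delegated_stakes above, the estimated reward compares the pool's exchange rate at the stake's activation epoch against the current rate and floors the result at zero via std::cmp::max. With illustrative numbers (rate() falling as rewards accrue), the calculation works out as:

    estimated_reward = (rate_at_activation / rate_current - 1) * principal
                     = (1.00 / 0.95 - 1) * 1_000_000_000
                     ≈ 52_631_579 MIST

The rates and principal here are placeholders; only the formula itself comes from the code above.
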
+// SPDX-License-Identifier: Apache-2.0 + +use async_trait::async_trait; +use jsonrpsee::core::RpcResult; +use jsonrpsee::types::SubscriptionEmptyError; +use jsonrpsee::types::SubscriptionResult; +use jsonrpsee::{RpcModule, SubscriptionSink}; +use tap::TapFallible; + +use sui_json_rpc::name_service::{Domain, NameRecord, NameServiceConfig, NameServiceError}; +use sui_json_rpc::SuiRpcModule; +use sui_json_rpc_api::{cap_page_limit, IndexerApiServer}; +use sui_json_rpc_types::{ + DynamicFieldPage, EventFilter, EventPage, ObjectsPage, Page, SuiObjectResponse, + SuiObjectResponseQuery, SuiTransactionBlockResponseQuery, TransactionBlocksPage, + TransactionFilter, +}; +use sui_open_rpc::Module; +use sui_types::base_types::{ObjectID, SuiAddress}; +use sui_types::digests::TransactionDigest; +use sui_types::dynamic_field::{DynamicFieldName, Field}; +use sui_types::error::SuiObjectResponseError; +use sui_types::event::EventID; +use sui_types::object::ObjectRead; +use sui_types::TypeTag; + +use crate::indexer_reader::IndexerReader; +use crate::IndexerError; + +pub(crate) struct IndexerApi { + inner: IndexerReader, + name_service_config: NameServiceConfig, +} + +impl IndexerApi { + pub fn new(inner: IndexerReader, name_service_config: NameServiceConfig) -> Self { + Self { + inner, + name_service_config, + } + } + + async fn get_owned_objects_internal( + &self, + address: SuiAddress, + query: Option, + cursor: Option, + limit: usize, + ) -> RpcResult { + let SuiObjectResponseQuery { filter, options } = query.unwrap_or_default(); + let options = options.unwrap_or_default(); + let objects = self + .inner + .get_owned_objects(address, filter, cursor, limit + 1) + .await?; + + let mut object_futures = vec![]; + for object in objects { + object_futures.push(tokio::task::spawn( + object.try_into_object_read(self.inner.package_resolver()), + )); + } + let mut objects = futures::future::join_all(object_futures) + .await + .into_iter() + .collect::, _>>() + .map_err(|e| { + tracing::error!("Error joining object read futures."); + jsonrpsee::core::Error::Custom(format!("Error joining object read futures. {}", e)) + })? 
+ .into_iter() + .collect::, _>>() + .tap_err(|e| tracing::error!("Error converting object to object read: {}", e))?; + let has_next_page = objects.len() > limit; + objects.truncate(limit); + + let next_cursor = objects.last().map(|o_read| o_read.object_id()); + let mut parallel_tasks = vec![]; + for o in objects { + let inner_clone = self.inner.clone(); + let options = options.clone(); + parallel_tasks.push(tokio::task::spawn(async move { + match o { + ObjectRead::NotExists(id) => Ok(SuiObjectResponse::new_with_error( + SuiObjectResponseError::NotExists { object_id: id }, + )), + ObjectRead::Exists(object_ref, o, layout) => { + if options.show_display { + match inner_clone.get_display_fields(&o, &layout).await { + Ok(rendered_fields) => Ok(SuiObjectResponse::new_with_data( + (object_ref, o, layout, options, Some(rendered_fields)) + .try_into()?, + )), + Err(e) => Ok(SuiObjectResponse::new( + Some((object_ref, o, layout, options, None).try_into()?), + Some(SuiObjectResponseError::DisplayError { + error: e.to_string(), + }), + )), + } + } else { + Ok(SuiObjectResponse::new_with_data( + (object_ref, o, layout, options, None).try_into()?, + )) + } + } + ObjectRead::Deleted((object_id, version, digest)) => Ok( + SuiObjectResponse::new_with_error(SuiObjectResponseError::Deleted { + object_id, + version, + digest, + }), + ), + } + })); + } + let data = futures::future::join_all(parallel_tasks) + .await + .into_iter() + .collect::, _>>() + .map_err(|e: tokio::task::JoinError| anyhow::anyhow!(e))? + .into_iter() + .collect::, anyhow::Error>>()?; + + Ok(Page { + data, + next_cursor, + has_next_page, + }) + } +} + +#[async_trait] +impl IndexerApiServer for IndexerApi { + async fn get_owned_objects( + &self, + address: SuiAddress, + query: Option, + cursor: Option, + limit: Option, + ) -> RpcResult { + let limit = cap_page_limit(limit); + if limit == 0 { + return Ok(ObjectsPage::empty()); + } + self.get_owned_objects_internal(address, query, cursor, limit) + .await + } + + async fn query_transaction_blocks( + &self, + query: SuiTransactionBlockResponseQuery, + cursor: Option, + limit: Option, + descending_order: Option, + ) -> RpcResult { + let limit = cap_page_limit(limit); + if limit == 0 { + return Ok(TransactionBlocksPage::empty()); + } + let mut results = self + .inner + .query_transaction_blocks( + query.filter, + query.options.unwrap_or_default(), + cursor, + limit + 1, + descending_order.unwrap_or(false), + ) + .await + .map_err(|e: IndexerError| anyhow::anyhow!(e))?; + + let has_next_page = results.len() > limit; + results.truncate(limit); + let next_cursor = results.last().map(|o| o.digest); + Ok(Page { + data: results, + next_cursor, + has_next_page, + }) + } + + async fn query_events( + &self, + query: EventFilter, + // exclusive cursor if `Some`, otherwise start from the beginning + cursor: Option, + limit: Option, + descending_order: Option, + ) -> RpcResult { + let limit = cap_page_limit(limit); + if limit == 0 { + return Ok(EventPage::empty()); + } + let descending_order = descending_order.unwrap_or(false); + let mut results = self + .inner + .query_events(query, cursor, limit + 1, descending_order) + .await?; + + let has_next_page = results.len() > limit; + results.truncate(limit); + let next_cursor = results.last().map(|o| o.id); + Ok(Page { + data: results, + next_cursor, + has_next_page, + }) + } + + async fn get_dynamic_fields( + &self, + parent_object_id: ObjectID, + cursor: Option, + limit: Option, + ) -> RpcResult { + let limit = cap_page_limit(limit); + if limit == 0 { + 
return Ok(DynamicFieldPage::empty()); + } + let mut results = self + .inner + .get_dynamic_fields(parent_object_id, cursor, limit + 1) + .await?; + + let has_next_page = results.len() > limit; + results.truncate(limit); + let next_cursor = results.last().map(|o| o.object_id); + Ok(Page { + data: results, + next_cursor, + has_next_page, + }) + } + + async fn get_dynamic_field_object( + &self, + parent_object_id: ObjectID, + name: DynamicFieldName, + ) -> RpcResult { + let name_bcs_value = self.inner.bcs_name_from_dynamic_field_name(&name).await?; + // Try as Dynamic Field + let id = sui_types::dynamic_field::derive_dynamic_field_id( + parent_object_id, + &name.type_, + &name_bcs_value, + ) + .expect("deriving dynamic field id can't fail"); + + let options = sui_json_rpc_types::SuiObjectDataOptions::full_content(); + match self.inner.get_object_read(id).await? { + sui_types::object::ObjectRead::NotExists(_) + | sui_types::object::ObjectRead::Deleted(_) => {} + sui_types::object::ObjectRead::Exists(object_ref, o, layout) => { + return Ok(SuiObjectResponse::new_with_data( + (object_ref, o, layout, options, None).try_into()?, + )); + } + } + + // Try as Dynamic Field Object + let dynamic_object_field_struct = + sui_types::dynamic_field::DynamicFieldInfo::dynamic_object_field_wrapper(name.type_); + let dynamic_object_field_type = TypeTag::Struct(Box::new(dynamic_object_field_struct)); + let dynamic_object_field_id = sui_types::dynamic_field::derive_dynamic_field_id( + parent_object_id, + &dynamic_object_field_type, + &name_bcs_value, + ) + .expect("deriving dynamic field id can't fail"); + match self.inner.get_object_read(dynamic_object_field_id).await? { + sui_types::object::ObjectRead::NotExists(_) + | sui_types::object::ObjectRead::Deleted(_) => {} + sui_types::object::ObjectRead::Exists(object_ref, o, layout) => { + return Ok(SuiObjectResponse::new_with_data( + (object_ref, o, layout, options, None).try_into()?, + )); + } + } + + Ok(SuiObjectResponse::new_with_error( + sui_types::error::SuiObjectResponseError::DynamicFieldNotFound { parent_object_id }, + )) + } + + fn subscribe_event(&self, _sink: SubscriptionSink, _filter: EventFilter) -> SubscriptionResult { + Err(SubscriptionEmptyError) + } + + fn subscribe_transaction( + &self, + _sink: SubscriptionSink, + _filter: TransactionFilter, + ) -> SubscriptionResult { + Err(SubscriptionEmptyError) + } + + async fn resolve_name_service_address(&self, name: String) -> RpcResult> { + let domain: Domain = name.parse().map_err(IndexerError::NameServiceError)?; + let parent_domain = domain.parent(); + + // construct the record ids to lookup. + let record_id = self.name_service_config.record_field_id(&domain); + let parent_record_id = self.name_service_config.record_field_id(&parent_domain); + + // get latest timestamp to check expiration. + let current_timestamp = self.inner.get_latest_checkpoint().await?.timestamp_ms; + + // gather the requests to fetch in the multi_get_objs. + let mut requests = vec![record_id]; + + // we only want to fetch both the child and the parent if the domain is a subdomain. + if domain.is_subdomain() { + requests.push(parent_record_id); + } + + // fetch both parent (if subdomain) and child records in a single get query. + // We do this as we do not know if the subdomain is a node or leaf record. + let domains: Vec<_> = self + .inner + .multi_get_objects(requests) + .await? + .into_iter() + .map(|o| sui_types::object::Object::try_from(o).ok()) + .collect(); + + // Find the requested object in the list of domains. 
+ // We need to loop (in an array of maximum size 2), as we cannot guarantee + // the order of the returned objects. + let Some(requested_object) = domains + .iter() + .find(|o| o.as_ref().is_some_and(|o| o.id() == record_id)) + .and_then(|o| o.clone()) + else { + return Ok(None); + }; + + let name_record: NameRecord = requested_object.try_into().map_err(IndexerError::from)?; + + // Handle NODE record case. + if !name_record.is_leaf_record() { + return if !name_record.is_node_expired(current_timestamp) { + Ok(name_record.target_address) + } else { + Err(IndexerError::NameServiceError(NameServiceError::NameExpired).into()) + }; + } + + // repeat the process for the parent object too. + let Some(requested_object) = domains + .iter() + .find(|o| o.as_ref().is_some_and(|o| o.id() == parent_record_id)) + .and_then(|o| o.clone()) + else { + return Err(IndexerError::NameServiceError(NameServiceError::NameExpired).into()); + }; + + let parent_record: NameRecord = requested_object.try_into().map_err(IndexerError::from)?; + + if parent_record.is_valid_leaf_parent(&name_record) + && !parent_record.is_node_expired(current_timestamp) + { + Ok(name_record.target_address) + } else { + Err(IndexerError::NameServiceError(NameServiceError::NameExpired).into()) + } + } + + async fn resolve_name_service_names( + &self, + address: SuiAddress, + _cursor: Option, + _limit: Option, + ) -> RpcResult> { + let reverse_record_id = self + .name_service_config + .reverse_record_field_id(address.as_ref()); + + let mut result = Page { + data: vec![], + next_cursor: None, + has_next_page: false, + }; + + let Some(field_reverse_record_object) = + self.inner.get_object(&reverse_record_id, None).await? + else { + return Ok(result); + }; + + let domain = field_reverse_record_object + .to_rust::>() + .ok_or_else(|| { + IndexerError::PersistentStorageDataCorruptionError(format!( + "Malformed Object {reverse_record_id}" + )) + })? + .value; + + let domain_name = domain.to_string(); + + // Tries to resolve the name, to verify it is not expired. + let resolved_address = self + .resolve_name_service_address(domain_name.clone()) + .await?; + + // If we do not have a resolved address, we do not include the domain in the result. + if resolved_address.is_none() { + return Ok(result); + } + + // We push the domain name to the result and return it. + result.data.push(domain_name); + + Ok(result) + } +} + +impl SuiRpcModule for IndexerApi { + fn rpc(self) -> RpcModule { + self.into_rpc() + } + + fn rpc_doc_module() -> Module { + sui_json_rpc_api::IndexerApiOpenRpc::module_doc() + } +} diff --git a/crates/sui-mvr-indexer/src/apis/mod.rs b/crates/sui-mvr-indexer/src/apis/mod.rs new file mode 100644 index 0000000000000..e797c7ecdc239 --- /dev/null +++ b/crates/sui-mvr-indexer/src/apis/mod.rs @@ -0,0 +1,20 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +pub(crate) use coin_api::CoinReadApi; +pub(crate) use extended_api::ExtendedApi; +pub use governance_api::GovernanceReadApi; +pub(crate) use indexer_api::IndexerApi; +pub(crate) use move_utils::MoveUtilsApi; +pub(crate) use read_api::ReadApi; +pub(crate) use transaction_builder_api::TransactionBuilderApi; +pub(crate) use write_api::WriteApi; + +mod coin_api; +mod extended_api; +pub mod governance_api; +mod indexer_api; +mod move_utils; +pub mod read_api; +mod transaction_builder_api; +mod write_api; diff --git a/crates/sui-mvr-indexer/src/apis/move_utils.rs b/crates/sui-mvr-indexer/src/apis/move_utils.rs new file mode 100644 index 0000000000000..2bb75b9831dbf --- /dev/null +++ b/crates/sui-mvr-indexer/src/apis/move_utils.rs @@ -0,0 +1,143 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use std::collections::BTreeMap; + +use async_trait::async_trait; +use jsonrpsee::core::RpcResult; +use jsonrpsee::RpcModule; +use move_binary_format::normalized::Module as NormalizedModule; + +use sui_json_rpc::error::SuiRpcInputError; +use sui_json_rpc::SuiRpcModule; +use sui_json_rpc_api::MoveUtilsServer; +use sui_json_rpc_types::ObjectValueKind; +use sui_json_rpc_types::SuiMoveNormalizedType; +use sui_json_rpc_types::{ + MoveFunctionArgType, SuiMoveNormalizedFunction, SuiMoveNormalizedModule, + SuiMoveNormalizedStruct, +}; +use sui_open_rpc::Module; +use sui_types::base_types::ObjectID; + +use crate::indexer_reader::IndexerReader; + +pub struct MoveUtilsApi { + inner: IndexerReader, +} + +impl MoveUtilsApi { + pub fn new(inner: IndexerReader) -> Self { + Self { inner } + } +} + +#[async_trait] +impl MoveUtilsServer for MoveUtilsApi { + async fn get_normalized_move_modules_by_package( + &self, + package_id: ObjectID, + ) -> RpcResult> { + let resolver_modules = self.inner.get_package(package_id).await?.modules().clone(); + let sui_normalized_modules = resolver_modules + .into_iter() + .map(|(k, v)| (k, NormalizedModule::new(v.bytecode()).into())) + .collect::>(); + Ok(sui_normalized_modules) + } + + async fn get_normalized_move_module( + &self, + package: ObjectID, + module_name: String, + ) -> RpcResult { + let mut modules = self.get_normalized_move_modules_by_package(package).await?; + let module = modules.remove(&module_name).ok_or_else(|| { + SuiRpcInputError::GenericNotFound(format!( + "No module was found with name {module_name}", + )) + })?; + Ok(module) + } + + async fn get_normalized_move_struct( + &self, + package: ObjectID, + module_name: String, + struct_name: String, + ) -> RpcResult { + let mut module = self + .get_normalized_move_module(package, module_name) + .await?; + module + .structs + .remove(&struct_name) + .ok_or_else(|| { + SuiRpcInputError::GenericNotFound(format!( + "No struct was found with struct name {struct_name}" + )) + }) + .map_err(Into::into) + } + + async fn get_normalized_move_function( + &self, + package: ObjectID, + module_name: String, + function_name: String, + ) -> RpcResult { + let mut module = self + .get_normalized_move_module(package, module_name) + .await?; + module + .exposed_functions + .remove(&function_name) + .ok_or_else(|| { + SuiRpcInputError::GenericNotFound(format!( + "No function was found with function name {function_name}", + )) + }) + .map_err(Into::into) + } + + async fn get_move_function_arg_types( + &self, + package: ObjectID, + module: String, + function: String, + ) -> RpcResult> { + let function = self + .get_normalized_move_function(package, module, function) + 
.await?; + let args = function + .parameters + .iter() + .map(|p| match p { + SuiMoveNormalizedType::Struct { .. } => { + MoveFunctionArgType::Object(ObjectValueKind::ByValue) + } + SuiMoveNormalizedType::Vector(_) => { + MoveFunctionArgType::Object(ObjectValueKind::ByValue) + } + SuiMoveNormalizedType::Reference(_) => { + MoveFunctionArgType::Object(ObjectValueKind::ByImmutableReference) + } + SuiMoveNormalizedType::MutableReference(_) => { + MoveFunctionArgType::Object(ObjectValueKind::ByMutableReference) + } + _ => MoveFunctionArgType::Pure, + }) + .collect::>(); + Ok(args) + } +} + +impl SuiRpcModule for MoveUtilsApi { + fn rpc(self) -> RpcModule { + self.into_rpc() + } + + fn rpc_doc_module() -> Module { + sui_json_rpc_api::MoveUtilsOpenRpc::module_doc() + } +} diff --git a/crates/sui-mvr-indexer/src/apis/read_api.rs b/crates/sui-mvr-indexer/src/apis/read_api.rs new file mode 100644 index 0000000000000..3e3de5343869d --- /dev/null +++ b/crates/sui-mvr-indexer/src/apis/read_api.rs @@ -0,0 +1,305 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use async_trait::async_trait; +use jsonrpsee::core::RpcResult; +use jsonrpsee::RpcModule; +use sui_json_rpc::error::SuiRpcInputError; +use sui_types::error::SuiObjectResponseError; +use sui_types::object::ObjectRead; + +use crate::errors::IndexerError; +use crate::indexer_reader::IndexerReader; +use sui_json_rpc::SuiRpcModule; +use sui_json_rpc_api::{ReadApiServer, QUERY_MAX_RESULT_LIMIT}; +use sui_json_rpc_types::{ + Checkpoint, CheckpointId, CheckpointPage, ProtocolConfigResponse, SuiEvent, + SuiGetPastObjectRequest, SuiObjectDataOptions, SuiObjectResponse, SuiPastObjectResponse, + SuiTransactionBlockResponse, SuiTransactionBlockResponseOptions, +}; +use sui_open_rpc::Module; +use sui_protocol_config::{ProtocolConfig, ProtocolVersion}; +use sui_types::base_types::{ObjectID, SequenceNumber}; +use sui_types::digests::{ChainIdentifier, TransactionDigest}; +use sui_types::sui_serde::BigInt; + +#[derive(Clone)] +pub struct ReadApi { + inner: IndexerReader, +} + +impl ReadApi { + pub fn new(inner: IndexerReader) -> Self { + Self { inner } + } + + async fn get_checkpoint(&self, id: CheckpointId) -> Result { + match self.inner.get_checkpoint(id).await { + Ok(Some(epoch_info)) => Ok(epoch_info), + Ok(None) => Err(IndexerError::InvalidArgumentError(format!( + "Checkpoint {id:?} not found" + ))), + Err(e) => Err(e), + } + } + + async fn get_latest_checkpoint(&self) -> Result { + self.inner.get_latest_checkpoint().await + } + + async fn get_chain_identifier(&self) -> RpcResult { + let genesis_checkpoint = self.get_checkpoint(CheckpointId::SequenceNumber(0)).await?; + Ok(ChainIdentifier::from(genesis_checkpoint.digest)) + } +} + +#[async_trait] +impl ReadApiServer for ReadApi { + async fn get_object( + &self, + object_id: ObjectID, + options: Option, + ) -> RpcResult { + let object_read = self.inner.get_object_read(object_id).await?; + object_read_to_object_response(&self.inner, object_read, options.unwrap_or_default()).await + } + + // For ease of implementation we just forward to the single object query, although in the + // future we may want to improve the performance by having a more naitive multi_get + // functionality + async fn multi_get_objects( + &self, + object_ids: Vec, + options: Option, + ) -> RpcResult> { + if object_ids.len() > *QUERY_MAX_RESULT_LIMIT { + return Err( + SuiRpcInputError::SizeLimitExceeded(QUERY_MAX_RESULT_LIMIT.to_string()).into(), + ); + } + let stored_objects = 
self.inner.multi_get_objects(object_ids).await?; + let options = options.unwrap_or_default(); + + let futures = stored_objects.into_iter().map(|stored_object| async { + let object_read = stored_object + .try_into_object_read(self.inner.package_resolver()) + .await?; + object_read_to_object_response(&self.inner, object_read, options.clone()).await + }); + + let mut objects = futures::future::try_join_all(futures).await?; + // Resort the objects by the order of the object id. + objects.sort_by_key(|obj| obj.data.as_ref().map(|data| data.object_id)); + + Ok(objects) + } + + async fn get_total_transaction_blocks(&self) -> RpcResult> { + let checkpoint = self.get_latest_checkpoint().await?; + Ok(BigInt::from(checkpoint.network_total_transactions)) + } + + async fn get_transaction_block( + &self, + digest: TransactionDigest, + options: Option, + ) -> RpcResult { + let mut txn = self + .multi_get_transaction_blocks(vec![digest], options) + .await?; + + let txn = txn.pop().ok_or_else(|| { + IndexerError::InvalidArgumentError(format!("Transaction {digest} not found")) + })?; + + Ok(txn) + } + + async fn multi_get_transaction_blocks( + &self, + digests: Vec, + options: Option, + ) -> RpcResult> { + let num_digests = digests.len(); + if num_digests > *QUERY_MAX_RESULT_LIMIT { + Err(SuiRpcInputError::SizeLimitExceeded( + QUERY_MAX_RESULT_LIMIT.to_string(), + ))? + } + + let options = options.unwrap_or_default(); + let txns = self + .inner + .multi_get_transaction_block_response_in_blocking_task(digests, options) + .await?; + + Ok(txns) + } + + async fn try_get_past_object( + &self, + _object_id: ObjectID, + _version: SequenceNumber, + _options: Option, + ) -> RpcResult { + Err(jsonrpsee::types::error::CallError::Custom( + jsonrpsee::types::error::ErrorCode::MethodNotFound.into(), + ) + .into()) + } + + async fn try_get_object_before_version( + &self, + _: ObjectID, + _: SequenceNumber, + ) -> RpcResult { + Err(jsonrpsee::types::error::CallError::Custom( + jsonrpsee::types::error::ErrorCode::MethodNotFound.into(), + ) + .into()) + } + + async fn try_multi_get_past_objects( + &self, + _past_objects: Vec, + _options: Option, + ) -> RpcResult> { + Err(jsonrpsee::types::error::CallError::Custom( + jsonrpsee::types::error::ErrorCode::MethodNotFound.into(), + ) + .into()) + } + + async fn get_latest_checkpoint_sequence_number(&self) -> RpcResult> { + let checkpoint = self.get_latest_checkpoint().await?; + Ok(BigInt::from(checkpoint.sequence_number)) + } + + async fn get_checkpoint(&self, id: CheckpointId) -> RpcResult { + self.get_checkpoint(id).await.map_err(Into::into) + } + + async fn get_checkpoints( + &self, + cursor: Option>, + limit: Option, + descending_order: bool, + ) -> RpcResult { + let cursor = cursor.map(BigInt::into_inner); + let limit = sui_json_rpc_api::validate_limit( + limit, + sui_json_rpc_api::QUERY_MAX_RESULT_LIMIT_CHECKPOINTS, + ) + .map_err(SuiRpcInputError::from)?; + + let mut checkpoints = self + .inner + .get_checkpoints(cursor, limit + 1, descending_order) + .await?; + + let has_next_page = checkpoints.len() > limit; + checkpoints.truncate(limit); + + let next_cursor = checkpoints.last().map(|d| d.sequence_number.into()); + + Ok(CheckpointPage { + data: checkpoints, + next_cursor, + has_next_page, + }) + } + + async fn get_checkpoints_deprecated_limit( + &self, + cursor: Option>, + limit: Option>, + descending_order: bool, + ) -> RpcResult { + self.get_checkpoints( + cursor, + limit.map(|l| l.into_inner() as usize), + descending_order, + ) + .await + } + + async fn 
get_events(&self, transaction_digest: TransactionDigest) -> RpcResult> { + self.inner + .get_transaction_events(transaction_digest) + .await + .map_err(Into::into) + } + + async fn get_protocol_config( + &self, + version: Option>, + ) -> RpcResult { + let chain = self.get_chain_identifier().await?.chain(); + let version = if let Some(version) = version { + (*version).into() + } else { + let latest_epoch = self.inner.get_latest_epoch_info_from_db().await?; + (latest_epoch.protocol_version as u64).into() + }; + + ProtocolConfig::get_for_version_if_supported(version, chain) + .ok_or(SuiRpcInputError::ProtocolVersionUnsupported( + ProtocolVersion::MIN.as_u64(), + ProtocolVersion::MAX.as_u64(), + )) + .map_err(Into::into) + .map(ProtocolConfigResponse::from) + } + + async fn get_chain_identifier(&self) -> RpcResult { + self.get_chain_identifier().await.map(|id| id.to_string()) + } +} + +impl SuiRpcModule for ReadApi { + fn rpc(self) -> RpcModule { + self.into_rpc() + } + + fn rpc_doc_module() -> Module { + sui_json_rpc_api::ReadApiOpenRpc::module_doc() + } +} + +async fn object_read_to_object_response( + indexer_reader: &IndexerReader, + object_read: ObjectRead, + options: SuiObjectDataOptions, +) -> RpcResult { + match object_read { + ObjectRead::NotExists(id) => Ok(SuiObjectResponse::new_with_error( + SuiObjectResponseError::NotExists { object_id: id }, + )), + ObjectRead::Exists(object_ref, o, layout) => { + let mut display_fields = None; + if options.show_display { + match indexer_reader.get_display_fields(&o, &layout).await { + Ok(rendered_fields) => display_fields = Some(rendered_fields), + Err(e) => { + return Ok(SuiObjectResponse::new( + Some((object_ref, o, layout, options, None).try_into()?), + Some(SuiObjectResponseError::DisplayError { + error: e.to_string(), + }), + )); + } + } + } + Ok(SuiObjectResponse::new_with_data( + (object_ref, o, layout, options, display_fields).try_into()?, + )) + } + ObjectRead::Deleted((object_id, version, digest)) => Ok(SuiObjectResponse::new_with_error( + SuiObjectResponseError::Deleted { + object_id, + version, + digest, + }, + )), + } +} diff --git a/crates/sui-mvr-indexer/src/apis/transaction_builder_api.rs b/crates/sui-mvr-indexer/src/apis/transaction_builder_api.rs new file mode 100644 index 0000000000000..c98ce9c371c10 --- /dev/null +++ b/crates/sui-mvr-indexer/src/apis/transaction_builder_api.rs @@ -0,0 +1,70 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use super::governance_api::GovernanceReadApi; +use crate::indexer_reader::IndexerReader; +use async_trait::async_trait; +use move_core_types::language_storage::StructTag; +use sui_json_rpc::transaction_builder_api::TransactionBuilderApi as SuiTransactionBuilderApi; +use sui_json_rpc_types::{SuiObjectDataFilter, SuiObjectDataOptions, SuiObjectResponse}; +use sui_transaction_builder::DataReader; +use sui_types::base_types::{ObjectID, ObjectInfo, SuiAddress}; +use sui_types::object::Object; + +pub(crate) struct TransactionBuilderApi { + inner: IndexerReader, +} + +impl TransactionBuilderApi { + #[allow(clippy::new_ret_no_self)] + pub fn new(inner: IndexerReader) -> SuiTransactionBuilderApi { + SuiTransactionBuilderApi::new_with_data_reader(std::sync::Arc::new(Self { inner })) + } +} + +#[async_trait] +impl DataReader for TransactionBuilderApi { + async fn get_owned_objects( + &self, + address: SuiAddress, + object_type: StructTag, + ) -> Result, anyhow::Error> { + let stored_objects = self + .inner + .get_owned_objects( + address, + Some(SuiObjectDataFilter::StructType(object_type)), + None, + 50, // Limit the number of objects returned to 50 + ) + .await?; + + stored_objects + .into_iter() + .map(|object| { + let object = Object::try_from(object)?; + let object_ref = object.compute_object_reference(); + let info = ObjectInfo::new(&object_ref, &object); + Ok(info) + }) + .collect::, _>>() + } + + async fn get_object_with_options( + &self, + object_id: ObjectID, + options: SuiObjectDataOptions, + ) -> Result { + let result = self.inner.get_object_read(object_id).await?; + Ok((result, options).try_into()?) + } + + async fn get_reference_gas_price(&self) -> Result { + let epoch_info = GovernanceReadApi::new(self.inner.clone()) + .get_epoch_info(None) + .await?; + Ok(epoch_info + .reference_gas_price + .ok_or_else(|| anyhow::anyhow!("missing latest reference_gas_price"))?) + } +} diff --git a/crates/sui-mvr-indexer/src/apis/write_api.rs b/crates/sui-mvr-indexer/src/apis/write_api.rs new file mode 100644 index 0000000000000..71a54c356635b --- /dev/null +++ b/crates/sui-mvr-indexer/src/apis/write_api.rs @@ -0,0 +1,90 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use async_trait::async_trait; +use fastcrypto::encoding::Base64; +use jsonrpsee::core::RpcResult; +use jsonrpsee::http_client::HttpClient; +use jsonrpsee::RpcModule; + +use sui_json_rpc::SuiRpcModule; +use sui_json_rpc_api::{WriteApiClient, WriteApiServer}; +use sui_json_rpc_types::{ + DevInspectArgs, DevInspectResults, DryRunTransactionBlockResponse, SuiTransactionBlockResponse, + SuiTransactionBlockResponseOptions, +}; +use sui_open_rpc::Module; +use sui_types::base_types::SuiAddress; +use sui_types::quorum_driver_types::ExecuteTransactionRequestType; +use sui_types::sui_serde::BigInt; + +use crate::types::SuiTransactionBlockResponseWithOptions; + +pub(crate) struct WriteApi { + fullnode: HttpClient, +} + +impl WriteApi { + pub fn new(fullnode_client: HttpClient) -> Self { + Self { + fullnode: fullnode_client, + } + } +} + +#[async_trait] +impl WriteApiServer for WriteApi { + async fn execute_transaction_block( + &self, + tx_bytes: Base64, + signatures: Vec, + options: Option, + request_type: Option, + ) -> RpcResult { + let sui_transaction_response = self + .fullnode + .execute_transaction_block(tx_bytes, signatures, options.clone(), request_type) + .await?; + Ok(SuiTransactionBlockResponseWithOptions { + response: sui_transaction_response, + options: options.unwrap_or_default(), + } + .into()) + } + + async fn dev_inspect_transaction_block( + &self, + sender_address: SuiAddress, + tx_bytes: Base64, + gas_price: Option>, + epoch: Option>, + additional_args: Option, + ) -> RpcResult { + self.fullnode + .dev_inspect_transaction_block( + sender_address, + tx_bytes, + gas_price, + epoch, + additional_args, + ) + .await + } + + async fn dry_run_transaction_block( + &self, + tx_bytes: Base64, + ) -> RpcResult { + self.fullnode.dry_run_transaction_block(tx_bytes).await + } +} + +impl SuiRpcModule for WriteApi { + fn rpc(self) -> RpcModule { + self.into_rpc() + } + + fn rpc_doc_module() -> Module { + sui_json_rpc_api::WriteApiOpenRpc::module_doc() + } +} diff --git a/crates/sui-mvr-indexer/src/backfill/backfill_instances/ingestion_backfills/digest_task.rs b/crates/sui-mvr-indexer/src/backfill/backfill_instances/ingestion_backfills/digest_task.rs new file mode 100644 index 0000000000000..8273bcdaa3b7b --- /dev/null +++ b/crates/sui-mvr-indexer/src/backfill/backfill_instances/ingestion_backfills/digest_task.rs @@ -0,0 +1,26 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::backfill::backfill_instances::ingestion_backfills::IngestionBackfillTrait; +use crate::database::ConnectionPool; +use sui_types::full_checkpoint_content::CheckpointData; +use tracing::info; + +/// Dummy backfill that only prints the sequence number and checkpoint of the digest. Intended to +/// benchmark backfill performance. 
+pub struct DigestBackfill; + +#[async_trait::async_trait] +impl IngestionBackfillTrait for DigestBackfill { + type ProcessedType = (); + + fn process_checkpoint(checkpoint: &CheckpointData) -> Vec { + let cp = checkpoint.checkpoint_summary.sequence_number; + let digest = checkpoint.checkpoint_summary.content_digest; + info!("{cp}: {digest}"); + + vec![] + } + + async fn commit_chunk(_pool: ConnectionPool, _processed_data: Vec) {} +} diff --git a/crates/sui-mvr-indexer/src/backfill/backfill_instances/ingestion_backfills/ingestion_backfill_task.rs b/crates/sui-mvr-indexer/src/backfill/backfill_instances/ingestion_backfills/ingestion_backfill_task.rs new file mode 100644 index 0000000000000..2702f755c0842 --- /dev/null +++ b/crates/sui-mvr-indexer/src/backfill/backfill_instances/ingestion_backfills/ingestion_backfill_task.rs @@ -0,0 +1,98 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::backfill::backfill_instances::ingestion_backfills::IngestionBackfillTrait; +use crate::backfill::backfill_task::BackfillTask; +use crate::database::ConnectionPool; +use dashmap::DashMap; +use std::ops::RangeInclusive; +use std::sync::Arc; +use sui_data_ingestion_core::{setup_single_workflow, ReaderOptions, Worker}; +use sui_types::full_checkpoint_content::CheckpointData; +use sui_types::messages_checkpoint::CheckpointSequenceNumber; +use tokio::sync::Notify; + +pub struct IngestionBackfillTask { + ready_checkpoints: Arc>>, + notify: Arc, + _exit_sender: tokio::sync::oneshot::Sender<()>, +} + +impl IngestionBackfillTask { + pub async fn new(remote_store_url: String, start_checkpoint: CheckpointSequenceNumber) -> Self { + let ready_checkpoints = Arc::new(DashMap::new()); + let notify = Arc::new(Notify::new()); + let adapter: Adapter = Adapter { + ready_checkpoints: ready_checkpoints.clone(), + notify: notify.clone(), + }; + let reader_options = ReaderOptions { + batch_size: 200, + ..Default::default() + }; + let (executor, _exit_sender) = setup_single_workflow( + adapter, + remote_store_url, + start_checkpoint, + 200, + Some(reader_options), + ) + .await + .unwrap(); + tokio::task::spawn(async move { + executor.await.unwrap(); + }); + Self { + ready_checkpoints, + notify, + _exit_sender, + } + } +} + +pub struct Adapter { + ready_checkpoints: Arc>>, + notify: Arc, +} + +#[async_trait::async_trait] +impl Worker for Adapter { + type Result = (); + async fn process_checkpoint(&self, checkpoint: &CheckpointData) -> anyhow::Result<()> { + let processed = T::process_checkpoint(checkpoint); + self.ready_checkpoints + .insert(checkpoint.checkpoint_summary.sequence_number, processed); + self.notify.notify_waiters(); + Ok(()) + } +} + +#[async_trait::async_trait] +impl BackfillTask for IngestionBackfillTask { + async fn backfill_range(&self, pool: ConnectionPool, range: &RangeInclusive) { + let mut processed_data = vec![]; + let mut start = *range.start(); + let end = *range.end(); + loop { + while start <= end { + if let Some((_, processed)) = self + .ready_checkpoints + .remove(&(start as CheckpointSequenceNumber)) + { + processed_data.extend(processed); + start += 1; + } else { + break; + } + } + if start <= end { + self.notify.notified().await; + } else { + break; + } + } + // TODO: Limit the size of each chunk. + // postgres has a parameter limit of 65535, meaning that row_count * col_count <= 65536. 
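// Illustrative sketch (not part of this patch) of the chunking that the TODO
// above calls for: Postgres accepts at most 65535 bind parameters per
// statement, so a bulk INSERT must keep rows_per_batch * columns_per_row
// within that bound. The helper name and its generic shape are assumptions
// for illustration only; each batch would then get its own
// `diesel::insert_into(...).values(batch)` call.
const MAX_BIND_PARAMS: usize = 65_535;

fn insert_batches<T>(rows: Vec<T>, columns_per_row: usize) -> Vec<Vec<T>> {
    let rows_per_batch = (MAX_BIND_PARAMS / columns_per_row).max(1);
    let mut batches = Vec::new();
    let mut iter = rows.into_iter().peekable();
    while iter.peek().is_some() {
        // Take up to `rows_per_batch` rows per statement to stay under the limit.
        batches.push(iter.by_ref().take(rows_per_batch).collect());
    }
    batches
}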
+ T::commit_chunk(pool.clone(), processed_data).await; + } +} diff --git a/crates/sui-mvr-indexer/src/backfill/backfill_instances/ingestion_backfills/mod.rs b/crates/sui-mvr-indexer/src/backfill/backfill_instances/ingestion_backfills/mod.rs new file mode 100644 index 0000000000000..935ba5562bd9c --- /dev/null +++ b/crates/sui-mvr-indexer/src/backfill/backfill_instances/ingestion_backfills/mod.rs @@ -0,0 +1,18 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +pub(crate) mod digest_task; +pub(crate) mod ingestion_backfill_task; +pub(crate) mod raw_checkpoints; +pub(crate) mod tx_affected_objects; + +use crate::database::ConnectionPool; +use sui_types::full_checkpoint_content::CheckpointData; + +#[async_trait::async_trait] +pub trait IngestionBackfillTrait: Send + Sync { + type ProcessedType: Send + Sync; + + fn process_checkpoint(checkpoint: &CheckpointData) -> Vec; + async fn commit_chunk(pool: ConnectionPool, processed_data: Vec); +} diff --git a/crates/sui-mvr-indexer/src/backfill/backfill_instances/ingestion_backfills/raw_checkpoints.rs b/crates/sui-mvr-indexer/src/backfill/backfill_instances/ingestion_backfills/raw_checkpoints.rs new file mode 100644 index 0000000000000..aec4f0263ee80 --- /dev/null +++ b/crates/sui-mvr-indexer/src/backfill/backfill_instances/ingestion_backfills/raw_checkpoints.rs @@ -0,0 +1,34 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::backfill::backfill_instances::ingestion_backfills::IngestionBackfillTrait; +use crate::database::ConnectionPool; +use crate::models::raw_checkpoints::StoredRawCheckpoint; +use crate::schema::raw_checkpoints::dsl::raw_checkpoints; +use diesel_async::RunQueryDsl; +use sui_types::full_checkpoint_content::CheckpointData; + +pub struct RawCheckpointsBackFill; + +#[async_trait::async_trait] +impl IngestionBackfillTrait for RawCheckpointsBackFill { + type ProcessedType = StoredRawCheckpoint; + + fn process_checkpoint(checkpoint: &CheckpointData) -> Vec { + vec![StoredRawCheckpoint { + sequence_number: checkpoint.checkpoint_summary.sequence_number as i64, + certified_checkpoint: bcs::to_bytes(&checkpoint.checkpoint_summary).unwrap(), + checkpoint_contents: bcs::to_bytes(&checkpoint.checkpoint_contents).unwrap(), + }] + } + + async fn commit_chunk(pool: ConnectionPool, processed_data: Vec) { + let mut conn = pool.get().await.unwrap(); + diesel::insert_into(raw_checkpoints) + .values(processed_data) + .on_conflict_do_nothing() + .execute(&mut conn) + .await + .unwrap(); + } +} diff --git a/crates/sui-mvr-indexer/src/backfill/backfill_instances/ingestion_backfills/tx_affected_objects.rs b/crates/sui-mvr-indexer/src/backfill/backfill_instances/ingestion_backfills/tx_affected_objects.rs new file mode 100644 index 0000000000000..4e6f6efa6a897 --- /dev/null +++ b/crates/sui-mvr-indexer/src/backfill/backfill_instances/ingestion_backfills/tx_affected_objects.rs @@ -0,0 +1,48 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use crate::backfill::backfill_instances::ingestion_backfills::IngestionBackfillTrait; +use crate::database::ConnectionPool; +use crate::models::tx_indices::StoredTxAffectedObjects; +use crate::schema::tx_affected_objects; +use diesel_async::RunQueryDsl; +use sui_types::effects::TransactionEffectsAPI; +use sui_types::full_checkpoint_content::CheckpointData; + +pub struct TxAffectedObjectsBackfill; + +#[async_trait::async_trait] +impl IngestionBackfillTrait for TxAffectedObjectsBackfill { + type ProcessedType = StoredTxAffectedObjects; + + fn process_checkpoint(checkpoint: &CheckpointData) -> Vec { + let first_tx = checkpoint.checkpoint_summary.network_total_transactions as usize + - checkpoint.transactions.len(); + + checkpoint + .transactions + .iter() + .enumerate() + .flat_map(|(i, tx)| { + tx.effects + .object_changes() + .into_iter() + .map(move |change| StoredTxAffectedObjects { + tx_sequence_number: (first_tx + i) as i64, + affected: change.id.to_vec(), + sender: tx.transaction.sender_address().to_vec(), + }) + }) + .collect() + } + + async fn commit_chunk(pool: ConnectionPool, processed_data: Vec) { + let mut conn = pool.get().await.unwrap(); + diesel::insert_into(tx_affected_objects::table) + .values(processed_data) + .on_conflict_do_nothing() + .execute(&mut conn) + .await + .unwrap(); + } +} diff --git a/crates/sui-mvr-indexer/src/backfill/backfill_instances/mod.rs b/crates/sui-mvr-indexer/src/backfill/backfill_instances/mod.rs new file mode 100644 index 0000000000000..304ed4e715e1d --- /dev/null +++ b/crates/sui-mvr-indexer/src/backfill/backfill_instances/mod.rs @@ -0,0 +1,55 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::backfill::backfill_instances::ingestion_backfills::digest_task::DigestBackfill; +use crate::backfill::backfill_instances::ingestion_backfills::ingestion_backfill_task::IngestionBackfillTask; +use crate::backfill::backfill_instances::ingestion_backfills::raw_checkpoints::RawCheckpointsBackFill; +use crate::backfill::backfill_instances::ingestion_backfills::tx_affected_objects::TxAffectedObjectsBackfill; +use crate::backfill::backfill_task::BackfillTask; +use crate::backfill::{BackfillTaskKind, IngestionBackfillKind}; +use std::sync::Arc; +use sui_types::messages_checkpoint::CheckpointSequenceNumber; + +mod ingestion_backfills; +mod sql_backfill; +mod system_state_summary_json; + +pub async fn get_backfill_task( + kind: BackfillTaskKind, + range_start: usize, +) -> Arc { + match kind { + BackfillTaskKind::SystemStateSummaryJson => { + Arc::new(system_state_summary_json::SystemStateSummaryJsonBackfill) + } + BackfillTaskKind::Sql { sql, key_column } => { + Arc::new(sql_backfill::SqlBackFill::new(sql, key_column)) + } + BackfillTaskKind::Ingestion { + kind, + remote_store_url, + } => match kind { + IngestionBackfillKind::Digest => Arc::new( + IngestionBackfillTask::::new( + remote_store_url, + range_start as CheckpointSequenceNumber, + ) + .await, + ), + IngestionBackfillKind::RawCheckpoints => Arc::new( + IngestionBackfillTask::::new( + remote_store_url, + range_start as CheckpointSequenceNumber, + ) + .await, + ), + IngestionBackfillKind::TxAffectedObjects => Arc::new( + IngestionBackfillTask::::new( + remote_store_url, + range_start as CheckpointSequenceNumber, + ) + .await, + ), + }, + } +} diff --git a/crates/sui-mvr-indexer/src/backfill/backfill_instances/sql_backfill.rs b/crates/sui-mvr-indexer/src/backfill/backfill_instances/sql_backfill.rs new file mode 100644 index 
0000000000000..543f077e2ba3b --- /dev/null +++ b/crates/sui-mvr-indexer/src/backfill/backfill_instances/sql_backfill.rs @@ -0,0 +1,36 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::backfill::backfill_task::BackfillTask; +use crate::database::ConnectionPool; +use async_trait::async_trait; +use diesel_async::RunQueryDsl; +use std::ops::RangeInclusive; + +pub struct SqlBackFill { + sql: String, + key_column: String, +} + +impl SqlBackFill { + pub fn new(sql: String, key_column: String) -> Self { + Self { sql, key_column } + } +} + +#[async_trait] +impl BackfillTask for SqlBackFill { + async fn backfill_range(&self, pool: ConnectionPool, range: &RangeInclusive) { + let mut conn = pool.get().await.unwrap(); + + let query = format!( + "{} WHERE {} BETWEEN {} AND {} ON CONFLICT DO NOTHING", + self.sql, + self.key_column, + *range.start(), + *range.end() + ); + + diesel::sql_query(query).execute(&mut conn).await.unwrap(); + } +} diff --git a/crates/sui-mvr-indexer/src/backfill/backfill_instances/sql_backfills/event_sender.sh b/crates/sui-mvr-indexer/src/backfill/backfill_instances/sql_backfills/event_sender.sh new file mode 100644 index 0000000000000..ea883107b31ca --- /dev/null +++ b/crates/sui-mvr-indexer/src/backfill/backfill_instances/sql_backfills/event_sender.sh @@ -0,0 +1,6 @@ +# Copyright (c) Mysten Labs, Inc. +# SPDX-License-Identifier: Apache-2.0 + +INDEXER=${INDEXER:-"sui-mvr-indexer"} +DB=${DB:-"postgres://postgres:postgrespw@localhost:5432/postgres"} +"$INDEXER" --database-url "$DB" run-back-fill "$1" "$2" sql "UPDATE events SET sender = CASE WHEN cardinality(senders) > 0 THEN senders[1] ELSE NULL END" checkpoint_sequence_number diff --git a/crates/sui-mvr-indexer/src/backfill/backfill_instances/sql_backfills/full_objects_history.sh b/crates/sui-mvr-indexer/src/backfill/backfill_instances/sql_backfills/full_objects_history.sh new file mode 100644 index 0000000000000..18a0e3b9e84de --- /dev/null +++ b/crates/sui-mvr-indexer/src/backfill/backfill_instances/sql_backfills/full_objects_history.sh @@ -0,0 +1,6 @@ +# Copyright (c) Mysten Labs, Inc. +# SPDX-License-Identifier: Apache-2.0 + +INDEXER=${INDEXER:-"sui-mvr-indexer"} +DB=${DB:-"postgres://postgres:postgrespw@localhost:5432/postgres"} +"$INDEXER" --database-url "$DB" run-back-fill "$1" "$2" sql "INSERT INTO full_objects_history (object_id, object_version, serialized_object) SELECT object_id, object_version, serialized_object FROM objects_history" checkpoint_sequence_number diff --git a/crates/sui-mvr-indexer/src/backfill/backfill_instances/sql_backfills/tx_affected_addresses.sh b/crates/sui-mvr-indexer/src/backfill/backfill_instances/sql_backfills/tx_affected_addresses.sh new file mode 100644 index 0000000000000..da0dc0915a0b4 --- /dev/null +++ b/crates/sui-mvr-indexer/src/backfill/backfill_instances/sql_backfills/tx_affected_addresses.sh @@ -0,0 +1,7 @@ +# Copyright (c) Mysten Labs, Inc. 
+# SPDX-License-Identifier: Apache-2.0 + +INDEXER=${INDEXER:-"sui-mvr-indexer"} +DB=${DB:-"postgres://postgres:postgrespw@localhost:5432/postgres"} +"$INDEXER" --database-url "$DB" run-back-fill "$1" "$2" sql "INSERT INTO tx_affected_addresses SELECT tx_sequence_number, sender AS affected, sender FROM tx_senders" tx_sequence_number +"$INDEXER" --database-url "$DB" run-back-fill "$1" "$2" sql "INSERT INTO tx_affected_addresses SELECT tx_sequence_number, recipient AS affected, sender FROM tx_recipients" tx_sequence_number diff --git a/crates/sui-mvr-indexer/src/backfill/backfill_instances/system_state_summary_json.rs b/crates/sui-mvr-indexer/src/backfill/backfill_instances/system_state_summary_json.rs new file mode 100644 index 0000000000000..912abdd871a1c --- /dev/null +++ b/crates/sui-mvr-indexer/src/backfill/backfill_instances/system_state_summary_json.rs @@ -0,0 +1,56 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::backfill::backfill_task::BackfillTask; +use crate::database::ConnectionPool; +use crate::schema::epochs; +use async_trait::async_trait; +use diesel::{ExpressionMethods, QueryDsl}; +use diesel_async::{AsyncConnection, RunQueryDsl}; +use std::ops::RangeInclusive; +use sui_types::sui_system_state::sui_system_state_summary::SuiSystemStateSummary; + +pub struct SystemStateSummaryJsonBackfill; + +#[async_trait] +impl BackfillTask for SystemStateSummaryJsonBackfill { + async fn backfill_range(&self, pool: ConnectionPool, range: &RangeInclusive) { + let mut conn = pool.get().await.unwrap(); + + let results: Vec>> = epochs::table + .select(epochs::system_state) + .filter(epochs::epoch.between(*range.start() as i64, *range.end() as i64)) + .load(&mut conn) + .await + .unwrap(); + + let mut system_states = vec![]; + for bytes in results { + let Some(bytes) = bytes else { + continue; + }; + let system_state_summary: SuiSystemStateSummary = bcs::from_bytes(&bytes).unwrap(); + let json_ser = serde_json::to_value(&system_state_summary).unwrap(); + if system_state_summary.epoch == 1 { + // Each existing system state's epoch is off by 1. + // This means there won't be any row with a system state summary for epoch 0. + // We need to manually insert a row for epoch 0. + system_states.push((0, json_ser.clone())); + } + system_states.push((system_state_summary.epoch, json_ser)); + } + conn.transaction::<_, diesel::result::Error, _>(|conn| { + Box::pin(async move { + for (epoch, json_ser) in system_states { + diesel::update(epochs::table.filter(epochs::epoch.eq(epoch as i64))) + .set(epochs::system_state_summary_json.eq(Some(json_ser))) + .execute(conn) + .await?; + } + Ok(()) + }) + }) + .await + .unwrap(); + } +} diff --git a/crates/sui-mvr-indexer/src/backfill/backfill_runner.rs b/crates/sui-mvr-indexer/src/backfill/backfill_runner.rs new file mode 100644 index 0000000000000..3126dc90fe35f --- /dev/null +++ b/crates/sui-mvr-indexer/src/backfill/backfill_runner.rs @@ -0,0 +1,94 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use crate::backfill::backfill_instances::get_backfill_task; +use crate::backfill::backfill_task::BackfillTask; +use crate::backfill::BackfillTaskKind; +use crate::config::BackFillConfig; +use crate::database::ConnectionPool; +use futures::StreamExt; +use std::collections::BTreeSet; +use std::ops::RangeInclusive; +use std::sync::Arc; +use std::time::Instant; +use tokio::sync::{mpsc, Mutex}; +use tokio_stream::wrappers::ReceiverStream; + +pub struct BackfillRunner {} + +impl BackfillRunner { + pub async fn run( + runner_kind: BackfillTaskKind, + pool: ConnectionPool, + backfill_config: BackFillConfig, + total_range: RangeInclusive, + ) { + let task = get_backfill_task(runner_kind, *total_range.start()).await; + Self::run_impl(pool, backfill_config, total_range, task).await; + } + + /// Main function to run the parallel queries and batch processing. + async fn run_impl( + pool: ConnectionPool, + config: BackFillConfig, + total_range: RangeInclusive, + task: Arc, + ) { + let cur_time = Instant::now(); + // Keeps track of the checkpoint ranges (using starting checkpoint number) + // that are in progress. + let in_progress = Arc::new(Mutex::new(BTreeSet::new())); + + let concurrency = config.max_concurrency; + let (tx, rx) = mpsc::channel(concurrency * 10); + // Spawn a task to send chunks lazily over the channel + tokio::spawn(async move { + for chunk in create_chunk_iter(total_range, config.chunk_size) { + if tx.send(chunk).await.is_err() { + // Channel closed, stop producing chunks + break; + } + } + }); + // Convert the receiver into a stream + let stream = ReceiverStream::new(rx); + + // Process chunks in parallel, limiting the number of concurrent query tasks + stream + .for_each_concurrent(concurrency, move |range| { + let pool_clone = pool.clone(); + let in_progress_clone = in_progress.clone(); + let task = task.clone(); + + async move { + in_progress_clone.lock().await.insert(*range.start()); + task.backfill_range(pool_clone, &range).await; + println!("Finished range: {:?}.", range); + in_progress_clone.lock().await.remove(range.start()); + let cur_min_in_progress = in_progress_clone.lock().await.iter().next().cloned(); + if let Some(cur_min_in_progress) = cur_min_in_progress { + println!( + "Average backfill speed: {} checkpoints/s. Minimum range start number still in progress: {:?}.", + cur_min_in_progress as f64 / cur_time.elapsed().as_secs_f64(), + cur_min_in_progress + ); + } + } + }) + .await; + + println!("Finished backfilling in {:?}", cur_time.elapsed()); + } +} + +/// Creates chunks based on the total range and chunk size. +fn create_chunk_iter( + total_range: RangeInclusive, + chunk_size: usize, +) -> impl Iterator> { + let end = *total_range.end(); + total_range.step_by(chunk_size).map(move |chunk_start| { + let chunk_end = std::cmp::min(chunk_start + chunk_size - 1, end); + chunk_start..=chunk_end + }) +} diff --git a/crates/sui-mvr-indexer/src/backfill/backfill_task.rs b/crates/sui-mvr-indexer/src/backfill/backfill_task.rs new file mode 100644 index 0000000000000..008bfa5b482c0 --- /dev/null +++ b/crates/sui-mvr-indexer/src/backfill/backfill_task.rs @@ -0,0 +1,12 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::database::ConnectionPool; +use async_trait::async_trait; +use std::ops::RangeInclusive; + +#[async_trait] +pub trait BackfillTask: Send + Sync { + /// Backfill the database for a specific range. 
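// Illustrative sketch (not part of this patch) of the producer/consumer shape
// used by `BackfillRunner::run_impl` above: chunk ranges are produced lazily
// into a bounded channel and processed with at most `concurrency` chunks in
// flight. `process_range` stands in for `BackfillTask::backfill_range`; the
// numbers are arbitrary example values.
use futures::StreamExt;
use std::ops::RangeInclusive;
use tokio_stream::wrappers::ReceiverStream;

fn chunks(total: RangeInclusive<usize>, size: usize) -> impl Iterator<Item = RangeInclusive<usize>> {
    let end = *total.end();
    total
        .step_by(size)
        .map(move |start| start..=std::cmp::min(start + size - 1, end))
}

async fn process_range(range: RangeInclusive<usize>) {
    println!("processed {:?}", range);
}

#[tokio::main]
async fn main() {
    let concurrency = 4;
    let (tx, rx) = tokio::sync::mpsc::channel(concurrency * 10);
    tokio::spawn(async move {
        // Yields 0..=1999, 2000..=3999, 4000..=4999 for this example range.
        for chunk in chunks(0..=4999, 2000) {
            if tx.send(chunk).await.is_err() {
                break; // consumer dropped, stop producing chunks
            }
        }
    });
    ReceiverStream::new(rx)
        .for_each_concurrent(concurrency, |range| process_range(range))
        .await;
}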
+ async fn backfill_range(&self, pool: ConnectionPool, range: &RangeInclusive); +} diff --git a/crates/sui-mvr-indexer/src/backfill/mod.rs b/crates/sui-mvr-indexer/src/backfill/mod.rs new file mode 100644 index 0000000000000..e17ba40628ef1 --- /dev/null +++ b/crates/sui-mvr-indexer/src/backfill/mod.rs @@ -0,0 +1,35 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use clap::{Subcommand, ValueEnum}; + +pub mod backfill_instances; +pub mod backfill_runner; +pub mod backfill_task; + +#[derive(Subcommand, Clone, Debug)] +pub enum BackfillTaskKind { + SystemStateSummaryJson, + /// \sql is the SQL string to run, appended with the range between the start and end, + /// as well as conflict resolution (see sql_backfill.rs). + /// \key_column is the primary key column to use for the range. + Sql { + sql: String, + key_column: String, + }, + /// Starts a backfill pipeline from the ingestion engine. + /// \remote_store_url is the URL of the remote store to ingest from. + /// Any `IngestionBackfillKind` will need to map to a type that + /// implements `IngestionBackfillTrait`. + Ingestion { + kind: IngestionBackfillKind, + remote_store_url: String, + }, +} + +#[derive(ValueEnum, Clone, Debug)] +pub enum IngestionBackfillKind { + Digest, + RawCheckpoints, + TxAffectedObjects, +} diff --git a/crates/sui-mvr-indexer/src/benchmark.rs b/crates/sui-mvr-indexer/src/benchmark.rs new file mode 100644 index 0000000000000..96df25cba9fa6 --- /dev/null +++ b/crates/sui-mvr-indexer/src/benchmark.rs @@ -0,0 +1,130 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::config::{BenchmarkConfig, IngestionConfig, IngestionSources, UploadOptions}; +use crate::database::ConnectionPool; +use crate::db::{reset_database, run_migrations}; +use crate::errors::IndexerError; +use crate::indexer::Indexer; +use crate::metrics::IndexerMetrics; +use crate::store::PgIndexerStore; +use std::path::PathBuf; +use sui_synthetic_ingestion::benchmark::{run_benchmark, BenchmarkableIndexer}; +use sui_synthetic_ingestion::{IndexerProgress, SyntheticIngestionConfig}; +use tokio::sync::watch; +use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; + +pub async fn run_indexer_benchmark( + config: BenchmarkConfig, + pool: ConnectionPool, + metrics: IndexerMetrics, +) { + if config.reset_db { + reset_database(pool.dedicated_connection().await.unwrap()) + .await + .unwrap(); + } else { + run_migrations(pool.dedicated_connection().await.unwrap()) + .await + .unwrap(); + } + let store = PgIndexerStore::new(pool, UploadOptions::default(), metrics.clone()); + let ingestion_dir = config + .workload_dir + .clone() + .unwrap_or_else(|| tempfile::tempdir().unwrap().into_path()); + // If we are using a non-temp directory, we should not delete the ingestion directory. 
+ let gc_checkpoint_files = config.workload_dir.is_none(); + let synthetic_ingestion_config = SyntheticIngestionConfig { + ingestion_dir: ingestion_dir.clone(), + checkpoint_size: config.checkpoint_size, + num_checkpoints: config.num_checkpoints, + starting_checkpoint: config.starting_checkpoint, + }; + let indexer = BenchmarkIndexer::new(store, metrics, ingestion_dir, gc_checkpoint_files); + run_benchmark(synthetic_ingestion_config, indexer).await; +} + +pub struct BenchmarkIndexer { + inner: Option, + cancel: CancellationToken, + committed_checkpoints_rx: watch::Receiver>, + handle: Option>>, +} + +struct BenchmarkIndexerInner { + ingestion_dir: PathBuf, + gc_checkpoint_files: bool, + store: PgIndexerStore, + metrics: IndexerMetrics, + committed_checkpoints_tx: watch::Sender>, +} + +impl BenchmarkIndexer { + pub fn new( + store: PgIndexerStore, + metrics: IndexerMetrics, + ingestion_dir: PathBuf, + gc_checkpoint_files: bool, + ) -> Self { + let cancel = CancellationToken::new(); + let (committed_checkpoints_tx, committed_checkpoints_rx) = watch::channel(None); + Self { + inner: Some(BenchmarkIndexerInner { + ingestion_dir, + gc_checkpoint_files, + store, + metrics, + committed_checkpoints_tx, + }), + cancel, + committed_checkpoints_rx, + handle: None, + } + } +} + +#[async_trait::async_trait] +impl BenchmarkableIndexer for BenchmarkIndexer { + fn subscribe_to_committed_checkpoints(&self) -> watch::Receiver> { + self.committed_checkpoints_rx.clone() + } + + async fn start(&mut self) { + let BenchmarkIndexerInner { + ingestion_dir, + gc_checkpoint_files, + store, + metrics, + committed_checkpoints_tx, + } = self.inner.take().unwrap(); + let ingestion_config = IngestionConfig { + sources: IngestionSources { + data_ingestion_path: Some(ingestion_dir), + ..Default::default() + }, + gc_checkpoint_files, + ..Default::default() + }; + let cancel = self.cancel.clone(); + let handle = tokio::task::spawn(async move { + Indexer::start_writer( + ingestion_config, + store, + metrics, + Default::default(), + None, + cancel, + Some(committed_checkpoints_tx), + ) + .await + }); + self.handle = Some(handle); + } + + async fn stop(mut self) { + self.cancel.cancel(); + self.handle.unwrap().await.unwrap().unwrap(); + } +} diff --git a/crates/sui-mvr-indexer/src/config.rs b/crates/sui-mvr-indexer/src/config.rs new file mode 100644 index 0000000000000..6db349aa64747 --- /dev/null +++ b/crates/sui-mvr-indexer/src/config.rs @@ -0,0 +1,633 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::db::ConnectionPoolConfig; +use crate::{backfill::BackfillTaskKind, handlers::pruner::PrunableTable}; +use clap::{Args, Parser, Subcommand}; +use serde::{Deserialize, Serialize}; +use std::{collections::HashMap, net::SocketAddr, path::PathBuf}; +use strum::IntoEnumIterator; +use sui_json_rpc::name_service::NameServiceConfig; +use sui_types::base_types::{ObjectID, SuiAddress}; +use url::Url; + +/// The primary purpose of objects_history is to serve consistency query. +/// A short retention is sufficient. 
+const OBJECTS_HISTORY_EPOCHS_TO_KEEP: u64 = 2; + +#[derive(Parser, Clone, Debug)] +#[clap( + name = "Sui indexer", + about = "An off-fullnode service serving data from Sui protocol" +)] +pub struct IndexerConfig { + #[clap(long, alias = "db-url")] + pub database_url: Url, + + #[clap(flatten)] + pub connection_pool_config: ConnectionPoolConfig, + + #[clap(long, default_value = "0.0.0.0:9184")] + pub metrics_address: SocketAddr, + + #[command(subcommand)] + pub command: Command, +} + +#[derive(Args, Debug, Clone)] +pub struct NameServiceOptions { + #[arg(default_value_t = NameServiceConfig::default().package_address)] + #[arg(long = "name-service-package-address")] + pub package_address: SuiAddress, + #[arg(default_value_t = NameServiceConfig::default().registry_id)] + #[arg(long = "name-service-registry-id")] + pub registry_id: ObjectID, + #[arg(default_value_t = NameServiceConfig::default().reverse_registry_id)] + #[arg(long = "name-service-reverse-registry-id")] + pub reverse_registry_id: ObjectID, +} + +impl NameServiceOptions { + pub fn to_config(&self) -> NameServiceConfig { + let Self { + package_address, + registry_id, + reverse_registry_id, + } = self.clone(); + NameServiceConfig { + package_address, + registry_id, + reverse_registry_id, + } + } +} + +impl Default for NameServiceOptions { + fn default() -> Self { + let NameServiceConfig { + package_address, + registry_id, + reverse_registry_id, + } = NameServiceConfig::default(); + Self { + package_address, + registry_id, + reverse_registry_id, + } + } +} + +#[derive(Args, Debug, Clone)] +pub struct JsonRpcConfig { + #[command(flatten)] + pub name_service_options: NameServiceOptions, + + #[clap(long, default_value = "0.0.0.0:9000")] + pub rpc_address: SocketAddr, + + #[clap(long)] + pub rpc_client_url: String, +} + +#[derive(Args, Debug, Default, Clone)] +#[group(required = true, multiple = true)] +pub struct IngestionSources { + #[arg(long)] + pub data_ingestion_path: Option, + + #[arg(long)] + pub remote_store_url: Option, + + #[arg(long)] + pub rpc_client_url: Option, +} + +#[derive(Args, Debug, Clone)] +pub struct IngestionConfig { + #[clap(flatten)] + pub sources: IngestionSources, + + #[arg( + long, + default_value_t = Self::DEFAULT_CHECKPOINT_DOWNLOAD_QUEUE_SIZE, + env = "DOWNLOAD_QUEUE_SIZE", + )] + pub checkpoint_download_queue_size: usize, + + /// Start checkpoint to ingest from, this is optional and if not provided, the ingestion will + /// start from the next checkpoint after the latest committed checkpoint. + #[arg(long, env = "START_CHECKPOINT")] + pub start_checkpoint: Option, + + /// End checkpoint to ingest until, this is optional and if not provided, the ingestion will + /// continue until u64::MAX. + #[arg(long, env = "END_CHECKPOINT")] + pub end_checkpoint: Option, + + #[arg( + long, + default_value_t = Self::DEFAULT_CHECKPOINT_DOWNLOAD_TIMEOUT, + env = "INGESTION_READER_TIMEOUT_SECS", + )] + pub checkpoint_download_timeout: u64, + + /// Limit indexing parallelism on big checkpoints to avoid OOMing by limiting the total size of + /// the checkpoint download queue. + #[arg( + long, + default_value_t = Self::DEFAULT_CHECKPOINT_DOWNLOAD_QUEUE_SIZE_BYTES, + env = "CHECKPOINT_PROCESSING_BATCH_DATA_LIMIT", + )] + pub checkpoint_download_queue_size_bytes: usize, + + /// Whether to delete processed checkpoint files from the local directory, + /// when running Fullnode-colocated indexer. 
+ #[arg(long, default_value_t = true)] + pub gc_checkpoint_files: bool, +} + +impl IngestionConfig { + const DEFAULT_CHECKPOINT_DOWNLOAD_QUEUE_SIZE: usize = 200; + const DEFAULT_CHECKPOINT_DOWNLOAD_QUEUE_SIZE_BYTES: usize = 20_000_000; + const DEFAULT_CHECKPOINT_DOWNLOAD_TIMEOUT: u64 = 20; +} + +impl Default for IngestionConfig { + fn default() -> Self { + Self { + sources: Default::default(), + start_checkpoint: None, + end_checkpoint: None, + checkpoint_download_queue_size: Self::DEFAULT_CHECKPOINT_DOWNLOAD_QUEUE_SIZE, + checkpoint_download_timeout: Self::DEFAULT_CHECKPOINT_DOWNLOAD_TIMEOUT, + checkpoint_download_queue_size_bytes: + Self::DEFAULT_CHECKPOINT_DOWNLOAD_QUEUE_SIZE_BYTES, + gc_checkpoint_files: true, + } + } +} + +#[derive(Args, Debug, Clone)] +pub struct BackFillConfig { + /// Maximum number of concurrent tasks to run. + #[arg( + long, + default_value_t = Self::DEFAULT_MAX_CONCURRENCY, + )] + pub max_concurrency: usize, + /// Number of checkpoints to backfill in a single SQL command. + #[arg( + long, + default_value_t = Self::DEFAULT_CHUNK_SIZE, + )] + pub chunk_size: usize, +} + +impl BackFillConfig { + const DEFAULT_MAX_CONCURRENCY: usize = 10; + const DEFAULT_CHUNK_SIZE: usize = 1000; +} + +#[derive(Subcommand, Clone, Debug)] +pub enum Command { + Indexer { + #[command(flatten)] + ingestion_config: IngestionConfig, + #[command(flatten)] + snapshot_config: SnapshotLagConfig, + #[command(flatten)] + pruning_options: PruningOptions, + #[command(flatten)] + upload_options: UploadOptions, + }, + JsonRpcService(JsonRpcConfig), + ResetDatabase { + #[clap(long)] + force: bool, + /// If true, only drop all tables but do not run the migrations. + /// That is, no tables will exist in the DB after the reset. + #[clap(long, default_value_t = false)] + skip_migrations: bool, + }, + /// Run through the migration scripts. + RunMigrations, + /// Backfill DB tables for some ID range [\start, \end]. + /// The tool will automatically slice it into smaller ranges and for each range, + /// it first makes a read query to the DB to get data needed for backfil if needed, + /// which then can be processed and written back to the DB. + /// To add a new backfill, add a new module and implement the `BackfillTask` trait. + /// full_objects_history.rs provides an example to do SQL-only backfills. + /// system_state_summary_json.rs provides an example to do SQL + processing backfills. + RunBackFill { + /// Start of the range to backfill, inclusive. + /// It can be a checkpoint number or an epoch or any other identifier that can be used to + /// slice the backfill range. + start: usize, + /// End of the range to backfill, inclusive. + end: usize, + #[clap(subcommand)] + runner_kind: BackfillTaskKind, + #[command(flatten)] + backfill_config: BackFillConfig, + }, + /// Restore the database from formal snaphots. + Restore(RestoreConfig), + Benchmark(BenchmarkConfig), +} + +#[derive(Args, Default, Debug, Clone)] +pub struct PruningOptions { + /// Path to TOML file containing configuration for retention policies. + #[arg(long)] + pub pruning_config_path: Option, +} + +/// Represents the default retention policy and overrides for prunable tables. Instantiated only if +/// `PruningOptions` is provided on indexer start. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct RetentionConfig { + /// Default retention policy for all tables. + pub epochs_to_keep: u64, + /// A map of tables to their respective retention policies that will override the default. 
+ /// Prunable tables not named here will use the default retention policy. + #[serde(default)] + pub overrides: HashMap, +} + +impl PruningOptions { + /// Load default retention policy and overrides from file. + pub fn load_from_file(&self) -> Option { + let config_path = self.pruning_config_path.as_ref()?; + + let contents = std::fs::read_to_string(config_path) + .expect("Failed to read default retention policy and overrides from file"); + let retention_with_overrides = toml::de::from_str::(&contents) + .expect("Failed to parse into RetentionConfig struct"); + + let default_retention = retention_with_overrides.epochs_to_keep; + + assert!( + default_retention > 0, + "Default retention must be greater than 0" + ); + assert!( + retention_with_overrides + .overrides + .values() + .all(|&policy| policy > 0), + "All retention overrides must be greater than 0" + ); + + Some(retention_with_overrides) + } +} + +impl RetentionConfig { + /// Create a new `RetentionConfig` with the specified default retention and overrides. Call + /// `finalize()` on the instance to update the `policies` field with the default retention + /// policy for all tables that do not have an override specified. + pub fn new(epochs_to_keep: u64, overrides: HashMap) -> Self { + Self { + epochs_to_keep, + overrides, + } + } + + pub fn new_with_default_retention_only_for_testing(epochs_to_keep: u64) -> Self { + let mut overrides = HashMap::new(); + overrides.insert( + PrunableTable::ObjectsHistory, + OBJECTS_HISTORY_EPOCHS_TO_KEEP, + ); + + Self::new(epochs_to_keep, HashMap::new()) + } + + /// Consumes this struct to produce a full mapping of every prunable table and its retention + /// policy. By default, every prunable table will have the default retention policy from + /// `epochs_to_keep`. Some tables like `objects_history` will observe a different default + /// retention policy. These default values are overridden by any entries in `overrides`. 
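// Illustrative worked example (not part of this patch), showing what
// `retention_policies()` below produces for a default of 5 epochs plus one
// override. It assumes the imports already present in this file
// (`PrunableTable`, `RetentionConfig`) and mirrors the unit tests further down.
fn example_retention_policies() {
    let mut overrides = std::collections::HashMap::new();
    overrides.insert(PrunableTable::Transactions, 20u64);
    let policies = RetentionConfig::new(5, overrides).retention_policies();
    assert_eq!(policies[&PrunableTable::Transactions], 20);
    // objects_history keeps its own short default (OBJECTS_HISTORY_EPOCHS_TO_KEEP = 2).
    assert_eq!(policies[&PrunableTable::ObjectsHistory], 2);
    // Every other prunable table falls back to the default of 5.
}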
+ pub fn retention_policies(self) -> HashMap { + let RetentionConfig { + epochs_to_keep, + mut overrides, + } = self; + + for table in PrunableTable::iter() { + let default_retention = match table { + PrunableTable::ObjectsHistory => OBJECTS_HISTORY_EPOCHS_TO_KEEP, + _ => epochs_to_keep, + }; + + overrides.entry(table).or_insert(default_retention); + } + + overrides + } +} + +#[derive(Args, Debug, Clone)] +pub struct SnapshotLagConfig { + #[arg( + long = "objects-snapshot-min-checkpoint-lag", + default_value_t = Self::DEFAULT_MIN_LAG, + env = "OBJECTS_SNAPSHOT_MIN_CHECKPOINT_LAG", + )] + pub snapshot_min_lag: usize, + + #[arg( + long = "objects-snapshot-sleep-duration", + default_value_t = Self::DEFAULT_SLEEP_DURATION_SEC, + )] + pub sleep_duration: u64, +} + +impl SnapshotLagConfig { + const DEFAULT_MIN_LAG: usize = 300; + const DEFAULT_SLEEP_DURATION_SEC: u64 = 5; +} + +impl Default for SnapshotLagConfig { + fn default() -> Self { + SnapshotLagConfig { + snapshot_min_lag: Self::DEFAULT_MIN_LAG, + sleep_duration: Self::DEFAULT_SLEEP_DURATION_SEC, + } + } +} + +#[derive(Args, Debug, Clone, Default)] +pub struct UploadOptions { + #[arg(long, env = "GCS_DISPLAY_BUCKET")] + pub gcs_display_bucket: Option, + #[arg(long, env = "GCS_CRED_PATH")] + pub gcs_cred_path: Option, +} + +#[derive(Args, Debug, Clone)] +pub struct RestoreConfig { + #[arg(long, env = "START_EPOCH", required = true)] + pub start_epoch: u64, + #[arg(long, env = "SNAPSHOT_ENDPOINT")] + pub snapshot_endpoint: String, + #[arg(long, env = "SNAPSHOT_BUCKET")] + pub snapshot_bucket: String, + #[arg(long, env = "SNAPSHOT_DOWNLOAD_DIR", required = true)] + pub snapshot_download_dir: String, + + #[arg(long, env = "GCS_ARCHIVE_BUCKET")] + pub gcs_archive_bucket: String, + #[arg(long, env = "GCS_DISPLAY_BUCKET")] + pub gcs_display_bucket: String, + + #[arg(env = "OBJECT_STORE_CONCURRENT_LIMIT")] + pub object_store_concurrent_limit: usize, + #[arg(env = "OBJECT_STORE_MAX_TIMEOUT_SECS")] + pub object_store_max_timeout_secs: u64, +} + +impl Default for RestoreConfig { + fn default() -> Self { + Self { + start_epoch: 0, // not used b/c it's required + snapshot_endpoint: "https://formal-snapshot.mainnet.sui.io".to_string(), + snapshot_bucket: "mysten-mainnet-formal".to_string(), + snapshot_download_dir: "".to_string(), // not used b/c it's required + gcs_archive_bucket: "mysten-mainnet-archives".to_string(), + gcs_display_bucket: "mysten-mainnet-display-table".to_string(), + object_store_concurrent_limit: 50, + object_store_max_timeout_secs: 512, + } + } +} + +#[derive(Args, Debug, Clone)] +pub struct BenchmarkConfig { + #[arg( + long, + default_value_t = 200, + help = "Number of transactions in a checkpoint." + )] + pub checkpoint_size: u64, + #[arg( + long, + default_value_t = 2000, + help = "Total number of synthetic checkpoints to generate." + )] + pub num_checkpoints: u64, + #[arg( + long, + default_value_t = 1, + help = "Customize the first checkpoint sequence number to be committed, must be non-zero." + )] + pub starting_checkpoint: u64, + #[arg( + long, + default_value_t = false, + help = "Whether to reset the database before running." + )] + pub reset_db: bool, + #[arg( + long, + help = "Path to workload directory. If not provided, a temporary directory will be created.\ + If provided, synthetic workload generator will either load data from it if it exists or generate new data.\ + This avoids repeat generation of the same data." 
+ )] + pub workload_dir: Option, +} + +#[cfg(test)] +mod test { + use super::*; + use std::io::Write; + use tap::Pipe; + use tempfile::NamedTempFile; + + fn parse_args<'a, T>(args: impl IntoIterator) -> Result + where + T: clap::Args + clap::FromArgMatches, + { + clap::Command::new("test") + .no_binary_name(true) + .pipe(T::augment_args) + .try_get_matches_from(args) + .and_then(|matches| T::from_arg_matches(&matches)) + } + + #[test] + fn name_service() { + parse_args::(["--name-service-registry-id=0x1"]).unwrap(); + parse_args::([ + "--name-service-package-address", + "0x0000000000000000000000000000000000000000000000000000000000000001", + ]) + .unwrap(); + parse_args::(["--name-service-reverse-registry-id=0x1"]).unwrap(); + parse_args::([ + "--name-service-registry-id=0x1", + "--name-service-package-address", + "0x0000000000000000000000000000000000000000000000000000000000000002", + "--name-service-reverse-registry-id=0x3", + ]) + .unwrap(); + parse_args::([]).unwrap(); + } + + #[test] + fn ingestion_sources() { + parse_args::(["--data-ingestion-path=/tmp/foo"]).unwrap(); + parse_args::(["--remote-store-url=http://example.com"]).unwrap(); + parse_args::(["--rpc-client-url=http://example.com"]).unwrap(); + + parse_args::([ + "--data-ingestion-path=/tmp/foo", + "--remote-store-url=http://example.com", + "--rpc-client-url=http://example.com", + ]) + .unwrap(); + + // At least one must be present + parse_args::([]).unwrap_err(); + } + + #[test] + fn json_rpc_config() { + parse_args::(["--rpc-client-url=http://example.com"]).unwrap(); + + // Can include name service options and bind address + parse_args::([ + "--rpc-address=127.0.0.1:8080", + "--name-service-registry-id=0x1", + "--rpc-client-url=http://example.com", + ]) + .unwrap(); + + // fullnode rpc url must be present + parse_args::([]).unwrap_err(); + } + + #[test] + fn pruning_options_with_objects_history_override() { + let mut temp_file = NamedTempFile::new().unwrap(); + let toml_content = r#" + epochs_to_keep = 5 + + [overrides] + objects_history = 10 + transactions = 20 + "#; + temp_file.write_all(toml_content.as_bytes()).unwrap(); + let temp_path: PathBuf = temp_file.path().to_path_buf(); + let pruning_options = PruningOptions { + pruning_config_path: Some(temp_path.clone()), + }; + let retention_config = pruning_options.load_from_file().unwrap(); + + // Assert the parsed values + assert_eq!(retention_config.epochs_to_keep, 5); + assert_eq!( + retention_config + .overrides + .get(&PrunableTable::ObjectsHistory) + .copied(), + Some(10) + ); + assert_eq!( + retention_config + .overrides + .get(&PrunableTable::Transactions) + .copied(), + Some(20) + ); + assert_eq!(retention_config.overrides.len(), 2); + + let retention_policies = retention_config.retention_policies(); + + for table in PrunableTable::iter() { + let Some(retention) = retention_policies.get(&table).copied() else { + panic!("Expected a retention policy for table {:?}", table); + }; + + match table { + PrunableTable::ObjectsHistory => assert_eq!(retention, 10), + PrunableTable::Transactions => assert_eq!(retention, 20), + _ => assert_eq!(retention, 5), + }; + } + } + + #[test] + fn pruning_options_no_objects_history_override() { + let mut temp_file = NamedTempFile::new().unwrap(); + let toml_content = r#" + epochs_to_keep = 5 + + [overrides] + tx_affected_addresses = 10 + transactions = 20 + "#; + temp_file.write_all(toml_content.as_bytes()).unwrap(); + let temp_path: PathBuf = temp_file.path().to_path_buf(); + let pruning_options = PruningOptions { + pruning_config_path: 
Some(temp_path.clone()), + }; + let retention_config = pruning_options.load_from_file().unwrap(); + + // Assert the parsed values + assert_eq!(retention_config.epochs_to_keep, 5); + assert_eq!( + retention_config + .overrides + .get(&PrunableTable::TxAffectedAddresses) + .copied(), + Some(10) + ); + assert_eq!( + retention_config + .overrides + .get(&PrunableTable::Transactions) + .copied(), + Some(20) + ); + assert_eq!(retention_config.overrides.len(), 2); + + let retention_policies = retention_config.retention_policies(); + + for table in PrunableTable::iter() { + let Some(retention) = retention_policies.get(&table).copied() else { + panic!("Expected a retention policy for table {:?}", table); + }; + + match table { + PrunableTable::ObjectsHistory => { + assert_eq!(retention, OBJECTS_HISTORY_EPOCHS_TO_KEEP) + } + PrunableTable::TxAffectedAddresses => assert_eq!(retention, 10), + PrunableTable::Transactions => assert_eq!(retention, 20), + _ => assert_eq!(retention, 5), + }; + } + } + + #[test] + fn test_invalid_pruning_config_file() { + let toml_str = r#" + epochs_to_keep = 5 + + [overrides] + objects_history = 10 + transactions = 20 + invalid_table = 30 + "#; + + let result = toml::from_str::(toml_str); + assert!(result.is_err(), "Expected an error, but parsing succeeded"); + + if let Err(e) = result { + assert!( + e.to_string().contains("unknown variant `invalid_table`"), + "Error message doesn't mention the invalid table" + ); + } + } +} diff --git a/crates/sui-mvr-indexer/src/database.rs b/crates/sui-mvr-indexer/src/database.rs new file mode 100644 index 0000000000000..9c1446ff9c8ed --- /dev/null +++ b/crates/sui-mvr-indexer/src/database.rs @@ -0,0 +1,161 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use std::sync::Arc; + +use diesel::prelude::ConnectionError; +use diesel_async::pooled_connection::bb8::Pool; +use diesel_async::pooled_connection::bb8::PooledConnection; +use diesel_async::pooled_connection::bb8::RunError; +use diesel_async::pooled_connection::AsyncDieselConnectionManager; +use diesel_async::pooled_connection::PoolError; +use diesel_async::RunQueryDsl; +use diesel_async::{AsyncConnection, AsyncPgConnection}; +use futures::FutureExt; +use url::Url; + +use crate::db::ConnectionConfig; +use crate::db::ConnectionPoolConfig; + +#[derive(Clone, Debug)] +pub struct ConnectionPool { + database_url: Arc, + pool: Pool, +} + +impl ConnectionPool { + pub async fn new(database_url: Url, config: ConnectionPoolConfig) -> Result { + let database_url = Arc::new(database_url); + let connection_config = config.connection_config(); + let mut manager_config = diesel_async::pooled_connection::ManagerConfig::default(); + manager_config.custom_setup = + Box::new(move |url| establish_connection(url, connection_config).boxed()); + let manager = + AsyncDieselConnectionManager::new_with_config(database_url.as_str(), manager_config); + + Pool::builder() + .max_size(config.pool_size) + .connection_timeout(config.connection_timeout) + .build(manager) + .await + .map(|pool| Self { database_url, pool }) + } + + /// Retrieves a connection from the pool. + pub async fn get(&self) -> Result, RunError> { + self.pool.get().await.map(Connection::PooledConnection) + } + + /// Get a new dedicated connection that will not be managed by the pool. + /// An application may want a persistent connection (e.g. to do a + /// postgres LISTEN) that will not be closed or repurposed by the pool. 
+ /// + /// This method allows reusing the manager's configuration but otherwise + /// bypassing the pool + pub async fn dedicated_connection(&self) -> Result, PoolError> { + self.pool + .dedicated_connection() + .await + .map(Connection::Dedicated) + } + + /// Returns information about the current state of the pool. + pub fn state(&self) -> bb8::State { + self.pool.state() + } + + /// Returns the database url that this pool is configured with + pub fn url(&self) -> &Url { + &self.database_url + } +} + +pub enum Connection<'a> { + PooledConnection(PooledConnection<'a, AsyncPgConnection>), + Dedicated(AsyncPgConnection), +} + +impl Connection<'static> { + pub async fn dedicated(database_url: &Url) -> Result { + AsyncPgConnection::establish(database_url.as_str()) + .await + .map(Connection::Dedicated) + } + + /// Run the provided Migrations + pub async fn run_pending_migrations( + self, + migrations: M, + ) -> diesel::migration::Result>> + where + M: diesel::migration::MigrationSource + Send + 'static, + { + use diesel::migration::MigrationVersion; + use diesel_migrations::MigrationHarness; + + let mut connection = + diesel_async::async_connection_wrapper::AsyncConnectionWrapper::::from(self); + + tokio::task::spawn_blocking(move || { + connection + .run_pending_migrations(migrations) + .map(|versions| versions.iter().map(MigrationVersion::as_owned).collect()) + }) + .await + .unwrap() + } +} + +impl<'a> std::ops::Deref for Connection<'a> { + type Target = AsyncPgConnection; + + fn deref(&self) -> &Self::Target { + match self { + Connection::PooledConnection(pooled) => pooled.deref(), + Connection::Dedicated(dedicated) => dedicated, + } + } +} + +impl<'a> std::ops::DerefMut for Connection<'a> { + fn deref_mut(&mut self) -> &mut AsyncPgConnection { + match self { + Connection::PooledConnection(pooled) => pooled.deref_mut(), + Connection::Dedicated(dedicated) => dedicated, + } + } +} + +impl ConnectionConfig { + async fn apply(&self, connection: &mut AsyncPgConnection) -> Result<(), diesel::result::Error> { + diesel::sql_query(format!( + "SET statement_timeout = {}", + self.statement_timeout.as_millis(), + )) + .execute(connection) + .await?; + + if self.read_only { + diesel::sql_query("SET default_transaction_read_only = 'on'") + .execute(connection) + .await?; + } + + Ok(()) + } +} + +/// Function used by the Connection Pool Manager to establish and setup new connections +async fn establish_connection( + url: &str, + config: ConnectionConfig, +) -> Result { + let mut connection = AsyncPgConnection::establish(url).await?; + + config + .apply(&mut connection) + .await + .map_err(ConnectionError::CouldntSetupConfiguration)?; + + Ok(connection) +} diff --git a/crates/sui-mvr-indexer/src/db.rs b/crates/sui-mvr-indexer/src/db.rs new file mode 100644 index 0000000000000..4a2893603bb10 --- /dev/null +++ b/crates/sui-mvr-indexer/src/db.rs @@ -0,0 +1,395 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::database::Connection; +use crate::errors::IndexerError; +use crate::handlers::pruner::PrunableTable; +use clap::Args; +use diesel::migration::{Migration, MigrationSource, MigrationVersion}; +use diesel::pg::Pg; +use diesel::prelude::QueryableByName; +use diesel::table; +use diesel::QueryDsl; +use diesel_migrations::{embed_migrations, EmbeddedMigrations}; +use std::collections::{BTreeSet, HashSet}; +use std::time::Duration; +use strum::IntoEnumIterator; +use tracing::info; + +table! 
{ + __diesel_schema_migrations (version) { + version -> VarChar, + run_on -> Timestamp, + } +} + +const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/pg"); + +#[derive(Args, Debug, Clone)] +pub struct ConnectionPoolConfig { + #[arg(long, default_value_t = 100)] + #[arg(env = "DB_POOL_SIZE")] + pub pool_size: u32, + #[arg(long, value_parser = parse_duration, default_value = "30")] + #[arg(env = "DB_CONNECTION_TIMEOUT")] + pub connection_timeout: Duration, + #[arg(long, value_parser = parse_duration, default_value = "3600")] + #[arg(env = "DB_STATEMENT_TIMEOUT")] + pub statement_timeout: Duration, +} + +fn parse_duration(arg: &str) -> Result { + let seconds = arg.parse()?; + Ok(std::time::Duration::from_secs(seconds)) +} + +impl ConnectionPoolConfig { + const DEFAULT_POOL_SIZE: u32 = 100; + const DEFAULT_CONNECTION_TIMEOUT: u64 = 30; + const DEFAULT_STATEMENT_TIMEOUT: u64 = 3600; + + pub(crate) fn connection_config(&self) -> ConnectionConfig { + ConnectionConfig { + statement_timeout: self.statement_timeout, + read_only: false, + } + } + + pub fn set_pool_size(&mut self, size: u32) { + self.pool_size = size; + } + + pub fn set_connection_timeout(&mut self, timeout: Duration) { + self.connection_timeout = timeout; + } + + pub fn set_statement_timeout(&mut self, timeout: Duration) { + self.statement_timeout = timeout; + } +} + +impl Default for ConnectionPoolConfig { + fn default() -> Self { + Self { + pool_size: Self::DEFAULT_POOL_SIZE, + connection_timeout: Duration::from_secs(Self::DEFAULT_CONNECTION_TIMEOUT), + statement_timeout: Duration::from_secs(Self::DEFAULT_STATEMENT_TIMEOUT), + } + } +} + +#[derive(Debug, Clone, Copy)] +pub struct ConnectionConfig { + pub statement_timeout: Duration, + pub read_only: bool, +} + +/// Checks that the local migration scripts is a prefix of the records in the database. +/// This allows us run migration scripts against a DB at anytime, without worrying about +/// existing readers fail over. +/// We do however need to make sure that whenever we are deploying a new version of either reader or writer, +/// we must first run migration scripts to ensure that there is not more local scripts than in the DB record. +pub async fn check_db_migration_consistency(conn: &mut Connection<'_>) -> Result<(), IndexerError> { + info!("Starting compatibility check"); + let migrations: Vec>> = MIGRATIONS.migrations().map_err(|err| { + IndexerError::DbMigrationError(format!( + "Failed to fetch local migrations from schema: {err}" + )) + })?; + let local_migrations: Vec<_> = migrations + .into_iter() + .map(|m| m.name().version().as_owned()) + .collect(); + check_db_migration_consistency_impl(conn, local_migrations).await?; + info!("Compatibility check passed"); + Ok(()) +} + +async fn check_db_migration_consistency_impl( + conn: &mut Connection<'_>, + local_migrations: Vec>, +) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + + // Unfortunately we cannot call applied_migrations() directly on the connection, + // since it implicitly creates the __diesel_schema_migrations table if it doesn't exist, + // which is a write operation that we don't want to do in this function. + let applied_migrations: BTreeSet> = BTreeSet::from_iter( + __diesel_schema_migrations::table + .select(__diesel_schema_migrations::version) + .load(conn) + .await?, + ); + + // We check that the local migrations is a subset of the applied migrations. 
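+    // For example, if the database records migrations [m1, m2, m3] and this binary
+    // ships [m1, m2], the check passes (every local script has been applied); if the
+    // binary ships [m1, m2, m4] and m4 is not in the database, the check fails below.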
+ let unapplied_migrations: Vec<_> = local_migrations + .into_iter() + .filter(|m| !applied_migrations.contains(m)) + .collect(); + + if unapplied_migrations.is_empty() { + return Ok(()); + } + + Err(IndexerError::DbMigrationError(format!( + "This binary expected the following migrations to have been run, and they were not: {:?}", + unapplied_migrations + ))) +} + +/// Check that prunable tables exist in the database. +pub async fn check_prunable_tables_valid(conn: &mut Connection<'_>) -> Result<(), IndexerError> { + info!("Starting compatibility check"); + + use diesel_async::RunQueryDsl; + + let select_parent_tables = r#" + SELECT c.relname AS table_name + FROM pg_class c + JOIN pg_namespace n ON n.oid = c.relnamespace + LEFT JOIN pg_partitioned_table pt ON pt.partrelid = c.oid + WHERE c.relkind IN ('r', 'p') -- 'r' for regular tables, 'p' for partitioned tables + AND n.nspname = 'public' + AND ( + pt.partrelid IS NOT NULL -- This is a partitioned (parent) table + OR NOT EXISTS ( -- This is not a partition (child table) + SELECT 1 + FROM pg_inherits i + WHERE i.inhrelid = c.oid + ) + ); + "#; + + #[derive(QueryableByName)] + struct TableName { + #[diesel(sql_type = diesel::sql_types::Text)] + table_name: String, + } + + let result: Vec = diesel::sql_query(select_parent_tables) + .load(conn) + .await + .map_err(|e| IndexerError::DbMigrationError(format!("Failed to fetch tables: {e}")))?; + + let parent_tables_from_db: HashSet<_> = result.into_iter().map(|t| t.table_name).collect(); + + for key in PrunableTable::iter() { + if !parent_tables_from_db.contains(key.as_ref()) { + return Err(IndexerError::GenericError(format!( + "Invalid retention policy override provided for table {}: does not exist in the database", + key + ))); + } + } + + info!("Compatibility check passed"); + Ok(()) +} + +pub use setup_postgres::{reset_database, run_migrations}; + +pub mod setup_postgres { + use crate::{database::Connection, db::MIGRATIONS}; + use anyhow::anyhow; + use diesel_async::RunQueryDsl; + use tracing::info; + + pub async fn reset_database(mut conn: Connection<'static>) -> Result<(), anyhow::Error> { + info!("Resetting PG database ..."); + clear_database(&mut conn).await?; + run_migrations(conn).await?; + info!("Reset database complete."); + Ok(()) + } + + pub async fn clear_database(conn: &mut Connection<'static>) -> Result<(), anyhow::Error> { + info!("Clearing the database..."); + let drop_all_tables = " + DO $$ DECLARE + r RECORD; + BEGIN + FOR r IN (SELECT tablename FROM pg_tables WHERE schemaname = 'public') + LOOP + EXECUTE 'DROP TABLE IF EXISTS ' || quote_ident(r.tablename) || ' CASCADE'; + END LOOP; + END $$;"; + diesel::sql_query(drop_all_tables).execute(conn).await?; + info!("Dropped all tables."); + + let drop_all_procedures = " + DO $$ DECLARE + r RECORD; + BEGIN + FOR r IN (SELECT proname, oidvectortypes(proargtypes) as argtypes + FROM pg_proc INNER JOIN pg_namespace ns ON (pg_proc.pronamespace = ns.oid) + WHERE ns.nspname = 'public' AND prokind = 'p') + LOOP + EXECUTE 'DROP PROCEDURE IF EXISTS ' || quote_ident(r.proname) || '(' || r.argtypes || ') CASCADE'; + END LOOP; + END $$;"; + diesel::sql_query(drop_all_procedures).execute(conn).await?; + info!("Dropped all procedures."); + + let drop_all_functions = " + DO $$ DECLARE + r RECORD; + BEGIN + FOR r IN (SELECT proname, oidvectortypes(proargtypes) as argtypes + FROM pg_proc INNER JOIN pg_namespace ON (pg_proc.pronamespace = pg_namespace.oid) + WHERE pg_namespace.nspname = 'public' AND prokind = 'f') + LOOP + EXECUTE 'DROP FUNCTION IF 
EXISTS ' || quote_ident(r.proname) || '(' || r.argtypes || ') CASCADE'; + END LOOP; + END $$;"; + diesel::sql_query(drop_all_functions).execute(conn).await?; + info!("Database cleared."); + Ok(()) + } + + pub async fn run_migrations(conn: Connection<'static>) -> Result<(), anyhow::Error> { + info!("Running migrations ..."); + conn.run_pending_migrations(MIGRATIONS) + .await + .map_err(|e| anyhow!("Failed to run migrations {e}"))?; + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use crate::database::{Connection, ConnectionPool}; + use crate::db::{ + check_db_migration_consistency, check_db_migration_consistency_impl, reset_database, + ConnectionPoolConfig, MIGRATIONS, + }; + use crate::tempdb::TempDb; + use diesel::migration::{Migration, MigrationSource}; + use diesel::pg::Pg; + use diesel_migrations::MigrationHarness; + + // Check that the migration records in the database created from the local schema + // pass the consistency check. + #[tokio::test] + async fn db_migration_consistency_smoke_test() { + let database = TempDb::new().unwrap(); + let pool = ConnectionPool::new( + database.database().url().to_owned(), + ConnectionPoolConfig { + pool_size: 2, + ..Default::default() + }, + ) + .await + .unwrap(); + + reset_database(pool.dedicated_connection().await.unwrap()) + .await + .unwrap(); + check_db_migration_consistency(&mut pool.get().await.unwrap()) + .await + .unwrap(); + } + + #[tokio::test] + async fn db_migration_consistency_non_prefix_test() { + let database = TempDb::new().unwrap(); + let pool = ConnectionPool::new( + database.database().url().to_owned(), + ConnectionPoolConfig { + pool_size: 2, + ..Default::default() + }, + ) + .await + .unwrap(); + + reset_database(pool.dedicated_connection().await.unwrap()) + .await + .unwrap(); + let mut connection = pool.get().await.unwrap(); + + let mut sync_connection_wrapper = + diesel_async::async_connection_wrapper::AsyncConnectionWrapper::::from( + pool.dedicated_connection().await.unwrap(), + ); + + tokio::task::spawn_blocking(move || { + sync_connection_wrapper + .revert_migration(MIGRATIONS.migrations().unwrap().last().unwrap()) + .unwrap(); + }) + .await + .unwrap(); + // Local migrations is one record more than the applied migrations. + // This will fail the consistency check since it's not a prefix. + assert!(check_db_migration_consistency(&mut connection) + .await + .is_err()); + + pool.dedicated_connection() + .await + .unwrap() + .run_pending_migrations(MIGRATIONS) + .await + .unwrap(); + // After running pending migrations they should be consistent. + check_db_migration_consistency(&mut connection) + .await + .unwrap(); + } + + #[tokio::test] + async fn db_migration_consistency_prefix_test() { + let database = TempDb::new().unwrap(); + let pool = ConnectionPool::new( + database.database().url().to_owned(), + ConnectionPoolConfig { + pool_size: 2, + ..Default::default() + }, + ) + .await + .unwrap(); + + reset_database(pool.dedicated_connection().await.unwrap()) + .await + .unwrap(); + + let migrations: Vec>> = MIGRATIONS.migrations().unwrap(); + let mut local_migrations: Vec<_> = migrations.iter().map(|m| m.name().version()).collect(); + local_migrations.pop(); + // Local migrations is one record less than the applied migrations. + // This should pass the consistency check since it's still a prefix. 
+ check_db_migration_consistency_impl(&mut pool.get().await.unwrap(), local_migrations) + .await + .unwrap(); + } + + #[tokio::test] + async fn db_migration_consistency_subset_test() { + let database = TempDb::new().unwrap(); + let pool = ConnectionPool::new( + database.database().url().to_owned(), + ConnectionPoolConfig { + pool_size: 2, + ..Default::default() + }, + ) + .await + .unwrap(); + + reset_database(pool.dedicated_connection().await.unwrap()) + .await + .unwrap(); + + let migrations: Vec>> = MIGRATIONS.migrations().unwrap(); + let mut local_migrations: Vec<_> = migrations.iter().map(|m| m.name().version()).collect(); + local_migrations.remove(2); + + // Local migrations are missing one record compared to the applied migrations, which should + // still be okay. + check_db_migration_consistency_impl(&mut pool.get().await.unwrap(), local_migrations) + .await + .unwrap(); + } +} diff --git a/crates/sui-mvr-indexer/src/errors.rs b/crates/sui-mvr-indexer/src/errors.rs new file mode 100644 index 0000000000000..c8971e39781ad --- /dev/null +++ b/crates/sui-mvr-indexer/src/errors.rs @@ -0,0 +1,172 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use fastcrypto::error::FastCryptoError; +use jsonrpsee::core::Error as RpcError; +use jsonrpsee::types::error::CallError; +use sui_json_rpc::name_service::NameServiceError; +use thiserror::Error; + +use sui_types::base_types::ObjectIDParseError; +use sui_types::error::{SuiError, SuiObjectResponseError, UserInputError}; + +#[derive(Debug, Error)] +pub struct DataDownloadError { + pub error: IndexerError, + pub next_checkpoint_sequence_number: u64, +} + +impl std::fmt::Display for DataDownloadError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "next_checkpoint_seq: {}, error: {}", + self.next_checkpoint_sequence_number, self.error + ) + } +} + +#[derive(Debug, Error)] +pub enum IndexerError { + #[error("Indexer failed to read from archives store with error: `{0}`")] + ArchiveReaderError(String), + + #[error("Stream closed unexpectedly with error: `{0}`")] + ChannelClosed(String), + + #[error("Indexer failed to convert timestamp to NaiveDateTime with error: `{0}`")] + DateTimeParsingError(String), + + #[error("Indexer failed to deserialize event from events table with error: `{0}`")] + EventDeserializationError(String), + + #[error("Fullnode returns unexpected responses, which may block indexers from proceeding, with error: `{0}`")] + UnexpectedFullnodeResponseError(String), + + #[error("Indexer failed to transform data with error: `{0}`")] + DataTransformationError(String), + + #[error("Indexer failed to read fullnode with error: `{0}`")] + FullNodeReadingError(String), + + #[error("Indexer failed to convert structs to diesel Insertable with error: `{0}`")] + InsertableParsingError(String), + + #[error("Indexer failed to build JsonRpcServer with error: `{0}`")] + JsonRpcServerError(#[from] sui_json_rpc::error::Error), + + #[error("Indexer failed to find object mutations, which should never happen.")] + ObjectMutationNotAvailable, + + #[error("Indexer failed to build PG connection pool with error: `{0}`")] + PgConnectionPoolInitError(String), + + #[error("Indexer failed to get a pool connection from PG connection pool with error: `{0}`")] + PgPoolConnectionError(String), + + #[error("Indexer failed to read PostgresDB with error: `{0}`")] + PostgresReadError(String), + + #[error("Indexer failed to reset PostgresDB with error: `{0}`")] + PostgresResetError(String), + + 
#[error("Indexer failed to commit changes to PostgresDB with error: `{0}`")] + PostgresWriteError(String), + + #[error(transparent)] + PostgresError(#[from] diesel::result::Error), + + #[error("Indexer failed to initialize fullnode Http client with error: `{0}`")] + HttpClientInitError(String), + + #[error("Indexer failed to serialize/deserialize with error: `{0}`")] + SerdeError(String), + + #[error("Indexer error related to dynamic field: `{0}`")] + DynamicFieldError(String), + + #[error("Indexer does not support the feature with error: `{0}`")] + NotSupportedError(String), + + #[error("Indexer read corrupted/incompatible data from persistent storage: `{0}`")] + PersistentStorageDataCorruptionError(String), + + #[error("Indexer generic error: `{0}`")] + GenericError(String), + + #[error("GCS error: `{0}`")] + GcsError(String), + + #[error("Indexer failed to resolve object to move struct with error: `{0}`")] + ResolveMoveStructError(String), + + #[error(transparent)] + UncategorizedError(#[from] anyhow::Error), + + #[error(transparent)] + ObjectIdParseError(#[from] ObjectIDParseError), + + #[error("Invalid transaction digest with error: `{0}`")] + InvalidTransactionDigestError(String), + + #[error(transparent)] + SuiError(#[from] SuiError), + + #[error(transparent)] + BcsError(#[from] bcs::Error), + + #[error("Invalid argument with error: `{0}`")] + InvalidArgumentError(String), + + #[error(transparent)] + UserInputError(#[from] UserInputError), + + #[error("Indexer failed to resolve module with error: `{0}`")] + ModuleResolutionError(String), + + #[error(transparent)] + ObjectResponseError(#[from] SuiObjectResponseError), + + #[error(transparent)] + FastCryptoError(#[from] FastCryptoError), + + #[error("`{0}`: `{1}`")] + ErrorWithContext(String, Box), + + #[error("Indexer failed to send item to channel with error: `{0}`")] + MpscChannelError(String), + + #[error(transparent)] + NameServiceError(#[from] NameServiceError), + + #[error("Inconsistent migration records: {0}")] + DbMigrationError(String), +} + +pub trait Context { + fn context(self, context: &str) -> Result; +} + +impl Context for Result { + fn context(self, context: &str) -> Result { + self.map_err(|e| IndexerError::ErrorWithContext(context.to_string(), Box::new(e))) + } +} + +impl From for RpcError { + fn from(e: IndexerError) -> Self { + RpcError::Call(CallError::Failed(e.into())) + } +} + +impl From for IndexerError { + fn from(value: tokio::task::JoinError) -> Self { + IndexerError::UncategorizedError(anyhow::Error::from(value)) + } +} + +impl From for IndexerError { + fn from(value: diesel_async::pooled_connection::bb8::RunError) -> Self { + Self::PgPoolConnectionError(value.to_string()) + } +} diff --git a/crates/sui-mvr-indexer/src/handlers/checkpoint_handler.rs b/crates/sui-mvr-indexer/src/handlers/checkpoint_handler.rs new file mode 100644 index 0000000000000..170bda5ff6108 --- /dev/null +++ b/crates/sui-mvr-indexer/src/handlers/checkpoint_handler.rs @@ -0,0 +1,653 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use std::collections::BTreeMap; +use std::sync::Arc; + +use async_trait::async_trait; +use itertools::Itertools; +use sui_types::dynamic_field::DynamicFieldInfo; +use tokio_util::sync::CancellationToken; +use tracing::{info, warn}; + +use move_core_types::language_storage::{StructTag, TypeTag}; +use mysten_metrics::{get_metrics, spawn_monitored_task}; +use sui_data_ingestion_core::Worker; +use sui_rest_api::{CheckpointData, CheckpointTransaction}; +use sui_synthetic_ingestion::IndexerProgress; +use sui_types::dynamic_field::DynamicFieldType; +use sui_types::effects::{ObjectChange, TransactionEffectsAPI}; +use sui_types::event::SystemEpochInfoEvent; +use sui_types::messages_checkpoint::{ + CertifiedCheckpointSummary, CheckpointContents, CheckpointSequenceNumber, +}; +use sui_types::object::Object; +use sui_types::object::Owner; +use sui_types::sui_system_state::{get_sui_system_state, SuiSystemStateTrait}; +use sui_types::transaction::TransactionDataAPI; +use tokio::sync::watch; + +use crate::errors::IndexerError; +use crate::handlers::committer::start_tx_checkpoint_commit_task; +use crate::metrics::IndexerMetrics; +use crate::models::display::StoredDisplay; +use crate::models::epoch::{EndOfEpochUpdate, EpochEndInfo, EpochStartInfo, StartOfEpochUpdate}; +use crate::models::obj_indices::StoredObjectVersion; +use crate::store::{IndexerStore, PgIndexerStore}; +use crate::types::{ + EventIndex, IndexedCheckpoint, IndexedDeletedObject, IndexedEvent, IndexedObject, + IndexedPackage, IndexedTransaction, IndexerResult, TransactionKind, TxIndex, +}; + +use super::tx_processor::EpochEndIndexingObjectStore; +use super::tx_processor::TxChangesProcessor; +use super::CheckpointDataToCommit; +use super::EpochToCommit; +use super::TransactionObjectChangesToCommit; + +const CHECKPOINT_QUEUE_SIZE: usize = 100; + +pub async fn new_handlers( + state: PgIndexerStore, + metrics: IndexerMetrics, + cancel: CancellationToken, + committed_checkpoints_tx: Option>>, + start_checkpoint_opt: Option, + end_checkpoint_opt: Option, +) -> Result<(CheckpointHandler, u64), IndexerError> { + let start_checkpoint = match start_checkpoint_opt { + Some(start_checkpoint) => start_checkpoint, + None => state + .get_latest_checkpoint_sequence_number() + .await? 
+ .map(|seq| seq.saturating_add(1)) + .unwrap_or_default(), + }; + + let checkpoint_queue_size = std::env::var("CHECKPOINT_QUEUE_SIZE") + .unwrap_or(CHECKPOINT_QUEUE_SIZE.to_string()) + .parse::() + .unwrap(); + let global_metrics = get_metrics().unwrap(); + let (indexed_checkpoint_sender, indexed_checkpoint_receiver) = + mysten_metrics::metered_channel::channel( + checkpoint_queue_size, + &global_metrics + .channel_inflight + .with_label_values(&["checkpoint_indexing"]), + ); + + let state_clone = state.clone(); + let metrics_clone = metrics.clone(); + spawn_monitored_task!(start_tx_checkpoint_commit_task( + state_clone, + metrics_clone, + indexed_checkpoint_receiver, + cancel.clone(), + committed_checkpoints_tx, + start_checkpoint, + end_checkpoint_opt, + )); + Ok(( + CheckpointHandler::new(state, metrics, indexed_checkpoint_sender), + start_checkpoint, + )) +} + +pub struct CheckpointHandler { + state: PgIndexerStore, + metrics: IndexerMetrics, + indexed_checkpoint_sender: mysten_metrics::metered_channel::Sender, +} + +#[async_trait] +impl Worker for CheckpointHandler { + type Result = (); + async fn process_checkpoint(&self, checkpoint: &CheckpointData) -> anyhow::Result<()> { + let time_now_ms = chrono::Utc::now().timestamp_millis(); + let cp_download_lag = time_now_ms - checkpoint.checkpoint_summary.timestamp_ms as i64; + info!( + "checkpoint download lag for cp {}: {} ms", + checkpoint.checkpoint_summary.sequence_number, cp_download_lag + ); + self.metrics.download_lag_ms.set(cp_download_lag); + self.metrics + .max_downloaded_checkpoint_sequence_number + .set(checkpoint.checkpoint_summary.sequence_number as i64); + self.metrics + .downloaded_checkpoint_timestamp_ms + .set(checkpoint.checkpoint_summary.timestamp_ms as i64); + info!( + "Indexer lag: downloaded checkpoint {} with time now {} and checkpoint time {}", + checkpoint.checkpoint_summary.sequence_number, + time_now_ms, + checkpoint.checkpoint_summary.timestamp_ms + ); + let checkpoint_data = Self::index_checkpoint( + &self.state, + checkpoint, + Arc::new(self.metrics.clone()), + Self::index_packages(std::slice::from_ref(checkpoint), &self.metrics), + ) + .await?; + self.indexed_checkpoint_sender.send(checkpoint_data).await?; + Ok(()) + } +} + +impl CheckpointHandler { + fn new( + state: PgIndexerStore, + metrics: IndexerMetrics, + indexed_checkpoint_sender: mysten_metrics::metered_channel::Sender, + ) -> Self { + Self { + state, + metrics, + indexed_checkpoint_sender, + } + } + + async fn index_epoch( + state: &PgIndexerStore, + data: &CheckpointData, + ) -> Result, IndexerError> { + let checkpoint_object_store = EpochEndIndexingObjectStore::new(data); + + let CheckpointData { + transactions, + checkpoint_summary, + checkpoint_contents: _, + } = data; + + // Genesis epoch + if *checkpoint_summary.sequence_number() == 0 { + info!("Processing genesis epoch"); + let system_state_summary = + get_sui_system_state(&checkpoint_object_store)?.into_sui_system_state_summary(); + return Ok(Some(EpochToCommit { + last_epoch: None, + new_epoch: StartOfEpochUpdate::new(system_state_summary, EpochStartInfo::default()), + })); + } + + // If not end of epoch, return + if checkpoint_summary.end_of_epoch_data.is_none() { + return Ok(None); + } + + let system_state_summary = + get_sui_system_state(&checkpoint_object_store)?.into_sui_system_state_summary(); + + let epoch_event_opt = transactions + .iter() + .find_map(|t| { + t.events.as_ref()?.data.iter().find_map(|ev| { + if ev.is_system_epoch_info_event() { + 
Some(bcs::from_bytes::(&ev.contents)) + } else { + None + } + }) + }) + .transpose()?; + if epoch_event_opt.is_none() { + warn!( + "No SystemEpochInfoEvent found at end of epoch {}, some epoch data will be set to default.", + checkpoint_summary.epoch, + ); + assert!( + system_state_summary.safe_mode, + "Sui is not in safe mode but no SystemEpochInfoEvent found at end of epoch {}", + checkpoint_summary.epoch + ); + } + + // At some point while committing data in epoch X - 1, we will encounter a new epoch X. We + // want to retrieve X - 2's network total transactions to calculate the number of + // transactions that occurred in epoch X - 1. + let first_tx_sequence_number = match system_state_summary.epoch { + // If first epoch change, this number is 0 + 1 => Ok(0), + _ => { + let last_epoch = system_state_summary.epoch - 2; + state + .get_network_total_transactions_by_end_of_epoch(last_epoch) + .await? + .ok_or_else(|| { + IndexerError::PersistentStorageDataCorruptionError(format!( + "Network total transactions for epoch {} not found", + last_epoch + )) + }) + } + }?; + + let epoch_end_info = EpochEndInfo::new(epoch_event_opt.as_ref()); + let epoch_start_info = EpochStartInfo::new( + checkpoint_summary.sequence_number.saturating_add(1), + checkpoint_summary.network_total_transactions, + epoch_event_opt.as_ref(), + ); + + Ok(Some(EpochToCommit { + last_epoch: Some(EndOfEpochUpdate::new( + checkpoint_summary, + first_tx_sequence_number, + epoch_end_info, + )), + new_epoch: StartOfEpochUpdate::new(system_state_summary, epoch_start_info), + })) + } + + fn derive_object_versions( + object_history_changes: &TransactionObjectChangesToCommit, + ) -> Vec { + let mut object_versions = vec![]; + for changed_obj in object_history_changes.changed_objects.iter() { + object_versions.push(StoredObjectVersion { + object_id: changed_obj.object.id().to_vec(), + object_version: changed_obj.object.version().value() as i64, + cp_sequence_number: changed_obj.checkpoint_sequence_number as i64, + }); + } + for deleted_obj in object_history_changes.deleted_objects.iter() { + object_versions.push(StoredObjectVersion { + object_id: deleted_obj.object_id.to_vec(), + object_version: deleted_obj.object_version as i64, + cp_sequence_number: deleted_obj.checkpoint_sequence_number as i64, + }); + } + object_versions + } + + async fn index_checkpoint( + state: &PgIndexerStore, + data: &CheckpointData, + metrics: Arc, + packages: Vec, + ) -> Result { + let checkpoint_seq = data.checkpoint_summary.sequence_number; + info!(checkpoint_seq, "Indexing checkpoint data blob"); + + // Index epoch + let epoch = Self::index_epoch(state, data).await?; + + // Index Objects + let object_changes: TransactionObjectChangesToCommit = + Self::index_objects(data, &metrics).await?; + let object_history_changes: TransactionObjectChangesToCommit = + Self::index_objects_history(data).await?; + let object_versions = Self::derive_object_versions(&object_history_changes); + + let (checkpoint, db_transactions, db_events, db_tx_indices, db_event_indices, db_displays) = { + let CheckpointData { + transactions, + checkpoint_summary, + checkpoint_contents, + } = data; + + let (db_transactions, db_events, db_tx_indices, db_event_indices, db_displays) = + Self::index_transactions( + transactions, + checkpoint_summary, + checkpoint_contents, + &metrics, + ) + .await?; + + let successful_tx_num: u64 = db_transactions.iter().map(|t| t.successful_tx_num).sum(); + ( + IndexedCheckpoint::from_sui_checkpoint( + checkpoint_summary, + checkpoint_contents, + 
successful_tx_num as usize, + ), + db_transactions, + db_events, + db_tx_indices, + db_event_indices, + db_displays, + ) + }; + let time_now_ms = chrono::Utc::now().timestamp_millis(); + metrics + .index_lag_ms + .set(time_now_ms - checkpoint.timestamp_ms as i64); + metrics + .max_indexed_checkpoint_sequence_number + .set(checkpoint.sequence_number as i64); + metrics + .indexed_checkpoint_timestamp_ms + .set(checkpoint.timestamp_ms as i64); + info!( + "Indexer lag: indexed checkpoint {} with time now {} and checkpoint time {}", + checkpoint.sequence_number, time_now_ms, checkpoint.timestamp_ms + ); + + Ok(CheckpointDataToCommit { + checkpoint, + transactions: db_transactions, + events: db_events, + tx_indices: db_tx_indices, + event_indices: db_event_indices, + display_updates: db_displays, + object_changes, + object_history_changes, + object_versions, + packages, + epoch, + }) + } + + async fn index_transactions( + transactions: &[CheckpointTransaction], + checkpoint_summary: &CertifiedCheckpointSummary, + checkpoint_contents: &CheckpointContents, + metrics: &IndexerMetrics, + ) -> IndexerResult<( + Vec, + Vec, + Vec, + Vec, + BTreeMap, + )> { + let checkpoint_seq = checkpoint_summary.sequence_number(); + + let mut tx_seq_num_iter = checkpoint_contents + .enumerate_transactions(checkpoint_summary) + .map(|(seq, execution_digest)| (execution_digest.transaction, seq)); + + if checkpoint_contents.size() != transactions.len() { + return Err(IndexerError::FullNodeReadingError(format!( + "CheckpointContents has different size {} compared to Transactions {} for checkpoint {}", + checkpoint_contents.size(), + transactions.len(), + checkpoint_seq + ))); + } + + let mut db_transactions = Vec::new(); + let mut db_events = Vec::new(); + let mut db_displays = BTreeMap::new(); + let mut db_tx_indices = Vec::new(); + let mut db_event_indices = Vec::new(); + + for tx in transactions { + let CheckpointTransaction { + transaction: sender_signed_data, + effects: fx, + events, + input_objects, + output_objects, + } = tx; + // Unwrap safe - we checked they have equal length above + let (tx_digest, tx_sequence_number) = tx_seq_num_iter.next().unwrap(); + if tx_digest != *sender_signed_data.digest() { + return Err(IndexerError::FullNodeReadingError(format!( + "Transactions has different ordering from CheckpointContents, for checkpoint {}, Mismatch found at {} v.s. 
{}", + checkpoint_seq, tx_digest, sender_signed_data.digest() + ))); + } + + let tx = sender_signed_data.transaction_data(); + let events = events + .as_ref() + .map(|events| events.data.clone()) + .unwrap_or_default(); + + let transaction_kind = if tx.is_system_tx() { + TransactionKind::SystemTransaction + } else { + TransactionKind::ProgrammableTransaction + }; + + db_events.extend(events.iter().enumerate().map(|(idx, event)| { + IndexedEvent::from_event( + tx_sequence_number, + idx as u64, + *checkpoint_seq, + tx_digest, + event, + checkpoint_summary.timestamp_ms, + ) + })); + + db_event_indices.extend( + events.iter().enumerate().map(|(idx, event)| { + EventIndex::from_event(tx_sequence_number, idx as u64, event) + }), + ); + + db_displays.extend( + events + .iter() + .flat_map(StoredDisplay::try_from_event) + .map(|display| (display.object_type.clone(), display)), + ); + + let objects: Vec<_> = input_objects.iter().chain(output_objects.iter()).collect(); + + let (balance_change, object_changes) = + TxChangesProcessor::new(&objects, metrics.clone()) + .get_changes(tx, fx, &tx_digest) + .await?; + + let db_txn = IndexedTransaction { + tx_sequence_number, + tx_digest, + checkpoint_sequence_number: *checkpoint_summary.sequence_number(), + timestamp_ms: checkpoint_summary.timestamp_ms, + sender_signed_data: sender_signed_data.data().clone(), + effects: fx.clone(), + object_changes, + balance_change, + events, + transaction_kind: transaction_kind.clone(), + successful_tx_num: if fx.status().is_ok() { + tx.kind().tx_count() as u64 + } else { + 0 + }, + }; + + db_transactions.push(db_txn); + + // Input Objects + let input_objects = tx + .input_objects() + .expect("committed txns have been validated") + .into_iter() + .map(|obj_kind| obj_kind.object_id()) + .collect(); + + // Changed Objects + let changed_objects = fx + .all_changed_objects() + .into_iter() + .map(|(object_ref, _owner, _write_kind)| object_ref.0) + .collect(); + + // Affected Objects + let affected_objects = fx + .object_changes() + .into_iter() + .map(|ObjectChange { id, .. 
}| id) + .collect(); + + // Payers + let payers = vec![tx.gas_owner()]; + + // Sender + let sender = tx.sender(); + + // Recipients + let recipients = fx + .all_changed_objects() + .into_iter() + .filter_map(|(_object_ref, owner, _write_kind)| match owner { + Owner::AddressOwner(address) => Some(address), + _ => None, + }) + .unique() + .collect(); + + // Move Calls + let move_calls = tx + .move_calls() + .into_iter() + .map(|(p, m, f)| (*p, m.to_string(), f.to_string())) + .collect(); + + db_tx_indices.push(TxIndex { + tx_sequence_number, + transaction_digest: tx_digest, + checkpoint_sequence_number: *checkpoint_seq, + input_objects, + changed_objects, + affected_objects, + sender, + payers, + recipients, + move_calls, + tx_kind: transaction_kind, + }); + } + Ok(( + db_transactions, + db_events, + db_tx_indices, + db_event_indices, + db_displays, + )) + } + + pub(crate) async fn index_objects( + data: &CheckpointData, + metrics: &IndexerMetrics, + ) -> Result { + let _timer = metrics.indexing_objects_latency.start_timer(); + let checkpoint_seq = data.checkpoint_summary.sequence_number; + + let eventually_removed_object_refs_post_version = + data.eventually_removed_object_refs_post_version(); + let indexed_eventually_removed_objects = eventually_removed_object_refs_post_version + .into_iter() + .map(|obj_ref| IndexedDeletedObject { + object_id: obj_ref.0, + object_version: obj_ref.1.into(), + checkpoint_sequence_number: checkpoint_seq, + }) + .collect(); + + let latest_live_output_objects = data.latest_live_output_objects(); + let changed_objects = latest_live_output_objects + .into_iter() + .map(|o| { + try_extract_df_kind(o) + .map(|df_kind| IndexedObject::from_object(checkpoint_seq, o.clone(), df_kind)) + }) + .collect::, _>>()?; + + Ok(TransactionObjectChangesToCommit { + changed_objects, + deleted_objects: indexed_eventually_removed_objects, + }) + } + + // similar to index_objects, but objects_history keeps all versions of objects + async fn index_objects_history( + data: &CheckpointData, + ) -> Result { + let checkpoint_seq = data.checkpoint_summary.sequence_number; + let deleted_objects = data + .transactions + .iter() + .flat_map(|tx| tx.removed_object_refs_post_version()) + .collect::>(); + let indexed_deleted_objects: Vec = deleted_objects + .into_iter() + .map(|obj_ref| IndexedDeletedObject { + object_id: obj_ref.0, + object_version: obj_ref.1.into(), + checkpoint_sequence_number: checkpoint_seq, + }) + .collect(); + + let output_objects: Vec<_> = data + .transactions + .iter() + .flat_map(|tx| &tx.output_objects) + .collect(); + + // TODO(gegaowp): the current df_info implementation is not correct, + // but we have decided remove all df_* except df_kind. 
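+        // Every output object version written in this checkpoint becomes an IndexedObject
+        // for objects_history; only its dynamic-field kind is derived, via try_extract_df_kind.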
+ let changed_objects = output_objects + .into_iter() + .map(|o| { + try_extract_df_kind(o) + .map(|df_kind| IndexedObject::from_object(checkpoint_seq, o.clone(), df_kind)) + }) + .collect::, _>>()?; + + Ok(TransactionObjectChangesToCommit { + changed_objects, + deleted_objects: indexed_deleted_objects, + }) + } + + fn index_packages( + checkpoint_data: &[CheckpointData], + metrics: &IndexerMetrics, + ) -> Vec { + let _timer = metrics.indexing_packages_latency.start_timer(); + checkpoint_data + .iter() + .flat_map(|data| { + let checkpoint_sequence_number = data.checkpoint_summary.sequence_number; + data.transactions + .iter() + .flat_map(|tx| &tx.output_objects) + .filter_map(|o| { + if let sui_types::object::Data::Package(p) = &o.data { + Some(IndexedPackage { + package_id: o.id(), + move_package: p.clone(), + checkpoint_sequence_number, + }) + } else { + None + } + }) + .collect::>() + }) + .collect() + } +} + +/// If `o` is a dynamic `Field`, determine whether it represents a Dynamic Field or a Dynamic +/// Object Field based on its type. +fn try_extract_df_kind(o: &Object) -> IndexerResult> { + // Skip if not a move object + let Some(move_object) = o.data.try_as_move() else { + return Ok(None); + }; + + if !move_object.type_().is_dynamic_field() { + return Ok(None); + } + + let type_: StructTag = move_object.type_().clone().into(); + let [name, _] = type_.type_params.as_slice() else { + return Ok(None); + }; + + Ok(Some( + if matches!(name, TypeTag::Struct(s) if DynamicFieldInfo::is_dynamic_object_field_wrapper(s)) + { + DynamicFieldType::DynamicObject + } else { + DynamicFieldType::DynamicField + }, + )) +} diff --git a/crates/sui-mvr-indexer/src/handlers/committer.rs b/crates/sui-mvr-indexer/src/handlers/committer.rs new file mode 100644 index 0000000000000..9cc174e8496fa --- /dev/null +++ b/crates/sui-mvr-indexer/src/handlers/committer.rs @@ -0,0 +1,281 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use std::collections::{BTreeMap, HashMap}; + +use sui_synthetic_ingestion::IndexerProgress; +use sui_types::messages_checkpoint::CheckpointSequenceNumber; +use tap::tap::TapFallible; +use tokio::sync::watch; +use tokio_util::sync::CancellationToken; +use tracing::instrument; +use tracing::{error, info}; + +use crate::metrics::IndexerMetrics; +use crate::store::IndexerStore; +use crate::types::IndexerResult; + +use super::{CheckpointDataToCommit, CommitterTables, CommitterWatermark, EpochToCommit}; + +pub(crate) const CHECKPOINT_COMMIT_BATCH_SIZE: usize = 100; + +pub async fn start_tx_checkpoint_commit_task( + state: S, + metrics: IndexerMetrics, + tx_indexing_receiver: mysten_metrics::metered_channel::Receiver, + cancel: CancellationToken, + mut committed_checkpoints_tx: Option>>, + mut next_checkpoint_sequence_number: CheckpointSequenceNumber, + end_checkpoint_opt: Option, +) -> IndexerResult<()> +where + S: IndexerStore + Clone + Sync + Send + 'static, +{ + use futures::StreamExt; + + info!("Indexer checkpoint commit task started..."); + let checkpoint_commit_batch_size = std::env::var("CHECKPOINT_COMMIT_BATCH_SIZE") + .unwrap_or(CHECKPOINT_COMMIT_BATCH_SIZE.to_string()) + .parse::() + .unwrap(); + info!("Using checkpoint commit batch size {checkpoint_commit_batch_size}"); + + let mut stream = mysten_metrics::metered_channel::ReceiverStream::new(tx_indexing_receiver) + .ready_chunks(checkpoint_commit_batch_size); + + let mut unprocessed = HashMap::new(); + let mut batch = vec![]; + + while let Some(indexed_checkpoint_batch) = stream.next().await { + if cancel.is_cancelled() { + break; + } + + // split the batch into smaller batches per epoch to handle partitioning + for checkpoint in indexed_checkpoint_batch { + unprocessed.insert(checkpoint.checkpoint.sequence_number, checkpoint); + } + while let Some(checkpoint) = unprocessed.remove(&next_checkpoint_sequence_number) { + let epoch = checkpoint.epoch.clone(); + batch.push(checkpoint); + next_checkpoint_sequence_number += 1; + let epoch_number_option = epoch.as_ref().map(|epoch| epoch.new_epoch_id()); + // The batch will consist of contiguous checkpoints and at most one epoch boundary at + // the end. + if batch.len() == checkpoint_commit_batch_size || epoch.is_some() { + commit_checkpoints( + &state, + batch, + epoch, + &metrics, + &mut committed_checkpoints_tx, + ) + .await; + batch = vec![]; + } + if let Some(epoch_number) = epoch_number_option { + state.upload_display(epoch_number).await.tap_err(|e| { + error!( + "Failed to upload display table before epoch {} with error: {}", + epoch_number, + e.to_string() + ); + })?; + } + // stop adding to the commit batch if we've reached the end checkpoint + if let Some(end_checkpoint_sequence_number) = end_checkpoint_opt { + if next_checkpoint_sequence_number > end_checkpoint_sequence_number { + break; + } + } + } + if !batch.is_empty() { + commit_checkpoints(&state, batch, None, &metrics, &mut committed_checkpoints_tx).await; + batch = vec![]; + } + + // stop the commit task if we've reached the end checkpoint + if let Some(end_checkpoint_sequence_number) = end_checkpoint_opt { + if next_checkpoint_sequence_number > end_checkpoint_sequence_number { + break; + } + } + } + Ok(()) +} + +/// Writes indexed checkpoint data to the database, and then update watermark upper bounds and +/// metrics. Expects `indexed_checkpoint_batch` to be non-empty, and contain contiguous checkpoints. +/// There can be at most one epoch boundary at the end. 
If an epoch boundary is detected, +/// epoch-partitioned tables must be advanced. +// Unwrap: Caller needs to make sure indexed_checkpoint_batch is not empty +#[instrument(skip_all, fields( + first = indexed_checkpoint_batch.first().as_ref().unwrap().checkpoint.sequence_number, + last = indexed_checkpoint_batch.last().as_ref().unwrap().checkpoint.sequence_number +))] +async fn commit_checkpoints( + state: &S, + indexed_checkpoint_batch: Vec, + epoch: Option, + metrics: &IndexerMetrics, + committed_checkpoints_tx: &mut Option>>, +) where + S: IndexerStore + Clone + Sync + Send + 'static, +{ + let mut checkpoint_batch = vec![]; + let mut tx_batch = vec![]; + let mut events_batch = vec![]; + let mut tx_indices_batch = vec![]; + let mut event_indices_batch = vec![]; + let mut display_updates_batch = BTreeMap::new(); + let mut object_changes_batch = vec![]; + let mut object_history_changes_batch = vec![]; + let mut object_versions_batch = vec![]; + let mut packages_batch = vec![]; + + for indexed_checkpoint in indexed_checkpoint_batch { + let CheckpointDataToCommit { + checkpoint, + transactions, + events, + event_indices, + tx_indices, + display_updates, + object_changes, + object_history_changes, + object_versions, + packages, + epoch: _, + } = indexed_checkpoint; + checkpoint_batch.push(checkpoint); + tx_batch.push(transactions); + events_batch.push(events); + tx_indices_batch.push(tx_indices); + event_indices_batch.push(event_indices); + display_updates_batch.extend(display_updates.into_iter()); + object_changes_batch.push(object_changes); + object_history_changes_batch.push(object_history_changes); + object_versions_batch.push(object_versions); + packages_batch.push(packages); + } + + let first_checkpoint_seq = checkpoint_batch.first().unwrap().sequence_number; + let last_checkpoint = checkpoint_batch.last().unwrap(); + let indexer_progress = IndexerProgress { + checkpoint: last_checkpoint.sequence_number, + network_total_transactions: last_checkpoint.network_total_transactions, + }; + let committer_watermark = CommitterWatermark::from(last_checkpoint); + + let guard = metrics.checkpoint_db_commit_latency.start_timer(); + let tx_batch = tx_batch.into_iter().flatten().collect::>(); + let packages_batch = packages_batch.into_iter().flatten().collect::>(); + let checkpoint_num = checkpoint_batch.len(); + let tx_count = tx_batch.len(); + + { + let _step_1_guard = metrics.checkpoint_db_commit_latency_step_1.start_timer(); + let mut persist_tasks = vec![ + state.persist_packages(packages_batch), + state.persist_object_history(object_history_changes_batch.clone()), + ]; + if let Some(epoch_data) = epoch.clone() { + persist_tasks.push(state.persist_epoch(epoch_data)); + } + futures::future::join_all(persist_tasks) + .await + .into_iter() + .map(|res| { + if res.is_err() { + error!("Failed to persist data with error: {:?}", res); + } + res + }) + .collect::>>() + .expect("Persisting data into DB should not fail."); + } + + let is_epoch_end = epoch.is_some(); + + // On epoch boundary, we need to modify the existing partitions' upper bound, and introduce a + // new partition for incoming data for the upcoming epoch. 
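+    // Concretely: when this batch ends at the final checkpoint of epoch N, advance_epoch
+    // below fixes the upper bound of the epoch-N partitions and sets up partitions for
+    // epoch N + 1 before any of its data is written.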
+ if let Some(epoch_data) = epoch { + state + .advance_epoch(epoch_data) + .await + .tap_err(|e| { + error!("Failed to advance epoch with error: {}", e.to_string()); + }) + .expect("Advancing epochs in DB should not fail."); + metrics.total_epoch_committed.inc(); + } + + state + .persist_checkpoints(checkpoint_batch) + .await + .tap_err(|e| { + error!( + "Failed to persist checkpoint data with error: {}", + e.to_string() + ); + }) + .expect("Persisting data into DB should not fail."); + + if is_epoch_end { + // The epoch has advanced so we update the configs for the new protocol version, if it has changed. + let chain_id = state + .get_chain_identifier() + .await + .expect("Failed to get chain identifier") + .expect("Chain identifier should have been indexed at this point"); + let _ = state + .persist_protocol_configs_and_feature_flags(chain_id) + .await; + } + + state + .update_watermarks_upper_bound::(committer_watermark) + .await + .tap_err(|e| { + error!( + "Failed to update watermark upper bound with error: {}", + e.to_string() + ); + }) + .expect("Updating watermark upper bound in DB should not fail."); + + let elapsed = guard.stop_and_record(); + + info!( + elapsed, + "Checkpoint {}-{} committed with {} transactions.", + first_checkpoint_seq, + committer_watermark.checkpoint_hi_inclusive, + tx_count, + ); + metrics + .latest_tx_checkpoint_sequence_number + .set(committer_watermark.checkpoint_hi_inclusive as i64); + metrics + .total_tx_checkpoint_committed + .inc_by(checkpoint_num as u64); + metrics.total_transaction_committed.inc_by(tx_count as u64); + metrics.transaction_per_checkpoint.observe( + tx_count as f64 + / (committer_watermark.checkpoint_hi_inclusive - first_checkpoint_seq + 1) as f64, + ); + // 1000.0 is not necessarily the batch size, it's to roughly map average tx commit latency to [0.1, 1] seconds, + // which is well covered by DB_COMMIT_LATENCY_SEC_BUCKETS. + metrics + .thousand_transaction_avg_db_commit_latency + .observe(elapsed * 1000.0 / tx_count as f64); + + if let Some(committed_checkpoints_tx) = committed_checkpoints_tx.as_mut() { + if let Err(err) = committed_checkpoints_tx.send(Some(indexer_progress)) { + error!( + "Failed to send committed checkpoints to the watch channel: {}", + err + ); + } + } +} diff --git a/crates/sui-mvr-indexer/src/handlers/mod.rs b/crates/sui-mvr-indexer/src/handlers/mod.rs new file mode 100644 index 0000000000000..403ee8e22706c --- /dev/null +++ b/crates/sui-mvr-indexer/src/handlers/mod.rs @@ -0,0 +1,316 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use std::collections::BTreeMap; + +use async_trait::async_trait; +use futures::{FutureExt, StreamExt}; + +use serde::{Deserialize, Serialize}; +use sui_rest_api::CheckpointData; +use tokio_util::sync::CancellationToken; + +use crate::{ + errors::IndexerError, + models::{ + display::StoredDisplay, + epoch::{EndOfEpochUpdate, StartOfEpochUpdate}, + obj_indices::StoredObjectVersion, + }, + types::{ + EventIndex, IndexedCheckpoint, IndexedDeletedObject, IndexedEvent, IndexedObject, + IndexedPackage, IndexedTransaction, IndexerResult, TxIndex, + }, +}; + +pub mod checkpoint_handler; +pub mod committer; +pub mod objects_snapshot_handler; +pub mod pruner; +pub mod tx_processor; + +pub(crate) const CHECKPOINT_COMMIT_BATCH_SIZE: usize = 100; +pub(crate) const UNPROCESSED_CHECKPOINT_SIZE_LIMIT: usize = 1000; + +#[derive(Debug)] +pub struct CheckpointDataToCommit { + pub checkpoint: IndexedCheckpoint, + pub transactions: Vec, + pub events: Vec, + pub event_indices: Vec, + pub tx_indices: Vec, + pub display_updates: BTreeMap, + pub object_changes: TransactionObjectChangesToCommit, + pub object_history_changes: TransactionObjectChangesToCommit, + pub object_versions: Vec, + pub packages: Vec, + pub epoch: Option, +} + +#[derive(Clone, Debug)] +pub struct TransactionObjectChangesToCommit { + pub changed_objects: Vec, + pub deleted_objects: Vec, +} + +#[derive(Clone, Debug)] +pub struct EpochToCommit { + pub last_epoch: Option, + pub new_epoch: StartOfEpochUpdate, +} + +impl EpochToCommit { + pub fn new_epoch_id(&self) -> u64 { + self.new_epoch.epoch as u64 + } + + pub fn new_epoch_first_checkpoint_id(&self) -> u64 { + self.new_epoch.first_checkpoint_id as u64 + } + + pub fn last_epoch_total_transactions(&self) -> Option { + self.last_epoch + .as_ref() + .map(|e| e.epoch_total_transactions as u64) + } + + pub fn new_epoch_first_tx_sequence_number(&self) -> u64 { + self.new_epoch.first_tx_sequence_number as u64 + } +} + +pub struct CommonHandler { + handler: Box>, +} + +impl CommonHandler { + pub fn new(handler: Box>) -> Self { + Self { handler } + } + + async fn start_transform_and_load( + &self, + cp_receiver: mysten_metrics::metered_channel::Receiver<(CommitterWatermark, T)>, + cancel: CancellationToken, + start_checkpoint: u64, + end_checkpoint_opt: Option, + ) -> IndexerResult<()> { + let checkpoint_commit_batch_size = std::env::var("CHECKPOINT_COMMIT_BATCH_SIZE") + .unwrap_or(CHECKPOINT_COMMIT_BATCH_SIZE.to_string()) + .parse::() + .unwrap(); + let mut stream = mysten_metrics::metered_channel::ReceiverStream::new(cp_receiver) + .ready_chunks(checkpoint_commit_batch_size); + + // Mapping of ordered checkpoint data to ensure that we process them in order. The key is + // just the checkpoint sequence number, and the tuple is (CommitterWatermark, T). 
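+        // Chunks can arrive out of order, so they are buffered here and drained in
+        // checkpoint order starting from `next_cp_to_process` in the loop below.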
+ let mut unprocessed: BTreeMap = BTreeMap::new(); + let mut tuple_batch = vec![]; + let mut next_cp_to_process = start_checkpoint; + + loop { + if cancel.is_cancelled() { + return Ok(()); + } + + // Try to fetch new data tuple from the stream + if unprocessed.len() >= UNPROCESSED_CHECKPOINT_SIZE_LIMIT { + tracing::info!( + "Unprocessed checkpoint size reached limit {}, skip reading from stream...", + UNPROCESSED_CHECKPOINT_SIZE_LIMIT + ); + } else { + // Try to fetch new data tuple from the stream + match stream.next().now_or_never() { + Some(Some(tuple_chunk)) => { + if cancel.is_cancelled() { + return Ok(()); + } + for tuple in tuple_chunk { + unprocessed.insert(tuple.0.checkpoint_hi_inclusive, tuple); + } + } + Some(None) => break, // Stream has ended + None => {} // No new data tuple available right now + } + } + + // Process unprocessed checkpoints, even no new checkpoints from stream + let checkpoint_lag_limiter = self.handler.get_max_committable_checkpoint().await?; + let max_commitable_cp = std::cmp::min( + checkpoint_lag_limiter, + end_checkpoint_opt.unwrap_or(u64::MAX), + ); + // Stop pushing to tuple_batch if we've reached the end checkpoint. + while next_cp_to_process <= max_commitable_cp { + if let Some(data_tuple) = unprocessed.remove(&next_cp_to_process) { + tuple_batch.push(data_tuple); + next_cp_to_process += 1; + } else { + break; + } + } + + if !tuple_batch.is_empty() { + let committer_watermark = tuple_batch.last().unwrap().0; + let batch = tuple_batch.into_iter().map(|t| t.1).collect(); + self.handler.load(batch).await.map_err(|e| { + IndexerError::PostgresWriteError(format!( + "Failed to load transformed data into DB for handler {}: {}", + self.handler.name(), + e + )) + })?; + self.handler.set_watermark_hi(committer_watermark).await?; + tuple_batch = vec![]; + } + + if let Some(end_checkpoint) = end_checkpoint_opt { + if next_cp_to_process > end_checkpoint { + tracing::info!( + "Reached end checkpoint, stopping handler {}...", + self.handler.name() + ); + return Ok(()); + } + } + } + Err(IndexerError::ChannelClosed(format!( + "Checkpoint channel is closed unexpectedly for handler {}", + self.handler.name() + ))) + } +} + +#[async_trait] +pub trait Handler: Send + Sync { + /// return handler name + fn name(&self) -> String; + + /// commit batch of transformed data to DB + async fn load(&self, batch: Vec) -> IndexerResult<()>; + + /// read high watermark of the table DB + async fn get_watermark_hi(&self) -> IndexerResult>; + + /// Updates the relevant entries on the `watermarks` table with the full `CommitterWatermark`, + /// which tracks the latest epoch, cp, and tx sequence number of the committed batch. + async fn set_watermark_hi(&self, watermark: CommitterWatermark) -> IndexerResult<()>; + + /// By default, return u64::MAX, which means no extra waiting is needed before commiting; + /// get max committable checkpoint, for handlers that want to wait for some condition before commiting, + /// one use-case is the objects snapshot handler, + /// which waits for the lag between snapshot and latest checkpoint to reach a certain threshold. + async fn get_max_committable_checkpoint(&self) -> IndexerResult { + Ok(u64::MAX) + } +} + +/// The indexer writer operates on checkpoint data, which contains information on the current epoch, +/// checkpoint, and transaction. These three numbers form the watermark upper bound for each +/// committed table. The reader and pruner are responsible for determining which of the three units +/// will be used for a particular table. 
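+/// For example, committing a batch that ends at checkpoint 1000 of epoch 5, with a
+/// network total of 2_000_000 transactions so far, records the watermark
+/// (epoch_hi_inclusive = 5, checkpoint_hi_inclusive = 1000, tx_hi = 2_000_000).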
+#[derive(Clone, Copy, Ord, PartialOrd, Eq, PartialEq)] +pub struct CommitterWatermark { + pub epoch_hi_inclusive: u64, + pub checkpoint_hi_inclusive: u64, + pub tx_hi: u64, +} + +impl From<&IndexedCheckpoint> for CommitterWatermark { + fn from(checkpoint: &IndexedCheckpoint) -> Self { + Self { + epoch_hi_inclusive: checkpoint.epoch, + checkpoint_hi_inclusive: checkpoint.sequence_number, + tx_hi: checkpoint.network_total_transactions, + } + } +} + +impl From<&CheckpointData> for CommitterWatermark { + fn from(checkpoint: &CheckpointData) -> Self { + Self { + epoch_hi_inclusive: checkpoint.checkpoint_summary.epoch, + checkpoint_hi_inclusive: checkpoint.checkpoint_summary.sequence_number, + tx_hi: checkpoint.checkpoint_summary.network_total_transactions, + } + } +} + +/// Enum representing tables that the committer handler writes to. +#[derive( + Debug, + Eq, + PartialEq, + strum_macros::Display, + strum_macros::EnumString, + strum_macros::EnumIter, + strum_macros::AsRefStr, + Hash, + Serialize, + Deserialize, + Clone, +)] +#[strum(serialize_all = "snake_case")] +#[serde(rename_all = "snake_case")] +pub enum CommitterTables { + // Unpruned tables + ChainIdentifier, + Display, + Epochs, + FeatureFlags, + FullObjectsHistory, + Objects, + ObjectsVersion, + Packages, + ProtocolConfigs, + RawCheckpoints, + + // Prunable tables + ObjectsHistory, + Transactions, + Events, + + EventEmitPackage, + EventEmitModule, + EventSenders, + EventStructInstantiation, + EventStructModule, + EventStructName, + EventStructPackage, + + TxAffectedAddresses, + TxAffectedObjects, + TxCallsPkg, + TxCallsMod, + TxCallsFun, + TxChangedObjects, + TxDigests, + TxInputObjects, + TxKinds, + TxRecipients, + TxSenders, + + Checkpoints, + PrunerCpWatermark, +} + +/// Enum representing tables that the objects snapshot handler writes to. +#[derive( + Debug, + Eq, + PartialEq, + strum_macros::Display, + strum_macros::EnumString, + strum_macros::EnumIter, + strum_macros::AsRefStr, + Hash, + Serialize, + Deserialize, + Clone, +)] +#[strum(serialize_all = "snake_case")] +#[serde(rename_all = "snake_case")] +pub enum ObjectsSnapshotHandlerTables { + ObjectsSnapshot, +} diff --git a/crates/sui-mvr-indexer/src/handlers/objects_snapshot_handler.rs b/crates/sui-mvr-indexer/src/handlers/objects_snapshot_handler.rs new file mode 100644 index 0000000000000..d37d532827947 --- /dev/null +++ b/crates/sui-mvr-indexer/src/handlers/objects_snapshot_handler.rs @@ -0,0 +1,139 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use async_trait::async_trait; +use mysten_metrics::get_metrics; +use mysten_metrics::metered_channel::Sender; +use mysten_metrics::spawn_monitored_task; +use sui_data_ingestion_core::Worker; +use sui_rest_api::CheckpointData; +use tokio_util::sync::CancellationToken; +use tracing::info; + +use crate::config::SnapshotLagConfig; +use crate::store::PgIndexerStore; +use crate::types::IndexerResult; +use crate::{metrics::IndexerMetrics, store::IndexerStore}; + +use super::checkpoint_handler::CheckpointHandler; +use super::{CommitterWatermark, ObjectsSnapshotHandlerTables, TransactionObjectChangesToCommit}; +use super::{CommonHandler, Handler}; + +#[derive(Clone)] +pub struct ObjectsSnapshotHandler { + pub store: PgIndexerStore, + pub sender: Sender<(CommitterWatermark, TransactionObjectChangesToCommit)>, + snapshot_config: SnapshotLagConfig, + metrics: IndexerMetrics, +} + +pub struct CheckpointObjectChanges { + pub checkpoint_sequence_number: u64, + pub object_changes: TransactionObjectChangesToCommit, +} + +#[async_trait] +impl Worker for ObjectsSnapshotHandler { + type Result = (); + async fn process_checkpoint(&self, checkpoint: &CheckpointData) -> anyhow::Result<()> { + let transformed_data = CheckpointHandler::index_objects(checkpoint, &self.metrics).await?; + self.sender + .send((CommitterWatermark::from(checkpoint), transformed_data)) + .await?; + Ok(()) + } +} + +#[async_trait] +impl Handler for ObjectsSnapshotHandler { + fn name(&self) -> String { + "objects_snapshot_handler".to_string() + } + + async fn load( + &self, + transformed_data: Vec, + ) -> IndexerResult<()> { + self.store + .persist_objects_snapshot(transformed_data) + .await?; + Ok(()) + } + + async fn get_watermark_hi(&self) -> IndexerResult> { + self.store + .get_latest_object_snapshot_checkpoint_sequence_number() + .await + } + + async fn set_watermark_hi(&self, watermark: CommitterWatermark) -> IndexerResult<()> { + self.store + .update_watermarks_upper_bound::(watermark) + .await?; + + self.metrics + .latest_object_snapshot_sequence_number + .set(watermark.checkpoint_hi_inclusive as i64); + Ok(()) + } + + async fn get_max_committable_checkpoint(&self) -> IndexerResult { + let latest_checkpoint = self.store.get_latest_checkpoint_sequence_number().await?; + Ok(latest_checkpoint + .map(|seq| seq.saturating_sub(self.snapshot_config.snapshot_min_lag as u64)) + .unwrap_or_default()) // hold snapshot handler until at least one checkpoint is in DB + } +} + +pub async fn start_objects_snapshot_handler( + store: PgIndexerStore, + metrics: IndexerMetrics, + snapshot_config: SnapshotLagConfig, + cancel: CancellationToken, + start_checkpoint_opt: Option, + end_checkpoint_opt: Option, +) -> IndexerResult<(ObjectsSnapshotHandler, u64)> { + info!("Starting object snapshot handler..."); + + let global_metrics = get_metrics().unwrap(); + let (sender, receiver) = mysten_metrics::metered_channel::channel( + 600, + &global_metrics + .channel_inflight + .with_label_values(&["objects_snapshot_handler_checkpoint_data"]), + ); + + let objects_snapshot_handler = + ObjectsSnapshotHandler::new(store.clone(), sender, metrics.clone(), snapshot_config); + + let next_cp_from_db = objects_snapshot_handler + .get_watermark_hi() + .await? 
+ .map(|cp| cp.saturating_add(1)) + .unwrap_or_default(); + let start_checkpoint = start_checkpoint_opt.unwrap_or(next_cp_from_db); + let common_handler = CommonHandler::new(Box::new(objects_snapshot_handler.clone())); + spawn_monitored_task!(common_handler.start_transform_and_load( + receiver, + cancel, + start_checkpoint, + end_checkpoint_opt, + )); + Ok((objects_snapshot_handler, start_checkpoint)) +} + +impl ObjectsSnapshotHandler { + pub fn new( + store: PgIndexerStore, + sender: Sender<(CommitterWatermark, TransactionObjectChangesToCommit)>, + metrics: IndexerMetrics, + snapshot_config: SnapshotLagConfig, + ) -> ObjectsSnapshotHandler { + Self { + store, + sender, + metrics, + snapshot_config, + } + } +} diff --git a/crates/sui-mvr-indexer/src/handlers/pruner.rs b/crates/sui-mvr-indexer/src/handlers/pruner.rs new file mode 100644 index 0000000000000..85b6faa12f071 --- /dev/null +++ b/crates/sui-mvr-indexer/src/handlers/pruner.rs @@ -0,0 +1,288 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use mysten_metrics::spawn_monitored_task; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use std::time::Duration; +use strum_macros; +use tokio_util::sync::CancellationToken; +use tracing::{error, info}; + +use crate::config::RetentionConfig; +use crate::errors::IndexerError; +use crate::store::pg_partition_manager::PgPartitionManager; +use crate::store::PgIndexerStore; +use crate::{metrics::IndexerMetrics, store::IndexerStore, types::IndexerResult}; + +pub struct Pruner { + pub store: PgIndexerStore, + pub partition_manager: PgPartitionManager, + // TODO: (wlmyng) - we can remove this when pruner logic is updated to use `retention_policies`. + pub epochs_to_keep: u64, + pub retention_policies: HashMap, + pub metrics: IndexerMetrics, +} + +/// Enum representing tables that the pruner is allowed to prune. This corresponds to table names in +/// the database, and should be used in lieu of string literals. This enum is also meant to +/// facilitate the process of determining which unit (epoch, cp, or tx) should be used for the +/// table's range. Pruner will ignore any table that is not listed here. 
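The enum that follows leans on strum's EnumString derive with snake_case serialization, so a table name read from the database parses straight into a variant and can be looked up in the retention map, as `table_retention` does further down. Below is a cut-down sketch of that round trip, assuming strum_macros (and its companion strum crate) as used in the surrounding code; the two variants and the retention numbers are illustrative only.

use std::collections::HashMap;
use strum_macros::EnumString;

/// Cut-down stand-in for the prunable-table enum: snake_case strings parse into variants.
#[derive(Debug, Eq, PartialEq, Hash, EnumString)]
#[strum(serialize_all = "snake_case")]
enum Table {
    ObjectsHistory,
    Transactions,
}

/// Mirrors the shape of `table_retention`: unknown tables simply yield `None`.
fn retention_for(policies: &HashMap<Table, u64>, table_name: &str) -> Option<u64> {
    table_name
        .parse::<Table>()
        .ok()
        .and_then(|t| policies.get(&t).copied())
}

fn main() {
    let policies = HashMap::from([(Table::ObjectsHistory, 2), (Table::Transactions, 30)]);
    assert_eq!(retention_for(&policies, "objects_history"), Some(2));
    // Tables without a policy, or not listed in the enum, are ignored by the pruner.
    assert_eq!(retention_for(&policies, "events"), None);
}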
+#[derive( + Debug, + Eq, + PartialEq, + strum_macros::Display, + strum_macros::EnumString, + strum_macros::EnumIter, + strum_macros::AsRefStr, + Hash, + Serialize, + Deserialize, + Clone, +)] +#[strum(serialize_all = "snake_case")] +#[serde(rename_all = "snake_case")] +pub enum PrunableTable { + ObjectsHistory, + Transactions, + Events, + + EventEmitPackage, + EventEmitModule, + EventSenders, + EventStructInstantiation, + EventStructModule, + EventStructName, + EventStructPackage, + + TxAffectedAddresses, + TxAffectedObjects, + TxCallsPkg, + TxCallsMod, + TxCallsFun, + TxChangedObjects, + TxDigests, + TxInputObjects, + TxKinds, + TxRecipients, + TxSenders, + + Checkpoints, + PrunerCpWatermark, +} + +impl PrunableTable { + pub fn select_reader_lo(&self, cp: u64, tx: u64) -> u64 { + match self { + PrunableTable::ObjectsHistory => cp, + PrunableTable::Transactions => tx, + PrunableTable::Events => tx, + + PrunableTable::EventEmitPackage => tx, + PrunableTable::EventEmitModule => tx, + PrunableTable::EventSenders => tx, + PrunableTable::EventStructInstantiation => tx, + PrunableTable::EventStructModule => tx, + PrunableTable::EventStructName => tx, + PrunableTable::EventStructPackage => tx, + + PrunableTable::TxAffectedAddresses => tx, + PrunableTable::TxAffectedObjects => tx, + PrunableTable::TxCallsPkg => tx, + PrunableTable::TxCallsMod => tx, + PrunableTable::TxCallsFun => tx, + PrunableTable::TxChangedObjects => tx, + PrunableTable::TxDigests => tx, + PrunableTable::TxInputObjects => tx, + PrunableTable::TxKinds => tx, + PrunableTable::TxRecipients => tx, + PrunableTable::TxSenders => tx, + + PrunableTable::Checkpoints => cp, + PrunableTable::PrunerCpWatermark => cp, + } + } +} + +impl Pruner { + /// Instantiates a pruner with default retention and overrides. Pruner will finalize the + /// retention policies so there is a value for every prunable table. + pub fn new( + store: PgIndexerStore, + retention_config: RetentionConfig, + metrics: IndexerMetrics, + ) -> Result { + let partition_manager = PgPartitionManager::new(store.pool())?; + let epochs_to_keep = retention_config.epochs_to_keep; + let retention_policies = retention_config.retention_policies(); + + Ok(Self { + store, + epochs_to_keep, + partition_manager, + retention_policies, + metrics, + }) + } + + /// Given a table name, return the number of epochs to keep for that table. Return `None` if the + /// table is not prunable. + fn table_retention(&self, table_name: &str) -> Option { + if let Ok(variant) = table_name.parse::() { + self.retention_policies.get(&variant).copied() + } else { + None + } + } + + pub async fn start(&self, cancel: CancellationToken) -> IndexerResult<()> { + let store_clone = self.store.clone(); + let retention_policies = self.retention_policies.clone(); + let cancel_clone = cancel.clone(); + spawn_monitored_task!(update_watermarks_lower_bounds_task( + store_clone, + retention_policies, + cancel_clone + )); + + let mut last_seen_max_epoch = 0; + // The first epoch that has not yet been pruned. + let mut next_prune_epoch = None; + while !cancel.is_cancelled() { + let (min_epoch, max_epoch) = self.store.get_available_epoch_range().await?; + if max_epoch == last_seen_max_epoch { + tokio::time::sleep(Duration::from_secs(5)).await; + continue; + } + last_seen_max_epoch = max_epoch; + + // Not all partitioned tables are epoch-partitioned, so we need to filter them out. + let table_partitions: HashMap<_, _> = self + .partition_manager + .get_table_partitions() + .await? 
+ .into_iter() + .filter(|(table_name, _)| { + self.partition_manager + .get_strategy(table_name) + .is_epoch_partitioned() + }) + .collect(); + + for (table_name, (min_partition, max_partition)) in &table_partitions { + if let Some(epochs_to_keep) = self.table_retention(table_name) { + if last_seen_max_epoch != *max_partition { + error!( + "Epochs are out of sync for table {}: max_epoch={}, max_partition={}", + table_name, last_seen_max_epoch, max_partition + ); + } + + for epoch in + *min_partition..last_seen_max_epoch.saturating_sub(epochs_to_keep - 1) + { + if cancel.is_cancelled() { + info!("Pruner task cancelled."); + return Ok(()); + } + self.partition_manager + .drop_table_partition(table_name.clone(), epoch) + .await?; + info!( + "Batch dropped table partition {} epoch {}", + table_name, epoch + ); + } + } + } + + // TODO: (wlmyng) Once we have the watermarks table, we can iterate through each row + // returned from `watermarks`, look it up against `retention_policies`, and process them + // independently. This also means that pruning overrides will only apply for + // epoch-partitioned tables right now. + let prune_to_epoch = last_seen_max_epoch.saturating_sub(self.epochs_to_keep - 1); + let prune_start_epoch = next_prune_epoch.unwrap_or(min_epoch); + for epoch in prune_start_epoch..prune_to_epoch { + if cancel.is_cancelled() { + info!("Pruner task cancelled."); + return Ok(()); + } + info!("Pruning epoch {}", epoch); + if let Err(err) = self.store.prune_epoch(epoch).await { + error!("Failed to prune epoch {}: {}", epoch, err); + break; + }; + self.metrics.last_pruned_epoch.set(epoch as i64); + info!("Pruned epoch {}", epoch); + next_prune_epoch = Some(epoch + 1); + } + } + info!("Pruner task cancelled."); + Ok(()) + } +} + +/// Task to periodically query the `watermarks` table and update the lower bounds for all watermarks +/// if the entry exceeds epoch-level retention policy. +async fn update_watermarks_lower_bounds_task( + store: PgIndexerStore, + retention_policies: HashMap, + cancel: CancellationToken, +) -> IndexerResult<()> { + let mut interval = tokio::time::interval(Duration::from_secs(5)); + loop { + tokio::select! { + _ = cancel.cancelled() => { + info!("Pruner watermark lower bound update task cancelled."); + return Ok(()); + } + _ = interval.tick() => { + update_watermarks_lower_bounds(&store, &retention_policies, &cancel).await?; + } + } + } +} + +/// Fetches all entries from the `watermarks` table, and updates the `reader_lo` for each entry if +/// its epoch range exceeds the respective retention policy. 
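The `- 1` in `saturating_sub(epochs_to_keep - 1)` used by the partition-dropping loop above is what makes the retention window include the current epoch: with the newest epoch at 10 and a policy of 3, everything below 8 is prunable and epochs 8, 9 and 10 survive. A small sketch of just that arithmetic; the helper name is only for illustration.

/// First epoch that must be kept when retaining `epochs_to_keep` epochs up to `max_epoch`.
fn first_retained_epoch(max_epoch: u64, epochs_to_keep: u64) -> u64 {
    max_epoch.saturating_sub(epochs_to_keep - 1)
}

fn main() {
    // Keep 3 epochs out of 0..=10: epochs 8, 9 and 10 stay, 0..8 are prunable.
    assert_eq!(first_retained_epoch(10, 3), 8);
    let prunable: Vec<u64> = (0..first_retained_epoch(10, 3)).collect();
    assert_eq!(prunable, (0..8).collect::<Vec<u64>>());
    // Early in the chain's life the subtraction saturates and nothing is pruned.
    assert_eq!(first_retained_epoch(1, 3), 0);
}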
+async fn update_watermarks_lower_bounds( + store: &PgIndexerStore, + retention_policies: &HashMap, + cancel: &CancellationToken, +) -> IndexerResult<()> { + let (watermarks, _) = store.get_watermarks().await?; + let mut lower_bound_updates = vec![]; + + for watermark in watermarks.iter() { + if cancel.is_cancelled() { + info!("Pruner watermark lower bound update task cancelled."); + return Ok(()); + } + + let Some(prunable_table) = watermark.entity() else { + continue; + }; + + let Some(epochs_to_keep) = retention_policies.get(&prunable_table) else { + error!( + "No retention policy found for prunable table {}", + prunable_table + ); + continue; + }; + + if let Some(new_epoch_lo) = watermark.new_epoch_lo(*epochs_to_keep) { + lower_bound_updates.push((prunable_table, new_epoch_lo)); + }; + } + + if !lower_bound_updates.is_empty() { + store + .update_watermarks_lower_bound(lower_bound_updates) + .await?; + info!("Finished updating lower bounds for watermarks"); + } + + Ok(()) +} diff --git a/crates/sui-mvr-indexer/src/handlers/tx_processor.rs b/crates/sui-mvr-indexer/src/handlers/tx_processor.rs new file mode 100644 index 0000000000000..0a8051ee8eabb --- /dev/null +++ b/crates/sui-mvr-indexer/src/handlers/tx_processor.rs @@ -0,0 +1,223 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use std::collections::HashMap; + +use async_trait::async_trait; +use sui_json_rpc::get_balance_changes_from_effect; +use sui_json_rpc::get_object_changes; +use sui_json_rpc::ObjectProvider; +use sui_rest_api::CheckpointData; +use sui_types::base_types::ObjectID; +use sui_types::base_types::SequenceNumber; +use sui_types::digests::TransactionDigest; +use sui_types::effects::{TransactionEffects, TransactionEffectsAPI}; +use sui_types::object::Object; +use sui_types::transaction::{TransactionData, TransactionDataAPI}; + +use crate::errors::IndexerError; +use crate::metrics::IndexerMetrics; +use crate::types::{IndexedObjectChange, IndexerResult}; + +pub struct InMemObjectCache { + id_map: HashMap, + seq_map: HashMap<(ObjectID, SequenceNumber), Object>, +} + +impl InMemObjectCache { + pub fn new() -> Self { + Self { + id_map: HashMap::new(), + seq_map: HashMap::new(), + } + } + + pub fn insert_object(&mut self, obj: Object) { + self.id_map.insert(obj.id(), obj.clone()); + self.seq_map.insert((obj.id(), obj.version()), obj); + } + + pub fn get(&self, id: &ObjectID, version: Option<&SequenceNumber>) -> Option<&Object> { + if let Some(version) = version { + self.seq_map.get(&(*id, *version)) + } else { + self.id_map.get(id) + } + } +} + +impl Default for InMemObjectCache { + fn default() -> Self { + Self::new() + } +} + +/// Along with InMemObjectCache, TxChangesProcessor implements ObjectProvider +/// so it can be used in indexing write path to get object/balance changes. +/// Its lifetime is per checkpoint. 
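InMemObjectCache above keeps each inserted object under two keys: by id alone (whatever version was inserted most recently) and by (id, version) for exact lookups. The following simplified stand-in over plain integers and strings shows just those two lookup paths; the type and field names are invented for the example.

use std::collections::HashMap;

/// Simplified stand-in for the dual-index cache: ids and versions are plain integers.
#[derive(Default)]
struct MiniCache {
    by_id: HashMap<u32, String>,
    by_id_and_version: HashMap<(u32, u64), String>,
}

impl MiniCache {
    fn insert(&mut self, id: u32, version: u64, value: &str) {
        self.by_id.insert(id, value.to_string());
        self.by_id_and_version.insert((id, version), value.to_string());
    }

    /// With a version, only an exact match is returned; without one, the most
    /// recently inserted value for that id is returned.
    fn get(&self, id: u32, version: Option<u64>) -> Option<&String> {
        match version {
            Some(v) => self.by_id_and_version.get(&(id, v)),
            None => self.by_id.get(&id),
        }
    }
}

fn main() {
    let mut cache = MiniCache::default();
    cache.insert(7, 1, "v1");
    cache.insert(7, 2, "v2");
    assert_eq!(cache.get(7, Some(1)).map(String::as_str), Some("v1"));
    assert_eq!(cache.get(7, None).map(String::as_str), Some("v2"));
    assert_eq!(cache.get(7, Some(3)), None);
}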
+pub struct TxChangesProcessor { + object_cache: InMemObjectCache, + metrics: IndexerMetrics, +} + +impl TxChangesProcessor { + pub fn new(objects: &[&Object], metrics: IndexerMetrics) -> Self { + let mut object_cache = InMemObjectCache::new(); + for obj in objects { + object_cache.insert_object(<&Object>::clone(obj).clone()); + } + Self { + object_cache, + metrics, + } + } + + pub(crate) async fn get_changes( + &self, + tx: &TransactionData, + effects: &TransactionEffects, + tx_digest: &TransactionDigest, + ) -> IndexerResult<( + Vec, + Vec, + )> { + let _timer = self + .metrics + .indexing_tx_object_changes_latency + .start_timer(); + let object_change: Vec<_> = get_object_changes( + self, + effects, + tx.sender(), + effects.modified_at_versions(), + effects.all_changed_objects(), + effects.all_removed_objects(), + ) + .await? + .into_iter() + .map(IndexedObjectChange::from) + .collect(); + let balance_change = get_balance_changes_from_effect( + self, + effects, + tx.input_objects().unwrap_or_else(|e| { + panic!( + "Checkpointed tx {:?} has inavlid input objects: {e}", + tx_digest, + ) + }), + None, + ) + .await?; + Ok((balance_change, object_change)) + } +} + +#[async_trait] +impl ObjectProvider for TxChangesProcessor { + type Error = IndexerError; + + async fn get_object( + &self, + id: &ObjectID, + version: &SequenceNumber, + ) -> Result { + let object = self + .object_cache + .get(id, Some(version)) + .as_ref() + .map(|o| <&Object>::clone(o).clone()); + if let Some(o) = object { + self.metrics.indexing_get_object_in_mem_hit.inc(); + return Ok(o); + } + + panic!( + "Object {} is not found in TxChangesProcessor as an ObjectProvider (fn get_object)", + id + ); + } + + async fn find_object_lt_or_eq_version( + &self, + id: &ObjectID, + version: &SequenceNumber, + ) -> Result, Self::Error> { + // First look up the exact version in object_cache. + let object = self + .object_cache + .get(id, Some(version)) + .as_ref() + .map(|o| <&Object>::clone(o).clone()); + if let Some(o) = object { + self.metrics.indexing_get_object_in_mem_hit.inc(); + return Ok(Some(o)); + } + + // Second look up the latest version in object_cache. This may be + // called when the object is deleted hence the version at deletion + // is given. + let object = self + .object_cache + .get(id, None) + .as_ref() + .map(|o| <&Object>::clone(o).clone()); + if let Some(o) = object { + if o.version() > *version { + panic!( + "Found a higher version {} for object {}, expected lt_or_eq {}", + o.version(), + id, + *version + ); + } + if o.version() <= *version { + self.metrics.indexing_get_object_in_mem_hit.inc(); + return Ok(Some(o)); + } + } + + panic!("Object {} is not found in TxChangesProcessor as an ObjectProvider (fn find_object_lt_or_eq_version)", id); + } +} + +// This is a struct that is used to extract SuiSystemState and its dynamic children +// for end-of-epoch indexing. 
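The `find_object_lt_or_eq_version` contract above, returning the newest version that is not newer than the one asked for, can be shown in isolation with a BTreeMap keyed by version; the map and helper below are illustrative only and not the indexer's types.

use std::collections::BTreeMap;

/// Returns the value stored at the highest version <= `version`, if any.
fn find_lt_or_eq_version<'a>(
    versions: &'a BTreeMap<u64, &'a str>,
    version: u64,
) -> Option<(u64, &'a str)> {
    versions
        .range(..=version)
        .next_back()
        .map(|(v, obj)| (*v, *obj))
}

fn main() {
    let versions = BTreeMap::from([(3, "v3"), (5, "v5"), (9, "v9")]);
    // Exact hit.
    assert_eq!(find_lt_or_eq_version(&versions, 5), Some((5, "v5")));
    // Between stored versions: fall back to the closest older one.
    assert_eq!(find_lt_or_eq_version(&versions, 7), Some((5, "v5")));
    // Nothing at or below the requested version.
    assert_eq!(find_lt_or_eq_version(&versions, 2), None);
}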
+pub(crate) struct EpochEndIndexingObjectStore<'a> { + objects: Vec<&'a Object>, +} + +impl<'a> EpochEndIndexingObjectStore<'a> { + pub fn new(data: &'a CheckpointData) -> Self { + Self { + objects: data.latest_live_output_objects(), + } + } +} + +impl<'a> sui_types::storage::ObjectStore for EpochEndIndexingObjectStore<'a> { + fn get_object( + &self, + object_id: &ObjectID, + ) -> Result, sui_types::storage::error::Error> { + Ok(self + .objects + .iter() + .find(|o| o.id() == *object_id) + .cloned() + .cloned()) + } + + fn get_object_by_key( + &self, + object_id: &ObjectID, + version: sui_types::base_types::VersionNumber, + ) -> Result, sui_types::storage::error::Error> { + Ok(self + .objects + .iter() + .find(|o| o.id() == *object_id && o.version() == version) + .cloned() + .cloned()) + } +} diff --git a/crates/sui-mvr-indexer/src/indexer.rs b/crates/sui-mvr-indexer/src/indexer.rs new file mode 100644 index 0000000000000..d1819a90a7416 --- /dev/null +++ b/crates/sui-mvr-indexer/src/indexer.rs @@ -0,0 +1,214 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use std::collections::HashMap; +use std::env; + +use anyhow::Result; +use prometheus::Registry; +use tokio::sync::{oneshot, watch}; +use tokio_util::sync::CancellationToken; +use tracing::info; + +use async_trait::async_trait; +use futures::future::try_join_all; +use mysten_metrics::spawn_monitored_task; +use sui_data_ingestion_core::{ + DataIngestionMetrics, IndexerExecutor, ProgressStore, ReaderOptions, WorkerPool, +}; +use sui_synthetic_ingestion::IndexerProgress; +use sui_types::messages_checkpoint::CheckpointSequenceNumber; + +use crate::build_json_rpc_server; +use crate::config::{IngestionConfig, JsonRpcConfig, RetentionConfig, SnapshotLagConfig}; +use crate::database::ConnectionPool; +use crate::errors::IndexerError; +use crate::handlers::checkpoint_handler::new_handlers; +use crate::handlers::objects_snapshot_handler::start_objects_snapshot_handler; +use crate::handlers::pruner::Pruner; +use crate::indexer_reader::IndexerReader; +use crate::metrics::IndexerMetrics; +use crate::store::{IndexerStore, PgIndexerStore}; + +pub struct Indexer; + +impl Indexer { + pub async fn start_writer( + config: IngestionConfig, + store: PgIndexerStore, + metrics: IndexerMetrics, + snapshot_config: SnapshotLagConfig, + retention_config: Option, + cancel: CancellationToken, + committed_checkpoints_tx: Option>>, + ) -> Result<(), IndexerError> { + info!( + "Sui Indexer Writer (version {:?}) started...", + env!("CARGO_PKG_VERSION") + ); + info!("Sui Indexer Writer config: {config:?}",); + + let extra_reader_options = ReaderOptions { + batch_size: config.checkpoint_download_queue_size, + timeout_secs: config.checkpoint_download_timeout, + data_limit: config.checkpoint_download_queue_size_bytes, + gc_checkpoint_files: config.gc_checkpoint_files, + ..Default::default() + }; + + // Start objects snapshot processor, which is a separate pipeline with its ingestion pipeline. + let (object_snapshot_worker, object_snapshot_watermark) = start_objects_snapshot_handler( + store.clone(), + metrics.clone(), + snapshot_config, + cancel.clone(), + config.start_checkpoint, + config.end_checkpoint, + ) + .await?; + + if let Some(retention_config) = retention_config { + let pruner = Pruner::new(store.clone(), retention_config, metrics.clone())?; + let cancel_clone = cancel.clone(); + spawn_monitored_task!(pruner.start(cancel_clone)); + } + + // If we already have chain identifier indexed (i.e. 
the first checkpoint has been indexed), + // then we persist protocol configs for protocol versions not yet in the db. + // Otherwise, we would do the persisting in `commit_checkpoint` while the first cp is + // being indexed. + if let Some(chain_id) = IndexerStore::get_chain_identifier(&store).await? { + store + .persist_protocol_configs_and_feature_flags(chain_id) + .await?; + } + + let mut exit_senders = vec![]; + let mut executors = vec![]; + + let (worker, primary_watermark) = new_handlers( + store, + metrics, + cancel.clone(), + committed_checkpoints_tx, + config.start_checkpoint, + config.end_checkpoint, + ) + .await?; + // Ingestion task watermarks are snapshotted once on indexer startup based on the + // corresponding watermark table before being handed off to the ingestion task. + let progress_store = ShimIndexerProgressStore::new(vec![ + ("primary".to_string(), primary_watermark), + ("object_snapshot".to_string(), object_snapshot_watermark), + ]); + let mut executor = IndexerExecutor::new( + progress_store.clone(), + 2, + DataIngestionMetrics::new(&Registry::new()), + ); + + let worker_pool = WorkerPool::new( + worker, + "primary".to_string(), + config.checkpoint_download_queue_size, + ); + executor.register(worker_pool).await?; + let (exit_sender, exit_receiver) = oneshot::channel(); + executors.push((executor, exit_receiver)); + exit_senders.push(exit_sender); + + // in a non-colocated setup, start a separate indexer for processing object snapshots + if config.sources.data_ingestion_path.is_none() { + let executor = IndexerExecutor::new( + progress_store, + 1, + DataIngestionMetrics::new(&Registry::new()), + ); + let (exit_sender, exit_receiver) = oneshot::channel(); + exit_senders.push(exit_sender); + executors.push((executor, exit_receiver)); + } + + let worker_pool = WorkerPool::new( + object_snapshot_worker, + "object_snapshot".to_string(), + config.checkpoint_download_queue_size, + ); + let executor = executors.last_mut().expect("executors is not empty"); + executor.0.register(worker_pool).await?; + + // Spawn a task that links the cancellation token to the exit sender + spawn_monitored_task!(async move { + cancel.cancelled().await; + for exit_sender in exit_senders { + let _ = exit_sender.send(()); + } + }); + + info!("Starting data ingestion executor..."); + let futures = executors.into_iter().map(|(executor, exit_receiver)| { + executor.run( + config + .sources + .data_ingestion_path + .clone() + .unwrap_or(tempfile::tempdir().unwrap().into_path()), + config + .sources + .remote_store_url + .as_ref() + .map(|url| url.as_str().to_owned()), + vec![], + extra_reader_options.clone(), + exit_receiver, + ) + }); + try_join_all(futures).await?; + Ok(()) + } + + pub async fn start_reader( + config: &JsonRpcConfig, + registry: &Registry, + pool: ConnectionPool, + cancel: CancellationToken, + ) -> Result<(), IndexerError> { + info!( + "Sui Indexer Reader (version {:?}) started...", + env!("CARGO_PKG_VERSION") + ); + let indexer_reader = IndexerReader::new(pool); + let handle = build_json_rpc_server(registry, indexer_reader, config, cancel) + .await + .expect("Json rpc server should not run into errors upon start."); + tokio::spawn(async move { handle.stopped().await }) + .await + .expect("Rpc server task failed"); + + Ok(()) + } +} + +#[derive(Clone)] +struct ShimIndexerProgressStore { + watermarks: HashMap, +} + +impl ShimIndexerProgressStore { + fn new(watermarks: Vec<(String, CheckpointSequenceNumber)>) -> Self { + Self { + watermarks: watermarks.into_iter().collect(), + } + 
} +} + +#[async_trait] +impl ProgressStore for ShimIndexerProgressStore { + async fn load(&mut self, task_name: String) -> Result { + Ok(*self.watermarks.get(&task_name).expect("missing watermark")) + } + + async fn save(&mut self, _: String, _: CheckpointSequenceNumber) -> Result<()> { + Ok(()) + } +} diff --git a/crates/sui-mvr-indexer/src/indexer_reader.rs b/crates/sui-mvr-indexer/src/indexer_reader.rs new file mode 100644 index 0000000000000..d0eed2ee4a461 --- /dev/null +++ b/crates/sui-mvr-indexer/src/indexer_reader.rs @@ -0,0 +1,1511 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use anyhow::anyhow; +use anyhow::Result; +use diesel::{ + dsl::sql, sql_types::Bool, ExpressionMethods, JoinOnDsl, NullableExpressionMethods, + OptionalExtension, QueryDsl, SelectableHelper, TextExpressionMethods, +}; +use itertools::Itertools; +use std::sync::Arc; +use sui_types::dynamic_field::visitor as DFV; +use sui_types::object::bounded_visitor::BoundedVisitor; +use tap::{Pipe, TapFallible}; +use tracing::{debug, error, warn}; + +use fastcrypto::encoding::Encoding; +use fastcrypto::encoding::Hex; +use move_core_types::annotated_value::MoveStructLayout; +use move_core_types::language_storage::{StructTag, TypeTag}; +use sui_json_rpc_types::DisplayFieldsResponse; +use sui_json_rpc_types::{Balance, Coin as SuiCoin, SuiCoinMetadata, SuiMoveValue}; +use sui_json_rpc_types::{ + CheckpointId, EpochInfo, EventFilter, SuiEvent, SuiObjectDataFilter, + SuiTransactionBlockResponse, TransactionFilter, +}; +use sui_package_resolver::Package; +use sui_package_resolver::PackageStore; +use sui_package_resolver::{PackageStoreWithLruCache, Resolver}; +use sui_types::effects::TransactionEvents; +use sui_types::{balance::Supply, coin::TreasuryCap, dynamic_field::DynamicFieldName}; +use sui_types::{ + base_types::{ObjectID, SuiAddress, VersionNumber}, + committee::EpochId, + digests::TransactionDigest, + dynamic_field::DynamicFieldInfo, + object::{Object, ObjectRead}, + sui_system_state::{sui_system_state_summary::SuiSystemStateSummary, SuiSystemStateTrait}, +}; +use sui_types::{coin::CoinMetadata, event::EventID}; + +use crate::database::ConnectionPool; +use crate::db::ConnectionPoolConfig; +use crate::models::transactions::{stored_events_to_events, StoredTransactionEvents}; +use crate::schema::pruner_cp_watermark; +use crate::schema::tx_digests; +use crate::{ + errors::IndexerError, + models::{ + checkpoints::StoredCheckpoint, + display::StoredDisplay, + epoch::StoredEpochInfo, + events::StoredEvent, + objects::{CoinBalance, StoredObject}, + transactions::{tx_events_to_sui_tx_events, StoredTransaction}, + tx_indices::TxSequenceNumber, + }, + schema::{checkpoints, display, epochs, events, objects, transactions}, + store::package_resolver::IndexerStorePackageResolver, + types::{IndexerResult, OwnerType}, +}; + +pub const TX_SEQUENCE_NUMBER_STR: &str = "tx_sequence_number"; +pub const TRANSACTION_DIGEST_STR: &str = "transaction_digest"; +pub const EVENT_SEQUENCE_NUMBER_STR: &str = "event_sequence_number"; + +#[derive(Clone)] +pub struct IndexerReader { + pool: ConnectionPool, + package_resolver: PackageResolver, +} + +pub type PackageResolver = Arc>>; + +// Impl for common initialization and utilities +impl IndexerReader { + pub fn new(pool: ConnectionPool) -> Self { + let indexer_store_pkg_resolver = IndexerStorePackageResolver::new(pool.clone()); + let package_cache = PackageStoreWithLruCache::new(indexer_store_pkg_resolver); + let package_resolver = Arc::new(Resolver::new(package_cache)); 
+ Self { + pool, + package_resolver, + } + } + + pub async fn new_with_config>( + db_url: T, + config: ConnectionPoolConfig, + ) -> Result { + let db_url = db_url.into(); + + let pool = ConnectionPool::new(db_url.parse()?, config).await?; + + let indexer_store_pkg_resolver = IndexerStorePackageResolver::new(pool.clone()); + let package_cache = PackageStoreWithLruCache::new(indexer_store_pkg_resolver); + let package_resolver = Arc::new(Resolver::new(package_cache)); + Ok(Self { + pool, + package_resolver, + }) + } + + pub fn pool(&self) -> &ConnectionPool { + &self.pool + } +} + +// Impl for reading data from the DB +impl IndexerReader { + async fn get_object_from_db( + &self, + object_id: &ObjectID, + version: Option, + ) -> Result, IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + let mut query = objects::table + .filter(objects::object_id.eq(object_id.to_vec())) + .into_boxed(); + if let Some(version) = version { + query = query.filter(objects::object_version.eq(version.value() as i64)) + } + + query + .first::(&mut connection) + .await + .optional() + .map_err(Into::into) + } + + pub async fn get_object( + &self, + object_id: &ObjectID, + version: Option, + ) -> Result, IndexerError> { + let Some(stored_package) = self.get_object_from_db(object_id, version).await? else { + return Ok(None); + }; + + let object = stored_package.try_into()?; + Ok(Some(object)) + } + + pub async fn get_object_read(&self, object_id: ObjectID) -> Result { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + let stored_object = objects::table + .filter(objects::object_id.eq(object_id.to_vec())) + .first::(&mut connection) + .await + .optional()?; + + if let Some(object) = stored_object { + object + .try_into_object_read(self.package_resolver.clone()) + .await + } else { + Ok(ObjectRead::NotExists(object_id)) + } + } + + pub async fn get_package(&self, package_id: ObjectID) -> Result { + let store = self.package_resolver.package_store(); + let pkg = store + .fetch(package_id.into()) + .await + .map_err(|e| { + IndexerError::PostgresReadError(format!( + "Fail to fetch package from package store with error {:?}", + e + )) + })? 
+ .as_ref() + .clone(); + Ok(pkg) + } + + async fn get_epoch_info_from_db( + &self, + epoch: Option, + ) -> Result, IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + let stored_epoch = epochs::table + .into_boxed() + .pipe(|query| { + if let Some(epoch) = epoch { + query.filter(epochs::epoch.eq(epoch as i64)) + } else { + query.order_by(epochs::epoch.desc()) + } + }) + .first::(&mut connection) + .await + .optional()?; + + Ok(stored_epoch) + } + + pub async fn get_latest_epoch_info_from_db(&self) -> Result { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + let stored_epoch = epochs::table + .order_by(epochs::epoch.desc()) + .first::(&mut connection) + .await?; + + Ok(stored_epoch) + } + + pub async fn get_epoch_info( + &self, + epoch: Option, + ) -> Result, IndexerError> { + let stored_epoch = self.get_epoch_info_from_db(epoch).await?; + + let stored_epoch = match stored_epoch { + Some(stored_epoch) => stored_epoch, + None => return Ok(None), + }; + + let epoch_info = EpochInfo::try_from(stored_epoch)?; + Ok(Some(epoch_info)) + } + + async fn get_epochs_from_db( + &self, + cursor: Option, + limit: usize, + descending_order: bool, + ) -> Result, IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + let mut query = epochs::table.into_boxed(); + + if let Some(cursor) = cursor { + if descending_order { + query = query.filter(epochs::epoch.lt(cursor as i64)); + } else { + query = query.filter(epochs::epoch.gt(cursor as i64)); + } + } + + if descending_order { + query = query.order_by(epochs::epoch.desc()); + } else { + query = query.order_by(epochs::epoch.asc()); + } + + query + .limit(limit as i64) + .load(&mut connection) + .await + .map_err(Into::into) + } + + pub async fn get_epochs( + &self, + cursor: Option, + limit: usize, + descending_order: bool, + ) -> Result, IndexerError> { + self.get_epochs_from_db(cursor, limit, descending_order) + .await? + .into_iter() + .map(EpochInfo::try_from) + .collect::, _>>() + .map_err(Into::into) + } + + pub async fn get_latest_sui_system_state(&self) -> Result { + let object_store = ConnectionAsObjectStore::from_pool(&self.pool) + .await + .map_err(|e| IndexerError::PgPoolConnectionError(e.to_string()))?; + + let system_state = tokio::task::spawn_blocking(move || { + sui_types::sui_system_state::get_sui_system_state(&object_store) + }) + .await + .unwrap()? + .into_sui_system_state_summary(); + + Ok(system_state) + } + + pub async fn get_validator_from_table( + &self, + table_id: ObjectID, + pool_id: sui_types::id::ID, + ) -> Result< + sui_types::sui_system_state::sui_system_state_summary::SuiValidatorSummary, + IndexerError, + > { + let object_store = ConnectionAsObjectStore::from_pool(&self.pool) + .await + .map_err(|e| IndexerError::PgPoolConnectionError(e.to_string()))?; + + let validator = tokio::task::spawn_blocking(move || { + sui_types::sui_system_state::get_validator_from_table(&object_store, table_id, &pool_id) + }) + .await + .unwrap()?; + Ok(validator) + } + + /// Retrieve the system state data for the given epoch. If no epoch is given, + /// it will retrieve the latest epoch's data and return the system state. + /// System state of the an epoch is written at the end of the epoch, so system state + /// of the current epoch is empty until the epoch ends. You can call + /// `get_latest_sui_system_state` for current epoch instead. 
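Stepping back from the system-state helpers for a moment, the epoch and checkpoint listing queries in this file all share one cursor/limit/descending shape: filter strictly past the cursor, order by the key, then take `limit` rows. The in-memory analogue below mirrors that keyset pagination over a slice of ids; it is a sketch of the pattern, not the database code itself.

/// In-memory analogue of the keyset pagination used by queries like `get_epochs_from_db`:
/// filter past the cursor, order by the key, then take `limit`.
fn paginate(ids: &[u64], cursor: Option<u64>, limit: usize, descending: bool) -> Vec<u64> {
    let mut page: Vec<u64> = ids
        .iter()
        .copied()
        .filter(|&id| match cursor {
            Some(c) if descending => id < c,
            Some(c) => id > c,
            None => true,
        })
        .collect();
    page.sort_unstable();
    if descending {
        page.reverse();
    }
    page.truncate(limit);
    page
}

fn main() {
    let epochs = [0, 1, 2, 3, 4, 5];
    // Ascending: the cursor itself is excluded, exactly like `epoch > cursor` in SQL.
    assert_eq!(paginate(&epochs, Some(2), 2, false), vec![3, 4]);
    // Descending: `epoch < cursor`, newest first.
    assert_eq!(paginate(&epochs, Some(4), 3, true), vec![3, 2, 1]);
    // No cursor: start from one end.
    assert_eq!(paginate(&epochs, None, 2, true), vec![5, 4]);
}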
+ pub async fn get_epoch_sui_system_state( + &self, + epoch: Option, + ) -> Result { + let stored_epoch = self.get_epoch_info_from_db(epoch).await?; + let stored_epoch = match stored_epoch { + Some(stored_epoch) => stored_epoch, + None => return Err(IndexerError::InvalidArgumentError("Invalid epoch".into())), + }; + stored_epoch.get_json_system_state_summary() + } + + async fn get_checkpoint_from_db( + &self, + checkpoint_id: CheckpointId, + ) -> Result, IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + let stored_checkpoint = checkpoints::table + .into_boxed() + .pipe(|query| match checkpoint_id { + CheckpointId::SequenceNumber(seq) => { + query.filter(checkpoints::sequence_number.eq(seq as i64)) + } + CheckpointId::Digest(digest) => { + query.filter(checkpoints::checkpoint_digest.eq(digest.into_inner().to_vec())) + } + }) + .first::(&mut connection) + .await + .optional()?; + + Ok(stored_checkpoint) + } + + async fn get_latest_checkpoint_from_db(&self) -> Result { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + checkpoints::table + .order_by(checkpoints::sequence_number.desc()) + .first::(&mut connection) + .await + .map_err(Into::into) + } + + pub async fn get_checkpoint( + &self, + checkpoint_id: CheckpointId, + ) -> Result, IndexerError> { + let stored_checkpoint = match self.get_checkpoint_from_db(checkpoint_id).await? { + Some(stored_checkpoint) => stored_checkpoint, + None => return Ok(None), + }; + + let checkpoint = sui_json_rpc_types::Checkpoint::try_from(stored_checkpoint)?; + Ok(Some(checkpoint)) + } + + pub async fn get_latest_checkpoint( + &self, + ) -> Result { + let stored_checkpoint = self.get_latest_checkpoint_from_db().await?; + + sui_json_rpc_types::Checkpoint::try_from(stored_checkpoint) + } + + async fn get_checkpoints_from_db( + &self, + cursor: Option, + limit: usize, + descending_order: bool, + ) -> Result, IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + let mut query = checkpoints::table.into_boxed(); + if let Some(cursor) = cursor { + if descending_order { + query = query.filter(checkpoints::sequence_number.lt(cursor as i64)); + } else { + query = query.filter(checkpoints::sequence_number.gt(cursor as i64)); + } + } + if descending_order { + query = query.order_by(checkpoints::sequence_number.desc()); + } else { + query = query.order_by(checkpoints::sequence_number.asc()); + } + + query + .limit(limit as i64) + .load::(&mut connection) + .await + .map_err(Into::into) + } + + pub async fn get_checkpoints( + &self, + cursor: Option, + limit: usize, + descending_order: bool, + ) -> Result, IndexerError> { + self.get_checkpoints_from_db(cursor, limit, descending_order) + .await? 
+ .into_iter() + .map(sui_json_rpc_types::Checkpoint::try_from) + .collect() + } + + async fn multi_get_transactions( + &self, + digests: &[TransactionDigest], + ) -> Result, IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + let digests = digests + .iter() + .map(|digest| digest.inner().to_vec()) + .collect::>(); + + transactions::table + .inner_join( + tx_digests::table + .on(transactions::tx_sequence_number.eq(tx_digests::tx_sequence_number)), + ) + .filter(tx_digests::tx_digest.eq_any(digests)) + .select(StoredTransaction::as_select()) + .load::(&mut connection) + .await + .map_err(Into::into) + } + + async fn stored_transaction_to_transaction_block( + &self, + stored_txes: Vec, + options: sui_json_rpc_types::SuiTransactionBlockResponseOptions, + ) -> IndexerResult> { + let mut tx_block_responses_futures = vec![]; + for stored_tx in stored_txes { + let package_resolver_clone = self.package_resolver(); + let options_clone = options.clone(); + tx_block_responses_futures.push(tokio::task::spawn( + stored_tx + .try_into_sui_transaction_block_response(options_clone, package_resolver_clone), + )); + } + + let tx_blocks = futures::future::join_all(tx_block_responses_futures) + .await + .into_iter() + .collect::, _>>() + .tap_err(|e| error!("Failed to join all tx block futures: {}", e))? + .into_iter() + .collect::, _>>() + .tap_err(|e| error!("Failed to collect tx block futures: {}", e))?; + Ok(tx_blocks) + } + + async fn multi_get_transactions_with_sequence_numbers( + &self, + tx_sequence_numbers: Vec, + // Some(true) for desc, Some(false) for asc, None for undefined order + is_descending: Option, + ) -> Result, IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + let mut query = transactions::table + .filter(transactions::tx_sequence_number.eq_any(tx_sequence_numbers)) + .into_boxed(); + match is_descending { + Some(true) => { + query = query.order(transactions::dsl::tx_sequence_number.desc()); + } + Some(false) => { + query = query.order(transactions::dsl::tx_sequence_number.asc()); + } + None => (), + } + + query + .load::(&mut connection) + .await + .map_err(Into::into) + } + + pub async fn get_owned_objects( + &self, + address: SuiAddress, + filter: Option, + cursor: Option, + limit: usize, + ) -> Result, IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + let mut query = objects::table + .filter(objects::owner_type.eq(OwnerType::Address as i16)) + .filter(objects::owner_id.eq(address.to_vec())) + .order(objects::object_id.asc()) + .limit(limit as i64) + .into_boxed(); + if let Some(filter) = filter { + match filter { + SuiObjectDataFilter::StructType(struct_tag) => { + let object_type = struct_tag.to_canonical_string(/* with_prefix */ true); + query = query.filter(objects::object_type.like(format!("{}%", object_type))); + } + SuiObjectDataFilter::MatchAny(filters) => { + let mut condition = "(".to_string(); + for (i, filter) in filters.iter().enumerate() { + if let SuiObjectDataFilter::StructType(struct_tag) = filter { + let object_type = + struct_tag.to_canonical_string(/* with_prefix */ true); + if i == 0 { + condition += + format!("objects.object_type LIKE '{}%'", object_type).as_str(); + } else { + condition += + format!(" OR objects.object_type LIKE '{}%'", object_type) + .as_str(); + } + } else { + return Err(IndexerError::InvalidArgumentError( + "Invalid filter type. 
Only struct, MatchAny and MatchNone of struct filters are supported.".into(), + )); + } + } + condition += ")"; + query = query.filter(sql::(&condition)); + } + SuiObjectDataFilter::MatchNone(filters) => { + for filter in filters { + if let SuiObjectDataFilter::StructType(struct_tag) = filter { + let object_type = + struct_tag.to_canonical_string(/* with_prefix */ true); + query = query + .filter(objects::object_type.not_like(format!("{}%", object_type))); + } else { + return Err(IndexerError::InvalidArgumentError( + "Invalid filter type. Only struct, MatchAny and MatchNone of struct filters are supported.".into(), + )); + } + } + } + _ => { + return Err(IndexerError::InvalidArgumentError( + "Invalid filter type. Only struct, MatchAny and MatchNone of struct filters are supported.".into(), + )); + } + } + } + + if let Some(object_cursor) = cursor { + query = query.filter(objects::object_id.gt(object_cursor.to_vec())); + } + + query + .load::(&mut connection) + .await + .map_err(|e| IndexerError::PostgresReadError(e.to_string())) + } + + pub async fn multi_get_objects( + &self, + object_ids: Vec, + ) -> Result, IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + let object_ids = object_ids.into_iter().map(|id| id.to_vec()).collect_vec(); + + objects::table + .filter(objects::object_id.eq_any(object_ids)) + .load::(&mut connection) + .await + .map_err(Into::into) + } + + async fn query_transaction_blocks_by_checkpoint( + &self, + checkpoint_seq: u64, + options: sui_json_rpc_types::SuiTransactionBlockResponseOptions, + cursor_tx_seq: Option, + limit: usize, + is_descending: bool, + ) -> IndexerResult> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + let tx_range: (i64, i64) = pruner_cp_watermark::dsl::pruner_cp_watermark + .select(( + pruner_cp_watermark::min_tx_sequence_number, + pruner_cp_watermark::max_tx_sequence_number, + )) + .filter(pruner_cp_watermark::checkpoint_sequence_number.eq(checkpoint_seq as i64)) + .first::<(i64, i64)>(&mut connection) + .await?; + + let mut query = transactions::table + .filter(transactions::tx_sequence_number.between(tx_range.0, tx_range.1)) + .into_boxed(); + + if let Some(cursor_tx_seq) = cursor_tx_seq { + if is_descending { + query = query.filter(transactions::tx_sequence_number.lt(cursor_tx_seq)); + } else { + query = query.filter(transactions::tx_sequence_number.gt(cursor_tx_seq)); + } + } + if is_descending { + query = query.order(transactions::tx_sequence_number.desc()); + } else { + query = query.order(transactions::tx_sequence_number.asc()); + } + let stored_txes = query + .limit(limit as i64) + .load::(&mut connection) + .await?; + self.stored_transaction_to_transaction_block(stored_txes, options) + .await + } + + pub async fn query_transaction_blocks( + &self, + filter: Option, + options: sui_json_rpc_types::SuiTransactionBlockResponseOptions, + cursor: Option, + limit: usize, + is_descending: bool, + ) -> IndexerResult> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + let cursor_tx_seq = if let Some(cursor) = cursor { + let tx_seq = tx_digests::table + .select(tx_digests::tx_sequence_number) + .filter(tx_digests::tx_digest.eq(cursor.into_inner().to_vec())) + .first::(&mut connection) + .await?; + Some(tx_seq) + } else { + None + }; + let cursor_clause = if let Some(cursor_tx_seq) = cursor_tx_seq { + if is_descending { + format!("AND {TX_SEQUENCE_NUMBER_STR} < {}", cursor_tx_seq) + } else { + format!("AND 
{TX_SEQUENCE_NUMBER_STR} > {}", cursor_tx_seq) + } + } else { + "".to_string() + }; + let order_str = if is_descending { "DESC" } else { "ASC" }; + let (table_name, main_where_clause) = match filter { + // Processed above + Some(TransactionFilter::Checkpoint(seq)) => { + return self + .query_transaction_blocks_by_checkpoint( + seq, + options, + cursor_tx_seq, + limit, + is_descending, + ) + .await + } + // FIXME: sanitize module & function + Some(TransactionFilter::MoveFunction { + package, + module, + function, + }) => { + let package = Hex::encode(package.to_vec()); + match (module, function) { + (Some(module), Some(function)) => ( + "tx_calls_fun".to_owned(), + format!( + "package = '\\x{package}'::bytea AND module = '{module}' AND func = '{function}'", + ), + ), + (Some(module), None) => ( + "tx_calls_mod".to_owned(), + format!( + "package = '\\x{package}'::bytea AND module = '{module}'", + ), + ), + (None, Some(_)) => { + return Err(IndexerError::InvalidArgumentError( + "Function cannot be present without Module.".into(), + )); + } + (None, None) => ( + "tx_calls_pkg".to_owned(), + format!("package = '\\x{package}'::bytea"), + ), + } + } + Some(TransactionFilter::AffectedObject(object_id)) => { + let object_id = Hex::encode(object_id.to_vec()); + ( + "tx_affected_objects".to_owned(), + format!("affected = '\\x{object_id}'::bytea"), + ) + } + Some(TransactionFilter::FromAddress(from_address)) => { + let from_address = Hex::encode(from_address.to_vec()); + ( + "tx_affected_addresses".to_owned(), + format!("sender = '\\x{from_address}'::bytea AND affected = '\\x{from_address}'::bytea"), + ) + } + Some(TransactionFilter::FromAndToAddress { from, to }) => { + let from_address = Hex::encode(from.to_vec()); + let to_address = Hex::encode(to.to_vec()); + ( + "tx_affected_addresses".to_owned(), + format!("sender = '\\x{from_address}'::bytea AND affected = '\\x{to_address}'::bytea"), + ) + } + Some(TransactionFilter::FromOrToAddress { addr }) => { + let address = Hex::encode(addr.to_vec()); + ( + "tx_affected_addresses".to_owned(), + format!("affected = '\\x{address}'::bytea"), + ) + } + Some( + TransactionFilter::TransactionKind(_) | TransactionFilter::TransactionKindIn(_), + ) => { + return Err(IndexerError::NotSupportedError( + "TransactionKind filter is not supported.".into(), + )); + } + Some(TransactionFilter::InputObject(_) | TransactionFilter::ChangedObject(_)) => { + return Err(IndexerError::NotSupportedError( + "InputObject and OutputObject filters are not supported, please use AffectedObject instead.".into() + )) + } + Some(TransactionFilter::ToAddress(_)) => { + return Err(IndexerError::NotSupportedError( + "ToAddress filter is not supported, please use FromOrToAddress instead.".into() + )) + } + None => { + // apply no filter + ("transactions".to_owned(), "1 = 1".into()) + } + }; + + let query = format!( + "SELECT {TX_SEQUENCE_NUMBER_STR} FROM {} WHERE {} {} ORDER BY {TX_SEQUENCE_NUMBER_STR} {} LIMIT {}", + table_name, + main_where_clause, + cursor_clause, + order_str, + limit, + ); + + debug!("query transaction blocks: {}", query); + let tx_sequence_numbers = diesel::sql_query(query.clone()) + .load::(&mut connection) + .await? 
+ .into_iter() + .map(|tsn| tsn.tx_sequence_number) + .collect::>(); + self.multi_get_transaction_block_response_by_sequence_numbers( + tx_sequence_numbers, + options, + Some(is_descending), + ) + .await + } + + async fn multi_get_transaction_block_response_in_blocking_task_impl( + &self, + digests: &[TransactionDigest], + options: sui_json_rpc_types::SuiTransactionBlockResponseOptions, + ) -> Result, IndexerError> { + let stored_txes = self.multi_get_transactions(digests).await?; + self.stored_transaction_to_transaction_block(stored_txes, options) + .await + } + + async fn multi_get_transaction_block_response_by_sequence_numbers( + &self, + tx_sequence_numbers: Vec, + options: sui_json_rpc_types::SuiTransactionBlockResponseOptions, + // Some(true) for desc, Some(false) for asc, None for undefined order + is_descending: Option, + ) -> Result, IndexerError> { + let stored_txes: Vec = self + .multi_get_transactions_with_sequence_numbers(tx_sequence_numbers, is_descending) + .await?; + self.stored_transaction_to_transaction_block(stored_txes, options) + .await + } + + pub async fn multi_get_transaction_block_response_in_blocking_task( + &self, + digests: Vec, + options: sui_json_rpc_types::SuiTransactionBlockResponseOptions, + ) -> Result, IndexerError> { + self.multi_get_transaction_block_response_in_blocking_task_impl(&digests, options) + .await + } + + pub async fn get_transaction_events( + &self, + digest: TransactionDigest, + ) -> Result, IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + // Use the tx_digests lookup table for the corresponding tx_sequence_number, and then fetch + // event-relevant data from the entry on the transactions table. + let (timestamp_ms, serialized_events) = transactions::table + .filter( + transactions::tx_sequence_number + .nullable() + .eq(tx_digests::table + .select(tx_digests::tx_sequence_number) + .filter(tx_digests::tx_digest.eq(digest.into_inner().to_vec())) + .single_value()), + ) + .select((transactions::timestamp_ms, transactions::events)) + .first::<(i64, StoredTransactionEvents)>(&mut connection) + .await?; + + let events = stored_events_to_events(serialized_events)?; + let tx_events = TransactionEvents { data: events }; + + let sui_tx_events = tx_events_to_sui_tx_events( + tx_events, + self.package_resolver(), + digest, + timestamp_ms as u64, + ) + .await?; + Ok(sui_tx_events.map_or(vec![], |ste| ste.data)) + } + + async fn query_events_by_tx_digest( + &self, + tx_digest: TransactionDigest, + cursor: Option, + cursor_tx_seq: i64, + limit: usize, + descending_order: bool, + ) -> IndexerResult> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + let mut query = events::table.into_boxed(); + + if let Some(cursor) = cursor { + if cursor.tx_digest != tx_digest { + return Err(IndexerError::InvalidArgumentError( + "Cursor tx_digest does not match the tx_digest in the query.".into(), + )); + } + if descending_order { + query = query.filter(events::event_sequence_number.lt(cursor.event_seq as i64)); + } else { + query = query.filter(events::event_sequence_number.gt(cursor.event_seq as i64)); + } + } else if descending_order { + query = query.filter(events::event_sequence_number.le(i64::MAX)); + } else { + query = query.filter(events::event_sequence_number.ge(0)); + }; + + if descending_order { + query = query.order(events::event_sequence_number.desc()); + } else { + query = query.order(events::event_sequence_number.asc()); + } + + // If the cursor is provided and 
matches tx_digest, we've already fetched the + // tx_sequence_number and can query events table directly. Otherwise, we can just consult + // the tx_digests table for the tx_sequence_number to key into events table. + if cursor.is_some() { + query = query.filter(events::tx_sequence_number.eq(cursor_tx_seq)); + } else { + query = query.filter( + events::tx_sequence_number.nullable().eq(tx_digests::table + .select(tx_digests::tx_sequence_number) + .filter(tx_digests::tx_digest.eq(tx_digest.into_inner().to_vec())) + .single_value()), + ); + } + + let stored_events = query + .limit(limit as i64) + .load::(&mut connection) + .await?; + + let mut sui_event_futures = vec![]; + for stored_event in stored_events { + sui_event_futures.push(tokio::task::spawn( + stored_event.try_into_sui_event(self.package_resolver.clone()), + )); + } + + let sui_events = futures::future::join_all(sui_event_futures) + .await + .into_iter() + .collect::, _>>() + .tap_err(|e| error!("Failed to join sui event futures: {}", e))? + .into_iter() + .collect::, _>>() + .tap_err(|e| error!("Failed to collect sui event futures: {}", e))?; + Ok(sui_events) + } + + pub async fn query_events( + &self, + filter: EventFilter, + cursor: Option, + limit: usize, + descending_order: bool, + ) -> IndexerResult> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + let (tx_seq, event_seq) = if let Some(cursor) = cursor { + let EventID { + tx_digest, + event_seq, + } = cursor; + let tx_seq = transactions::table + .select(transactions::tx_sequence_number) + .filter( + transactions::tx_sequence_number + .nullable() + .eq(tx_digests::table + .select(tx_digests::tx_sequence_number) + .filter(tx_digests::tx_digest.eq(tx_digest.into_inner().to_vec())) + .single_value()), + ) + .first::(&mut connection) + .await?; + (tx_seq, event_seq as i64) + } else if descending_order { + (i64::MAX, i64::MAX) + } else { + (-1, 0) + }; + + let query = if let EventFilter::Sender(sender) = &filter { + // Need to remove ambiguities for tx_sequence_number column + let cursor_clause = if descending_order { + format!("(e.{TX_SEQUENCE_NUMBER_STR} < {} OR (e.{TX_SEQUENCE_NUMBER_STR} = {} AND e.{EVENT_SEQUENCE_NUMBER_STR} < {}))", tx_seq, tx_seq, event_seq) + } else { + format!("(e.{TX_SEQUENCE_NUMBER_STR} > {} OR (e.{TX_SEQUENCE_NUMBER_STR} = {} AND e.{EVENT_SEQUENCE_NUMBER_STR} > {}))", tx_seq, tx_seq, event_seq) + }; + let order_clause = if descending_order { + format!("e.{TX_SEQUENCE_NUMBER_STR} DESC, e.{EVENT_SEQUENCE_NUMBER_STR} DESC") + } else { + format!("e.{TX_SEQUENCE_NUMBER_STR} ASC, e.{EVENT_SEQUENCE_NUMBER_STR} ASC") + }; + format!( + "( \ + SELECT * + FROM event_senders s + JOIN events e + USING (tx_sequence_number, event_sequence_number) + WHERE s.sender = '\\x{}'::bytea AND {} \ + ORDER BY {} \ + LIMIT {} + )", + Hex::encode(sender.to_vec()), + cursor_clause, + order_clause, + limit, + ) + } else if let EventFilter::Transaction(tx_digest) = filter { + return self + .query_events_by_tx_digest(tx_digest, cursor, tx_seq, limit, descending_order) + .await; + } else { + let main_where_clause = match filter { + EventFilter::All([]) => { + // No filter + "1 = 1".to_string() + } + EventFilter::MoveModule { package, module } => { + format!( + "package = '\\x{}'::bytea AND module = '{}'", + package.to_hex(), + module, + ) + } + EventFilter::MoveEventType(struct_tag) => { + format!("event_type = '{}'", struct_tag) + } + EventFilter::MoveEventModule { package, module } => { + let package_module_prefix = format!("{}::{}", 
package.to_hex_literal(), module); + format!("event_type LIKE '{package_module_prefix}::%'") + } + EventFilter::Sender(_) => { + // Processed above + unreachable!() + } + EventFilter::Transaction(_) => { + // Processed above + unreachable!() + } + EventFilter::TimeRange { .. } | EventFilter::Any(_) => { + return Err(IndexerError::NotSupportedError( + "This type of EventFilter is not supported.".to_owned(), + )); + } + }; + + let cursor_clause = if descending_order { + format!("AND ({TX_SEQUENCE_NUMBER_STR} < {} OR ({TX_SEQUENCE_NUMBER_STR} = {} AND {EVENT_SEQUENCE_NUMBER_STR} < {}))", tx_seq, tx_seq, event_seq) + } else { + format!("AND ({TX_SEQUENCE_NUMBER_STR} > {} OR ({TX_SEQUENCE_NUMBER_STR} = {} AND {EVENT_SEQUENCE_NUMBER_STR} > {}))", tx_seq, tx_seq, event_seq) + }; + let order_clause = if descending_order { + format!("{TX_SEQUENCE_NUMBER_STR} DESC, {EVENT_SEQUENCE_NUMBER_STR} DESC") + } else { + format!("{TX_SEQUENCE_NUMBER_STR} ASC, {EVENT_SEQUENCE_NUMBER_STR} ASC") + }; + + format!( + " + SELECT * FROM events \ + WHERE {} {} \ + ORDER BY {} \ + LIMIT {} + ", + main_where_clause, cursor_clause, order_clause, limit, + ) + }; + debug!("query events: {}", query); + let stored_events = diesel::sql_query(query) + .load::(&mut connection) + .await?; + + let mut sui_event_futures = vec![]; + for stored_event in stored_events { + sui_event_futures.push(tokio::task::spawn( + stored_event.try_into_sui_event(self.package_resolver.clone()), + )); + } + + let sui_events = futures::future::join_all(sui_event_futures) + .await + .into_iter() + .collect::, _>>() + .tap_err(|e| error!("Failed to join sui event futures: {}", e))? + .into_iter() + .collect::, _>>() + .tap_err(|e| error!("Failed to collect sui event futures: {}", e))?; + Ok(sui_events) + } + + pub async fn get_dynamic_fields( + &self, + parent_object_id: ObjectID, + cursor: Option, + limit: usize, + ) -> Result, IndexerError> { + let stored_objects = self + .get_dynamic_fields_raw(parent_object_id, cursor, limit) + .await?; + let mut df_futures = vec![]; + let indexer_reader_arc = Arc::new(self.clone()); + for stored_object in stored_objects { + let indexer_reader_arc_clone = Arc::clone(&indexer_reader_arc); + df_futures.push(tokio::task::spawn(async move { + indexer_reader_arc_clone + .try_create_dynamic_field_info(stored_object) + .await + })); + } + let df_infos = futures::future::join_all(df_futures) + .await + .into_iter() + .collect::, _>>() + .tap_err(|e| error!("Error joining DF futures: {:?}", e))? + .into_iter() + .collect::, _>>() + .tap_err(|e| error!("Error calling try_create_dynamic_field_info: {:?}", e))? 
+ .into_iter() + .flatten() + .collect::>(); + Ok(df_infos) + } + + pub async fn get_dynamic_fields_raw( + &self, + parent_object_id: ObjectID, + cursor: Option, + limit: usize, + ) -> Result, IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + let mut query = objects::table + .filter(objects::owner_type.eq(OwnerType::Object as i16)) + .filter(objects::owner_id.eq(parent_object_id.to_vec())) + .order(objects::object_id.asc()) + .limit(limit as i64) + .into_boxed(); + + if let Some(object_cursor) = cursor { + query = query.filter(objects::object_id.gt(object_cursor.to_vec())); + } + + query + .load::(&mut connection) + .await + .map_err(Into::into) + } + + async fn try_create_dynamic_field_info( + &self, + stored_object: StoredObject, + ) -> Result, IndexerError> { + if stored_object.df_kind.is_none() { + return Ok(None); + } + + let object: Object = stored_object.try_into()?; + let move_object = match object.data.try_as_move().cloned() { + Some(move_object) => move_object, + None => { + return Err(IndexerError::ResolveMoveStructError( + "Object is not a MoveObject".to_string(), + )); + } + }; + let type_tag: TypeTag = move_object.type_().clone().into(); + let layout = self + .package_resolver + .type_layout(type_tag.clone()) + .await + .map_err(|e| { + IndexerError::ResolveMoveStructError(format!( + "Failed to get type layout for type {}: {e}", + type_tag.to_canonical_display(/* with_prefix */ true), + )) + })?; + + let field = DFV::FieldVisitor::deserialize(move_object.contents(), &layout) + .tap_err(|e| warn!("{e}"))?; + + let type_ = field.kind; + let name_type: TypeTag = field.name_layout.into(); + let bcs_name = field.name_bytes.to_owned(); + + let name_value = BoundedVisitor::deserialize_value(field.name_bytes, field.name_layout) + .tap_err(|e| warn!("{e}"))?; + + let name = DynamicFieldName { + type_: name_type, + value: SuiMoveValue::from(name_value).to_json_value(), + }; + + let value_metadata = field.value_metadata().map_err(|e| { + warn!("{e}"); + IndexerError::UncategorizedError(anyhow!(e)) + })?; + + Ok(Some(match value_metadata { + DFV::ValueMetadata::DynamicField(object_type) => DynamicFieldInfo { + name, + bcs_name, + type_, + object_type: object_type.to_canonical_string(/* with_prefix */ true), + object_id: object.id(), + version: object.version(), + digest: object.digest(), + }, + + DFV::ValueMetadata::DynamicObjectField(object_id) => { + let object = self.get_object(&object_id, None).await?.ok_or_else(|| { + IndexerError::UncategorizedError(anyhow!( + "Failed to find object_id {} when trying to create dynamic field info", + object_id.to_canonical_display(/* with_prefix */ true), + )) + })?; + + let object_type = object.data.type_().unwrap().clone(); + DynamicFieldInfo { + name, + bcs_name, + type_, + object_type: object_type.to_canonical_string(/* with_prefix */ true), + object_id, + version: object.version(), + digest: object.digest(), + } + } + })) + } + + pub async fn bcs_name_from_dynamic_field_name( + &self, + name: &DynamicFieldName, + ) -> Result, IndexerError> { + let move_type_layout = self + .package_resolver() + .type_layout(name.type_.clone()) + .await + .map_err(|e| { + IndexerError::ResolveMoveStructError(format!( + "Failed to get type layout for type {}: {}", + name.type_, e + )) + })?; + let sui_json_value = sui_json::SuiJsonValue::new(name.value.clone())?; + let name_bcs_value = sui_json_value.to_bcs_bytes(&move_type_layout)?; + Ok(name_bcs_value) + } + + async fn get_display_object_by_type( + &self, + 
object_type: &move_core_types::language_storage::StructTag, + ) -> Result, IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + let object_type = object_type.to_canonical_string(/* with_prefix */ true); + let stored_display = display::table + .filter(display::object_type.eq(object_type)) + .first::(&mut connection) + .await + .optional()?; + + let stored_display = match stored_display { + Some(display) => display, + None => return Ok(None), + }; + + let display_update = stored_display.to_display_update_event()?; + + Ok(Some(display_update)) + } + + pub async fn get_owned_coins( + &self, + owner: SuiAddress, + // If coin_type is None, look for all coins. + coin_type: Option, + cursor: ObjectID, + limit: usize, + ) -> Result, IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + let mut query = objects::dsl::objects + .filter(objects::dsl::owner_type.eq(OwnerType::Address as i16)) + .filter(objects::dsl::owner_id.eq(owner.to_vec())) + .filter(objects::dsl::object_id.gt(cursor.to_vec())) + .into_boxed(); + if let Some(coin_type) = coin_type { + query = query.filter(objects::dsl::coin_type.eq(Some(coin_type))); + } else { + query = query.filter(objects::dsl::coin_type.is_not_null()); + } + + query + .order((objects::dsl::coin_type.asc(), objects::dsl::object_id.asc())) + .limit(limit as i64) + .load::(&mut connection) + .await? + .into_iter() + .map(|o| o.try_into()) + .collect::>>() + } + + pub async fn get_coin_balances( + &self, + owner: SuiAddress, + // If coin_type is None, look for all coins. + coin_type: Option, + ) -> Result, IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + let coin_type_filter = if let Some(coin_type) = coin_type { + format!("= '{}'", coin_type) + } else { + "IS NOT NULL".to_string() + }; + // Note: important to cast to BIGINT to avoid deserialize confusion + let query = format!( + " + SELECT coin_type, \ + CAST(COUNT(*) AS BIGINT) AS coin_num, \ + CAST(SUM(coin_balance) AS BIGINT) AS coin_balance \ + FROM objects \ + WHERE owner_type = {} \ + AND owner_id = '\\x{}'::BYTEA \ + AND coin_type {} \ + GROUP BY coin_type \ + ORDER BY coin_type ASC + ", + OwnerType::Address as i16, + Hex::encode(owner.to_vec()), + coin_type_filter, + ); + + debug!("get coin balances query: {query}"); + diesel::sql_query(query) + .load::(&mut connection) + .await? + .into_iter() + .map(|cb| cb.try_into()) + .collect::>>() + } + + pub(crate) async fn get_display_fields( + &self, + original_object: &sui_types::object::Object, + original_layout: &Option, + ) -> Result { + let (object_type, layout) = if let Some((object_type, layout)) = + sui_json_rpc::read_api::get_object_type_and_struct(original_object, original_layout) + .map_err(|e| IndexerError::GenericError(e.to_string()))? + { + (object_type, layout) + } else { + return Ok(DisplayFieldsResponse { + data: None, + error: None, + }); + }; + + if let Some(display_object) = self.get_display_object_by_type(&object_type).await? 
{ + return sui_json_rpc::read_api::get_rendered_fields(display_object.fields, &layout) + .map_err(|e| IndexerError::GenericError(e.to_string())); + } + Ok(DisplayFieldsResponse { + data: None, + error: None, + }) + } + + pub async fn get_singleton_object(&self, type_: &StructTag) -> Result> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + let object = match objects::table + .filter(objects::object_type_package.eq(type_.address.to_vec())) + .filter(objects::object_type_module.eq(type_.module.to_string())) + .filter(objects::object_type_name.eq(type_.name.to_string())) + .filter(objects::object_type.eq(type_.to_canonical_string(/* with_prefix */ true))) + .first::(&mut connection) + .await + .optional()? + { + Some(object) => object, + None => return Ok(None), + } + .try_into()?; + + Ok(Some(object)) + } + + pub async fn get_coin_metadata( + &self, + coin_struct: StructTag, + ) -> Result, IndexerError> { + let coin_metadata_type = CoinMetadata::type_(coin_struct); + + self.get_singleton_object(&coin_metadata_type) + .await? + .and_then(|o| SuiCoinMetadata::try_from(o).ok()) + .pipe(Ok) + } + + pub async fn get_total_supply(&self, coin_struct: StructTag) -> Result { + let treasury_cap_type = TreasuryCap::type_(coin_struct); + + self.get_singleton_object(&treasury_cap_type) + .await? + .and_then(|o| TreasuryCap::try_from(o).ok()) + .ok_or(IndexerError::GenericError(format!( + "Cannot find treasury cap object with type {}", + treasury_cap_type + )))? + .total_supply + .pipe(Ok) + } + + pub fn package_resolver(&self) -> PackageResolver { + self.package_resolver.clone() + } +} + +// NOTE: Do not make this public and easily accessible as we need to be careful that it is only +// used in non-async contexts via the use of tokio::task::spawn_blocking in order to avoid blocking +// the async runtime. +// +// Maybe we should look into introducing an async object store trait... +struct ConnectionAsObjectStore { + inner: std::sync::Mutex< + diesel_async::async_connection_wrapper::AsyncConnectionWrapper< + crate::database::Connection<'static>, + >, + >, +} + +impl ConnectionAsObjectStore { + async fn from_pool( + pool: &ConnectionPool, + ) -> Result { + let connection = std::sync::Mutex::new(pool.dedicated_connection().await?.into()); + + Ok(Self { inner: connection }) + } + + fn get_object_from_db( + &self, + object_id: &ObjectID, + version: Option, + ) -> Result, IndexerError> { + use diesel::RunQueryDsl; + + let mut guard = self.inner.lock().unwrap(); + let connection: &mut diesel_async::async_connection_wrapper::AsyncConnectionWrapper<_> = + &mut guard; + + let mut query = objects::table + .filter(objects::object_id.eq(object_id.to_vec())) + .into_boxed(); + if let Some(version) = version { + query = query.filter(objects::object_version.eq(version.value() as i64)) + } + + query + .first::(connection) + .optional() + .map_err(Into::into) + } + + fn get_object( + &self, + object_id: &ObjectID, + version: Option, + ) -> Result, IndexerError> { + let Some(stored_package) = self.get_object_from_db(object_id, version)? 
else { + return Ok(None); + }; + + let object = stored_package.try_into()?; + Ok(Some(object)) + } +} + +impl sui_types::storage::ObjectStore for ConnectionAsObjectStore { + fn get_object( + &self, + object_id: &ObjectID, + ) -> Result, sui_types::storage::error::Error> { + self.get_object(object_id, None) + .map_err(sui_types::storage::error::Error::custom) + } + + fn get_object_by_key( + &self, + object_id: &ObjectID, + version: sui_types::base_types::VersionNumber, + ) -> Result, sui_types::storage::error::Error> { + self.get_object(object_id, Some(version)) + .map_err(sui_types::storage::error::Error::custom) + } +} diff --git a/crates/sui-mvr-indexer/src/lib.rs b/crates/sui-mvr-indexer/src/lib.rs new file mode 100644 index 0000000000000..f40b0fdfcfb8a --- /dev/null +++ b/crates/sui-mvr-indexer/src/lib.rs @@ -0,0 +1,97 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 +#![recursion_limit = "256"] + +use std::time::Duration; + +use anyhow::Result; +use config::JsonRpcConfig; +use jsonrpsee::http_client::{HeaderMap, HeaderValue, HttpClient, HttpClientBuilder}; +use metrics::IndexerMetrics; +use mysten_metrics::spawn_monitored_task; +use prometheus::Registry; +use system_package_task::SystemPackageTask; +use tokio_util::sync::CancellationToken; +use tracing::warn; + +use sui_json_rpc::ServerType; +use sui_json_rpc::{JsonRpcServerBuilder, ServerHandle}; +use sui_json_rpc_api::CLIENT_SDK_TYPE_HEADER; + +use crate::apis::{ + CoinReadApi, ExtendedApi, GovernanceReadApi, IndexerApi, MoveUtilsApi, ReadApi, + TransactionBuilderApi, WriteApi, +}; +use crate::indexer_reader::IndexerReader; +use errors::IndexerError; + +pub mod apis; +pub mod backfill; +pub mod benchmark; +pub mod config; +pub mod database; +pub mod db; +pub mod errors; +pub mod handlers; +pub mod indexer; +pub mod indexer_reader; +pub mod metrics; +pub mod models; +pub mod restorer; +pub mod schema; +pub mod store; +pub mod system_package_task; +pub mod tempdb; +pub mod test_utils; +pub mod types; + +pub async fn build_json_rpc_server( + prometheus_registry: &Registry, + reader: IndexerReader, + config: &JsonRpcConfig, + cancel: CancellationToken, +) -> Result { + let mut builder = + JsonRpcServerBuilder::new(env!("CARGO_PKG_VERSION"), prometheus_registry, None, None); + let http_client = crate::get_http_client(&config.rpc_client_url)?; + + builder.register_module(WriteApi::new(http_client.clone()))?; + builder.register_module(IndexerApi::new( + reader.clone(), + config.name_service_options.to_config(), + ))?; + builder.register_module(TransactionBuilderApi::new(reader.clone()))?; + builder.register_module(MoveUtilsApi::new(reader.clone()))?; + builder.register_module(GovernanceReadApi::new(reader.clone()))?; + builder.register_module(ReadApi::new(reader.clone()))?; + builder.register_module(CoinReadApi::new(reader.clone()))?; + builder.register_module(ExtendedApi::new(reader.clone()))?; + + let system_package_task = + SystemPackageTask::new(reader.clone(), cancel.clone(), Duration::from_secs(10)); + + tracing::info!("Starting system package task"); + spawn_monitored_task!(async move { system_package_task.run().await }); + + Ok(builder + .start(config.rpc_address, None, ServerType::Http, Some(cancel)) + .await?) 
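+    // Note: `Some(cancel)` above wires the same token into the HTTP server, so cancelling it is
+    // the natural way to shut down both the RPC server and the SystemPackageTask (constructed
+    // earlier with the same token and a 10-second `Duration`). A minimal caller sketch, assuming
+    // an `IndexerReader`, a `JsonRpcConfig` and a Prometheus `Registry` are already built
+    // (`registry`, `reader` and `json_rpc_config` are illustrative names, not part of this crate):
+    //
+    //     let cancel = CancellationToken::new();
+    //     let _handle =
+    //         build_json_rpc_server(&registry, reader, &json_rpc_config, cancel.clone()).await?;
+    //     // ... later, to stop serving:
+    //     cancel.cancel();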
+} + +fn get_http_client(rpc_client_url: &str) -> Result { + let mut headers = HeaderMap::new(); + headers.insert(CLIENT_SDK_TYPE_HEADER, HeaderValue::from_static("indexer")); + + HttpClientBuilder::default() + .max_request_body_size(2 << 30) + .max_concurrent_requests(usize::MAX) + .set_headers(headers.clone()) + .build(rpc_client_url) + .map_err(|e| { + warn!("Failed to get new Http client with error: {:?}", e); + IndexerError::HttpClientInitError(format!( + "Failed to initialize fullnode RPC client with error: {:?}", + e + )) + }) +} diff --git a/crates/sui-mvr-indexer/src/main.rs b/crates/sui-mvr-indexer/src/main.rs new file mode 100644 index 0000000000000..703ac457398a1 --- /dev/null +++ b/crates/sui-mvr-indexer/src/main.rs @@ -0,0 +1,117 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use clap::Parser; +use sui_mvr_indexer::backfill::backfill_runner::BackfillRunner; +use sui_mvr_indexer::benchmark::run_indexer_benchmark; +use sui_mvr_indexer::config::{Command, UploadOptions}; +use sui_mvr_indexer::database::ConnectionPool; +use sui_mvr_indexer::db::setup_postgres::clear_database; +use sui_mvr_indexer::db::{ + check_db_migration_consistency, check_prunable_tables_valid, reset_database, run_migrations, +}; +use sui_mvr_indexer::indexer::Indexer; +use sui_mvr_indexer::metrics::{ + spawn_connection_pool_metric_collector, start_prometheus_server, IndexerMetrics, +}; +use sui_mvr_indexer::restorer::formal_snapshot::IndexerFormalSnapshotRestorer; +use sui_mvr_indexer::store::PgIndexerStore; +use tokio_util::sync::CancellationToken; +use tracing::warn; + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + let opts = sui_mvr_indexer::config::IndexerConfig::parse(); + + // NOTE: this is to print out tracing like info, warn & error. + let _guard = telemetry_subscribers::TelemetryConfig::new() + .with_env() + .init(); + warn!("WARNING: Sui indexer is still experimental and we expect occasional breaking changes that require backfills."); + + let (_registry_service, registry) = start_prometheus_server(opts.metrics_address)?; + mysten_metrics::init_metrics(®istry); + let indexer_metrics = IndexerMetrics::new(®istry); + + let pool = ConnectionPool::new( + opts.database_url.clone(), + opts.connection_pool_config.clone(), + ) + .await?; + spawn_connection_pool_metric_collector(indexer_metrics.clone(), pool.clone()); + + match opts.command { + Command::Indexer { + ingestion_config, + snapshot_config, + pruning_options, + upload_options, + } => { + // Make sure to run all migrations on startup, and also serve as a compatibility check. 
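+            // Startup order for the writer, as implemented below: (1) run migrations, (2) if a
+            // retention config was supplied via `pruning_options`, check that the prunable
+            // tables are valid before enabling pruning, (3) construct the `PgIndexerStore`, and
+            // (4) hand everything to `Indexer::start_writer` with a fresh `CancellationToken`.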
+ run_migrations(pool.dedicated_connection().await?).await?; + let retention_config = pruning_options.load_from_file(); + if retention_config.is_some() { + check_prunable_tables_valid(&mut pool.get().await?).await?; + } + + let store = PgIndexerStore::new(pool, upload_options, indexer_metrics.clone()); + + Indexer::start_writer( + ingestion_config, + store, + indexer_metrics, + snapshot_config, + retention_config, + CancellationToken::new(), + None, + ) + .await?; + } + Command::JsonRpcService(json_rpc_config) => { + check_db_migration_consistency(&mut pool.get().await?).await?; + + Indexer::start_reader(&json_rpc_config, ®istry, pool, CancellationToken::new()) + .await?; + } + Command::ResetDatabase { + force, + skip_migrations, + } => { + if !force { + return Err(anyhow::anyhow!( + "Resetting the DB requires use of the `--force` flag", + )); + } + + if skip_migrations { + clear_database(&mut pool.dedicated_connection().await?).await?; + } else { + reset_database(pool.dedicated_connection().await?).await?; + } + } + Command::RunMigrations => { + run_migrations(pool.dedicated_connection().await?).await?; + } + Command::RunBackFill { + start, + end, + runner_kind, + backfill_config, + } => { + let total_range = start..=end; + BackfillRunner::run(runner_kind, pool, backfill_config, total_range).await; + } + Command::Restore(restore_config) => { + let store = + PgIndexerStore::new(pool, UploadOptions::default(), indexer_metrics.clone()); + let mut formal_restorer = + IndexerFormalSnapshotRestorer::new(store, restore_config).await?; + formal_restorer.restore().await?; + } + Command::Benchmark(benchmark_config) => { + run_indexer_benchmark(benchmark_config, pool, indexer_metrics).await; + } + } + + Ok(()) +} diff --git a/crates/sui-mvr-indexer/src/metrics.rs b/crates/sui-mvr-indexer/src/metrics.rs new file mode 100644 index 0000000000000..0b1f8c1e5bed5 --- /dev/null +++ b/crates/sui-mvr-indexer/src/metrics.rs @@ -0,0 +1,813 @@ +// Copyright (c) Mysten Labs, Inc. 
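+// Module overview: registers every indexer Prometheus metric under the custom "indexer"
+// namespace and serves them from an axum router at `/metrics` (see `start_prometheus_server`).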
+// SPDX-License-Identifier: Apache-2.0 + +use axum::{extract::Extension, http::StatusCode, routing::get, Router}; +use mysten_metrics::RegistryService; +use prometheus::{ + register_histogram_with_registry, register_int_counter_with_registry, + register_int_gauge_with_registry, Histogram, IntCounter, IntGauge, +}; +use prometheus::{Registry, TextEncoder}; +use std::net::SocketAddr; +use tracing::info; + +const METRICS_ROUTE: &str = "/metrics"; + +pub fn start_prometheus_server( + addr: SocketAddr, +) -> Result<(RegistryService, Registry), anyhow::Error> { + info!(address =% addr, "Starting prometheus server"); + let registry = Registry::new_custom(Some("indexer".to_string()), None)?; + let registry_service = RegistryService::new(registry.clone()); + + let app = Router::new() + .route(METRICS_ROUTE, get(metrics)) + .layer(Extension(registry_service.clone())); + + tokio::spawn(async move { + let listener = tokio::net::TcpListener::bind(&addr).await.unwrap(); + axum::serve(listener, app).await.unwrap(); + }); + Ok((registry_service, registry)) +} + +async fn metrics(Extension(registry_service): Extension) -> (StatusCode, String) { + let metrics_families = registry_service.gather_all(); + match TextEncoder.encode_to_string(&metrics_families) { + Ok(metrics) => (StatusCode::OK, metrics), + Err(error) => ( + StatusCode::INTERNAL_SERVER_ERROR, + format!("unable to encode metrics: {error}"), + ), + } +} + +/// NOTE: for various data ingestion steps, which are expected to be within [0.001, 100] seconds, +/// and high double digits usually means something is broken. +const DATA_INGESTION_LATENCY_SEC_BUCKETS: &[f64] = &[ + 0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.0, 20.0, 50.0, 100.0, +]; +/// NOTE: for objects_snapshot update and advance_epoch, which are expected to be within [0.1, 100] seconds, +/// and can go up to high hundreds of seconds when things go wrong. +const DB_UPDATE_QUERY_LATENCY_SEC_BUCKETS: &[f64] = &[ + 0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.0, 20.0, 50.0, 100.0, 200.0, 500.0, 1000.0, 2000.0, 5000.0, + 10000.0, +]; +/// NOTE: for json_rpc calls, which are expected to be within [0.01, 100] seconds, +/// high hundreds of seconds usually means something is broken. 
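+/// These buckets are passed as `JSON_RPC_LATENCY_SEC_BUCKETS.to_vec()` to
+/// `register_histogram_with_registry!` for the JSON-RPC latency histograms created in
+/// `IndexerMetrics::new` below (e.g. `query_events_latency`, `get_object_latency`).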
+const JSON_RPC_LATENCY_SEC_BUCKETS: &[f64] = &[ + 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1.0, 2.0, 5.0, 10.0, 20.0, 50.0, 100.0, 200.0, 500.0, 1000.0, +]; + +#[derive(Clone)] +pub struct IndexerMetrics { + pub total_checkpoint_received: IntCounter, + pub total_tx_checkpoint_committed: IntCounter, + pub total_object_checkpoint_committed: IntCounter, + pub total_transaction_committed: IntCounter, + pub total_object_change_committed: IntCounter, + pub total_transaction_chunk_committed: IntCounter, + pub total_object_change_chunk_committed: IntCounter, + pub total_epoch_committed: IntCounter, + pub latest_fullnode_checkpoint_sequence_number: IntGauge, + pub latest_tx_checkpoint_sequence_number: IntGauge, + pub latest_indexer_object_checkpoint_sequence_number: IntGauge, + pub latest_object_snapshot_sequence_number: IntGauge, + // max checkpoint sequence numbers on various stages of indexer data ingestion + pub max_downloaded_checkpoint_sequence_number: IntGauge, + pub max_indexed_checkpoint_sequence_number: IntGauge, + pub max_committed_checkpoint_sequence_number: IntGauge, + // the related timestamps of max checkpoint ^ on various stages + pub downloaded_checkpoint_timestamp_ms: IntGauge, + pub indexed_checkpoint_timestamp_ms: IntGauge, + pub committed_checkpoint_timestamp_ms: IntGauge, + // lag starting from the timestamp of the latest checkpoint to the current time + pub download_lag_ms: IntGauge, + pub index_lag_ms: IntGauge, + pub db_commit_lag_ms: IntGauge, + // latencies of various steps of data ingestion. + // checkpoint E2E latency is: fullnode_download_latency + checkpoint_index_latency + db_commit_latency + pub checkpoint_download_bytes_size: IntGauge, + pub tokio_blocking_task_wait_latency: Histogram, + pub fullnode_checkpoint_data_download_latency: Histogram, + pub fullnode_checkpoint_wait_and_download_latency: Histogram, + pub fullnode_transaction_download_latency: Histogram, + pub fullnode_object_download_latency: Histogram, + pub checkpoint_index_latency: Histogram, + pub indexing_batch_size: IntGauge, + pub indexing_tx_object_changes_latency: Histogram, + pub indexing_objects_latency: Histogram, + pub indexing_get_object_in_mem_hit: IntCounter, + pub indexing_get_object_db_hit: IntCounter, + pub indexing_module_resolver_in_mem_hit: IntCounter, + pub indexing_package_resolver_in_mem_hit: IntCounter, + pub indexing_packages_latency: Histogram, + pub checkpoint_objects_index_latency: Histogram, + pub checkpoint_db_commit_latency: Histogram, + pub checkpoint_db_commit_latency_step_1: Histogram, + pub checkpoint_db_commit_latency_transactions: Histogram, + pub checkpoint_db_commit_latency_transactions_chunks: Histogram, + pub checkpoint_db_commit_latency_transactions_chunks_transformation: Histogram, + pub checkpoint_db_commit_latency_objects: Histogram, + pub checkpoint_db_commit_latency_objects_snapshot: Histogram, + pub checkpoint_db_commit_latency_objects_version: Histogram, + pub checkpoint_db_commit_latency_objects_history: Histogram, + pub checkpoint_db_commit_latency_full_objects_history: Histogram, + pub checkpoint_db_commit_latency_objects_chunks: Histogram, + pub checkpoint_db_commit_latency_objects_snapshot_chunks: Histogram, + pub checkpoint_db_commit_latency_objects_version_chunks: Histogram, + pub checkpoint_db_commit_latency_objects_history_chunks: Histogram, + pub checkpoint_db_commit_latency_full_objects_history_chunks: Histogram, + pub checkpoint_db_commit_latency_events: Histogram, + pub checkpoint_db_commit_latency_events_chunks: Histogram, + pub 
checkpoint_db_commit_latency_event_indices: Histogram, + pub checkpoint_db_commit_latency_event_indices_chunks: Histogram, + pub checkpoint_db_commit_latency_packages: Histogram, + pub checkpoint_db_commit_latency_tx_indices: Histogram, + pub checkpoint_db_commit_latency_tx_indices_chunks: Histogram, + pub checkpoint_db_commit_latency_checkpoints: Histogram, + pub checkpoint_db_commit_latency_epoch: Histogram, + pub checkpoint_db_commit_latency_watermarks: Histogram, + pub thousand_transaction_avg_db_commit_latency: Histogram, + pub object_db_commit_latency: Histogram, + pub object_mutation_db_commit_latency: Histogram, + pub object_deletion_db_commit_latency: Histogram, + pub epoch_db_commit_latency: Histogram, + // latencies of slow DB update queries, now only advance epoch and objects_snapshot update + pub advance_epoch_latency: Histogram, + // latencies of RPC endpoints in read.rs + pub get_transaction_block_latency: Histogram, + pub multi_get_transaction_blocks_latency: Histogram, + pub get_object_latency: Histogram, + pub multi_get_objects_latency: Histogram, + pub try_get_past_object_latency: Histogram, + pub try_multi_get_past_objects_latency: Histogram, + pub get_checkpoint_latency: Histogram, + pub get_checkpoints_latency: Histogram, + pub get_events_latency: Histogram, + pub get_loaded_child_objects_latency: Histogram, + pub get_total_transaction_blocks_latency: Histogram, + pub get_latest_checkpoint_sequence_number_latency: Histogram, + // latencies of RPC endpoints in indexer.rs + pub get_owned_objects_latency: Histogram, + pub query_transaction_blocks_latency: Histogram, + pub query_events_latency: Histogram, + pub get_dynamic_fields_latency: Histogram, + pub get_dynamic_field_object_latency: Histogram, + pub get_protocol_config_latency: Histogram, + // latency of event websocket subscription + pub subscription_process_latency: Histogram, + pub transaction_per_checkpoint: Histogram, + // indexer state metrics + pub db_conn_pool_size: IntGauge, + pub idle_db_conn: IntGauge, + pub address_processor_failure: IntCounter, + pub checkpoint_metrics_processor_failure: IntCounter, + // pruner metrics + pub last_pruned_epoch: IntGauge, + pub last_pruned_checkpoint: IntGauge, + pub last_pruned_transaction: IntGauge, + pub epoch_pruning_latency: Histogram, +} + +impl IndexerMetrics { + pub fn new(registry: &Registry) -> Self { + Self { + total_checkpoint_received: register_int_counter_with_registry!( + "total_checkpoint_received", + "Total number of checkpoint received", + registry, + ) + .unwrap(), + total_tx_checkpoint_committed: register_int_counter_with_registry!( + "total_checkpoint_committed", + "Total number of checkpoint committed", + registry, + ) + .unwrap(), + total_object_checkpoint_committed: register_int_counter_with_registry!( + "total_object_checkpoint_committed", + "Total number of object checkpoint committed", + registry, + ) + .unwrap(), + total_transaction_committed: register_int_counter_with_registry!( + "total_transaction_committed", + "Total number of transaction committed", + registry, + ) + .unwrap(), + total_object_change_committed: register_int_counter_with_registry!( + "total_object_change_committed", + "Total number of object change committed", + registry, + ) + .unwrap(), + total_transaction_chunk_committed: register_int_counter_with_registry!( + "total_transaction_chunk_committed", + "Total number of transaction chunk committed", + registry, + ) + .unwrap(), + total_object_change_chunk_committed: register_int_counter_with_registry!( + 
"total_object_change_chunk_committed", + "Total number of object change chunk committed", + registry, + ) + .unwrap(), + total_epoch_committed: register_int_counter_with_registry!( + "total_epoch_committed", + "Total number of epoch committed", + registry, + ) + .unwrap(), + latest_fullnode_checkpoint_sequence_number: register_int_gauge_with_registry!( + "latest_fullnode_checkpoint_sequence_number", + "Latest checkpoint sequence number from the Full Node", + registry, + ) + .unwrap(), + latest_tx_checkpoint_sequence_number: register_int_gauge_with_registry!( + "latest_indexer_checkpoint_sequence_number", + "Latest checkpoint sequence number from the Indexer", + registry, + ) + .unwrap(), + latest_indexer_object_checkpoint_sequence_number: register_int_gauge_with_registry!( + "latest_indexer_object_checkpoint_sequence_number", + "Latest object checkpoint sequence number from the Indexer", + registry, + ) + .unwrap(), + latest_object_snapshot_sequence_number: register_int_gauge_with_registry!( + "latest_object_snapshot_sequence_number", + "Latest object snapshot sequence number from the Indexer", + registry, + ).unwrap(), + max_downloaded_checkpoint_sequence_number: register_int_gauge_with_registry!( + "max_downloaded_checkpoint_sequence_number", + "Max downloaded checkpoint sequence number", + registry, + ).unwrap(), + max_indexed_checkpoint_sequence_number: register_int_gauge_with_registry!( + "max_indexed_checkpoint_sequence_number", + "Max indexed checkpoint sequence number", + registry, + ).unwrap(), + max_committed_checkpoint_sequence_number: register_int_gauge_with_registry!( + "max_committed_checkpoint_sequence_number", + "Max committed checkpoint sequence number", + registry, + ).unwrap(), + downloaded_checkpoint_timestamp_ms: register_int_gauge_with_registry!( + "downloaded_checkpoint_timestamp_ms", + "Timestamp of the downloaded checkpoint", + registry, + ).unwrap(), + indexed_checkpoint_timestamp_ms: register_int_gauge_with_registry!( + "indexed_checkpoint_timestamp_ms", + "Timestamp of the indexed checkpoint", + registry, + ).unwrap(), + committed_checkpoint_timestamp_ms: register_int_gauge_with_registry!( + "committed_checkpoint_timestamp_ms", + "Timestamp of the committed checkpoint", + registry, + ).unwrap(), + download_lag_ms: register_int_gauge_with_registry!( + "download_lag_ms", + "Lag of the latest checkpoint in milliseconds", + registry, + ).unwrap(), + index_lag_ms: register_int_gauge_with_registry!( + "index_lag_ms", + "Lag of the latest checkpoint in milliseconds", + registry, + ).unwrap(), + db_commit_lag_ms: register_int_gauge_with_registry!( + "db_commit_lag_ms", + "Lag of the latest checkpoint in milliseconds", + registry, + ).unwrap(), + checkpoint_download_bytes_size: register_int_gauge_with_registry!( + "checkpoint_download_bytes_size", + "Size of the downloaded checkpoint in bytes", + registry, + ).unwrap(), + fullnode_checkpoint_data_download_latency: register_histogram_with_registry!( + "fullnode_checkpoint_data_download_latency", + "Time spent in downloading checkpoint and transaction for a new checkpoint from the Full Node", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + fullnode_checkpoint_wait_and_download_latency: register_histogram_with_registry!( + "fullnode_checkpoint_wait_and_download_latency", + "Time spent in waiting for a new checkpoint from the Full Node", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + + fullnode_transaction_download_latency: register_histogram_with_registry!( + 
"fullnode_transaction_download_latency", + "Time spent in waiting for a new transaction from the Full Node", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + fullnode_object_download_latency: register_histogram_with_registry!( + "fullnode_object_download_latency", + "Time spent in waiting for a new epoch from the Full Node", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + checkpoint_index_latency: register_histogram_with_registry!( + "checkpoint_index_latency", + "Time spent in indexing a checkpoint", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + indexing_batch_size: register_int_gauge_with_registry!( + "indexing_batch_size", + "Size of the indexing batch", + registry, + ).unwrap(), + indexing_tx_object_changes_latency: register_histogram_with_registry!( + "indexing_tx_object_changes_latency", + "Time spent in indexing object changes for a transaction", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + indexing_objects_latency: register_histogram_with_registry!( + "indexing_objects_latency", + "Time spent in indexing objects", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + indexing_packages_latency: register_histogram_with_registry!( + "indexing_packages_latency", + "Time spent in indexing packages", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + indexing_get_object_in_mem_hit: register_int_counter_with_registry!( + "indexing_get_object_in_mem_hit", + "Total number get object hit in mem", + registry, + ) + .unwrap(), + indexing_get_object_db_hit: register_int_counter_with_registry!( + "indexing_get_object_db_hit", + "Total number get object hit in db", + registry, + ) + .unwrap(), + indexing_module_resolver_in_mem_hit: register_int_counter_with_registry!( + "indexing_module_resolver_in_mem_hit", + "Total number module resolver hit in mem", + registry, + ) + .unwrap(), + indexing_package_resolver_in_mem_hit: register_int_counter_with_registry!( + "indexing_package_resolver_in_mem_hit", + "Total number package resolver hit in mem", + registry, + ) + .unwrap(), + checkpoint_objects_index_latency: register_histogram_with_registry!( + "checkpoint_object_index_latency", + "Time spent in indexing a checkpoint objects", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + checkpoint_db_commit_latency: register_histogram_with_registry!( + "checkpoint_db_commit_latency", + "Time spent committing a checkpoint to the db", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + + checkpoint_db_commit_latency_step_1: register_histogram_with_registry!( + "checkpoint_db_commit_latency_step_1", + "Time spent committing a checkpoint to the db, step 1", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + checkpoint_db_commit_latency_transactions: register_histogram_with_registry!( + "checkpoint_db_commit_latency_transactions", + "Time spent committing transactions", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + checkpoint_db_commit_latency_transactions_chunks: register_histogram_with_registry!( + "checkpoint_db_commit_latency_transactions_chunks", + "Time spent committing transactions chunks", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + checkpoint_db_commit_latency_transactions_chunks_transformation: register_histogram_with_registry!( + 
"checkpoint_db_commit_latency_transactions_transaformation", + "Time spent in transactions chunks transformation prior to commit", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + checkpoint_db_commit_latency_objects: register_histogram_with_registry!( + "checkpoint_db_commit_latency_objects", + "Time spent committing objects", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + checkpoint_db_commit_latency_objects_snapshot: register_histogram_with_registry!( + "checkpoint_db_commit_latency_objects_snapshot", + "Time spent committing objects snapshots", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + checkpoint_db_commit_latency_objects_version: register_histogram_with_registry!( + "checkpoint_db_commit_latency_objects_version", + "Time spent committing objects version", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ).unwrap(), + checkpoint_db_commit_latency_objects_history: register_histogram_with_registry!( + "checkpoint_db_commit_latency_objects_history", + "Time spent committing objects history", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ).unwrap(), + checkpoint_db_commit_latency_full_objects_history: register_histogram_with_registry!( + "checkpoint_db_commit_latency_full_objects_history", + "Time spent committing full objects history", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ).unwrap(), + checkpoint_db_commit_latency_objects_chunks: register_histogram_with_registry!( + "checkpoint_db_commit_latency_objects_chunks", + "Time spent committing objects chunks", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + checkpoint_db_commit_latency_objects_snapshot_chunks: register_histogram_with_registry!( + "checkpoint_db_commit_latency_objects_snapshot_chunks", + "Time spent committing objects snapshot chunks", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + checkpoint_db_commit_latency_objects_version_chunks: register_histogram_with_registry!( + "checkpoint_db_commit_latency_objects_version_chunks", + "Time spent committing objects version chunks", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ).unwrap(), + checkpoint_db_commit_latency_objects_history_chunks: register_histogram_with_registry!( + "checkpoint_db_commit_latency_objects_history_chunks", + "Time spent committing objects history chunks", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ).unwrap(), + checkpoint_db_commit_latency_full_objects_history_chunks: register_histogram_with_registry!( + "checkpoint_db_commit_latency_full_objects_history_chunks", + "Time spent committing full objects history chunks", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + checkpoint_db_commit_latency_events: register_histogram_with_registry!( + "checkpoint_db_commit_latency_events", + "Time spent committing events", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + checkpoint_db_commit_latency_events_chunks: register_histogram_with_registry!( + "checkpoint_db_commit_latency_events_chunks", + "Time spent committing events chunks", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + checkpoint_db_commit_latency_event_indices: register_histogram_with_registry!( + "checkpoint_db_commit_latency_event_indices", + "Time spent committing event indices", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + 
checkpoint_db_commit_latency_event_indices_chunks: register_histogram_with_registry!( + "checkpoint_db_commit_latency_event_indices_chunks", + "Time spent committing event indices chunks", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + checkpoint_db_commit_latency_packages: register_histogram_with_registry!( + "checkpoint_db_commit_latency_packages", + "Time spent committing packages", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + checkpoint_db_commit_latency_tx_indices: register_histogram_with_registry!( + "checkpoint_db_commit_latency_tx_indices", + "Time spent committing tx indices", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + checkpoint_db_commit_latency_tx_indices_chunks: register_histogram_with_registry!( + "checkpoint_db_commit_latency_tx_indices_chunks", + "Time spent committing tx_indices chunks", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + checkpoint_db_commit_latency_checkpoints: register_histogram_with_registry!( + "checkpoint_db_commit_latency_checkpoints", + "Time spent committing checkpoints", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + checkpoint_db_commit_latency_epoch: register_histogram_with_registry!( + "checkpoint_db_commit_latency_epochs", + "Time spent committing epochs", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + checkpoint_db_commit_latency_watermarks: register_histogram_with_registry!( + "checkpoint_db_commit_latency_watermarks", + "Time spent committing watermarks", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + tokio_blocking_task_wait_latency: register_histogram_with_registry!( + "tokio_blocking_task_wait_latency", + "Time spent to wait for tokio blocking task pool", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ).unwrap(), + thousand_transaction_avg_db_commit_latency: register_histogram_with_registry!( + "transaction_db_commit_latency", + "Average time spent committing 1000 transactions to the db", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + object_db_commit_latency: register_histogram_with_registry!( + "object_db_commit_latency", + "Time spent committing a object to the db", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + object_mutation_db_commit_latency: register_histogram_with_registry!( + "object_mutation_db_commit_latency", + "Time spent committing a object mutation to the db", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + object_deletion_db_commit_latency: register_histogram_with_registry!( + "object_deletion_db_commit_latency", + "Time spent committing a object deletion to the db", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + epoch_db_commit_latency: register_histogram_with_registry!( + "epoch_db_commit_latency", + "Time spent committing a epoch to the db", + DATA_INGESTION_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + advance_epoch_latency: register_histogram_with_registry!( + "advance_epoch_latency", + "Time spent in advancing epoch", + DB_UPDATE_QUERY_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ).unwrap(), + subscription_process_latency: register_histogram_with_registry!( + "subscription_process_latency", + "Time spent in process Websocket subscription", + JSON_RPC_LATENCY_SEC_BUCKETS.to_vec(), + registry, + ) + .unwrap(), + transaction_per_checkpoint: 
register_histogram_with_registry!( + "transaction_per_checkpoint", + "Number of transactions per checkpoint", + vec![1.0, 2.0, 5.0, 10.0, 20.0, 50.0, 100.0, 200.0, 500.0, 1000.0, 2000.0, 5000.0], + registry, + ) + .unwrap(), + get_transaction_block_latency: register_histogram_with_registry!( + "get_transaction_block_latency", + "Time spent in get_transaction_block on the fullnode behind.", + JSON_RPC_LATENCY_SEC_BUCKETS.to_vec(), + registry + ) + .unwrap(), + multi_get_transaction_blocks_latency: register_histogram_with_registry!( + "multi_get_transaction_blocks_latency", + "Time spent in multi_get_transaction_blocks on the fullnode behind.", + JSON_RPC_LATENCY_SEC_BUCKETS.to_vec(), + registry + ) + .unwrap(), + get_object_latency: register_histogram_with_registry!( + "get_object_latency", + "Time spent in get_object on the fullnode behind.", + JSON_RPC_LATENCY_SEC_BUCKETS.to_vec(), + registry + ) + .unwrap(), + multi_get_objects_latency: register_histogram_with_registry!( + "multi_get_objects_latency", + "Time spent in multi_get_objects on the fullnode behind.", + JSON_RPC_LATENCY_SEC_BUCKETS.to_vec(), + registry + ) + .unwrap(), + try_get_past_object_latency: register_histogram_with_registry!( + "try_get_past_object_latency", + "Time spent in try_get_past_object on the fullnode behind.", + JSON_RPC_LATENCY_SEC_BUCKETS.to_vec(), + registry + ) + .unwrap(), + try_multi_get_past_objects_latency: register_histogram_with_registry!( + "try_multi_get_past_objects_latency", + "Time spent in try_multi_get_past_objects on the fullnode behind.", + JSON_RPC_LATENCY_SEC_BUCKETS.to_vec(), + registry + ) + .unwrap(), + get_checkpoint_latency: register_histogram_with_registry!( + "get_checkpoint_latency", + "Time spent in get_checkpoint on the fullnode behind.", + JSON_RPC_LATENCY_SEC_BUCKETS.to_vec(), + registry + ) + .unwrap(), + get_checkpoints_latency: register_histogram_with_registry!( + "get_checkpoints_latency", + "Time spent in get_checkpoints on the fullnode behind.", + JSON_RPC_LATENCY_SEC_BUCKETS.to_vec(), + registry + ) + .unwrap(), + get_events_latency: register_histogram_with_registry!( + "get_events_latency", + "Time spent in get_events on the fullnode behind.", + JSON_RPC_LATENCY_SEC_BUCKETS.to_vec(), + registry + ) + .unwrap(), + get_total_transaction_blocks_latency: register_histogram_with_registry!( + "get_total_transaction_blocks_latency", + "Time spent in get_total_transaction_blocks on the fullnode behind.", + JSON_RPC_LATENCY_SEC_BUCKETS.to_vec(), + registry + ) + .unwrap(), + get_latest_checkpoint_sequence_number_latency: register_histogram_with_registry!( + "get_latest_checkpoint_sequence_number_latency", + "Time spent in get_latest_checkpoint_sequence_number on the fullnode behind.", + JSON_RPC_LATENCY_SEC_BUCKETS.to_vec(), + registry + ) + .unwrap(), + get_owned_objects_latency: register_histogram_with_registry!( + "get_owned_objects_latency", + "Time spent in get_owned_objects on the fullnode behind.", + JSON_RPC_LATENCY_SEC_BUCKETS.to_vec(), + registry + ) + .unwrap(), + query_transaction_blocks_latency: register_histogram_with_registry!( + "query_transaction_blocks_latency", + "Time spent in query_transaction_blocks on the fullnode behind.", + JSON_RPC_LATENCY_SEC_BUCKETS.to_vec(), + registry + ) + .unwrap(), + query_events_latency: register_histogram_with_registry!( + "query_events_latency", + "Time spent in query_events on the fullnode behind.", + JSON_RPC_LATENCY_SEC_BUCKETS.to_vec(), + registry + ) + .unwrap(), + get_dynamic_fields_latency: 
register_histogram_with_registry!( + "get_dynamic_fields_latency", + "Time spent in get_dynamic_fields on the fullnode behind.", + JSON_RPC_LATENCY_SEC_BUCKETS.to_vec(), + registry + ) + .unwrap(), + get_dynamic_field_object_latency: register_histogram_with_registry!( + "get_dynamic_field_object_latency", + "Time spent in get_dynamic_field_object on the fullnode behind.", + JSON_RPC_LATENCY_SEC_BUCKETS.to_vec(), + registry + ) + .unwrap(), + get_loaded_child_objects_latency: register_histogram_with_registry!( + "get_loaded_child_objects_latency", + "Time spent in get_loaded_child_objects_latency on the fullnode behind.", + JSON_RPC_LATENCY_SEC_BUCKETS.to_vec(), + registry + ) + .unwrap(), + get_protocol_config_latency: register_histogram_with_registry!( + "get_protocol_config_latency", + "Time spent in get_protocol_config_latency on the fullnode behind.", + JSON_RPC_LATENCY_SEC_BUCKETS.to_vec(), + registry + ) + .unwrap(), + db_conn_pool_size: register_int_gauge_with_registry!( + "db_conn_pool_size", + "Size of the database connection pool", + registry + ).unwrap(), + idle_db_conn: register_int_gauge_with_registry!( + "idle_db_conn", + "Number of idle database connections", + registry + ).unwrap(), + address_processor_failure: register_int_counter_with_registry!( + "address_processor_failure", + "Total number of address processor failure", + registry, + ) + .unwrap(), + checkpoint_metrics_processor_failure: register_int_counter_with_registry!( + "checkpoint_metrics_processor_failure", + "Total number of checkpoint metrics processor failure", + registry, + ) + .unwrap(), + last_pruned_epoch: register_int_gauge_with_registry!( + "last_pruned_epoch", + "Last pruned epoch number", + registry, + ) + .unwrap(), + last_pruned_checkpoint: register_int_gauge_with_registry!( + "last_pruned_checkpoint", + "Last pruned checkpoint sequence number", + registry, + ) + .unwrap(), + last_pruned_transaction: register_int_gauge_with_registry!( + "last_pruned_transaction", + "Last pruned transaction sequence number", + registry, + ).unwrap(), + epoch_pruning_latency: register_histogram_with_registry!( + "epoch_pruning_latency", + "Time spent in pruning one epoch", + DB_UPDATE_QUERY_LATENCY_SEC_BUCKETS.to_vec(), + registry + ).unwrap(), + } + } +} + +pub fn spawn_connection_pool_metric_collector( + metrics: IndexerMetrics, + connection_pool: crate::database::ConnectionPool, +) { + tokio::spawn(async move { + loop { + let cp_state = connection_pool.state(); + tracing::debug!( + connection_pool_size =% cp_state.connections, + idle_connections =% cp_state.idle_connections, + ); + metrics.db_conn_pool_size.set(cp_state.connections as i64); + metrics.idle_db_conn.set(cp_state.idle_connections as i64); + tokio::time::sleep(tokio::time::Duration::from_secs(60)).await; + } + }); +} diff --git a/crates/sui-mvr-indexer/src/models/checkpoints.rs b/crates/sui-mvr-indexer/src/models/checkpoints.rs new file mode 100644 index 0000000000000..d18c1a1a7ce9e --- /dev/null +++ b/crates/sui-mvr-indexer/src/models/checkpoints.rs @@ -0,0 +1,186 @@ +// Copyright (c) Mysten Labs, Inc. 
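+// Module overview: Diesel row types for the `chain_identifier`, `checkpoints` and
+// `pruner_cp_watermark` tables, plus conversions from the in-memory `IndexedCheckpoint` and
+// into the JSON-RPC `Checkpoint` response type.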
+// SPDX-License-Identifier: Apache-2.0 + +use diesel::prelude::*; + +use sui_json_rpc_types::Checkpoint as RpcCheckpoint; +use sui_types::base_types::TransactionDigest; +use sui_types::digests::CheckpointDigest; +use sui_types::gas::GasCostSummary; + +use crate::errors::IndexerError; +use crate::schema::{chain_identifier, checkpoints, pruner_cp_watermark}; +use crate::types::IndexedCheckpoint; + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = chain_identifier)] +pub struct StoredChainIdentifier { + pub checkpoint_digest: Vec, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = checkpoints)] +pub struct StoredCheckpoint { + pub sequence_number: i64, + pub checkpoint_digest: Vec, + pub epoch: i64, + pub network_total_transactions: i64, + pub previous_checkpoint_digest: Option>, + pub end_of_epoch: bool, + pub tx_digests: Vec>>, + pub timestamp_ms: i64, + pub total_gas_cost: i64, + pub computation_cost: i64, + pub storage_cost: i64, + pub storage_rebate: i64, + pub non_refundable_storage_fee: i64, + pub checkpoint_commitments: Vec, + pub validator_signature: Vec, + pub end_of_epoch_data: Option>, + pub min_tx_sequence_number: Option, + pub max_tx_sequence_number: Option, +} + +impl From<&IndexedCheckpoint> for StoredCheckpoint { + fn from(c: &IndexedCheckpoint) -> Self { + Self { + sequence_number: c.sequence_number as i64, + checkpoint_digest: c.checkpoint_digest.into_inner().to_vec(), + epoch: c.epoch as i64, + tx_digests: c + .tx_digests + .iter() + .map(|tx| Some(tx.into_inner().to_vec())) + .collect(), + network_total_transactions: c.network_total_transactions as i64, + previous_checkpoint_digest: c + .previous_checkpoint_digest + .as_ref() + .map(|d| (*d).into_inner().to_vec()), + timestamp_ms: c.timestamp_ms as i64, + total_gas_cost: c.total_gas_cost, + computation_cost: c.computation_cost as i64, + storage_cost: c.storage_cost as i64, + storage_rebate: c.storage_rebate as i64, + non_refundable_storage_fee: c.non_refundable_storage_fee as i64, + checkpoint_commitments: bcs::to_bytes(&c.checkpoint_commitments).unwrap(), + validator_signature: bcs::to_bytes(&c.validator_signature).unwrap(), + end_of_epoch_data: c + .end_of_epoch_data + .as_ref() + .map(|d| bcs::to_bytes(d).unwrap()), + end_of_epoch: c.end_of_epoch_data.is_some(), + min_tx_sequence_number: Some(c.min_tx_sequence_number as i64), + max_tx_sequence_number: Some(c.max_tx_sequence_number as i64), + } + } +} + +impl TryFrom for RpcCheckpoint { + type Error = IndexerError; + fn try_from(checkpoint: StoredCheckpoint) -> Result { + let parsed_digest = CheckpointDigest::try_from(checkpoint.checkpoint_digest.clone()) + .map_err(|e| { + IndexerError::PersistentStorageDataCorruptionError(format!( + "Failed to decode checkpoint digest: {:?} with err: {:?}", + checkpoint.checkpoint_digest, e + )) + })?; + + let parsed_previous_digest: Option = checkpoint + .previous_checkpoint_digest + .map(|digest| { + CheckpointDigest::try_from(digest.clone()).map_err(|e| { + IndexerError::PersistentStorageDataCorruptionError(format!( + "Failed to decode previous checkpoint digest: {:?} with err: {:?}", + digest, e + )) + }) + }) + .transpose()?; + + let transactions: Vec = { + checkpoint + .tx_digests + .into_iter() + .map(|tx_digest| match tx_digest { + None => Err(IndexerError::PersistentStorageDataCorruptionError( + "tx_digests should not contain null elements".to_string(), + )), + Some(tx_digest) => { + 
TransactionDigest::try_from(tx_digest.as_slice()).map_err(|e| { + IndexerError::PersistentStorageDataCorruptionError(format!( + "Failed to decode transaction digest: {:?} with err: {:?}", + tx_digest, e + )) + }) + } + }) + .collect::, IndexerError>>()? + }; + let validator_signature = + bcs::from_bytes(&checkpoint.validator_signature).map_err(|e| { + IndexerError::PersistentStorageDataCorruptionError(format!( + "Failed to decode validator signature: {:?} with err: {:?}", + checkpoint.validator_signature, e + )) + })?; + + let checkpoint_commitments = + bcs::from_bytes(&checkpoint.checkpoint_commitments).map_err(|e| { + IndexerError::PersistentStorageDataCorruptionError(format!( + "Failed to decode checkpoint commitments: {:?} with err: {:?}", + checkpoint.checkpoint_commitments, e + )) + })?; + + let end_of_epoch_data = checkpoint + .end_of_epoch_data + .map(|data| { + bcs::from_bytes(&data).map_err(|e| { + IndexerError::PersistentStorageDataCorruptionError(format!( + "Failed to decode end of epoch data: {:?} with err: {:?}", + data, e + )) + }) + }) + .transpose()?; + + Ok(RpcCheckpoint { + epoch: checkpoint.epoch as u64, + sequence_number: checkpoint.sequence_number as u64, + digest: parsed_digest, + previous_digest: parsed_previous_digest, + end_of_epoch_data, + epoch_rolling_gas_cost_summary: GasCostSummary { + computation_cost: checkpoint.computation_cost as u64, + storage_cost: checkpoint.storage_cost as u64, + storage_rebate: checkpoint.storage_rebate as u64, + non_refundable_storage_fee: checkpoint.non_refundable_storage_fee as u64, + }, + network_total_transactions: checkpoint.network_total_transactions as u64, + timestamp_ms: checkpoint.timestamp_ms as u64, + transactions, + validator_signature, + checkpoint_commitments, + }) + } +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = pruner_cp_watermark)] +pub struct StoredCpTx { + pub checkpoint_sequence_number: i64, + pub min_tx_sequence_number: i64, + pub max_tx_sequence_number: i64, +} + +impl From<&IndexedCheckpoint> for StoredCpTx { + fn from(c: &IndexedCheckpoint) -> Self { + Self { + checkpoint_sequence_number: c.sequence_number as i64, + min_tx_sequence_number: c.min_tx_sequence_number as i64, + max_tx_sequence_number: c.max_tx_sequence_number as i64, + } + } +} diff --git a/crates/sui-mvr-indexer/src/models/display.rs b/crates/sui-mvr-indexer/src/models/display.rs new file mode 100644 index 0000000000000..33a1c7c7cb0cb --- /dev/null +++ b/crates/sui-mvr-indexer/src/models/display.rs @@ -0,0 +1,35 @@ +// Copyright (c) Mysten Labs, Inc. 
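+// Module overview: `StoredDisplay` mirrors one row of the `display` table; rows are derived
+// from on-chain `DisplayVersionUpdatedEvent`s, and the raw event BCS is decoded back on read
+// via `to_display_update_event`.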
+// SPDX-License-Identifier: Apache-2.0 + +use diesel::prelude::*; +use serde::Deserialize; + +use sui_types::display::DisplayVersionUpdatedEvent; + +use crate::schema::display; + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Deserialize)] +#[diesel(table_name = display)] +pub struct StoredDisplay { + pub object_type: String, + pub id: Vec, + pub version: i16, + pub bcs: Vec, +} + +impl StoredDisplay { + pub fn try_from_event(event: &sui_types::event::Event) -> Option { + let (ty, display_event) = DisplayVersionUpdatedEvent::try_from_event(event)?; + + Some(Self { + object_type: ty.to_canonical_string(/* with_prefix */ true), + id: display_event.id.bytes.to_vec(), + version: display_event.version as i16, + bcs: event.contents.clone(), + }) + } + + pub fn to_display_update_event(&self) -> Result { + bcs::from_bytes(&self.bcs) + } +} diff --git a/crates/sui-mvr-indexer/src/models/epoch.rs b/crates/sui-mvr-indexer/src/models/epoch.rs new file mode 100644 index 0000000000000..d8e943f4c245c --- /dev/null +++ b/crates/sui-mvr-indexer/src/models/epoch.rs @@ -0,0 +1,278 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::schema::epochs; +use crate::{errors::IndexerError, schema::feature_flags, schema::protocol_configs}; +use diesel::prelude::{AsChangeset, Identifiable}; +use diesel::{Insertable, Queryable, Selectable}; +use sui_json_rpc_types::{EndOfEpochInfo, EpochInfo}; +use sui_types::event::SystemEpochInfoEvent; +use sui_types::messages_checkpoint::CertifiedCheckpointSummary; +use sui_types::sui_system_state::sui_system_state_summary::SuiSystemStateSummary; + +#[derive(Queryable, Insertable, Debug, Clone, Default)] +#[diesel(table_name = epochs)] +pub struct StoredEpochInfo { + pub epoch: i64, + pub first_checkpoint_id: i64, + pub epoch_start_timestamp: i64, + pub reference_gas_price: i64, + pub protocol_version: i64, + pub total_stake: i64, + pub storage_fund_balance: i64, + pub system_state: Option>, + pub epoch_total_transactions: Option, + pub last_checkpoint_id: Option, + pub epoch_end_timestamp: Option, + pub storage_fund_reinvestment: Option, + pub storage_charge: Option, + pub storage_rebate: Option, + pub stake_subsidy_amount: Option, + pub total_gas_fees: Option, + pub total_stake_rewards_distributed: Option, + pub leftover_storage_fund_inflow: Option, + pub epoch_commitments: Option>, + /// This is the system state summary at the beginning of the epoch, serialized as JSON. + pub system_state_summary_json: Option, + /// First transaction sequence number of this epoch. 
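+    /// This is the same quantity that `EndOfEpochUpdate::new` (below) subtracts from the last
+    /// checkpoint's `network_total_transactions` to derive `epoch_total_transactions`.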
+ pub first_tx_sequence_number: Option, +} + +#[derive(Insertable, Identifiable, AsChangeset, Clone, Debug)] +#[diesel(primary_key(epoch))] +#[diesel(table_name = epochs)] +pub struct StartOfEpochUpdate { + pub epoch: i64, + pub first_checkpoint_id: i64, + pub first_tx_sequence_number: i64, + pub epoch_start_timestamp: i64, + pub reference_gas_price: i64, + pub protocol_version: i64, + pub total_stake: i64, + pub storage_fund_balance: i64, + pub system_state_summary_json: serde_json::Value, +} + +#[derive(Identifiable, AsChangeset, Clone, Debug)] +#[diesel(primary_key(epoch))] +#[diesel(table_name = epochs)] +pub struct EndOfEpochUpdate { + pub epoch: i64, + pub epoch_total_transactions: i64, + pub last_checkpoint_id: i64, + pub epoch_end_timestamp: i64, + pub storage_fund_reinvestment: i64, + pub storage_charge: i64, + pub storage_rebate: i64, + pub stake_subsidy_amount: i64, + pub total_gas_fees: i64, + pub total_stake_rewards_distributed: i64, + pub leftover_storage_fund_inflow: i64, + pub epoch_commitments: Vec, +} + +#[derive(Queryable, Insertable, Debug, Clone, Default)] +#[diesel(table_name = protocol_configs)] +pub struct StoredProtocolConfig { + pub protocol_version: i64, + pub config_name: String, + pub config_value: Option, +} + +#[derive(Queryable, Insertable, Debug, Clone, Default)] +#[diesel(table_name = feature_flags)] +pub struct StoredFeatureFlag { + pub protocol_version: i64, + pub flag_name: String, + pub flag_value: bool, +} + +#[derive(Queryable, Selectable, Clone)] +#[diesel(table_name = epochs)] +pub struct QueryableEpochInfo { + pub epoch: i64, + pub first_checkpoint_id: i64, + pub epoch_start_timestamp: i64, + pub reference_gas_price: i64, + pub protocol_version: i64, + pub total_stake: i64, + pub storage_fund_balance: i64, + pub epoch_total_transactions: Option, + pub first_tx_sequence_number: Option, + pub last_checkpoint_id: Option, + pub epoch_end_timestamp: Option, + pub storage_fund_reinvestment: Option, + pub storage_charge: Option, + pub storage_rebate: Option, + pub stake_subsidy_amount: Option, + pub total_gas_fees: Option, + pub total_stake_rewards_distributed: Option, + pub leftover_storage_fund_inflow: Option, + pub epoch_commitments: Option>, +} + +#[derive(Queryable)] +pub struct QueryableEpochSystemState { + pub epoch: i64, + pub system_state: Vec, +} + +#[derive(Default)] +pub struct EpochStartInfo { + pub first_checkpoint_id: u64, + pub first_tx_sequence_number: u64, + pub total_stake: u64, + pub storage_fund_balance: u64, +} + +impl EpochStartInfo { + pub fn new( + first_checkpoint_id: u64, + first_tx_sequence_number: u64, + epoch_event_opt: Option<&SystemEpochInfoEvent>, + ) -> Self { + Self { + first_checkpoint_id, + first_tx_sequence_number, + total_stake: epoch_event_opt.map(|e| e.total_stake).unwrap_or_default(), + storage_fund_balance: epoch_event_opt + .map(|e| e.storage_fund_balance) + .unwrap_or_default(), + } + } +} + +impl StartOfEpochUpdate { + pub fn new( + new_system_state_summary: SuiSystemStateSummary, + epoch_start_info: EpochStartInfo, + ) -> Self { + Self { + epoch: new_system_state_summary.epoch as i64, + system_state_summary_json: serde_json::to_value(new_system_state_summary.clone()) + .unwrap(), + first_checkpoint_id: epoch_start_info.first_checkpoint_id as i64, + first_tx_sequence_number: epoch_start_info.first_tx_sequence_number as i64, + epoch_start_timestamp: new_system_state_summary.epoch_start_timestamp_ms as i64, + reference_gas_price: new_system_state_summary.reference_gas_price as i64, + protocol_version: 
new_system_state_summary.protocol_version as i64, + total_stake: epoch_start_info.total_stake as i64, + storage_fund_balance: epoch_start_info.storage_fund_balance as i64, + } + } +} + +#[derive(Default)] +pub struct EpochEndInfo { + pub storage_fund_reinvestment: u64, + pub storage_charge: u64, + pub storage_rebate: u64, + pub leftover_storage_fund_inflow: u64, + pub stake_subsidy_amount: u64, + pub total_gas_fees: u64, + pub total_stake_rewards_distributed: u64, +} + +impl EpochEndInfo { + pub fn new(epoch_event_opt: Option<&SystemEpochInfoEvent>) -> Self { + epoch_event_opt.map_or_else(Self::default, |epoch_event| Self { + storage_fund_reinvestment: epoch_event.storage_fund_reinvestment, + storage_charge: epoch_event.storage_charge, + storage_rebate: epoch_event.storage_rebate, + leftover_storage_fund_inflow: epoch_event.leftover_storage_fund_inflow, + stake_subsidy_amount: epoch_event.stake_subsidy_amount, + total_gas_fees: epoch_event.total_gas_fees, + total_stake_rewards_distributed: epoch_event.total_stake_rewards_distributed, + }) + } +} + +impl EndOfEpochUpdate { + pub fn new( + last_checkpoint_summary: &CertifiedCheckpointSummary, + first_tx_sequence_number: u64, + epoch_end_info: EpochEndInfo, + ) -> Self { + Self { + epoch: last_checkpoint_summary.epoch as i64, + epoch_total_transactions: (last_checkpoint_summary.network_total_transactions + - first_tx_sequence_number) as i64, + last_checkpoint_id: *last_checkpoint_summary.sequence_number() as i64, + epoch_end_timestamp: last_checkpoint_summary.timestamp_ms as i64, + storage_fund_reinvestment: epoch_end_info.storage_fund_reinvestment as i64, + storage_charge: epoch_end_info.storage_charge as i64, + storage_rebate: epoch_end_info.storage_rebate as i64, + leftover_storage_fund_inflow: epoch_end_info.leftover_storage_fund_inflow as i64, + stake_subsidy_amount: epoch_end_info.stake_subsidy_amount as i64, + total_gas_fees: epoch_end_info.total_gas_fees as i64, + total_stake_rewards_distributed: epoch_end_info.total_stake_rewards_distributed as i64, + epoch_commitments: bcs::to_bytes( + &last_checkpoint_summary + .end_of_epoch_data + .clone() + .unwrap() + .epoch_commitments, + ) + .unwrap(), + } + } +} + +impl StoredEpochInfo { + pub fn get_json_system_state_summary(&self) -> Result { + let Some(system_state_summary_json) = self.system_state_summary_json.clone() else { + return Err(IndexerError::PersistentStorageDataCorruptionError( + "System state summary is null for the given epoch".into(), + )); + }; + let system_state_summary: SuiSystemStateSummary = + serde_json::from_value(system_state_summary_json).map_err(|_| { + IndexerError::PersistentStorageDataCorruptionError(format!( + "Failed to deserialize `system_state` for epoch {:?}", + self.epoch, + )) + })?; + debug_assert_eq!(system_state_summary.epoch, self.epoch as u64); + Ok(system_state_summary) + } +} + +impl From<&StoredEpochInfo> for Option { + fn from(info: &StoredEpochInfo) -> Option { + Some(EndOfEpochInfo { + reference_gas_price: (info.reference_gas_price as u64), + protocol_version: (info.protocol_version as u64), + last_checkpoint_id: info.last_checkpoint_id.map(|v| v as u64)?, + total_stake: info.total_stake as u64, + storage_fund_balance: info.storage_fund_balance as u64, + epoch_end_timestamp: info.epoch_end_timestamp.map(|v| v as u64)?, + storage_fund_reinvestment: info.storage_fund_reinvestment.map(|v| v as u64)?, + storage_charge: info.storage_charge.map(|v| v as u64)?, + storage_rebate: info.storage_rebate.map(|v| v as u64)?, + stake_subsidy_amount: 
info.stake_subsidy_amount.map(|v| v as u64)?, + total_gas_fees: info.total_gas_fees.map(|v| v as u64)?, + total_stake_rewards_distributed: info + .total_stake_rewards_distributed + .map(|v| v as u64)?, + leftover_storage_fund_inflow: info.leftover_storage_fund_inflow.map(|v| v as u64)?, + }) + } +} + +impl TryFrom for EpochInfo { + type Error = IndexerError; + + fn try_from(value: StoredEpochInfo) -> Result { + let end_of_epoch_info = (&value).into(); + let system_state_summary = value.get_json_system_state_summary()?; + Ok(EpochInfo { + epoch: value.epoch as u64, + validators: system_state_summary.active_validators, + epoch_total_transactions: value.epoch_total_transactions.unwrap_or(0) as u64, + first_checkpoint_id: value.first_checkpoint_id as u64, + epoch_start_timestamp: value.epoch_start_timestamp as u64, + end_of_epoch_info, + reference_gas_price: Some(value.reference_gas_price as u64), + }) + } +} diff --git a/crates/sui-mvr-indexer/src/models/event_indices.rs b/crates/sui-mvr-indexer/src/models/event_indices.rs new file mode 100644 index 0000000000000..08f17cce339d5 --- /dev/null +++ b/crates/sui-mvr-indexer/src/models/event_indices.rs @@ -0,0 +1,145 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + schema::{ + event_emit_module, event_emit_package, event_senders, event_struct_instantiation, + event_struct_module, event_struct_name, event_struct_package, + }, + types::EventIndex, +}; +use diesel::prelude::*; + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = event_emit_package)] +pub struct StoredEventEmitPackage { + pub tx_sequence_number: i64, + pub event_sequence_number: i64, + pub package: Vec, + pub sender: Vec, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = event_emit_module)] +pub struct StoredEventEmitModule { + pub tx_sequence_number: i64, + pub event_sequence_number: i64, + pub package: Vec, + pub module: String, + pub sender: Vec, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = event_senders)] +pub struct StoredEventSenders { + pub tx_sequence_number: i64, + pub event_sequence_number: i64, + pub sender: Vec, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = event_struct_package)] +pub struct StoredEventStructPackage { + pub tx_sequence_number: i64, + pub event_sequence_number: i64, + pub package: Vec, + pub sender: Vec, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = event_struct_module)] +pub struct StoredEventStructModule { + pub tx_sequence_number: i64, + pub event_sequence_number: i64, + pub package: Vec, + pub module: String, + pub sender: Vec, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = event_struct_name)] +pub struct StoredEventStructName { + pub tx_sequence_number: i64, + pub event_sequence_number: i64, + pub package: Vec, + pub module: String, + pub type_name: String, + pub sender: Vec, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = event_struct_instantiation)] +pub struct StoredEventStructInstantiation { + pub tx_sequence_number: i64, + pub event_sequence_number: i64, + pub package: Vec, + pub module: String, + pub type_instantiation: String, + pub sender: Vec, +} + +impl EventIndex { + pub fn split( + self: EventIndex, + ) -> ( + StoredEventEmitPackage, + StoredEventEmitModule, 
+ StoredEventSenders, + StoredEventStructPackage, + StoredEventStructModule, + StoredEventStructName, + StoredEventStructInstantiation, + ) { + let tx_sequence_number = self.tx_sequence_number as i64; + let event_sequence_number = self.event_sequence_number as i64; + ( + StoredEventEmitPackage { + tx_sequence_number, + event_sequence_number, + package: self.emit_package.to_vec(), + sender: self.sender.to_vec(), + }, + StoredEventEmitModule { + tx_sequence_number, + event_sequence_number, + package: self.emit_package.to_vec(), + module: self.emit_module.clone(), + sender: self.sender.to_vec(), + }, + StoredEventSenders { + tx_sequence_number, + event_sequence_number, + sender: self.sender.to_vec(), + }, + StoredEventStructPackage { + tx_sequence_number, + event_sequence_number, + package: self.type_package.to_vec(), + sender: self.sender.to_vec(), + }, + StoredEventStructModule { + tx_sequence_number, + event_sequence_number, + package: self.type_package.to_vec(), + module: self.type_module.clone(), + sender: self.sender.to_vec(), + }, + StoredEventStructName { + tx_sequence_number, + event_sequence_number, + package: self.type_package.to_vec(), + module: self.type_module.clone(), + type_name: self.type_name.clone(), + sender: self.sender.to_vec(), + }, + StoredEventStructInstantiation { + tx_sequence_number, + event_sequence_number, + package: self.type_package.to_vec(), + module: self.type_module.clone(), + type_instantiation: self.type_instantiation.clone(), + sender: self.sender.to_vec(), + }, + ) + } +} diff --git a/crates/sui-mvr-indexer/src/models/events.rs b/crates/sui-mvr-indexer/src/models/events.rs new file mode 100644 index 0000000000000..6b9c5044c3ba5 --- /dev/null +++ b/crates/sui-mvr-indexer/src/models/events.rs @@ -0,0 +1,156 @@ +// Copyright (c) Mysten Labs, Inc. 
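The `EventIndex::split` method in `event_indices.rs` above fans one event's index data out into one row per side table. A minimal sketch of how a committer might batch those rows before a bulk insert, assuming only the types defined above; the helper name `split_event_indices` is made up and not part of this diff:

```rust
// Hypothetical helper: collect per-table rows from a batch of EventIndex values.
// Assumes it lives alongside the types defined in event_indices.rs above.
fn split_event_indices(
    indices: Vec<EventIndex>,
) -> (
    Vec<StoredEventEmitPackage>,
    Vec<StoredEventEmitModule>,
    Vec<StoredEventSenders>,
    Vec<StoredEventStructPackage>,
    Vec<StoredEventStructModule>,
    Vec<StoredEventStructName>,
    Vec<StoredEventStructInstantiation>,
) {
    let mut out = (
        Vec::new(), Vec::new(), Vec::new(), Vec::new(),
        Vec::new(), Vec::new(), Vec::new(),
    );
    for index in indices {
        // `split` returns one stored row per index table, in a fixed order.
        let (emit_pkg, emit_mod, senders, struct_pkg, struct_mod, struct_name, struct_inst) =
            index.split();
        out.0.push(emit_pkg);
        out.1.push(emit_mod);
        out.2.push(senders);
        out.3.push(struct_pkg);
        out.4.push(struct_mod);
        out.5.push(struct_name);
        out.6.push(struct_inst);
    }
    out
}
```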
+// SPDX-License-Identifier: Apache-2.0 + +use std::str::FromStr; +use std::sync::Arc; + +use diesel::prelude::*; +use move_core_types::identifier::Identifier; + +use sui_json_rpc_types::{type_and_fields_from_move_event_data, SuiEvent}; +use sui_package_resolver::{PackageStore, Resolver}; +use sui_types::base_types::{ObjectID, SuiAddress}; +use sui_types::digests::TransactionDigest; +use sui_types::event::EventID; +use sui_types::object::bounded_visitor::BoundedVisitor; +use sui_types::parse_sui_struct_tag; + +use crate::errors::IndexerError; +use crate::schema::events; +use crate::types::IndexedEvent; + +#[derive(Queryable, QueryableByName, Selectable, Insertable, Debug, Clone)] +#[diesel(table_name = events)] +pub struct StoredEvent { + pub tx_sequence_number: i64, + pub event_sequence_number: i64, + pub transaction_digest: Vec, + pub senders: Vec>>, + pub package: Vec, + pub module: String, + pub event_type: String, + pub timestamp_ms: i64, + pub bcs: Vec, + pub sender: Option>, +} + +pub type SendersType = Vec>>; + +impl From for StoredEvent { + fn from(event: IndexedEvent) -> Self { + Self { + tx_sequence_number: event.tx_sequence_number as i64, + event_sequence_number: event.event_sequence_number as i64, + transaction_digest: event.transaction_digest.into_inner().to_vec(), + senders: vec![Some(event.sender.to_vec())], + package: event.package.to_vec(), + module: event.module.clone(), + event_type: event.event_type.clone(), + bcs: event.bcs.clone(), + timestamp_ms: event.timestamp_ms as i64, + sender: Some(event.sender.to_vec()), + } + } +} + +impl StoredEvent { + pub async fn try_into_sui_event( + self, + package_resolver: Arc>, + ) -> Result { + let package_id = ObjectID::from_bytes(self.package.clone()).map_err(|_e| { + IndexerError::PersistentStorageDataCorruptionError(format!( + "Failed to parse event package ID: {:?}", + self.package + )) + })?; + // Note: SuiEvent only has one sender today, so we always use the first one. + let sender = { + self.senders.first().ok_or_else(|| { + IndexerError::PersistentStorageDataCorruptionError( + "Event senders should contain at least one address".to_string(), + ) + })? 
+ }; + let sender = match sender { + Some(ref s) => SuiAddress::from_bytes(s).map_err(|_e| { + IndexerError::PersistentStorageDataCorruptionError(format!( + "Failed to parse event sender address: {:?}", + sender + )) + })?, + None => { + return Err(IndexerError::PersistentStorageDataCorruptionError( + "Event senders element should not be null".to_string(), + )) + } + }; + + let type_ = parse_sui_struct_tag(&self.event_type)?; + let move_type_layout = package_resolver + .type_layout(type_.clone().into()) + .await + .map_err(|e| { + IndexerError::ResolveMoveStructError(format!( + "Failed to convert to sui event with Error: {e}", + )) + })?; + let move_object = BoundedVisitor::deserialize_value(&self.bcs, &move_type_layout) + .map_err(|e| IndexerError::SerdeError(e.to_string()))?; + let (_, parsed_json) = type_and_fields_from_move_event_data(move_object) + .map_err(|e| IndexerError::SerdeError(e.to_string()))?; + let tx_digest = + TransactionDigest::try_from(self.transaction_digest.as_slice()).map_err(|e| { + IndexerError::SerdeError(format!( + "Failed to parse transaction digest: {:?}, error: {}", + self.transaction_digest, e + )) + })?; + Ok(SuiEvent { + id: EventID { + tx_digest, + event_seq: self.event_sequence_number as u64, + }, + package_id, + transaction_module: Identifier::from_str(&self.module)?, + sender, + type_, + bcs: self.bcs, + parsed_json, + timestamp_ms: Some(self.timestamp_ms as u64), + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use move_core_types::{account_address::AccountAddress, language_storage::StructTag}; + use sui_types::event::Event; + + #[test] + fn test_canonical_string_of_event_type() { + let tx_digest = TransactionDigest::default(); + let event = Event { + package_id: ObjectID::random(), + transaction_module: Identifier::new("test").unwrap(), + sender: AccountAddress::random().into(), + type_: StructTag { + address: AccountAddress::TWO, + module: Identifier::new("test").unwrap(), + name: Identifier::new("test").unwrap(), + type_params: vec![], + }, + contents: vec![], + }; + + let indexed_event = IndexedEvent::from_event(1, 1, 1, tx_digest, &event, 100); + + let stored_event = StoredEvent::from(indexed_event); + + assert_eq!( + stored_event.event_type, + "0x0000000000000000000000000000000000000000000000000000000000000002::test::test" + ); + } +} diff --git a/crates/sui-mvr-indexer/src/models/mod.rs b/crates/sui-mvr-indexer/src/models/mod.rs new file mode 100644 index 0000000000000..84e8b308bc0d5 --- /dev/null +++ b/crates/sui-mvr-indexer/src/models/mod.rs @@ -0,0 +1,15 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +pub mod checkpoints; +pub mod display; +pub mod epoch; +pub mod event_indices; +pub mod events; +pub mod obj_indices; +pub mod objects; +pub mod packages; +pub mod raw_checkpoints; +pub mod transactions; +pub mod tx_indices; +pub mod watermarks; diff --git a/crates/sui-mvr-indexer/src/models/obj_indices.rs b/crates/sui-mvr-indexer/src/models/obj_indices.rs new file mode 100644 index 0000000000000..4acc554565522 --- /dev/null +++ b/crates/sui-mvr-indexer/src/models/obj_indices.rs @@ -0,0 +1,16 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use diesel::prelude::*; + +use crate::schema::objects_version; +/// Model types related to tables that support efficient execution of queries on the `objects`, +/// `objects_history` and `objects_snapshot` tables. 
+ +#[derive(Queryable, Insertable, Debug, Identifiable, Clone, QueryableByName, Selectable)] +#[diesel(table_name = objects_version, primary_key(object_id, object_version))] +pub struct StoredObjectVersion { + pub object_id: Vec, + pub object_version: i64, + pub cp_sequence_number: i64, +} diff --git a/crates/sui-mvr-indexer/src/models/objects.rs b/crates/sui-mvr-indexer/src/models/objects.rs new file mode 100644 index 0000000000000..321aaebe2d4ed --- /dev/null +++ b/crates/sui-mvr-indexer/src/models/objects.rs @@ -0,0 +1,579 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use std::collections::HashMap; +use std::sync::Arc; + +use diesel::prelude::*; +use serde::de::DeserializeOwned; + +use move_core_types::annotated_value::MoveTypeLayout; +use sui_json_rpc::coin_api::parse_to_struct_tag; +use sui_json_rpc_types::{Balance, Coin as SuiCoin}; +use sui_package_resolver::{PackageStore, Resolver}; +use sui_types::base_types::{ObjectID, ObjectRef}; +use sui_types::digests::ObjectDigest; +use sui_types::dynamic_field::{DynamicFieldType, Field}; +use sui_types::object::{Object, ObjectRead}; + +use crate::errors::IndexerError; +use crate::schema::{full_objects_history, objects, objects_history, objects_snapshot}; +use crate::types::{owner_to_owner_info, IndexedDeletedObject, IndexedObject, ObjectStatus}; + +#[derive(Queryable)] +pub struct DynamicFieldColumn { + pub object_id: Vec, + pub object_version: i64, + pub object_digest: Vec, + pub df_kind: Option, + pub df_name: Option>, + pub df_object_type: Option, + pub df_object_id: Option>, +} + +#[derive(Queryable)] +pub struct ObjectRefColumn { + pub object_id: Vec, + pub object_version: i64, + pub object_digest: Vec, +} + +// NOTE: please add updating statement like below in pg_indexer_store.rs, +// if new columns are added here: +// objects::epoch.eq(excluded(objects::epoch)) +#[derive(Queryable, Insertable, Debug, Identifiable, Clone, QueryableByName)] +#[diesel(table_name = objects, primary_key(object_id))] +pub struct StoredObject { + pub object_id: Vec, + pub object_version: i64, + pub object_digest: Vec, + pub owner_type: i16, + pub owner_id: Option>, + /// The full type of this object, including package id, module, name and type parameters. + /// This and following three fields will be None if the object is a Package + pub object_type: Option, + pub object_type_package: Option>, + pub object_type_module: Option, + /// Name of the object type, e.g., "Coin", without type parameters. 
+ pub object_type_name: Option, + pub serialized_object: Vec, + pub coin_type: Option, + // TODO deal with overflow + pub coin_balance: Option, + pub df_kind: Option, +} + +impl From for StoredObject { + fn from(o: IndexedObject) -> Self { + let IndexedObject { + checkpoint_sequence_number: _, + object, + df_kind, + } = o; + let (owner_type, owner_id) = owner_to_owner_info(&object.owner); + let coin_type = object + .coin_type_maybe() + .map(|t| t.to_canonical_string(/* with_prefix */ true)); + let coin_balance = if coin_type.is_some() { + Some(object.get_coin_value_unsafe()) + } else { + None + }; + Self { + object_id: object.id().to_vec(), + object_version: object.version().value() as i64, + object_digest: object.digest().into_inner().to_vec(), + owner_type: owner_type as i16, + owner_id: owner_id.map(|id| id.to_vec()), + object_type: object + .type_() + .map(|t| t.to_canonical_string(/* with_prefix */ true)), + object_type_package: object.type_().map(|t| t.address().to_vec()), + object_type_module: object.type_().map(|t| t.module().to_string()), + object_type_name: object.type_().map(|t| t.name().to_string()), + serialized_object: bcs::to_bytes(&object).unwrap(), + coin_type, + coin_balance: coin_balance.map(|b| b as i64), + df_kind: df_kind.map(|k| match k { + DynamicFieldType::DynamicField => 0, + DynamicFieldType::DynamicObject => 1, + }), + } + } +} + +#[derive(Queryable, Insertable, Debug, Identifiable, Clone, QueryableByName)] +#[diesel(table_name = objects, primary_key(object_id))] +pub struct StoredDeletedObject { + pub object_id: Vec, + pub object_version: i64, +} + +impl From for StoredDeletedObject { + fn from(o: IndexedDeletedObject) -> Self { + Self { + object_id: o.object_id.to_vec(), + object_version: o.object_version as i64, + } + } +} + +#[derive(Queryable, Insertable, Selectable, Debug, Identifiable, Clone, QueryableByName)] +#[diesel(table_name = objects_snapshot, primary_key(object_id))] +pub struct StoredObjectSnapshot { + pub object_id: Vec, + pub object_version: i64, + pub object_status: i16, + pub object_digest: Option>, + pub checkpoint_sequence_number: i64, + pub owner_type: Option, + pub owner_id: Option>, + pub object_type: Option, + pub object_type_package: Option>, + pub object_type_module: Option, + pub object_type_name: Option, + pub serialized_object: Option>, + pub coin_type: Option, + pub coin_balance: Option, + pub df_kind: Option, +} + +impl From for StoredObjectSnapshot { + fn from(o: IndexedObject) -> Self { + let IndexedObject { + checkpoint_sequence_number, + object, + df_kind, + } = o; + let (owner_type, owner_id) = owner_to_owner_info(&object.owner); + let coin_type = object + .coin_type_maybe() + .map(|t| t.to_canonical_string(/* with_prefix */ true)); + let coin_balance = if coin_type.is_some() { + Some(object.get_coin_value_unsafe()) + } else { + None + }; + + Self { + object_id: object.id().to_vec(), + object_version: object.version().value() as i64, + object_status: ObjectStatus::Active as i16, + object_digest: Some(object.digest().into_inner().to_vec()), + checkpoint_sequence_number: checkpoint_sequence_number as i64, + owner_type: Some(owner_type as i16), + owner_id: owner_id.map(|id| id.to_vec()), + object_type: object + .type_() + .map(|t| t.to_canonical_string(/* with_prefix */ true)), + object_type_package: object.type_().map(|t| t.address().to_vec()), + object_type_module: object.type_().map(|t| t.module().to_string()), + object_type_name: object.type_().map(|t| t.name().to_string()), + serialized_object: 
Some(bcs::to_bytes(&object).unwrap()), + coin_type, + coin_balance: coin_balance.map(|b| b as i64), + df_kind: df_kind.map(|k| match k { + DynamicFieldType::DynamicField => 0, + DynamicFieldType::DynamicObject => 1, + }), + } + } +} + +impl From for StoredObjectSnapshot { + fn from(o: IndexedDeletedObject) -> Self { + Self { + object_id: o.object_id.to_vec(), + object_version: o.object_version as i64, + object_status: ObjectStatus::WrappedOrDeleted as i16, + object_digest: None, + checkpoint_sequence_number: o.checkpoint_sequence_number as i64, + owner_type: None, + owner_id: None, + object_type: None, + object_type_package: None, + object_type_module: None, + object_type_name: None, + serialized_object: None, + coin_type: None, + coin_balance: None, + df_kind: None, + } + } +} + +#[derive(Queryable, Insertable, Selectable, Debug, Identifiable, Clone, QueryableByName)] +#[diesel(table_name = objects_history, primary_key(object_id, object_version, checkpoint_sequence_number))] +pub struct StoredHistoryObject { + pub object_id: Vec, + pub object_version: i64, + pub object_status: i16, + pub object_digest: Option>, + pub checkpoint_sequence_number: i64, + pub owner_type: Option, + pub owner_id: Option>, + pub object_type: Option, + pub object_type_package: Option>, + pub object_type_module: Option, + pub object_type_name: Option, + pub serialized_object: Option>, + pub coin_type: Option, + pub coin_balance: Option, + pub df_kind: Option, +} + +impl From for StoredHistoryObject { + fn from(o: IndexedObject) -> Self { + let IndexedObject { + checkpoint_sequence_number, + object, + df_kind, + } = o; + let (owner_type, owner_id) = owner_to_owner_info(&object.owner); + let coin_type = object + .coin_type_maybe() + .map(|t| t.to_canonical_string(/* with_prefix */ true)); + let coin_balance = if coin_type.is_some() { + Some(object.get_coin_value_unsafe()) + } else { + None + }; + + Self { + object_id: object.id().to_vec(), + object_version: object.version().value() as i64, + object_status: ObjectStatus::Active as i16, + object_digest: Some(object.digest().into_inner().to_vec()), + checkpoint_sequence_number: checkpoint_sequence_number as i64, + owner_type: Some(owner_type as i16), + owner_id: owner_id.map(|id| id.to_vec()), + object_type: object + .type_() + .map(|t| t.to_canonical_string(/* with_prefix */ true)), + object_type_package: object.type_().map(|t| t.address().to_vec()), + object_type_module: object.type_().map(|t| t.module().to_string()), + object_type_name: object.type_().map(|t| t.name().to_string()), + serialized_object: Some(bcs::to_bytes(&object).unwrap()), + coin_type, + coin_balance: coin_balance.map(|b| b as i64), + df_kind: df_kind.map(|k| match k { + DynamicFieldType::DynamicField => 0, + DynamicFieldType::DynamicObject => 1, + }), + } + } +} + +impl From for StoredHistoryObject { + fn from(o: IndexedDeletedObject) -> Self { + Self { + object_id: o.object_id.to_vec(), + object_version: o.object_version as i64, + object_status: ObjectStatus::WrappedOrDeleted as i16, + object_digest: None, + checkpoint_sequence_number: o.checkpoint_sequence_number as i64, + owner_type: None, + owner_id: None, + object_type: None, + object_type_package: None, + object_type_module: None, + object_type_name: None, + serialized_object: None, + coin_type: None, + coin_balance: None, + df_kind: None, + } + } +} + +impl TryFrom for Object { + type Error = IndexerError; + + fn try_from(o: StoredObject) -> Result { + bcs::from_bytes(&o.serialized_object).map_err(|e| { + IndexerError::SerdeError(format!( + 
"Failed to deserialize object: {:?}, error: {}", + o.object_id, e + )) + }) + } +} + +impl StoredObject { + pub async fn try_into_object_read( + self, + package_resolver: Arc>, + ) -> Result { + let oref = self.get_object_ref()?; + let object: sui_types::object::Object = self.try_into()?; + let Some(move_object) = object.data.try_as_move().cloned() else { + return Err(IndexerError::PostgresReadError(format!( + "Object {:?} is not a Move object", + oref, + ))); + }; + + let move_type_layout = package_resolver + .type_layout(move_object.type_().clone().into()) + .await + .map_err(|e| { + IndexerError::ResolveMoveStructError(format!( + "Failed to convert into object read for obj {}:{}, type: {}. Error: {e}", + object.id(), + object.version(), + move_object.type_(), + )) + })?; + let move_struct_layout = match move_type_layout { + MoveTypeLayout::Struct(s) => Ok(s), + _ => Err(IndexerError::ResolveMoveStructError( + "MoveTypeLayout is not Struct".to_string(), + )), + }?; + + Ok(ObjectRead::Exists(oref, object, Some(*move_struct_layout))) + } + + pub fn get_object_ref(&self) -> Result { + let object_id = ObjectID::from_bytes(self.object_id.clone()).map_err(|_| { + IndexerError::SerdeError(format!("Can't convert {:?} to object_id", self.object_id)) + })?; + let object_digest = + ObjectDigest::try_from(self.object_digest.as_slice()).map_err(|_| { + IndexerError::SerdeError(format!( + "Can't convert {:?} to object_digest", + self.object_digest + )) + })?; + Ok(( + object_id, + (self.object_version as u64).into(), + object_digest, + )) + } + + pub fn to_dynamic_field(&self) -> Option> + where + K: DeserializeOwned, + V: DeserializeOwned, + { + let object: Object = bcs::from_bytes(&self.serialized_object).ok()?; + + let object = object.data.try_as_move()?; + let ty = object.type_(); + + if !ty.is_dynamic_field() { + return None; + } + + bcs::from_bytes(object.contents()).ok() + } +} + +impl TryFrom for SuiCoin { + type Error = IndexerError; + + fn try_from(o: StoredObject) -> Result { + let object: Object = o.clone().try_into()?; + let (coin_object_id, version, digest) = o.get_object_ref()?; + let coin_type_canonical = + o.coin_type + .ok_or(IndexerError::PersistentStorageDataCorruptionError(format!( + "Object {} is supposed to be a coin but has an empty coin_type column", + coin_object_id, + )))?; + let coin_type = parse_to_struct_tag(coin_type_canonical.as_str()) + .map_err(|_| { + IndexerError::PersistentStorageDataCorruptionError(format!( + "The type of object {} cannot be parsed as a struct tag", + coin_object_id, + )) + })? + .to_string(); + let balance = o + .coin_balance + .ok_or(IndexerError::PersistentStorageDataCorruptionError(format!( + "Object {} is supposed to be a coin but has an empty coin_balance column", + coin_object_id, + )))?; + Ok(SuiCoin { + coin_type, + coin_object_id, + version, + digest, + balance: balance as u64, + previous_transaction: object.previous_transaction, + }) + } +} + +#[derive(QueryableByName)] +pub struct CoinBalance { + #[diesel(sql_type = diesel::sql_types::Text)] + pub coin_type: String, + #[diesel(sql_type = diesel::sql_types::BigInt)] + pub coin_num: i64, + #[diesel(sql_type = diesel::sql_types::BigInt)] + pub coin_balance: i64, +} + +impl TryFrom for Balance { + type Error = IndexerError; + + fn try_from(c: CoinBalance) -> Result { + let coin_type = parse_to_struct_tag(c.coin_type.as_str()) + .map_err(|_| { + IndexerError::PersistentStorageDataCorruptionError( + "The type of coin balance cannot be parsed as a struct tag".to_string(), + ) + })? 
+ .to_string(); + Ok(Self { + coin_type, + coin_object_count: c.coin_num as usize, + // TODO: deal with overflow + total_balance: c.coin_balance as u128, + locked_balance: HashMap::default(), + }) + } +} + +#[derive(Queryable, Insertable, Debug, Identifiable, Clone, QueryableByName, Selectable)] +#[diesel(table_name = full_objects_history, primary_key(object_id, object_version))] +pub struct StoredFullHistoryObject { + pub object_id: Vec, + pub object_version: i64, + pub serialized_object: Option>, +} + +impl From for StoredFullHistoryObject { + fn from(o: IndexedObject) -> Self { + let object = o.object; + Self { + object_id: object.id().to_vec(), + object_version: object.version().value() as i64, + serialized_object: Some(bcs::to_bytes(&object).unwrap()), + } + } +} + +impl From for StoredFullHistoryObject { + fn from(o: IndexedDeletedObject) -> Self { + Self { + object_id: o.object_id.to_vec(), + object_version: o.object_version as i64, + serialized_object: None, + } + } +} + +#[cfg(test)] +mod tests { + use move_core_types::{account_address::AccountAddress, language_storage::StructTag}; + use sui_types::{ + coin::Coin, + digests::TransactionDigest, + gas_coin::{GasCoin, GAS}, + object::{Data, MoveObject, ObjectInner, Owner}, + Identifier, TypeTag, + }; + + use super::*; + + #[test] + fn test_canonical_string_of_object_type_for_coin() { + let test_obj = Object::new_gas_for_testing(); + let indexed_obj = IndexedObject::from_object(1, test_obj, None); + + let stored_obj = StoredObject::from(indexed_obj); + + match stored_obj.object_type { + Some(t) => { + assert_eq!(t, "0x0000000000000000000000000000000000000000000000000000000000000002::coin::Coin<0x0000000000000000000000000000000000000000000000000000000000000002::sui::SUI>"); + } + None => { + panic!("object_type should not be none"); + } + } + } + + #[test] + fn test_convert_stored_obj_to_sui_coin() { + let test_obj = Object::new_gas_for_testing(); + let indexed_obj = IndexedObject::from_object(1, test_obj, None); + + let stored_obj = StoredObject::from(indexed_obj); + + let sui_coin = SuiCoin::try_from(stored_obj).unwrap(); + assert_eq!(sui_coin.coin_type, "0x2::sui::SUI"); + } + + #[test] + fn test_output_format_coin_balance() { + let test_obj = Object::new_gas_for_testing(); + let indexed_obj = IndexedObject::from_object(1, test_obj, None); + + let stored_obj = StoredObject::from(indexed_obj); + let test_balance = CoinBalance { + coin_type: stored_obj.coin_type.unwrap(), + coin_num: 1, + coin_balance: 100, + }; + let balance = Balance::try_from(test_balance).unwrap(); + assert_eq!(balance.coin_type, "0x2::sui::SUI"); + } + + #[test] + fn test_vec_of_coin_sui_conversion() { + // 0xe7::vec_coin::VecCoin>> + let vec_coins_type = TypeTag::Vector(Box::new( + Coin::type_(TypeTag::Struct(Box::new(GAS::type_()))).into(), + )); + let object_type = StructTag { + address: AccountAddress::from_hex_literal("0xe7").unwrap(), + module: Identifier::new("vec_coin").unwrap(), + name: Identifier::new("VecCoin").unwrap(), + type_params: vec![vec_coins_type], + }; + + let id = ObjectID::ZERO; + let gas = 10; + + let contents = bcs::to_bytes(&vec![GasCoin::new(id, gas)]).unwrap(); + let data = Data::Move( + unsafe { + MoveObject::new_from_execution_with_limit( + object_type.into(), + true, + 1.into(), + contents, + 256, + ) + } + .unwrap(), + ); + + let owner = AccountAddress::from_hex_literal("0x1").unwrap(); + + let object = ObjectInner { + owner: Owner::AddressOwner(owner.into()), + data, + previous_transaction: TransactionDigest::genesis_marker(), + 
storage_rebate: 0, + } + .into(); + + let indexed_obj = IndexedObject::from_object(1, object, None); + + let stored_obj = StoredObject::from(indexed_obj); + + match stored_obj.object_type { + Some(t) => { + assert_eq!(t, "0x00000000000000000000000000000000000000000000000000000000000000e7::vec_coin::VecCoin>>"); + } + None => { + panic!("object_type should not be none"); + } + } + } +} diff --git a/crates/sui-mvr-indexer/src/models/packages.rs b/crates/sui-mvr-indexer/src/models/packages.rs new file mode 100644 index 0000000000000..97c8e8fc5b459 --- /dev/null +++ b/crates/sui-mvr-indexer/src/models/packages.rs @@ -0,0 +1,29 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::schema::packages; +use crate::types::IndexedPackage; + +use diesel::prelude::*; + +#[derive(Queryable, Insertable, Selectable, Clone, Debug, Identifiable)] +#[diesel(table_name = packages, primary_key(package_id))] +pub struct StoredPackage { + pub package_id: Vec, + pub original_id: Vec, + pub package_version: i64, + pub move_package: Vec, + pub checkpoint_sequence_number: i64, +} + +impl From for StoredPackage { + fn from(p: IndexedPackage) -> Self { + Self { + package_id: p.package_id.to_vec(), + original_id: p.move_package.original_package_id().to_vec(), + package_version: p.move_package.version().value() as i64, + move_package: bcs::to_bytes(&p.move_package).unwrap(), + checkpoint_sequence_number: p.checkpoint_sequence_number as i64, + } + } +} diff --git a/crates/sui-mvr-indexer/src/models/raw_checkpoints.rs b/crates/sui-mvr-indexer/src/models/raw_checkpoints.rs new file mode 100644 index 0000000000000..98fafba928705 --- /dev/null +++ b/crates/sui-mvr-indexer/src/models/raw_checkpoints.rs @@ -0,0 +1,26 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::schema::raw_checkpoints; +use crate::types::IndexedCheckpoint; +use diesel::prelude::*; + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = raw_checkpoints)] +pub struct StoredRawCheckpoint { + pub sequence_number: i64, + /// BCS serialized CertifiedCheckpointSummary + pub certified_checkpoint: Vec, + /// BCS serialized CheckpointContents + pub checkpoint_contents: Vec, +} + +impl From<&IndexedCheckpoint> for StoredRawCheckpoint { + fn from(c: &IndexedCheckpoint) -> Self { + Self { + sequence_number: c.sequence_number as i64, + certified_checkpoint: bcs::to_bytes(c.certified_checkpoint.as_ref().unwrap()).unwrap(), + checkpoint_contents: bcs::to_bytes(c.checkpoint_contents.as_ref().unwrap()).unwrap(), + } + } +} diff --git a/crates/sui-mvr-indexer/src/models/transactions.rs b/crates/sui-mvr-indexer/src/models/transactions.rs new file mode 100644 index 0000000000000..1856025c5be4d --- /dev/null +++ b/crates/sui-mvr-indexer/src/models/transactions.rs @@ -0,0 +1,353 @@ +// Copyright (c) Mysten Labs, Inc. 
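`StoredRawCheckpoint` in `raw_checkpoints.rs` above persists both halves of a checkpoint as opaque BCS blobs. A hedged sketch of reading them back into Sui types, assuming the row was written by the `From<&IndexedCheckpoint>` impl above; the helper `raw_checkpoint_to_sui_types` is illustrative only:

```rust
use sui_types::messages_checkpoint::{CertifiedCheckpointSummary, CheckpointContents};

// Illustrative helper: recover the original checkpoint summary and contents
// from a stored row; both columns hold BCS-encoded bytes.
fn raw_checkpoint_to_sui_types(
    raw: &StoredRawCheckpoint,
) -> Result<(CertifiedCheckpointSummary, CheckpointContents), bcs::Error> {
    let summary: CertifiedCheckpointSummary = bcs::from_bytes(&raw.certified_checkpoint)?;
    let contents: CheckpointContents = bcs::from_bytes(&raw.checkpoint_contents)?;
    Ok((summary, contents))
}
```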
+// SPDX-License-Identifier: Apache-2.0 + +use std::sync::Arc; + +use diesel::prelude::*; + +use move_core_types::annotated_value::{MoveDatatypeLayout, MoveTypeLayout}; +use move_core_types::language_storage::TypeTag; +use sui_json_rpc_types::{ + BalanceChange, ObjectChange, SuiEvent, SuiTransactionBlock, SuiTransactionBlockEffects, + SuiTransactionBlockEvents, SuiTransactionBlockResponse, SuiTransactionBlockResponseOptions, +}; +use sui_package_resolver::{PackageStore, Resolver}; +use sui_types::digests::TransactionDigest; +use sui_types::effects::TransactionEffects; +use sui_types::effects::TransactionEvents; +use sui_types::event::Event; +use sui_types::transaction::SenderSignedData; + +use crate::errors::IndexerError; +use crate::schema::transactions; +use crate::types::IndexedObjectChange; +use crate::types::IndexedTransaction; +use crate::types::IndexerResult; + +#[derive(Clone, Debug, Queryable, Insertable, QueryableByName, Selectable)] +#[diesel(table_name = transactions)] +pub struct StoredTransaction { + pub tx_sequence_number: i64, + pub transaction_digest: Vec, + pub raw_transaction: Vec, + pub raw_effects: Vec, + pub checkpoint_sequence_number: i64, + pub timestamp_ms: i64, + pub object_changes: Vec>>, + pub balance_changes: Vec>>, + pub events: Vec>>, + pub transaction_kind: i16, + pub success_command_count: i16, +} + +pub type StoredTransactionEvents = Vec>>; + +#[derive(Debug, Queryable)] +pub struct TxSeq { + pub seq: i64, +} + +impl Default for TxSeq { + fn default() -> Self { + Self { seq: -1 } + } +} + +#[derive(Clone, Debug, Queryable)] +pub struct StoredTransactionTimestamp { + pub tx_sequence_number: i64, + pub timestamp_ms: i64, +} + +#[derive(Clone, Debug, Queryable)] +pub struct StoredTransactionCheckpoint { + pub tx_sequence_number: i64, + pub checkpoint_sequence_number: i64, +} + +#[derive(Clone, Debug, Queryable)] +pub struct StoredTransactionSuccessCommandCount { + pub tx_sequence_number: i64, + pub checkpoint_sequence_number: i64, + pub success_command_count: i16, + pub timestamp_ms: i64, +} + +impl From<&IndexedTransaction> for StoredTransaction { + fn from(tx: &IndexedTransaction) -> Self { + StoredTransaction { + tx_sequence_number: tx.tx_sequence_number as i64, + transaction_digest: tx.tx_digest.into_inner().to_vec(), + raw_transaction: bcs::to_bytes(&tx.sender_signed_data).unwrap(), + raw_effects: bcs::to_bytes(&tx.effects).unwrap(), + checkpoint_sequence_number: tx.checkpoint_sequence_number as i64, + object_changes: tx + .object_changes + .iter() + .map(|oc| Some(bcs::to_bytes(&oc).unwrap())) + .collect(), + balance_changes: tx + .balance_change + .iter() + .map(|bc| Some(bcs::to_bytes(&bc).unwrap())) + .collect(), + events: tx + .events + .iter() + .map(|e| Some(bcs::to_bytes(&e).unwrap())) + .collect(), + timestamp_ms: tx.timestamp_ms as i64, + transaction_kind: tx.transaction_kind.clone() as i16, + success_command_count: tx.successful_tx_num as i16, + } + } +} + +impl StoredTransaction { + pub fn get_balance_len(&self) -> usize { + self.balance_changes.len() + } + + pub fn get_balance_at_idx(&self, idx: usize) -> Option> { + self.balance_changes.get(idx).cloned().flatten() + } + + pub fn get_object_len(&self) -> usize { + self.object_changes.len() + } + + pub fn get_object_at_idx(&self, idx: usize) -> Option> { + self.object_changes.get(idx).cloned().flatten() + } + + pub fn get_event_len(&self) -> usize { + self.events.len() + } + + pub fn get_event_at_idx(&self, idx: usize) -> Option> { + self.events.get(idx).cloned().flatten() + } + + pub async 
fn try_into_sui_transaction_block_response( + self, + options: SuiTransactionBlockResponseOptions, + package_resolver: Arc>, + ) -> IndexerResult { + let options = options.clone(); + let tx_digest = + TransactionDigest::try_from(self.transaction_digest.as_slice()).map_err(|e| { + IndexerError::PersistentStorageDataCorruptionError(format!( + "Can't convert {:?} as tx_digest. Error: {e}", + self.transaction_digest + )) + })?; + + let transaction = if options.show_input { + let sender_signed_data = self.try_into_sender_signed_data()?; + let tx_block = SuiTransactionBlock::try_from_with_package_resolver( + sender_signed_data, + package_resolver.clone(), + ) + .await?; + Some(tx_block) + } else { + None + }; + + let effects = if options.show_effects { + let effects = self.try_into_sui_transaction_effects()?; + Some(effects) + } else { + None + }; + + let raw_transaction = if options.show_raw_input { + self.raw_transaction + } else { + Vec::new() + }; + + let events = if options.show_events { + let events = { + self + .events + .into_iter() + .map(|event| match event { + Some(event) => { + let event: Event = bcs::from_bytes(&event).map_err(|e| { + IndexerError::PersistentStorageDataCorruptionError(format!( + "Can't convert event bytes into Event. tx_digest={:?} Error: {e}", + tx_digest + )) + })?; + Ok(event) + } + None => Err(IndexerError::PersistentStorageDataCorruptionError(format!( + "Event should not be null, tx_digest={:?}", + tx_digest + ))), + }) + .collect::, IndexerError>>()? + }; + let timestamp = self.timestamp_ms as u64; + let tx_events = TransactionEvents { data: events }; + + tx_events_to_sui_tx_events(tx_events, package_resolver, tx_digest, timestamp).await? + } else { + None + }; + + let object_changes = if options.show_object_changes { + let object_changes = { + self.object_changes.into_iter().map(|object_change| { + match object_change { + Some(object_change) => { + let object_change: IndexedObjectChange = bcs::from_bytes(&object_change) + .map_err(|e| IndexerError::PersistentStorageDataCorruptionError( + format!("Can't convert object_change bytes into IndexedObjectChange. tx_digest={:?} Error: {e}", tx_digest) + ))?; + Ok(ObjectChange::from(object_change)) + } + None => Err(IndexerError::PersistentStorageDataCorruptionError(format!("object_change should not be null, tx_digest={:?}", tx_digest))), + } + }).collect::, IndexerError>>()? + }; + Some(object_changes) + } else { + None + }; + + let balance_changes = if options.show_balance_changes { + let balance_changes = { + self.balance_changes.into_iter().map(|balance_change| { + match balance_change { + Some(balance_change) => { + let balance_change: BalanceChange = bcs::from_bytes(&balance_change) + .map_err(|e| IndexerError::PersistentStorageDataCorruptionError( + format!("Can't convert balance_change bytes into BalanceChange. tx_digest={:?} Error: {e}", tx_digest) + ))?; + Ok(balance_change) + } + None => Err(IndexerError::PersistentStorageDataCorruptionError(format!("object_change should not be null, tx_digest={:?}", tx_digest))), + } + }).collect::, IndexerError>>()? 
+ }; + Some(balance_changes) + } else { + None + }; + + Ok(SuiTransactionBlockResponse { + digest: tx_digest, + transaction, + raw_transaction, + effects, + events, + object_changes, + balance_changes, + timestamp_ms: Some(self.timestamp_ms as u64), + checkpoint: Some(self.checkpoint_sequence_number as u64), + confirmed_local_execution: None, + errors: vec![], + raw_effects: self.raw_effects, + }) + } + fn try_into_sender_signed_data(&self) -> IndexerResult { + let sender_signed_data: SenderSignedData = + bcs::from_bytes(&self.raw_transaction).map_err(|e| { + IndexerError::PersistentStorageDataCorruptionError(format!( + "Can't convert raw_transaction of {} into SenderSignedData. Error: {e}", + self.tx_sequence_number + )) + })?; + Ok(sender_signed_data) + } + + pub fn try_into_sui_transaction_effects(&self) -> IndexerResult { + let effects: TransactionEffects = bcs::from_bytes(&self.raw_effects).map_err(|e| { + IndexerError::PersistentStorageDataCorruptionError(format!( + "Can't convert raw_effects of {} into TransactionEffects. Error: {e}", + self.tx_sequence_number + )) + })?; + let effects = SuiTransactionBlockEffects::try_from(effects)?; + Ok(effects) + } +} + +pub fn stored_events_to_events( + stored_events: StoredTransactionEvents, +) -> Result, IndexerError> { + stored_events + .into_iter() + .map(|event| match event { + Some(event) => { + let event: Event = bcs::from_bytes(&event).map_err(|e| { + IndexerError::PersistentStorageDataCorruptionError(format!( + "Can't convert event bytes into Event. Error: {e}", + )) + })?; + Ok(event) + } + None => Err(IndexerError::PersistentStorageDataCorruptionError( + "Event should not be null".to_string(), + )), + }) + .collect::, IndexerError>>() +} + +pub async fn tx_events_to_sui_tx_events( + tx_events: TransactionEvents, + package_resolver: Arc>, + tx_digest: TransactionDigest, + timestamp: u64, +) -> Result, IndexerError> { + let mut sui_event_futures = vec![]; + let tx_events_data_len = tx_events.data.len(); + for tx_event in tx_events.data.clone() { + let package_resolver_clone = package_resolver.clone(); + sui_event_futures.push(tokio::task::spawn(async move { + let resolver = package_resolver_clone; + resolver + .type_layout(TypeTag::Struct(Box::new(tx_event.type_.clone()))) + .await + })); + } + let event_move_type_layouts = futures::future::join_all(sui_event_futures) + .await + .into_iter() + .collect::, _>>()? 
+ .into_iter() + .collect::, _>>() + .map_err(|e| { + IndexerError::ResolveMoveStructError(format!( + "Failed to convert to sui event with Error: {e}", + )) + })?; + let event_move_datatype_layouts = event_move_type_layouts + .into_iter() + .filter_map(|move_type_layout| match move_type_layout { + MoveTypeLayout::Struct(s) => Some(MoveDatatypeLayout::Struct(s)), + MoveTypeLayout::Enum(e) => Some(MoveDatatypeLayout::Enum(e)), + _ => None, + }) + .collect::>(); + assert!(tx_events_data_len == event_move_datatype_layouts.len()); + let sui_events = tx_events + .data + .into_iter() + .enumerate() + .zip(event_move_datatype_layouts) + .map(|((seq, tx_event), move_datatype_layout)| { + SuiEvent::try_from( + tx_event, + tx_digest, + seq as u64, + Some(timestamp), + move_datatype_layout, + ) + }) + .collect::, _>>()?; + let sui_tx_events = SuiTransactionBlockEvents { data: sui_events }; + Ok(Some(sui_tx_events)) +} diff --git a/crates/sui-mvr-indexer/src/models/tx_indices.rs b/crates/sui-mvr-indexer/src/models/tx_indices.rs new file mode 100644 index 0000000000000..a00b715eedf98 --- /dev/null +++ b/crates/sui-mvr-indexer/src/models/tx_indices.rs @@ -0,0 +1,225 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + schema::{ + tx_affected_addresses, tx_affected_objects, tx_calls_fun, tx_calls_mod, tx_calls_pkg, + tx_changed_objects, tx_digests, tx_input_objects, tx_kinds, + }, + types::TxIndex, +}; +use diesel::prelude::*; +use itertools::Itertools; + +#[derive(QueryableByName)] +pub struct TxSequenceNumber { + #[diesel(sql_type = diesel::sql_types::BigInt)] + pub tx_sequence_number: i64, +} + +#[derive(QueryableByName)] +pub struct TxDigest { + #[diesel(sql_type = diesel::sql_types::Binary)] + pub transaction_digest: Vec, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = tx_affected_addresses)] +pub struct StoredTxAffectedAddresses { + pub tx_sequence_number: i64, + pub affected: Vec, + pub sender: Vec, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = tx_affected_objects)] +pub struct StoredTxAffectedObjects { + pub tx_sequence_number: i64, + pub affected: Vec, + pub sender: Vec, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = tx_input_objects)] +pub struct StoredTxInputObject { + pub tx_sequence_number: i64, + pub object_id: Vec, + pub sender: Vec, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = tx_changed_objects)] +pub struct StoredTxChangedObject { + pub tx_sequence_number: i64, + pub object_id: Vec, + pub sender: Vec, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = tx_calls_pkg)] +pub struct StoredTxPkg { + pub tx_sequence_number: i64, + pub package: Vec, + pub sender: Vec, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = tx_calls_mod)] +pub struct StoredTxMod { + pub tx_sequence_number: i64, + pub package: Vec, + pub module: String, + pub sender: Vec, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = tx_calls_fun)] +pub struct StoredTxFun { + pub tx_sequence_number: i64, + pub package: Vec, + pub module: String, + pub func: String, + pub sender: Vec, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = tx_digests)] +pub struct StoredTxDigest { + pub tx_digest: Vec, + pub 
tx_sequence_number: i64, +} + +#[derive(Queryable, Insertable, Selectable, Debug, Clone, Default)] +#[diesel(table_name = tx_kinds)] +pub struct StoredTxKind { + pub tx_kind: i16, + pub tx_sequence_number: i64, +} + +#[allow(clippy::type_complexity)] +impl TxIndex { + pub fn split( + self: TxIndex, + ) -> ( + Vec, + Vec, + Vec, + Vec, + Vec, + Vec, + Vec, + Vec, + Vec, + ) { + let tx_sequence_number = self.tx_sequence_number as i64; + + let tx_affected_addresses = self + .recipients + .iter() + .chain(self.payers.iter()) + .chain(std::iter::once(&self.sender)) + .unique() + .map(|a| StoredTxAffectedAddresses { + tx_sequence_number, + affected: a.to_vec(), + sender: self.sender.to_vec(), + }) + .collect(); + + let tx_affected_objects = self + .affected_objects + .iter() + .map(|o| StoredTxAffectedObjects { + tx_sequence_number, + affected: o.to_vec(), + sender: self.sender.to_vec(), + }) + .collect(); + + let tx_input_objects = self + .input_objects + .iter() + .map(|o| StoredTxInputObject { + tx_sequence_number, + object_id: bcs::to_bytes(&o).unwrap(), + sender: self.sender.to_vec(), + }) + .collect(); + + let tx_changed_objects = self + .changed_objects + .iter() + .map(|o| StoredTxChangedObject { + tx_sequence_number, + object_id: bcs::to_bytes(&o).unwrap(), + sender: self.sender.to_vec(), + }) + .collect(); + + let mut packages = Vec::new(); + let mut packages_modules = Vec::new(); + let mut packages_modules_funcs = Vec::new(); + + for (pkg, pkg_mod, pkg_mod_func) in self + .move_calls + .iter() + .map(|(p, m, f)| (*p, (*p, m.clone()), (*p, m.clone(), f.clone()))) + { + packages.push(pkg); + packages_modules.push(pkg_mod); + packages_modules_funcs.push(pkg_mod_func); + } + + let tx_pkgs = packages + .iter() + .map(|p| StoredTxPkg { + tx_sequence_number, + package: p.to_vec(), + sender: self.sender.to_vec(), + }) + .collect(); + + let tx_mods = packages_modules + .iter() + .map(|(p, m)| StoredTxMod { + tx_sequence_number, + package: p.to_vec(), + module: m.to_string(), + sender: self.sender.to_vec(), + }) + .collect(); + + let tx_calls = packages_modules_funcs + .iter() + .map(|(p, m, f)| StoredTxFun { + tx_sequence_number, + package: p.to_vec(), + module: m.to_string(), + func: f.to_string(), + sender: self.sender.to_vec(), + }) + .collect(); + + let stored_tx_digest = StoredTxDigest { + tx_digest: self.transaction_digest.into_inner().to_vec(), + tx_sequence_number, + }; + + let tx_kind = StoredTxKind { + tx_kind: self.tx_kind as i16, + tx_sequence_number, + }; + + ( + tx_affected_addresses, + tx_affected_objects, + tx_input_objects, + tx_changed_objects, + tx_pkgs, + tx_mods, + tx_calls, + vec![stored_tx_digest], + vec![tx_kind], + ) + } +} diff --git a/crates/sui-mvr-indexer/src/models/watermarks.rs b/crates/sui-mvr-indexer/src/models/watermarks.rs new file mode 100644 index 0000000000000..1ff3d3cfe52ac --- /dev/null +++ b/crates/sui-mvr-indexer/src/models/watermarks.rs @@ -0,0 +1,76 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use std::str::FromStr; + +use diesel::prelude::*; + +use crate::{ + handlers::{pruner::PrunableTable, CommitterWatermark}, + schema::watermarks::{self}, +}; + +/// Represents a row in the `watermarks` table. +#[derive(Queryable, Insertable, Default, QueryableByName, Clone)] +#[diesel(table_name = watermarks, primary_key(entity))] +pub struct StoredWatermark { + /// The table governed by this watermark, i.e `epochs`, `checkpoints`, `transactions`. 
+ pub pipeline: String, + /// Inclusive upper epoch bound for this entity's data. Committer updates this field. Pruner uses + /// this to determine if pruning is necessary based on the retention policy. + pub epoch_hi_inclusive: i64, + /// Inclusive upper checkpoint bound for this entity's data. Committer updates this field. All + /// data of this entity in the checkpoint must be persisted before advancing this watermark. The + /// committer refers to this on disaster recovery to resume writing. + pub checkpoint_hi_inclusive: i64, + /// Exclusive upper transaction sequence number bound for this entity's data. Committer updates + /// this field. + pub tx_hi: i64, + /// Inclusive lower epoch bound for this entity's data. Pruner updates this field when the epoch range exceeds the retention policy. + pub epoch_lo: i64, + /// Inclusive low watermark that the pruner advances. Corresponds to the epoch id, checkpoint + /// sequence number, or tx sequence number depending on the entity. Data before this watermark is + /// considered pruned by a reader. The underlying data may still exist in the db instance. + pub reader_lo: i64, + /// Updated using the database's current timestamp when the pruner sees that some data needs to + /// be dropped. The pruner uses this column to determine whether to prune or wait long enough + /// that all in-flight reads complete or timeout before it acts on an updated watermark. + pub timestamp_ms: i64, + /// Column used by the pruner to track its true progress. Data below this watermark can be + /// immediately pruned. + pub pruner_hi: i64, +} + +impl StoredWatermark { + pub fn from_upper_bound_update(entity: &str, watermark: CommitterWatermark) -> Self { + StoredWatermark { + pipeline: entity.to_string(), + epoch_hi_inclusive: watermark.epoch_hi_inclusive as i64, + checkpoint_hi_inclusive: watermark.checkpoint_hi_inclusive as i64, + tx_hi: watermark.tx_hi as i64, + ..StoredWatermark::default() + } + } + + pub fn from_lower_bound_update(entity: &str, epoch_lo: u64, reader_lo: u64) -> Self { + StoredWatermark { + pipeline: entity.to_string(), + epoch_lo: epoch_lo as i64, + reader_lo: reader_lo as i64, + ..StoredWatermark::default() + } + } + + pub fn entity(&self) -> Option { + PrunableTable::from_str(&self.pipeline).ok() + } + + /// Determine whether to set a new epoch lower bound based on the retention policy. + pub fn new_epoch_lo(&self, retention: u64) -> Option { + if self.epoch_lo as u64 + retention <= self.epoch_hi_inclusive as u64 { + Some((self.epoch_hi_inclusive as u64).saturating_sub(retention - 1)) + } else { + None + } + } +} diff --git a/crates/sui-mvr-indexer/src/restorer/archives.rs b/crates/sui-mvr-indexer/src/restorer/archives.rs new file mode 100644 index 0000000000000..f70336f76d3c5 --- /dev/null +++ b/crates/sui-mvr-indexer/src/restorer/archives.rs @@ -0,0 +1,60 @@ +// Copyright (c) Mysten Labs, Inc. 
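To make the retention arithmetic of `new_epoch_lo` in `watermarks.rs` above concrete: with a 20-epoch retention and the committer at epoch 100, the pruner advances the inclusive lower bound to 81, so exactly epochs 81..=100 remain. A small test-style sketch; the pipeline name and epoch values are arbitrary:

```rust
// Illustrative check of the retention arithmetic; all values are made up.
#[test]
fn retention_window_example() {
    let wm = StoredWatermark {
        pipeline: "objects_history".to_string(), // arbitrary pipeline name
        epoch_hi_inclusive: 100,
        epoch_lo: 75,
        ..StoredWatermark::default()
    };
    // 75 + 20 <= 100, so pruning is due: keep the latest 20 epochs, 81..=100.
    assert_eq!(wm.new_epoch_lo(20), Some(81));
    // A 50-epoch retention still covers everything, so there is nothing to prune.
    assert_eq!(wm.new_epoch_lo(50), None);
}
```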
+// SPDX-License-Identifier: Apache-2.0
+
+use std::num::NonZeroUsize;
+
+use prometheus::Registry;
+use sui_types::digests::CheckpointDigest;
+use tracing::info;
+
+use sui_archival::reader::{ArchiveReader, ArchiveReaderMetrics};
+use sui_config::node::ArchiveReaderConfig;
+use sui_config::object_storage_config::{ObjectStoreConfig, ObjectStoreType};
+
+use crate::errors::IndexerError;
+use crate::types::IndexerResult;
+
+#[derive(Clone, Debug)]
+pub struct RestoreCheckpointInfo {
+    pub next_checkpoint_after_epoch: u64,
+    pub chain_identifier: CheckpointDigest,
+}
+
+pub async fn read_restore_checkpoint_info(
+    archive_bucket: Option<String>,
+    epoch: u64,
+) -> IndexerResult<RestoreCheckpointInfo> {
+    let archive_store_config = ObjectStoreConfig {
+        object_store: Some(ObjectStoreType::GCS),
+        bucket: archive_bucket,
+        object_store_connection_limit: 50,
+        no_sign_request: false,
+        ..Default::default()
+    };
+    let archive_reader_config = ArchiveReaderConfig {
+        remote_store_config: archive_store_config,
+        download_concurrency: NonZeroUsize::new(50).unwrap(),
+        use_for_pruning_watermark: false,
+    };
+    let metrics = ArchiveReaderMetrics::new(&Registry::default());
+    let archive_reader = ArchiveReader::new(archive_reader_config, &metrics)?;
+    archive_reader.sync_manifest_once().await?;
+    let manifest = archive_reader.get_manifest().await?;
+    let next_checkpoint_after_epoch = manifest.next_checkpoint_after_epoch(epoch);
+    info!(
+        "Read from archives: next checkpoint sequence after epoch {} is: {}",
+        epoch, next_checkpoint_after_epoch
+    );
+    let cp_summaries = archive_reader
+        .get_summaries_for_list_no_verify(vec![0])
+        .await
+        .map_err(|e| IndexerError::ArchiveReaderError(format!("Failed to get summaries: {}", e)))?;
+    let first_cp = cp_summaries
+        .first()
+        .ok_or_else(|| IndexerError::ArchiveReaderError("No checkpoint found".to_string()))?;
+    let chain_identifier = *first_cp.digest();
+    Ok(RestoreCheckpointInfo {
+        next_checkpoint_after_epoch,
+        chain_identifier,
+    })
+}
diff --git a/crates/sui-mvr-indexer/src/restorer/formal_snapshot.rs b/crates/sui-mvr-indexer/src/restorer/formal_snapshot.rs
new file mode 100644
index 0000000000000..bab43c7303f38
--- /dev/null
+++ b/crates/sui-mvr-indexer/src/restorer/formal_snapshot.rs
@@ -0,0 +1,283 @@
+// Copyright (c) Mysten Labs, Inc.
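A hedged sketch of calling `read_restore_checkpoint_info` from `archives.rs` above to decide where indexing should resume after a restore; the bucket name, epoch, and the wrapper function `resume_checkpoint_after_restore` are placeholders:

```rust
// Hypothetical caller; the bucket name and epoch are placeholders.
async fn resume_checkpoint_after_restore() -> IndexerResult<u64> {
    let info = read_restore_checkpoint_info(
        Some("example-archive-bucket".to_string()),
        500, // epoch at which the snapshot was taken
    )
    .await?;
    // The chain identifier comes from the digest of the genesis checkpoint.
    tracing::info!("restored chain identifier: {:?}", info.chain_identifier);
    Ok(info.next_checkpoint_after_epoch)
}
```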
+// SPDX-License-Identifier: Apache-2.0 + +use std::collections::BTreeMap; +use std::fs; +use std::num::NonZeroUsize; +use std::path::PathBuf; +use std::sync::Arc; + +use futures::future::{AbortHandle, AbortRegistration, Abortable}; +use indicatif::{MultiProgress, ProgressBar, ProgressStyle}; +use object_store::path::Path; +use tokio::sync::{Mutex, Semaphore}; +use tokio::task; +use tracing::info; + +use sui_config::object_storage_config::{ObjectStoreConfig, ObjectStoreType}; +use sui_core::authority::authority_store_tables::LiveObject; +use sui_snapshot::reader::{download_bytes, LiveObjectIter, StateSnapshotReaderV1}; +use sui_snapshot::FileMetadata; +use sui_storage::object_store::util::get; +use sui_storage::object_store::ObjectStoreGetExt; +use sui_types::accumulator::Accumulator; + +use crate::config::RestoreConfig; +use crate::errors::IndexerError; +use crate::handlers::TransactionObjectChangesToCommit; +use crate::restorer::archives::{read_restore_checkpoint_info, RestoreCheckpointInfo}; +use crate::store::{indexer_store::IndexerStore, PgIndexerStore}; +use crate::types::{IndexedCheckpoint, IndexedObject}; + +pub type DigestByBucketAndPartition = BTreeMap>; +pub type SnapshotChecksums = (DigestByBucketAndPartition, Accumulator); +pub type Sha3DigestType = Arc>>>; + +pub struct IndexerFormalSnapshotRestorer { + store: PgIndexerStore, + reader: StateSnapshotReaderV1, + restore_config: RestoreConfig, +} + +impl IndexerFormalSnapshotRestorer { + pub async fn new( + store: PgIndexerStore, + restore_config: RestoreConfig, + ) -> Result { + let remote_store_config = ObjectStoreConfig { + object_store: Some(ObjectStoreType::S3), + aws_endpoint: Some(restore_config.snapshot_endpoint.clone()), + aws_virtual_hosted_style_request: true, + object_store_connection_limit: restore_config.object_store_concurrent_limit, + no_sign_request: true, + ..Default::default() + }; + + let base_path = PathBuf::from(restore_config.snapshot_download_dir.clone()); + let snapshot_dir = base_path.join("snapshot"); + if snapshot_dir.exists() { + fs::remove_dir_all(snapshot_dir.clone()).unwrap(); + info!( + "Deleted all files from snapshot directory: {:?}", + snapshot_dir + ); + } else { + fs::create_dir(snapshot_dir.clone()).unwrap(); + info!("Created snapshot directory: {:?}", snapshot_dir); + } + + let local_store_config = ObjectStoreConfig { + object_store: Some(ObjectStoreType::File), + directory: Some(snapshot_dir.clone().to_path_buf()), + ..Default::default() + }; + + let m = MultiProgress::new(); + let reader = StateSnapshotReaderV1::new( + restore_config.start_epoch, + &remote_store_config, + &local_store_config, + usize::MAX, + NonZeroUsize::new(restore_config.object_store_concurrent_limit).unwrap(), + m.clone(), + ) + .await + .unwrap_or_else(|err| panic!("Failed to create reader: {}", err)); + info!( + "Initialized formal snapshot reader at epoch {}", + restore_config.start_epoch + ); + + Ok(Self { + store, + reader, + restore_config: restore_config.clone(), + }) + } + + pub async fn restore(&mut self) -> Result<(), IndexerError> { + let (sha3_digests, num_part_files) = self.reader.compute_checksum().await?; + let (_abort_handle, abort_registration) = AbortHandle::new_pair(); + let (input_files, epoch_dir, remote_object_store, _concurrency) = + self.reader.export_metadata().await?; + let owned_input_files: Vec<(u32, (u32, FileMetadata))> = input_files + .into_iter() + .map(|(bucket, (part_num, metadata))| (*bucket, (part_num, metadata.clone()))) + .collect(); + self.restore_move_objects( + 
abort_registration, + owned_input_files, + epoch_dir, + remote_object_store, + sha3_digests, + num_part_files, + ) + .await?; + info!("Finished restoring move objects"); + self.restore_display_table().await?; + info!("Finished restoring display table"); + self.restore_cp_watermark_and_chain_id().await?; + info!("Finished restoring checkpoint info"); + Ok(()) + } + + async fn restore_move_objects( + &self, + abort_registration: AbortRegistration, + input_files: Vec<(u32, (u32, FileMetadata))>, + epoch_dir: Path, + remote_object_store: Arc, + sha3_digests: Arc>, + num_part_files: usize, + ) -> std::result::Result<(), anyhow::Error> { + let move_object_progress_bar = Arc::new(self.reader.get_multi_progress().add( + ProgressBar::new(num_part_files as u64).with_style( + ProgressStyle::with_template( + "[{elapsed_precise}] {wide_bar} {pos} out of {len} move object files restored ({msg})", + ) + .unwrap(), + ), + )); + + Abortable::new( + async move { + let sema_limit = Arc::new(Semaphore::new( + self.restore_config.object_store_concurrent_limit, + )); + let mut restore_tasks = vec![]; + + for (bucket, (part_num, file_metadata)) in input_files.into_iter() { + let sema_limit_clone = sema_limit.clone(); + let epoch_dir_clone = epoch_dir.clone(); + let remote_object_store_clone = remote_object_store.clone(); + let sha3_digests_clone = sha3_digests.clone(); + let store_clone = self.store.clone(); + let bar_clone = move_object_progress_bar.clone(); + let restore_config = self.restore_config.clone(); + + let restore_task = task::spawn(async move { + let _permit = sema_limit_clone.acquire().await.unwrap(); + let object_file_path = file_metadata.file_path(&epoch_dir_clone); + let (bytes, _) = download_bytes( + remote_object_store_clone, + &file_metadata, + epoch_dir_clone, + sha3_digests_clone, + &&bucket, + &part_num, + Some(restore_config.object_store_max_timeout_secs), + ) + .await; + info!( + "Finished downloading move object file {:?}", + object_file_path + ); + let mut move_objects = vec![]; + let _result: Result<(), anyhow::Error> = + LiveObjectIter::new(&file_metadata, bytes.clone()).map(|obj_iter| { + for object in obj_iter { + match object { + LiveObject::Normal(obj) => { + // TODO: placeholder values for df_info and checkpoint_seq_num, + // will clean it up when the column cleanup is done. 
+ let indexed_object = + IndexedObject::from_object(0, obj, None); + move_objects.push(indexed_object); + } + LiveObject::Wrapped(_) => {} + } + } + }); + + let live_obj_cnt = move_objects.len(); + let object_changes = TransactionObjectChangesToCommit { + changed_objects: move_objects.clone(), + deleted_objects: vec![], + }; + info!( + "Start persisting {} objects to objects table from {}", + live_obj_cnt, object_file_path + ); + store_clone + .persist_objects(vec![object_changes]) + .await + .expect("Failed to persist to objects from restore"); + info!( + "Finished persisting {} objects to objects table from {}", + live_obj_cnt, object_file_path + ); + + let objects_snapshot_changes = TransactionObjectChangesToCommit { + changed_objects: move_objects, + deleted_objects: vec![], + }; + store_clone + .persist_objects_snapshot(vec![objects_snapshot_changes]) + .await + .expect("Failed to persist objects snapshot"); + + bar_clone.inc(1); + bar_clone.set_message(format!( + "Restored {} live move objects from {}", + live_obj_cnt, object_file_path + )); + Ok::<(), anyhow::Error>(()) + }); + restore_tasks.push(restore_task); + } + + let restore_task_results = futures::future::join_all(restore_tasks).await; + for restore_task_result in restore_task_results { + restore_task_result??; + } + Ok(()) + }, + abort_registration, + ) + .await? + } + + async fn restore_display_table(&self) -> std::result::Result<(), anyhow::Error> { + let bucket = self.restore_config.gcs_display_bucket.clone(); + let start_epoch = self.restore_config.start_epoch; + + let remote_store_config = ObjectStoreConfig { + object_store: Some(ObjectStoreType::GCS), + bucket: Some(bucket), + object_store_connection_limit: 200, + no_sign_request: false, + ..Default::default() + }; + let remote_store = remote_store_config.make().map_err(|e| { + IndexerError::GcsError(format!("Failed to make GCS remote store: {}", e)) + })?; + let path = Path::from(format!("display_{}.csv", start_epoch).as_str()); + let bytes: bytes::Bytes = get(&remote_store, &path).await?; + self.store.restore_display(bytes).await?; + Ok(()) + } + + async fn restore_cp_watermark_and_chain_id(&self) -> Result<(), IndexerError> { + let restore_checkpoint_info = read_restore_checkpoint_info( + Some(self.restore_config.gcs_archive_bucket.clone()), + self.restore_config.start_epoch, + ) + .await?; + let RestoreCheckpointInfo { + next_checkpoint_after_epoch, + chain_identifier, + } = restore_checkpoint_info; + self.store + .persist_chain_identifier(chain_identifier.into_inner().to_vec()) + .await?; + assert!(next_checkpoint_after_epoch > 0); + // FIXME: This is a temporary hack to add a checkpoint watermark. + // Once we have proper watermark tables, we should remove the following code. + let last_cp = IndexedCheckpoint { + sequence_number: next_checkpoint_after_epoch - 1, + ..Default::default() + }; + self.store.persist_checkpoints(vec![last_cp]).await?; + Ok(()) + } +} diff --git a/crates/sui-mvr-indexer/src/restorer/mod.rs b/crates/sui-mvr-indexer/src/restorer/mod.rs new file mode 100644 index 0000000000000..1899227725b62 --- /dev/null +++ b/crates/sui-mvr-indexer/src/restorer/mod.rs @@ -0,0 +1,5 @@ +// Copyright (c) Mysten Labs, Inc. 
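The restore_move_objects body above fans work out with tokio::task::spawn, bounds it with a Semaphore sized by object_store_concurrent_limit, and joins every task before returning. Below is a stripped-down sketch of that pattern, independent of the snapshot types; process_part is a made-up stand-in for the download-and-persist step.

// Minimal sketch of the semaphore-bounded fan-out used by restore_move_objects above.
use std::sync::Arc;
use tokio::{sync::Semaphore, task};

// Placeholder for "download one partition file and persist its objects".
async fn process_part(part: u32) -> anyhow::Result<()> {
    let _ = part;
    Ok(())
}

async fn restore_all(parts: Vec<u32>, concurrency: usize) -> anyhow::Result<()> {
    let limiter = Arc::new(Semaphore::new(concurrency));
    let mut tasks = Vec::new();
    for part in parts {
        let limiter = limiter.clone();
        tasks.push(task::spawn(async move {
            // Hold a permit for the duration of the work, capping parallelism.
            let _permit = limiter.acquire().await.expect("semaphore closed");
            process_part(part).await
        }));
    }
    for task_result in futures::future::join_all(tasks).await {
        task_result??; // surface both join errors and task errors, as the restorer does
    }
    Ok(())
}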
+// SPDX-License-Identifier: Apache-2.0 + +pub mod archives; +pub mod formal_snapshot; diff --git a/crates/sui-mvr-indexer/src/schema.patch b/crates/sui-mvr-indexer/src/schema.patch new file mode 100644 index 0000000000000..c935f4d862fe0 --- /dev/null +++ b/crates/sui-mvr-indexer/src/schema.patch @@ -0,0 +1,7 @@ +diff --git a/crates/sui-mvr-indexer/src/schema.rs b/crates/sui-mvr-indexer/src/schema.rs +--- a/crates/sui-mvr-indexer/src/schema.rs ++++ b/crates/sui-mvr-indexer/src/schema.rs +@@ -1 +1,3 @@ ++// Copyright (c) Mysten Labs, Inc. ++// SPDX-License-Identifier: Apache-2.0 + // @generated automatically by Diesel CLI. diff --git a/crates/sui-mvr-indexer/src/schema.rs b/crates/sui-mvr-indexer/src/schema.rs new file mode 100644 index 0000000000000..447b45557922c --- /dev/null +++ b/crates/sui-mvr-indexer/src/schema.rs @@ -0,0 +1,404 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 +// @generated automatically by Diesel CLI. + +diesel::table! { + chain_identifier (checkpoint_digest) { + checkpoint_digest -> Bytea, + } +} + +diesel::table! { + checkpoints (sequence_number) { + sequence_number -> Int8, + checkpoint_digest -> Bytea, + epoch -> Int8, + network_total_transactions -> Int8, + previous_checkpoint_digest -> Nullable, + end_of_epoch -> Bool, + tx_digests -> Array>, + timestamp_ms -> Int8, + total_gas_cost -> Int8, + computation_cost -> Int8, + storage_cost -> Int8, + storage_rebate -> Int8, + non_refundable_storage_fee -> Int8, + checkpoint_commitments -> Bytea, + validator_signature -> Bytea, + end_of_epoch_data -> Nullable, + min_tx_sequence_number -> Nullable, + max_tx_sequence_number -> Nullable, + } +} + +diesel::table! { + display (object_type) { + object_type -> Text, + id -> Bytea, + version -> Int2, + bcs -> Bytea, + } +} + +diesel::table! { + epochs (epoch) { + epoch -> Int8, + first_checkpoint_id -> Int8, + epoch_start_timestamp -> Int8, + reference_gas_price -> Int8, + protocol_version -> Int8, + total_stake -> Int8, + storage_fund_balance -> Int8, + system_state -> Nullable, + epoch_total_transactions -> Nullable, + last_checkpoint_id -> Nullable, + epoch_end_timestamp -> Nullable, + storage_fund_reinvestment -> Nullable, + storage_charge -> Nullable, + storage_rebate -> Nullable, + stake_subsidy_amount -> Nullable, + total_gas_fees -> Nullable, + total_stake_rewards_distributed -> Nullable, + leftover_storage_fund_inflow -> Nullable, + epoch_commitments -> Nullable, + system_state_summary_json -> Nullable, + first_tx_sequence_number -> Nullable, + } +} + +diesel::table! { + event_emit_module (package, module, tx_sequence_number, event_sequence_number) { + package -> Bytea, + module -> Text, + tx_sequence_number -> Int8, + event_sequence_number -> Int8, + sender -> Bytea, + } +} + +diesel::table! { + event_emit_package (package, tx_sequence_number, event_sequence_number) { + package -> Bytea, + tx_sequence_number -> Int8, + event_sequence_number -> Int8, + sender -> Bytea, + } +} + +diesel::table! { + event_senders (sender, tx_sequence_number, event_sequence_number) { + sender -> Bytea, + tx_sequence_number -> Int8, + event_sequence_number -> Int8, + } +} + +diesel::table! { + event_struct_instantiation (package, module, type_instantiation, tx_sequence_number, event_sequence_number) { + package -> Bytea, + module -> Text, + type_instantiation -> Text, + tx_sequence_number -> Int8, + event_sequence_number -> Int8, + sender -> Bytea, + } +} + +diesel::table! 
{ + event_struct_module (package, module, tx_sequence_number, event_sequence_number) { + package -> Bytea, + module -> Text, + tx_sequence_number -> Int8, + event_sequence_number -> Int8, + sender -> Bytea, + } +} + +diesel::table! { + event_struct_name (package, module, type_name, tx_sequence_number, event_sequence_number) { + package -> Bytea, + module -> Text, + type_name -> Text, + tx_sequence_number -> Int8, + event_sequence_number -> Int8, + sender -> Bytea, + } +} + +diesel::table! { + event_struct_package (package, tx_sequence_number, event_sequence_number) { + package -> Bytea, + tx_sequence_number -> Int8, + event_sequence_number -> Int8, + sender -> Bytea, + } +} + +diesel::table! { + events (tx_sequence_number, event_sequence_number) { + tx_sequence_number -> Int8, + event_sequence_number -> Int8, + transaction_digest -> Bytea, + senders -> Array>, + package -> Bytea, + module -> Text, + event_type -> Text, + timestamp_ms -> Int8, + bcs -> Bytea, + sender -> Nullable, + } +} + +diesel::table! { + feature_flags (protocol_version, flag_name) { + protocol_version -> Int8, + flag_name -> Text, + flag_value -> Bool, + } +} + +diesel::table! { + full_objects_history (object_id, object_version) { + object_id -> Bytea, + object_version -> Int8, + serialized_object -> Nullable, + } +} + +diesel::table! { + objects (object_id) { + object_id -> Bytea, + object_version -> Int8, + object_digest -> Bytea, + owner_type -> Int2, + owner_id -> Nullable, + object_type -> Nullable, + object_type_package -> Nullable, + object_type_module -> Nullable, + object_type_name -> Nullable, + serialized_object -> Bytea, + coin_type -> Nullable, + coin_balance -> Nullable, + df_kind -> Nullable, + } +} + +diesel::table! { + objects_history (checkpoint_sequence_number, object_id, object_version) { + object_id -> Bytea, + object_version -> Int8, + object_status -> Int2, + object_digest -> Nullable, + checkpoint_sequence_number -> Int8, + owner_type -> Nullable, + owner_id -> Nullable, + object_type -> Nullable, + object_type_package -> Nullable, + object_type_module -> Nullable, + object_type_name -> Nullable, + serialized_object -> Nullable, + coin_type -> Nullable, + coin_balance -> Nullable, + df_kind -> Nullable, + } +} + +diesel::table! { + objects_snapshot (object_id) { + object_id -> Bytea, + object_version -> Int8, + object_status -> Int2, + object_digest -> Nullable, + checkpoint_sequence_number -> Int8, + owner_type -> Nullable, + owner_id -> Nullable, + object_type -> Nullable, + object_type_package -> Nullable, + object_type_module -> Nullable, + object_type_name -> Nullable, + serialized_object -> Nullable, + coin_type -> Nullable, + coin_balance -> Nullable, + df_kind -> Nullable, + } +} + +diesel::table! { + objects_version (object_id, object_version) { + object_id -> Bytea, + object_version -> Int8, + cp_sequence_number -> Int8, + } +} + +diesel::table! { + packages (package_id, original_id, package_version) { + package_id -> Bytea, + original_id -> Bytea, + package_version -> Int8, + move_package -> Bytea, + checkpoint_sequence_number -> Int8, + } +} + +diesel::table! { + protocol_configs (protocol_version, config_name) { + protocol_version -> Int8, + config_name -> Text, + config_value -> Nullable, + } +} + +diesel::table! { + pruner_cp_watermark (checkpoint_sequence_number) { + checkpoint_sequence_number -> Int8, + min_tx_sequence_number -> Int8, + max_tx_sequence_number -> Int8, + } +} + +diesel::table! 
{ + raw_checkpoints (sequence_number) { + sequence_number -> Int8, + certified_checkpoint -> Bytea, + checkpoint_contents -> Bytea, + } +} + +diesel::table! { + transactions (tx_sequence_number) { + tx_sequence_number -> Int8, + transaction_digest -> Bytea, + raw_transaction -> Bytea, + raw_effects -> Bytea, + checkpoint_sequence_number -> Int8, + timestamp_ms -> Int8, + object_changes -> Array>, + balance_changes -> Array>, + events -> Array>, + transaction_kind -> Int2, + success_command_count -> Int2, + } +} + +diesel::table! { + tx_affected_addresses (affected, tx_sequence_number) { + tx_sequence_number -> Int8, + affected -> Bytea, + sender -> Bytea, + } +} + +diesel::table! { + tx_affected_objects (affected, tx_sequence_number) { + tx_sequence_number -> Int8, + affected -> Bytea, + sender -> Bytea, + } +} + +diesel::table! { + tx_calls_fun (package, module, func, tx_sequence_number) { + tx_sequence_number -> Int8, + package -> Bytea, + module -> Text, + func -> Text, + sender -> Bytea, + } +} + +diesel::table! { + tx_calls_mod (package, module, tx_sequence_number) { + tx_sequence_number -> Int8, + package -> Bytea, + module -> Text, + sender -> Bytea, + } +} + +diesel::table! { + tx_calls_pkg (package, tx_sequence_number) { + tx_sequence_number -> Int8, + package -> Bytea, + sender -> Bytea, + } +} + +diesel::table! { + tx_changed_objects (object_id, tx_sequence_number) { + tx_sequence_number -> Int8, + object_id -> Bytea, + sender -> Bytea, + } +} + +diesel::table! { + tx_digests (tx_digest) { + tx_digest -> Bytea, + tx_sequence_number -> Int8, + } +} + +diesel::table! { + tx_input_objects (object_id, tx_sequence_number) { + tx_sequence_number -> Int8, + object_id -> Bytea, + sender -> Bytea, + } +} + +diesel::table! { + tx_kinds (tx_kind, tx_sequence_number) { + tx_sequence_number -> Int8, + tx_kind -> Int2, + } +} + +diesel::table! { + watermarks (pipeline) { + pipeline -> Text, + epoch_hi_inclusive -> Int8, + checkpoint_hi_inclusive -> Int8, + tx_hi -> Int8, + epoch_lo -> Int8, + reader_lo -> Int8, + timestamp_ms -> Int8, + pruner_hi -> Int8, + } +} + +diesel::allow_tables_to_appear_in_same_query!( + chain_identifier, + checkpoints, + display, + epochs, + event_emit_module, + event_emit_package, + event_senders, + event_struct_instantiation, + event_struct_module, + event_struct_name, + event_struct_package, + events, + feature_flags, + full_objects_history, + objects, + objects_history, + objects_snapshot, + objects_version, + packages, + protocol_configs, + pruner_cp_watermark, + raw_checkpoints, + transactions, + tx_affected_addresses, + tx_affected_objects, + tx_calls_fun, + tx_calls_mod, + tx_calls_pkg, + tx_changed_objects, + tx_digests, + tx_input_objects, + tx_kinds, + watermarks, +); diff --git a/crates/sui-mvr-indexer/src/store/indexer_store.rs b/crates/sui-mvr-indexer/src/store/indexer_store.rs new file mode 100644 index 0000000000000..998b37f286b1e --- /dev/null +++ b/crates/sui-mvr-indexer/src/store/indexer_store.rs @@ -0,0 +1,140 @@ +// Copyright (c) Mysten Labs, Inc. 
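The diesel::table! definitions above are what the store implementations below query against. As a small illustration (not part of this PR), a digest-to-sequence-number lookup against tx_digests with diesel-async could look like this:

// Illustrative only: resolve a transaction digest to its tx_sequence_number
// using the tx_digests table defined above.
use diesel::{ExpressionMethods, OptionalExtension, QueryDsl};
use diesel_async::{AsyncPgConnection, RunQueryDsl};

use crate::schema::tx_digests;

async fn tx_sequence_for_digest(
    conn: &mut AsyncPgConnection,
    digest: &[u8],
) -> Result<Option<i64>, diesel::result::Error> {
    tx_digests::table
        .filter(tx_digests::tx_digest.eq(digest))
        .select(tx_digests::tx_sequence_number)
        .first::<i64>(conn)
        .await
        .optional()
}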
+// SPDX-License-Identifier: Apache-2.0 + +use std::collections::BTreeMap; + +use async_trait::async_trait; +use strum::IntoEnumIterator; + +use crate::errors::IndexerError; +use crate::handlers::pruner::PrunableTable; +use crate::handlers::{CommitterWatermark, EpochToCommit, TransactionObjectChangesToCommit}; +use crate::models::display::StoredDisplay; +use crate::models::obj_indices::StoredObjectVersion; +use crate::models::objects::{StoredDeletedObject, StoredObject}; +use crate::models::raw_checkpoints::StoredRawCheckpoint; +use crate::models::watermarks::StoredWatermark; +use crate::types::{ + EventIndex, IndexedCheckpoint, IndexedEvent, IndexedPackage, IndexedTransaction, TxIndex, +}; + +#[allow(clippy::large_enum_variant)] +pub enum ObjectsToCommit { + MutatedObject(StoredObject), + DeletedObject(StoredDeletedObject), +} + +#[async_trait] +pub trait IndexerStore: Clone + Sync + Send + 'static { + async fn get_latest_checkpoint_sequence_number(&self) -> Result, IndexerError>; + + async fn get_available_epoch_range(&self) -> Result<(u64, u64), IndexerError>; + + async fn get_available_checkpoint_range(&self) -> Result<(u64, u64), IndexerError>; + + async fn get_latest_object_snapshot_checkpoint_sequence_number( + &self, + ) -> Result, IndexerError>; + + async fn get_chain_identifier(&self) -> Result>, IndexerError>; + + async fn persist_protocol_configs_and_feature_flags( + &self, + chain_id: Vec, + ) -> Result<(), IndexerError>; + + async fn persist_objects( + &self, + object_changes: Vec, + ) -> Result<(), IndexerError>; + + async fn persist_object_history( + &self, + object_changes: Vec, + ) -> Result<(), IndexerError>; + + async fn persist_full_objects_history( + &self, + object_changes: Vec, + ) -> Result<(), IndexerError>; + + async fn persist_objects_version( + &self, + object_versions: Vec, + ) -> Result<(), IndexerError>; + + async fn persist_objects_snapshot( + &self, + object_changes: Vec, + ) -> Result<(), IndexerError>; + + async fn persist_checkpoints( + &self, + checkpoints: Vec, + ) -> Result<(), IndexerError>; + + async fn persist_chain_identifier( + &self, + checkpoint_digest: Vec, + ) -> Result<(), IndexerError>; + + async fn persist_transactions( + &self, + transactions: Vec, + ) -> Result<(), IndexerError>; + + async fn persist_tx_indices(&self, indices: Vec) -> Result<(), IndexerError>; + + async fn persist_events(&self, events: Vec) -> Result<(), IndexerError>; + async fn persist_event_indices( + &self, + event_indices: Vec, + ) -> Result<(), IndexerError>; + + async fn persist_displays( + &self, + display_updates: BTreeMap, + ) -> Result<(), IndexerError>; + + async fn persist_packages(&self, packages: Vec) -> Result<(), IndexerError>; + + /// Updates the current epoch with end-of-epoch data, and writes a new epoch to the database. + async fn persist_epoch(&self, epoch: EpochToCommit) -> Result<(), IndexerError>; + + /// Updates epoch-partitioned tables to accept data from the new epoch. 
+ async fn advance_epoch(&self, epoch: EpochToCommit) -> Result<(), IndexerError>; + + async fn prune_epoch(&self, epoch: u64) -> Result<(), IndexerError>; + + async fn get_network_total_transactions_by_end_of_epoch( + &self, + epoch: u64, + ) -> Result, IndexerError>; + + async fn upload_display(&self, epoch: u64) -> Result<(), IndexerError>; + + async fn restore_display(&self, bytes: bytes::Bytes) -> Result<(), IndexerError>; + + async fn persist_raw_checkpoints( + &self, + checkpoints: Vec, + ) -> Result<(), IndexerError>; + + /// Update the upper bound of the watermarks for the given tables. + async fn update_watermarks_upper_bound( + &self, + watermark: CommitterWatermark, + ) -> Result<(), IndexerError> + where + E::Iterator: Iterator>; + + /// Updates each watermark entry's lower bounds per the list of tables and their new epoch lower + /// bounds. + async fn update_watermarks_lower_bound( + &self, + watermarks: Vec<(PrunableTable, u64)>, + ) -> Result<(), IndexerError>; + + /// Load all watermark entries from the store, and the latest timestamp from the db. + async fn get_watermarks(&self) -> Result<(Vec, i64), IndexerError>; +} diff --git a/crates/sui-mvr-indexer/src/store/mod.rs b/crates/sui-mvr-indexer/src/store/mod.rs new file mode 100644 index 0000000000000..9d6bf65cc26b4 --- /dev/null +++ b/crates/sui-mvr-indexer/src/store/mod.rs @@ -0,0 +1,93 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use std::time::Duration; + +use diesel_async::{scoped_futures::ScopedBoxFuture, AsyncPgConnection}; +pub(crate) use indexer_store::*; +pub use pg_indexer_store::PgIndexerStore; + +use crate::{database::ConnectionPool, errors::IndexerError}; + +pub mod indexer_store; +pub mod package_resolver; +mod pg_indexer_store; +pub mod pg_partition_manager; + +pub async fn transaction_with_retry<'a, Q, T>( + pool: &ConnectionPool, + timeout: Duration, + query: Q, +) -> Result +where + Q: for<'r> FnOnce( + &'r mut AsyncPgConnection, + ) -> ScopedBoxFuture<'a, 'r, Result> + + Send, + Q: Clone, + T: 'a, +{ + let backoff = backoff::ExponentialBackoff { + max_elapsed_time: Some(timeout), + ..Default::default() + }; + backoff::future::retry(backoff, || async { + let mut connection = pool.get().await.map_err(|e| backoff::Error::Transient { + err: IndexerError::PostgresWriteError(e.to_string()), + retry_after: None, + })?; + + connection + .build_transaction() + .read_write() + .run(query.clone()) + .await + .map_err(|e| { + tracing::error!("Error with persisting data into DB: {:?}, retrying...", e); + backoff::Error::Transient { + err: IndexerError::PostgresWriteError(e.to_string()), + retry_after: None, + } + }) + }) + .await +} + +pub async fn read_with_retry<'a, Q, T>( + pool: &ConnectionPool, + timeout: Duration, + query: Q, +) -> Result +where + Q: for<'r> FnOnce( + &'r mut AsyncPgConnection, + ) -> ScopedBoxFuture<'a, 'r, Result> + + Send, + Q: Clone, + T: 'a, +{ + let backoff = backoff::ExponentialBackoff { + max_elapsed_time: Some(timeout), + ..Default::default() + }; + backoff::future::retry(backoff, || async { + let mut connection = pool.get().await.map_err(|e| backoff::Error::Transient { + err: IndexerError::PostgresWriteError(e.to_string()), + retry_after: None, + })?; + + connection + .build_transaction() + .read_only() + .run(query.clone()) + .await + .map_err(|e| { + tracing::error!("Error with reading data from DB: {:?}, retrying...", e); + backoff::Error::Transient { + err: IndexerError::PostgresWriteError(e.to_string()), + retry_after: None, + } + }) + }) + 
.await +} diff --git a/crates/sui-mvr-indexer/src/store/package_resolver.rs b/crates/sui-mvr-indexer/src/store/package_resolver.rs new file mode 100644 index 0000000000000..f4cedd6500871 --- /dev/null +++ b/crates/sui-mvr-indexer/src/store/package_resolver.rs @@ -0,0 +1,58 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use std::sync::Arc; + +use crate::database::ConnectionPool; +use crate::schema::objects; +use anyhow::anyhow; +use async_trait::async_trait; +use diesel::ExpressionMethods; +use diesel::QueryDsl; +use diesel_async::RunQueryDsl; +use move_core_types::account_address::AccountAddress; +use sui_package_resolver::{error::Error as PackageResolverError, Package, PackageStore}; +use sui_types::object::Object; + +/// A package resolver that reads packages from the database. +#[derive(Clone)] +pub struct IndexerStorePackageResolver { + pool: ConnectionPool, +} + +impl IndexerStorePackageResolver { + pub fn new(pool: ConnectionPool) -> Self { + Self { pool } + } +} + +#[async_trait] +impl PackageStore for IndexerStorePackageResolver { + async fn fetch(&self, id: AccountAddress) -> Result, PackageResolverError> { + let pkg = self + .get_package_from_db(id) + .await + .map_err(|e| PackageResolverError::Store { + store: "PostgresDB", + error: e.to_string(), + })?; + Ok(Arc::new(pkg)) + } +} + +impl IndexerStorePackageResolver { + async fn get_package_from_db(&self, id: AccountAddress) -> Result { + let mut connection = self.pool.get().await?; + + let bcs = objects::dsl::objects + .select(objects::dsl::serialized_object) + .filter(objects::dsl::object_id.eq(id.to_vec())) + .get_result::>(&mut connection) + .await + .map_err(|e| anyhow!("Package not found in DB: {e}"))?; + + let object = bcs::from_bytes::(&bcs)?; + Package::read_from_object(&object) + .map_err(|e| anyhow!("Failed parsing object to package: {e}")) + } +} diff --git a/crates/sui-mvr-indexer/src/store/pg_indexer_store.rs b/crates/sui-mvr-indexer/src/store/pg_indexer_store.rs new file mode 100644 index 0000000000000..b1d1af7b31ed6 --- /dev/null +++ b/crates/sui-mvr-indexer/src/store/pg_indexer_store.rs @@ -0,0 +1,2495 @@ +// Copyright (c) Mysten Labs, Inc. 
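read_with_retry above is the read-only counterpart of transaction_with_retry: callers hand it a cloneable closure that builds a scoped future over the pooled connection. A hedged sketch of such a caller follows; the 30-second timeout is a placeholder, and the error mapping reuses the crate's existing From<diesel::result::Error> conversion seen elsewhere in this diff.

// Illustrative caller of read_with_retry; the timeout value is a placeholder.
use std::time::Duration;

use diesel::dsl::max;
use diesel::QueryDsl;
use diesel_async::{scoped_futures::ScopedFutureExt, RunQueryDsl};

use crate::database::ConnectionPool;
use crate::errors::IndexerError;
use crate::schema::checkpoints;
use crate::store::read_with_retry;

async fn latest_checkpoint(pool: &ConnectionPool) -> Result<Option<i64>, IndexerError> {
    read_with_retry(pool, Duration::from_secs(30), |conn| {
        async move {
            checkpoints::table
                .select(max(checkpoints::sequence_number))
                .first::<Option<i64>>(conn)
                .await
                .map_err(IndexerError::from)
        }
        .scope_boxed()
    })
    .await
}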
+// SPDX-License-Identifier: Apache-2.0 + +use std::collections::{BTreeMap, HashMap}; +use std::io::Cursor; +use std::time::Duration; + +use async_trait::async_trait; +use core::result::Result::Ok; +use csv::{ReaderBuilder, Writer}; +use diesel::dsl::{max, min}; +use diesel::ExpressionMethods; +use diesel::OptionalExtension; +use diesel::QueryDsl; +use diesel_async::scoped_futures::ScopedFutureExt; +use futures::future::Either; +use itertools::Itertools; +use object_store::path::Path; +use strum::IntoEnumIterator; +use sui_types::base_types::ObjectID; +use tap::TapFallible; +use tracing::{info, warn}; + +use sui_config::object_storage_config::{ObjectStoreConfig, ObjectStoreType}; +use sui_protocol_config::ProtocolConfig; +use sui_storage::object_store::util::put; + +use crate::config::UploadOptions; +use crate::database::ConnectionPool; +use crate::errors::{Context, IndexerError}; +use crate::handlers::pruner::PrunableTable; +use crate::handlers::TransactionObjectChangesToCommit; +use crate::handlers::{CommitterWatermark, EpochToCommit}; +use crate::metrics::IndexerMetrics; +use crate::models::checkpoints::StoredChainIdentifier; +use crate::models::checkpoints::StoredCheckpoint; +use crate::models::checkpoints::StoredCpTx; +use crate::models::display::StoredDisplay; +use crate::models::epoch::StoredEpochInfo; +use crate::models::epoch::{StoredFeatureFlag, StoredProtocolConfig}; +use crate::models::events::StoredEvent; +use crate::models::obj_indices::StoredObjectVersion; +use crate::models::objects::{ + StoredDeletedObject, StoredFullHistoryObject, StoredHistoryObject, StoredObject, + StoredObjectSnapshot, +}; +use crate::models::packages::StoredPackage; +use crate::models::transactions::StoredTransaction; +use crate::models::watermarks::StoredWatermark; +use crate::schema::{ + chain_identifier, checkpoints, display, epochs, event_emit_module, event_emit_package, + event_senders, event_struct_instantiation, event_struct_module, event_struct_name, + event_struct_package, events, feature_flags, full_objects_history, objects, objects_history, + objects_snapshot, objects_version, packages, protocol_configs, pruner_cp_watermark, + raw_checkpoints, transactions, tx_affected_addresses, tx_affected_objects, tx_calls_fun, + tx_calls_mod, tx_calls_pkg, tx_changed_objects, tx_digests, tx_input_objects, tx_kinds, + watermarks, +}; +use crate::store::{read_with_retry, transaction_with_retry}; +use crate::types::{EventIndex, IndexedDeletedObject, IndexedObject}; +use crate::types::{IndexedCheckpoint, IndexedEvent, IndexedPackage, IndexedTransaction, TxIndex}; + +use super::pg_partition_manager::{EpochPartitionData, PgPartitionManager}; +use super::IndexerStore; + +use crate::models::raw_checkpoints::StoredRawCheckpoint; +use diesel::upsert::excluded; +use sui_types::digests::{ChainIdentifier, CheckpointDigest}; + +#[macro_export] +macro_rules! chunk { + ($data: expr, $size: expr) => {{ + $data + .into_iter() + .chunks($size) + .into_iter() + .map(|c| c.collect()) + .collect::>>() + }}; +} + +// In one DB transaction, the update could be chunked into +// a few statements, this is the amount of rows to update in one statement +// TODO: I think with the `per_db_tx` params, `PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX` +// is now less relevant. We should do experiments and remove it if it's true. 
+const PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX: usize = 1000; +// The amount of rows to update in one DB transaction +const PG_COMMIT_PARALLEL_CHUNK_SIZE: usize = 100; +// The amount of rows to update in one DB transaction, for objects particularly +// Having this number too high may cause many db deadlocks because of +// optimistic locking. +const PG_COMMIT_OBJECTS_PARALLEL_CHUNK_SIZE: usize = 500; +const PG_DB_COMMIT_SLEEP_DURATION: Duration = Duration::from_secs(3600); + +#[derive(Clone)] +pub struct PgIndexerStoreConfig { + pub parallel_chunk_size: usize, + pub parallel_objects_chunk_size: usize, + pub gcs_cred_path: Option, + pub gcs_display_bucket: Option, +} + +#[derive(Clone)] +pub struct PgIndexerStore { + pool: ConnectionPool, + metrics: IndexerMetrics, + partition_manager: PgPartitionManager, + config: PgIndexerStoreConfig, +} + +impl PgIndexerStore { + pub fn new( + pool: ConnectionPool, + upload_options: UploadOptions, + metrics: IndexerMetrics, + ) -> Self { + let parallel_chunk_size = std::env::var("PG_COMMIT_PARALLEL_CHUNK_SIZE") + .unwrap_or_else(|_e| PG_COMMIT_PARALLEL_CHUNK_SIZE.to_string()) + .parse::() + .unwrap(); + let parallel_objects_chunk_size = std::env::var("PG_COMMIT_OBJECTS_PARALLEL_CHUNK_SIZE") + .unwrap_or_else(|_e| PG_COMMIT_OBJECTS_PARALLEL_CHUNK_SIZE.to_string()) + .parse::() + .unwrap(); + let partition_manager = + PgPartitionManager::new(pool.clone()).expect("Failed to initialize partition manager"); + let config = PgIndexerStoreConfig { + parallel_chunk_size, + parallel_objects_chunk_size, + gcs_cred_path: upload_options.gcs_cred_path, + gcs_display_bucket: upload_options.gcs_display_bucket, + }; + + Self { + pool, + metrics, + partition_manager, + config, + } + } + + pub fn pool(&self) -> ConnectionPool { + self.pool.clone() + } + + /// Get the range of the protocol versions that need to be indexed. + pub async fn get_protocol_version_index_range(&self) -> Result<(i64, i64), IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + // We start indexing from the next protocol version after the latest one stored in the db. + let start = protocol_configs::table + .select(max(protocol_configs::protocol_version)) + .first::>(&mut connection) + .await + .map_err(Into::into) + .context("Failed reading latest protocol version from PostgresDB")? + .map_or(1, |v| v + 1); + + // We end indexing at the protocol version of the latest epoch stored in the db. + let end = epochs::table + .select(max(epochs::protocol_version)) + .first::>(&mut connection) + .await + .map_err(Into::into) + .context("Failed reading latest epoch protocol version from PostgresDB")? 
+ .unwrap_or(1); + Ok((start, end)) + } + + async fn get_chain_identifier(&self) -> Result>, IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + chain_identifier::table + .select(chain_identifier::checkpoint_digest) + .first::>(&mut connection) + .await + .optional() + .map_err(Into::into) + .context("Failed reading chain id from PostgresDB") + } + + // `pub` is needed for wait_for_checkpoint in tests + pub async fn get_latest_checkpoint_sequence_number(&self) -> Result, IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + checkpoints::table + .select(max(checkpoints::sequence_number)) + .first::>(&mut connection) + .await + .map_err(Into::into) + .map(|v| v.map(|v| v as u64)) + .context("Failed reading latest checkpoint sequence number from PostgresDB") + } + + async fn get_available_checkpoint_range(&self) -> Result<(u64, u64), IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + checkpoints::table + .select(( + min(checkpoints::sequence_number), + max(checkpoints::sequence_number), + )) + .first::<(Option, Option)>(&mut connection) + .await + .map_err(Into::into) + .map(|(min, max)| { + ( + min.unwrap_or_default() as u64, + max.unwrap_or_default() as u64, + ) + }) + .context("Failed reading min and max checkpoint sequence numbers from PostgresDB") + } + + async fn get_prunable_epoch_range(&self) -> Result<(u64, u64), IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + epochs::table + .select((min(epochs::epoch), max(epochs::epoch))) + .first::<(Option, Option)>(&mut connection) + .await + .map_err(Into::into) + .map(|(min, max)| { + ( + min.unwrap_or_default() as u64, + max.unwrap_or_default() as u64, + ) + }) + .context("Failed reading min and max epoch numbers from PostgresDB") + } + + async fn get_min_prunable_checkpoint(&self) -> Result { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + pruner_cp_watermark::table + .select(min(pruner_cp_watermark::checkpoint_sequence_number)) + .first::>(&mut connection) + .await + .map_err(Into::into) + .map(|v| v.unwrap_or_default() as u64) + .context("Failed reading min prunable checkpoint sequence number from PostgresDB") + } + + pub async fn get_checkpoint_range_for_epoch( + &self, + epoch: u64, + ) -> Result<(u64, Option), IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + epochs::table + .select((epochs::first_checkpoint_id, epochs::last_checkpoint_id)) + .filter(epochs::epoch.eq(epoch as i64)) + .first::<(i64, Option)>(&mut connection) + .await + .map_err(Into::into) + .map(|(min, max)| (min as u64, max.map(|v| v as u64))) + .context("Failed reading checkpoint range from PostgresDB") + } + + pub async fn get_transaction_range_for_checkpoint( + &self, + checkpoint: u64, + ) -> Result<(u64, u64), IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + pruner_cp_watermark::table + .select(( + pruner_cp_watermark::min_tx_sequence_number, + pruner_cp_watermark::max_tx_sequence_number, + )) + .filter(pruner_cp_watermark::checkpoint_sequence_number.eq(checkpoint as i64)) + .first::<(i64, i64)>(&mut connection) + .await + .map_err(Into::into) + .map(|(min, max)| (min as u64, max as u64)) + .context("Failed reading transaction range from PostgresDB") + } + + pub async fn get_latest_object_snapshot_checkpoint_sequence_number( 
+ &self, + ) -> Result, IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + objects_snapshot::table + .select(max(objects_snapshot::checkpoint_sequence_number)) + .first::>(&mut connection) + .await + .map_err(Into::into) + .map(|v| v.map(|v| v as u64)) + .context( + "Failed reading latest object snapshot checkpoint sequence number from PostgresDB", + ) + } + + async fn persist_display_updates( + &self, + display_updates: Vec, + ) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + diesel::insert_into(display::table) + .values(display_updates) + .on_conflict(display::object_type) + .do_update() + .set(( + display::id.eq(excluded(display::id)), + display::version.eq(excluded(display::version)), + display::bcs.eq(excluded(display::bcs)), + )) + .execute(conn) + .await?; + + Ok::<(), IndexerError>(()) + } + .scope_boxed() + }) + .await?; + + Ok(()) + } + + async fn persist_object_mutation_chunk( + &self, + mutated_object_mutation_chunk: Vec, + ) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + + let guard = self + .metrics + .checkpoint_db_commit_latency_objects_chunks + .start_timer(); + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + diesel::insert_into(objects::table) + .values(mutated_object_mutation_chunk.clone()) + .on_conflict(objects::object_id) + .do_update() + .set(( + objects::object_id.eq(excluded(objects::object_id)), + objects::object_version.eq(excluded(objects::object_version)), + objects::object_digest.eq(excluded(objects::object_digest)), + objects::owner_type.eq(excluded(objects::owner_type)), + objects::owner_id.eq(excluded(objects::owner_id)), + objects::object_type.eq(excluded(objects::object_type)), + objects::serialized_object.eq(excluded(objects::serialized_object)), + objects::coin_type.eq(excluded(objects::coin_type)), + objects::coin_balance.eq(excluded(objects::coin_balance)), + objects::df_kind.eq(excluded(objects::df_kind)), + )) + .execute(conn) + .await?; + Ok::<(), IndexerError>(()) + } + .scope_boxed() + }) + .await + .tap_ok(|_| { + guard.stop_and_record(); + }) + .tap_err(|e| { + tracing::error!("Failed to persist object mutations with error: {}", e); + }) + } + + async fn persist_object_deletion_chunk( + &self, + deleted_objects_chunk: Vec, + ) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + let guard = self + .metrics + .checkpoint_db_commit_latency_objects_chunks + .start_timer(); + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + diesel::delete( + objects::table.filter( + objects::object_id.eq_any( + deleted_objects_chunk + .iter() + .map(|o| o.object_id.clone()) + .collect::>(), + ), + ), + ) + .execute(conn) + .await + .map_err(IndexerError::from) + .context("Failed to write object deletion to PostgresDB")?; + + Ok::<(), IndexerError>(()) + } + .scope_boxed() + }) + .await + .tap_ok(|_| { + guard.stop_and_record(); + }) + .tap_err(|e| { + tracing::error!("Failed to persist object deletions with error: {}", e); + }) + } + + async fn persist_object_snapshot_mutation_chunk( + &self, + objects_snapshot_mutations: Vec, + ) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + let guard = self + .metrics + .checkpoint_db_commit_latency_objects_snapshot_chunks + .start_timer(); + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + for mutation_chunk in + 
objects_snapshot_mutations.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) + { + diesel::insert_into(objects_snapshot::table) + .values(mutation_chunk) + .on_conflict(objects_snapshot::object_id) + .do_update() + .set(( + objects_snapshot::object_version + .eq(excluded(objects_snapshot::object_version)), + objects_snapshot::object_status + .eq(excluded(objects_snapshot::object_status)), + objects_snapshot::object_digest + .eq(excluded(objects_snapshot::object_digest)), + objects_snapshot::owner_type.eq(excluded(objects_snapshot::owner_type)), + objects_snapshot::owner_id.eq(excluded(objects_snapshot::owner_id)), + objects_snapshot::object_type_package + .eq(excluded(objects_snapshot::object_type_package)), + objects_snapshot::object_type_module + .eq(excluded(objects_snapshot::object_type_module)), + objects_snapshot::object_type_name + .eq(excluded(objects_snapshot::object_type_name)), + objects_snapshot::object_type + .eq(excluded(objects_snapshot::object_type)), + objects_snapshot::serialized_object + .eq(excluded(objects_snapshot::serialized_object)), + objects_snapshot::coin_type.eq(excluded(objects_snapshot::coin_type)), + objects_snapshot::coin_balance + .eq(excluded(objects_snapshot::coin_balance)), + objects_snapshot::df_kind.eq(excluded(objects_snapshot::df_kind)), + objects_snapshot::checkpoint_sequence_number + .eq(excluded(objects_snapshot::checkpoint_sequence_number)), + )) + .execute(conn) + .await?; + } + Ok::<(), IndexerError>(()) + } + .scope_boxed() + }) + .await + .tap_ok(|_| { + guard.stop_and_record(); + }) + .tap_err(|e| { + tracing::error!("Failed to persist object snapshot with error: {}", e); + }) + } + + async fn persist_object_snapshot_deletion_chunk( + &self, + objects_snapshot_deletions: Vec, + ) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + let guard = self + .metrics + .checkpoint_db_commit_latency_objects_snapshot_chunks + .start_timer(); + + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + for deletion_chunk in + objects_snapshot_deletions.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) + { + diesel::delete( + objects_snapshot::table.filter( + objects_snapshot::object_id.eq_any( + deletion_chunk + .iter() + .map(|o| o.object_id.clone()) + .collect::>(), + ), + ), + ) + .execute(conn) + .await + .map_err(IndexerError::from) + .context("Failed to write object deletion to PostgresDB")?; + } + Ok::<(), IndexerError>(()) + } + .scope_boxed() + }) + .await + .tap_ok(|_| { + let elapsed = guard.stop_and_record(); + info!( + elapsed, + "Deleted {} chunked object snapshots", + objects_snapshot_deletions.len(), + ); + }) + .tap_err(|e| { + tracing::error!( + "Failed to persist object snapshot deletions with error: {}", + e + ); + }) + } + + async fn persist_objects_history_chunk( + &self, + stored_objects_history: Vec, + ) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + let guard = self + .metrics + .checkpoint_db_commit_latency_objects_history_chunks + .start_timer(); + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + for stored_objects_history_chunk in + stored_objects_history.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) + { + let error_message = concat!( + "Failed to write to ", + stringify!((objects_history::table)), + " DB" + ); + diesel::insert_into(objects_history::table) + .values(stored_objects_history_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await + .map_err(IndexerError::from) + .context(error_message)?; + } + Ok::<(), IndexerError>(()) + } + 
.scope_boxed() + }) + .await + .tap_ok(|_| { + guard.stop_and_record(); + }) + .tap_err(|e| { + tracing::error!("Failed to persist object history with error: {}", e); + }) + } + + async fn persist_full_objects_history_chunk( + &self, + objects: Vec, + ) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + + let guard = self + .metrics + .checkpoint_db_commit_latency_full_objects_history_chunks + .start_timer(); + + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + for objects_chunk in objects.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) { + diesel::insert_into(full_objects_history::table) + .values(objects_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await + .map_err(IndexerError::from) + .context("Failed to write to full_objects_history table")?; + } + + Ok(()) + } + .scope_boxed() + }) + .await + .tap_ok(|_| { + let elapsed = guard.stop_and_record(); + info!( + elapsed, + "Persisted {} chunked full objects history", + objects.len(), + ); + }) + .tap_err(|e| { + tracing::error!("Failed to persist full object history with error: {}", e); + }) + } + + async fn persist_objects_version_chunk( + &self, + object_versions: Vec, + ) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + + let guard = self + .metrics + .checkpoint_db_commit_latency_objects_version_chunks + .start_timer(); + + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + for object_version_chunk in object_versions.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) + { + diesel::insert_into(objects_version::table) + .values(object_version_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await + .map_err(IndexerError::from) + .context("Failed to write to objects_version table")?; + } + Ok::<(), IndexerError>(()) + } + .scope_boxed() + }) + .await + .tap_ok(|_| { + let elapsed = guard.stop_and_record(); + info!( + elapsed, + "Persisted {} chunked object versions", + object_versions.len(), + ); + }) + .tap_err(|e| { + tracing::error!("Failed to persist object versions with error: {}", e); + }) + } + + async fn persist_raw_checkpoints_impl( + &self, + raw_checkpoints: &[StoredRawCheckpoint], + ) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + diesel::insert_into(raw_checkpoints::table) + .values(raw_checkpoints) + .on_conflict_do_nothing() + .execute(conn) + .await + .map_err(IndexerError::from) + .context("Failed to write to raw_checkpoints table")?; + Ok::<(), IndexerError>(()) + } + .scope_boxed() + }) + .await + } + + async fn persist_checkpoints( + &self, + checkpoints: Vec, + ) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + + let Some(first_checkpoint) = checkpoints.as_slice().first() else { + return Ok(()); + }; + + // If the first checkpoint has sequence number 0, we need to persist the digest as + // chain identifier. 
+ if first_checkpoint.sequence_number == 0 { + let checkpoint_digest = first_checkpoint.checkpoint_digest.into_inner().to_vec(); + self.persist_protocol_configs_and_feature_flags(checkpoint_digest.clone()) + .await?; + self.persist_chain_identifier(checkpoint_digest).await?; + } + let guard = self + .metrics + .checkpoint_db_commit_latency_checkpoints + .start_timer(); + + let stored_cp_txs = checkpoints.iter().map(StoredCpTx::from).collect::>(); + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + for stored_cp_tx_chunk in stored_cp_txs.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) { + diesel::insert_into(pruner_cp_watermark::table) + .values(stored_cp_tx_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await + .map_err(IndexerError::from) + .context("Failed to write to pruner_cp_watermark table")?; + } + Ok::<(), IndexerError>(()) + } + .scope_boxed() + }) + .await + .tap_ok(|_| { + info!( + "Persisted {} pruner_cp_watermark rows.", + stored_cp_txs.len(), + ); + }) + .tap_err(|e| { + tracing::error!("Failed to persist pruner_cp_watermark with error: {}", e); + })?; + + let stored_checkpoints = checkpoints + .iter() + .map(StoredCheckpoint::from) + .collect::>(); + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + for stored_checkpoint_chunk in + stored_checkpoints.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) + { + diesel::insert_into(checkpoints::table) + .values(stored_checkpoint_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await + .map_err(IndexerError::from) + .context("Failed to write to checkpoints table")?; + let time_now_ms = chrono::Utc::now().timestamp_millis(); + for stored_checkpoint in stored_checkpoint_chunk { + self.metrics + .db_commit_lag_ms + .set(time_now_ms - stored_checkpoint.timestamp_ms); + self.metrics + .max_committed_checkpoint_sequence_number + .set(stored_checkpoint.sequence_number); + self.metrics + .committed_checkpoint_timestamp_ms + .set(stored_checkpoint.timestamp_ms); + } + + for stored_checkpoint in stored_checkpoint_chunk { + info!( + "Indexer lag: \ + persisted checkpoint {} with time now {} and checkpoint time {}", + stored_checkpoint.sequence_number, + time_now_ms, + stored_checkpoint.timestamp_ms + ); + } + } + Ok::<(), IndexerError>(()) + } + .scope_boxed() + }) + .await + .tap_ok(|_| { + let elapsed = guard.stop_and_record(); + info!( + elapsed, + "Persisted {} checkpoints", + stored_checkpoints.len() + ); + }) + .tap_err(|e| { + tracing::error!("Failed to persist checkpoints with error: {}", e); + }) + } + + async fn persist_transactions_chunk( + &self, + transactions: Vec, + ) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + let guard = self + .metrics + .checkpoint_db_commit_latency_transactions_chunks + .start_timer(); + let transformation_guard = self + .metrics + .checkpoint_db_commit_latency_transactions_chunks_transformation + .start_timer(); + let transactions = transactions + .iter() + .map(StoredTransaction::from) + .collect::>(); + drop(transformation_guard); + + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + for transaction_chunk in transactions.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) { + let error_message = concat!( + "Failed to write to ", + stringify!((transactions::table)), + " DB" + ); + diesel::insert_into(transactions::table) + .values(transaction_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await + .map_err(IndexerError::from) + .context(error_message)?; + } + Ok::<(), IndexerError>(()) + 
} + .scope_boxed() + }) + .await + .tap_ok(|_| { + let elapsed = guard.stop_and_record(); + info!( + elapsed, + "Persisted {} chunked transactions", + transactions.len() + ); + }) + .tap_err(|e| { + tracing::error!("Failed to persist transactions with error: {}", e); + }) + } + + async fn persist_events_chunk(&self, events: Vec) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + let guard = self + .metrics + .checkpoint_db_commit_latency_events_chunks + .start_timer(); + let len = events.len(); + let events = events + .into_iter() + .map(StoredEvent::from) + .collect::>(); + + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + for event_chunk in events.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) { + let error_message = + concat!("Failed to write to ", stringify!((events::table)), " DB"); + diesel::insert_into(events::table) + .values(event_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await + .map_err(IndexerError::from) + .context(error_message)?; + } + Ok::<(), IndexerError>(()) + } + .scope_boxed() + }) + .await + .tap_ok(|_| { + let elapsed = guard.stop_and_record(); + info!(elapsed, "Persisted {} chunked events", len); + }) + .tap_err(|e| { + tracing::error!("Failed to persist events with error: {}", e); + }) + } + + async fn persist_packages(&self, packages: Vec) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + if packages.is_empty() { + return Ok(()); + } + let guard = self + .metrics + .checkpoint_db_commit_latency_packages + .start_timer(); + let packages = packages + .into_iter() + .map(StoredPackage::from) + .collect::>(); + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + for packages_chunk in packages.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) { + diesel::insert_into(packages::table) + .values(packages_chunk) + .on_conflict(packages::package_id) + .do_update() + .set(( + packages::package_id.eq(excluded(packages::package_id)), + packages::package_version.eq(excluded(packages::package_version)), + packages::move_package.eq(excluded(packages::move_package)), + packages::checkpoint_sequence_number + .eq(excluded(packages::checkpoint_sequence_number)), + )) + .execute(conn) + .await?; + } + Ok::<(), IndexerError>(()) + } + .scope_boxed() + }) + .await + .tap_ok(|_| { + let elapsed = guard.stop_and_record(); + info!(elapsed, "Persisted {} packages", packages.len()); + }) + .tap_err(|e| { + tracing::error!("Failed to persist packages with error: {}", e); + }) + } + + async fn persist_event_indices_chunk( + &self, + indices: Vec, + ) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + + let guard = self + .metrics + .checkpoint_db_commit_latency_event_indices_chunks + .start_timer(); + let len = indices.len(); + let ( + event_emit_packages, + event_emit_modules, + event_senders, + event_struct_packages, + event_struct_modules, + event_struct_names, + event_struct_instantiations, + ) = indices.into_iter().map(|i| i.split()).fold( + ( + Vec::new(), + Vec::new(), + Vec::new(), + Vec::new(), + Vec::new(), + Vec::new(), + Vec::new(), + ), + |( + mut event_emit_packages, + mut event_emit_modules, + mut event_senders, + mut event_struct_packages, + mut event_struct_modules, + mut event_struct_names, + mut event_struct_instantiations, + ), + index| { + event_emit_packages.push(index.0); + event_emit_modules.push(index.1); + event_senders.push(index.2); + event_struct_packages.push(index.3); + event_struct_modules.push(index.4); + event_struct_names.push(index.5); + 
event_struct_instantiations.push(index.6); + ( + event_emit_packages, + event_emit_modules, + event_senders, + event_struct_packages, + event_struct_modules, + event_struct_names, + event_struct_instantiations, + ) + }, + ); + + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + for event_emit_packages_chunk in + event_emit_packages.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) + { + diesel::insert_into(event_emit_package::table) + .values(event_emit_packages_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await?; + } + + for event_emit_modules_chunk in + event_emit_modules.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) + { + diesel::insert_into(event_emit_module::table) + .values(event_emit_modules_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await?; + } + + for event_senders_chunk in event_senders.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) { + diesel::insert_into(event_senders::table) + .values(event_senders_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await?; + } + + for event_struct_packages_chunk in + event_struct_packages.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) + { + diesel::insert_into(event_struct_package::table) + .values(event_struct_packages_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await?; + } + + for event_struct_modules_chunk in + event_struct_modules.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) + { + diesel::insert_into(event_struct_module::table) + .values(event_struct_modules_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await?; + } + + for event_struct_names_chunk in + event_struct_names.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) + { + diesel::insert_into(event_struct_name::table) + .values(event_struct_names_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await?; + } + + for event_struct_instantiations_chunk in + event_struct_instantiations.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) + { + diesel::insert_into(event_struct_instantiation::table) + .values(event_struct_instantiations_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await?; + } + Ok(()) + } + .scope_boxed() + }) + .await?; + + let elapsed = guard.stop_and_record(); + info!(elapsed, "Persisted {} chunked event indices", len); + Ok(()) + } + + async fn persist_tx_indices_chunk(&self, indices: Vec) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + + let guard = self + .metrics + .checkpoint_db_commit_latency_tx_indices_chunks + .start_timer(); + let len = indices.len(); + let ( + affected_addresses, + affected_objects, + input_objects, + changed_objects, + pkgs, + mods, + funs, + digests, + kinds, + ) = indices.into_iter().map(|i| i.split()).fold( + ( + Vec::new(), + Vec::new(), + Vec::new(), + Vec::new(), + Vec::new(), + Vec::new(), + Vec::new(), + Vec::new(), + Vec::new(), + ), + |( + mut tx_affected_addresses, + mut tx_affected_objects, + mut tx_input_objects, + mut tx_changed_objects, + mut tx_pkgs, + mut tx_mods, + mut tx_funs, + mut tx_digests, + mut tx_kinds, + ), + index| { + tx_affected_addresses.extend(index.0); + tx_affected_objects.extend(index.1); + tx_input_objects.extend(index.2); + tx_changed_objects.extend(index.3); + tx_pkgs.extend(index.4); + tx_mods.extend(index.5); + tx_funs.extend(index.6); + tx_digests.extend(index.7); + tx_kinds.extend(index.8); + ( + tx_affected_addresses, + tx_affected_objects, + tx_input_objects, + tx_changed_objects, + tx_pkgs, + tx_mods, + tx_funs, + tx_digests, + tx_kinds, + ) + }, + ); + + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + 
for affected_addresses_chunk in + affected_addresses.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) + { + diesel::insert_into(tx_affected_addresses::table) + .values(affected_addresses_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await?; + } + + for affected_objects_chunk in + affected_objects.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) + { + diesel::insert_into(tx_affected_objects::table) + .values(affected_objects_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await?; + } + + for input_objects_chunk in input_objects.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) { + diesel::insert_into(tx_input_objects::table) + .values(input_objects_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await?; + } + + for changed_objects_chunk in + changed_objects.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) + { + diesel::insert_into(tx_changed_objects::table) + .values(changed_objects_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await?; + } + + for pkgs_chunk in pkgs.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) { + diesel::insert_into(tx_calls_pkg::table) + .values(pkgs_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await?; + } + + for mods_chunk in mods.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) { + diesel::insert_into(tx_calls_mod::table) + .values(mods_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await?; + } + + for funs_chunk in funs.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) { + diesel::insert_into(tx_calls_fun::table) + .values(funs_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await?; + } + + for digests_chunk in digests.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) { + diesel::insert_into(tx_digests::table) + .values(digests_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await?; + } + + for kinds_chunk in kinds.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) { + diesel::insert_into(tx_kinds::table) + .values(kinds_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await?; + } + + Ok(()) + } + .scope_boxed() + }) + .await?; + + let elapsed = guard.stop_and_record(); + info!(elapsed, "Persisted {} chunked tx_indices", len); + Ok(()) + } + + async fn persist_epoch(&self, epoch: EpochToCommit) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + let guard = self + .metrics + .checkpoint_db_commit_latency_epoch + .start_timer(); + let epoch_id = epoch.new_epoch.epoch; + + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + if let Some(last_epoch) = &epoch.last_epoch { + let last_epoch_id = last_epoch.epoch; + + info!(last_epoch_id, "Persisting epoch end data."); + diesel::update(epochs::table.filter(epochs::epoch.eq(last_epoch_id))) + .set(last_epoch) + .execute(conn) + .await?; + } + + let epoch_id = epoch.new_epoch.epoch; + info!(epoch_id, "Persisting epoch beginning info"); + let error_message = + concat!("Failed to write to ", stringify!((epochs::table)), " DB"); + diesel::insert_into(epochs::table) + .values(epoch.new_epoch) + .on_conflict_do_nothing() + .execute(conn) + .await + .map_err(IndexerError::from) + .context(error_message)?; + Ok::<(), IndexerError>(()) + } + .scope_boxed() + }) + .await + .tap_ok(|_| { + let elapsed = guard.stop_and_record(); + info!(elapsed, epoch_id, "Persisted epoch beginning info"); + }) + .tap_err(|e| { + tracing::error!("Failed to persist epoch with error: {}", e); + }) + } + + async fn advance_epoch(&self, epoch_to_commit: EpochToCommit) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + let last_epoch_id = 
epoch_to_commit.last_epoch.as_ref().map(|e| e.epoch); + // partition_0 has been created, so no need to advance it. + if let Some(last_epoch_id) = last_epoch_id { + let last_db_epoch: Option = epochs::table + .filter(epochs::epoch.eq(last_epoch_id)) + .first::(&mut connection) + .await + .optional() + .map_err(Into::into) + .context("Failed to read last epoch from PostgresDB")?; + if let Some(last_epoch) = last_db_epoch { + let epoch_partition_data = + EpochPartitionData::compose_data(epoch_to_commit, last_epoch); + let table_partitions = self.partition_manager.get_table_partitions().await?; + for (table, (_, last_partition)) in table_partitions { + // Only advance epoch partition for epoch partitioned tables. + if !self + .partition_manager + .get_strategy(&table) + .is_epoch_partitioned() + { + continue; + } + let guard = self.metrics.advance_epoch_latency.start_timer(); + self.partition_manager + .advance_epoch(table.clone(), last_partition, &epoch_partition_data) + .await?; + let elapsed = guard.stop_and_record(); + info!( + elapsed, + "Advanced epoch partition {} for table {}", + last_partition, + table.clone() + ); + } + } else { + tracing::error!("Last epoch: {} from PostgresDB is None.", last_epoch_id); + } + } + + Ok(()) + } + + async fn prune_checkpoints_table(&self, cp: u64) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + diesel::delete( + checkpoints::table.filter(checkpoints::sequence_number.eq(cp as i64)), + ) + .execute(conn) + .await + .map_err(IndexerError::from) + .context("Failed to prune checkpoints table")?; + + Ok::<(), IndexerError>(()) + } + .scope_boxed() + }) + .await + } + + async fn prune_event_indices_table( + &self, + min_tx: u64, + max_tx: u64, + ) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + + let (min_tx, max_tx) = (min_tx as i64, max_tx as i64); + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + diesel::delete( + event_emit_module::table + .filter(event_emit_module::tx_sequence_number.between(min_tx, max_tx)), + ) + .execute(conn) + .await?; + + diesel::delete( + event_emit_package::table + .filter(event_emit_package::tx_sequence_number.between(min_tx, max_tx)), + ) + .execute(conn) + .await?; + + diesel::delete( + event_senders::table + .filter(event_senders::tx_sequence_number.between(min_tx, max_tx)), + ) + .execute(conn) + .await?; + + diesel::delete(event_struct_instantiation::table.filter( + event_struct_instantiation::tx_sequence_number.between(min_tx, max_tx), + )) + .execute(conn) + .await?; + + diesel::delete( + event_struct_module::table + .filter(event_struct_module::tx_sequence_number.between(min_tx, max_tx)), + ) + .execute(conn) + .await?; + + diesel::delete( + event_struct_name::table + .filter(event_struct_name::tx_sequence_number.between(min_tx, max_tx)), + ) + .execute(conn) + .await?; + + diesel::delete( + event_struct_package::table + .filter(event_struct_package::tx_sequence_number.between(min_tx, max_tx)), + ) + .execute(conn) + .await?; + + Ok(()) + } + .scope_boxed() + }) + .await + } + + async fn prune_tx_indices_table(&self, min_tx: u64, max_tx: u64) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + + let (min_tx, max_tx) = (min_tx as i64, max_tx as i64); + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + diesel::delete( + tx_affected_addresses::table + .filter(tx_affected_addresses::tx_sequence_number.between(min_tx, 
max_tx)), + ) + .execute(conn) + .await?; + + diesel::delete( + tx_affected_objects::table + .filter(tx_affected_objects::tx_sequence_number.between(min_tx, max_tx)), + ) + .execute(conn) + .await?; + + diesel::delete( + tx_input_objects::table + .filter(tx_input_objects::tx_sequence_number.between(min_tx, max_tx)), + ) + .execute(conn) + .await?; + + diesel::delete( + tx_changed_objects::table + .filter(tx_changed_objects::tx_sequence_number.between(min_tx, max_tx)), + ) + .execute(conn) + .await?; + + diesel::delete( + tx_calls_pkg::table + .filter(tx_calls_pkg::tx_sequence_number.between(min_tx, max_tx)), + ) + .execute(conn) + .await?; + + diesel::delete( + tx_calls_mod::table + .filter(tx_calls_mod::tx_sequence_number.between(min_tx, max_tx)), + ) + .execute(conn) + .await?; + + diesel::delete( + tx_calls_fun::table + .filter(tx_calls_fun::tx_sequence_number.between(min_tx, max_tx)), + ) + .execute(conn) + .await?; + + diesel::delete( + tx_digests::table + .filter(tx_digests::tx_sequence_number.between(min_tx, max_tx)), + ) + .execute(conn) + .await?; + + Ok(()) + } + .scope_boxed() + }) + .await + } + + async fn prune_cp_tx_table(&self, cp: u64) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + diesel::delete( + pruner_cp_watermark::table + .filter(pruner_cp_watermark::checkpoint_sequence_number.eq(cp as i64)), + ) + .execute(conn) + .await + .map_err(IndexerError::from) + .context("Failed to prune pruner_cp_watermark table")?; + Ok(()) + } + .scope_boxed() + }) + .await + } + + async fn get_network_total_transactions_by_end_of_epoch( + &self, + epoch: u64, + ) -> Result, IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + // TODO: (wlmyng) update to read from epochs::network_total_transactions + + Ok(Some( + checkpoints::table + .filter(checkpoints::epoch.eq(epoch as i64)) + .select(checkpoints::network_total_transactions) + .order_by(checkpoints::sequence_number.desc()) + .first::(&mut connection) + .await + .map_err(Into::into) + .context("Failed to get network total transactions in epoch") + .map(|v| v as u64)?, + )) + } + + async fn update_watermarks_upper_bound( + &self, + watermark: CommitterWatermark, + ) -> Result<(), IndexerError> + where + E::Iterator: Iterator>, + { + use diesel_async::RunQueryDsl; + + let guard = self + .metrics + .checkpoint_db_commit_latency_watermarks + .start_timer(); + + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + let upper_bound_updates = E::iter() + .map(|table| StoredWatermark::from_upper_bound_update(table.as_ref(), watermark)) + .collect::>(); + async { + diesel::insert_into(watermarks::table) + .values(upper_bound_updates) + .on_conflict(watermarks::pipeline) + .do_update() + .set(( + watermarks::epoch_hi_inclusive.eq(excluded(watermarks::epoch_hi_inclusive)), + watermarks::checkpoint_hi_inclusive + .eq(excluded(watermarks::checkpoint_hi_inclusive)), + watermarks::tx_hi.eq(excluded(watermarks::tx_hi)), + )) + .execute(conn) + .await + .map_err(IndexerError::from) + .context("Failed to update watermarks upper bound")?; + + Ok::<(), IndexerError>(()) + } + .scope_boxed() + }) + .await + .tap_ok(|_| { + let elapsed = guard.stop_and_record(); + info!(elapsed, "Persisted watermarks"); + }) + .tap_err(|e| { + tracing::error!("Failed to persist watermarks with error: {}", e); + }) + } + + async fn map_epochs_to_cp_tx( + &self, + epochs: &[u64], + ) -> Result, 
IndexerError> { + use diesel_async::RunQueryDsl; + + let mut connection = self.pool.get().await?; + + let results: Vec<(i64, i64, Option)> = epochs::table + .filter(epochs::epoch.eq_any(epochs.iter().map(|&e| e as i64))) + .select(( + epochs::epoch, + epochs::first_checkpoint_id, + epochs::first_tx_sequence_number, + )) + .load::<(i64, i64, Option)>(&mut connection) + .await + .map_err(Into::into) + .context("Failed to fetch first checkpoint and tx seq num for epochs")?; + + Ok(results + .into_iter() + .map(|(epoch, checkpoint, tx)| { + ( + epoch as u64, + (checkpoint as u64, tx.unwrap_or_default() as u64), + ) + }) + .collect()) + } + + async fn update_watermarks_lower_bound( + &self, + watermarks: Vec<(PrunableTable, u64)>, + ) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + + let epochs: Vec = watermarks.iter().map(|(_table, epoch)| *epoch).collect(); + let epoch_mapping = self.map_epochs_to_cp_tx(&epochs).await?; + let lookups: Result, IndexerError> = watermarks + .into_iter() + .map(|(table, epoch)| { + let (checkpoint, tx) = epoch_mapping.get(&epoch).ok_or_else(|| { + IndexerError::PersistentStorageDataCorruptionError(format!( + "Epoch {} not found in epoch mapping", + epoch + )) + })?; + + Ok(StoredWatermark::from_lower_bound_update( + table.as_ref(), + epoch, + table.select_reader_lo(*checkpoint, *tx), + )) + }) + .collect(); + let lower_bound_updates = lookups?; + + let guard = self + .metrics + .checkpoint_db_commit_latency_watermarks + .start_timer(); + + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + use diesel::dsl::sql; + use diesel::query_dsl::methods::FilterDsl; + + diesel::insert_into(watermarks::table) + .values(lower_bound_updates) + .on_conflict(watermarks::pipeline) + .do_update() + .set(( + watermarks::reader_lo.eq(excluded(watermarks::reader_lo)), + watermarks::epoch_lo.eq(excluded(watermarks::epoch_lo)), + watermarks::timestamp_ms.eq(sql::( + "(EXTRACT(EPOCH FROM CURRENT_TIMESTAMP) * 1000)::bigint", + )), + )) + .filter(excluded(watermarks::reader_lo).gt(watermarks::reader_lo)) + .filter(excluded(watermarks::epoch_lo).gt(watermarks::epoch_lo)) + .filter( + diesel::dsl::sql::( + "(EXTRACT(EPOCH FROM CURRENT_TIMESTAMP) * 1000)::bigint", + ) + .gt(watermarks::timestamp_ms), + ) + .execute(conn) + .await?; + + Ok::<(), IndexerError>(()) + } + .scope_boxed() + }) + .await + .tap_ok(|_| { + let elapsed = guard.stop_and_record(); + info!(elapsed, "Persisted watermarks"); + }) + .tap_err(|e| { + tracing::error!("Failed to persist watermarks with error: {}", e); + }) + } + + async fn get_watermarks(&self) -> Result<(Vec, i64), IndexerError> { + use diesel_async::RunQueryDsl; + + // read_only transaction, otherwise this will block and get blocked by write transactions to + // the same table. 
+ read_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + let stored = watermarks::table + .load::(conn) + .await + .map_err(Into::into) + .context("Failed reading watermarks from PostgresDB")?; + + let timestamp = diesel::select(diesel::dsl::sql::( + "(EXTRACT(EPOCH FROM CURRENT_TIMESTAMP) * 1000)::bigint", + )) + .get_result(conn) + .await + .map_err(Into::into) + .context("Failed reading current timestamp from PostgresDB")?; + + Ok((stored, timestamp)) + } + .scope_boxed() + }) + .await + } +} + +#[async_trait] +impl IndexerStore for PgIndexerStore { + async fn get_latest_checkpoint_sequence_number(&self) -> Result, IndexerError> { + self.get_latest_checkpoint_sequence_number().await + } + + async fn get_available_epoch_range(&self) -> Result<(u64, u64), IndexerError> { + self.get_prunable_epoch_range().await + } + + async fn get_available_checkpoint_range(&self) -> Result<(u64, u64), IndexerError> { + self.get_available_checkpoint_range().await + } + + async fn get_chain_identifier(&self) -> Result>, IndexerError> { + self.get_chain_identifier().await + } + + async fn get_latest_object_snapshot_checkpoint_sequence_number( + &self, + ) -> Result, IndexerError> { + self.get_latest_object_snapshot_checkpoint_sequence_number() + .await + } + + async fn persist_objects( + &self, + object_changes: Vec, + ) -> Result<(), IndexerError> { + if object_changes.is_empty() { + return Ok(()); + } + let guard = self + .metrics + .checkpoint_db_commit_latency_objects + .start_timer(); + let (indexed_mutations, indexed_deletions) = retain_latest_indexed_objects(object_changes); + let object_mutations = indexed_mutations + .into_iter() + .map(StoredObject::from) + .collect::>(); + let object_deletions = indexed_deletions + .into_iter() + .map(StoredDeletedObject::from) + .collect::>(); + let mutation_len = object_mutations.len(); + let deletion_len = object_deletions.len(); + + let object_mutation_chunks = + chunk!(object_mutations, self.config.parallel_objects_chunk_size); + let object_deletion_chunks = + chunk!(object_deletions, self.config.parallel_objects_chunk_size); + let mutation_futures = object_mutation_chunks + .into_iter() + .map(|c| self.persist_object_mutation_chunk(c)) + .map(Either::Left); + let deletion_futures = object_deletion_chunks + .into_iter() + .map(|c| self.persist_object_deletion_chunk(c)) + .map(Either::Right); + let all_futures = mutation_futures.chain(deletion_futures).collect::>(); + + futures::future::join_all(all_futures) + .await + .into_iter() + .collect::, _>>() + .map_err(|e| { + IndexerError::PostgresWriteError(format!( + "Failed to persist all object mutation or deletion chunks: {:?}", + e + )) + })?; + let elapsed = guard.stop_and_record(); + info!( + elapsed, + "Persisted {} objects mutations and {} deletions", mutation_len, deletion_len + ); + Ok(()) + } + + async fn persist_objects_snapshot( + &self, + object_changes: Vec, + ) -> Result<(), IndexerError> { + if object_changes.is_empty() { + return Ok(()); + } + let guard = self + .metrics + .checkpoint_db_commit_latency_objects_snapshot + .start_timer(); + let (indexed_mutations, indexed_deletions) = retain_latest_indexed_objects(object_changes); + let object_snapshot_mutations: Vec = indexed_mutations + .into_iter() + .map(StoredObjectSnapshot::from) + .collect(); + let object_snapshot_deletions: Vec = indexed_deletions + .into_iter() + .map(StoredObjectSnapshot::from) + .collect(); + let mutation_len = object_snapshot_mutations.len(); + let deletion_len = object_snapshot_deletions.len(); 
+ let object_snapshot_mutation_chunks = chunk!( + object_snapshot_mutations, + self.config.parallel_objects_chunk_size + ); + let object_snapshot_deletion_chunks = chunk!( + object_snapshot_deletions, + self.config.parallel_objects_chunk_size + ); + let mutation_futures = object_snapshot_mutation_chunks + .into_iter() + .map(|c| self.persist_object_snapshot_mutation_chunk(c)) + .map(Either::Left) + .collect::>(); + let deletion_futures = object_snapshot_deletion_chunks + .into_iter() + .map(|c| self.persist_object_snapshot_deletion_chunk(c)) + .map(Either::Right) + .collect::>(); + let all_futures = mutation_futures + .into_iter() + .chain(deletion_futures) + .collect::>(); + futures::future::join_all(all_futures) + .await + .into_iter() + .collect::, _>>() + .map_err(|e| { + IndexerError::PostgresWriteError(format!( + "Failed to persist object snapshot mutation or deletion chunks: {:?}", + e + )) + }) + .tap_ok(|_| { + let elapsed = guard.stop_and_record(); + info!( + elapsed, + "Persisted {} objects snapshot mutations and {} deletions", + mutation_len, + deletion_len + ); + }) + .tap_err(|e| { + tracing::error!( + "Failed to persist object snapshot mutation or deletion chunks: {:?}", + e + ) + })?; + Ok(()) + } + + async fn persist_object_history( + &self, + object_changes: Vec, + ) -> Result<(), IndexerError> { + let skip_history = std::env::var("SKIP_OBJECT_HISTORY") + .map(|val| val.eq_ignore_ascii_case("true")) + .unwrap_or(false); + if skip_history { + info!("skipping object history"); + return Ok(()); + } + + if object_changes.is_empty() { + return Ok(()); + } + let objects = make_objects_history_to_commit(object_changes); + let guard = self + .metrics + .checkpoint_db_commit_latency_objects_history + .start_timer(); + + let len = objects.len(); + let chunks = chunk!(objects, self.config.parallel_objects_chunk_size); + let futures = chunks + .into_iter() + .map(|c| self.persist_objects_history_chunk(c)) + .collect::>(); + + futures::future::join_all(futures) + .await + .into_iter() + .collect::, _>>() + .map_err(|e| { + IndexerError::PostgresWriteError(format!( + "Failed to persist all objects history chunks: {:?}", + e + )) + })?; + let elapsed = guard.stop_and_record(); + info!(elapsed, "Persisted {} objects history", len); + Ok(()) + } + + // TODO: There are quite some shared boiler-plate code in all functions. + // We should clean them up eventually. 
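As the TODO above notes, the persist_* implementations in this impl block all share the same chunk-then-join shape. The sketch below is illustrative only and not part of this diff: the helper name persist_in_chunks and the plain String error type are invented for the example, and only the futures crate (already used in this file) is assumed.

use futures::future::join_all;

/// Illustrative only: the chunk-and-join shape shared by the `persist_*` methods above.
/// `chunk_size` must be non-zero.
async fn persist_in_chunks<T, F, Fut>(
    rows: Vec<T>,
    chunk_size: usize,
    persist_chunk: F,
) -> Result<(), String>
where
    T: Clone,
    F: Fn(Vec<T>) -> Fut,
    Fut: std::future::Future<Output = Result<(), String>>,
{
    if rows.is_empty() {
        return Ok(());
    }
    // Split the rows into fixed-size chunks and persist each chunk concurrently.
    let futures: Vec<_> = rows
        .chunks(chunk_size)
        .map(|chunk| persist_chunk(chunk.to_vec()))
        .collect();
    // Collecting an iterator of `Result`s stops at the first error, mirroring the
    // `collect::<Result<_, _>>()` calls used by the real persist_* methods.
    join_all(futures).await.into_iter().collect()
}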
+ async fn persist_full_objects_history( + &self, + object_changes: Vec, + ) -> Result<(), IndexerError> { + let skip_history = std::env::var("SKIP_OBJECT_HISTORY") + .map(|val| val.eq_ignore_ascii_case("true")) + .unwrap_or(false); + if skip_history { + info!("skipping object history"); + return Ok(()); + } + + if object_changes.is_empty() { + return Ok(()); + } + let objects: Vec = object_changes + .into_iter() + .flat_map(|c| { + let TransactionObjectChangesToCommit { + changed_objects, + deleted_objects, + } = c; + changed_objects + .into_iter() + .map(|o| o.into()) + .chain(deleted_objects.into_iter().map(|o| o.into())) + }) + .collect(); + let guard = self + .metrics + .checkpoint_db_commit_latency_full_objects_history + .start_timer(); + + let len = objects.len(); + let chunks = chunk!(objects, self.config.parallel_objects_chunk_size); + let futures = chunks + .into_iter() + .map(|c| self.persist_full_objects_history_chunk(c)) + .collect::>(); + + futures::future::join_all(futures) + .await + .into_iter() + .collect::, _>>() + .map_err(|e| { + IndexerError::PostgresWriteError(format!( + "Failed to persist all full objects history chunks: {:?}", + e + )) + })?; + let elapsed = guard.stop_and_record(); + info!(elapsed, "Persisted {} full objects history", len); + Ok(()) + } + + async fn persist_objects_version( + &self, + object_versions: Vec, + ) -> Result<(), IndexerError> { + if object_versions.is_empty() { + return Ok(()); + } + + let guard = self + .metrics + .checkpoint_db_commit_latency_objects_version + .start_timer(); + + let len = object_versions.len(); + let chunks = chunk!(object_versions, self.config.parallel_objects_chunk_size); + let futures = chunks + .into_iter() + .map(|c| self.persist_objects_version_chunk(c)) + .collect::>(); + + futures::future::join_all(futures) + .await + .into_iter() + .collect::, _>>() + .map_err(|e| { + IndexerError::PostgresWriteError(format!( + "Failed to persist all objects version chunks: {:?}", + e + )) + })?; + + let elapsed = guard.stop_and_record(); + info!(elapsed, "Persisted {} object versions", len); + Ok(()) + } + + async fn persist_checkpoints( + &self, + checkpoints: Vec, + ) -> Result<(), IndexerError> { + self.persist_checkpoints(checkpoints).await + } + + async fn persist_transactions( + &self, + transactions: Vec, + ) -> Result<(), IndexerError> { + let guard = self + .metrics + .checkpoint_db_commit_latency_transactions + .start_timer(); + let len = transactions.len(); + + let chunks = chunk!(transactions, self.config.parallel_chunk_size); + let futures = chunks + .into_iter() + .map(|c| self.persist_transactions_chunk(c)) + .collect::>(); + + futures::future::join_all(futures) + .await + .into_iter() + .collect::, _>>() + .map_err(|e| { + IndexerError::PostgresWriteError(format!( + "Failed to persist all transactions chunks: {:?}", + e + )) + })?; + let elapsed = guard.stop_and_record(); + info!(elapsed, "Persisted {} transactions", len); + Ok(()) + } + + async fn persist_events(&self, events: Vec) -> Result<(), IndexerError> { + if events.is_empty() { + return Ok(()); + } + let len = events.len(); + let guard = self + .metrics + .checkpoint_db_commit_latency_events + .start_timer(); + let chunks = chunk!(events, self.config.parallel_chunk_size); + let futures = chunks + .into_iter() + .map(|c| self.persist_events_chunk(c)) + .collect::>(); + + futures::future::join_all(futures) + .await + .into_iter() + .collect::, _>>() + .map_err(|e| { + IndexerError::PostgresWriteError(format!( + "Failed to persist all events chunks: 
{:?}", + e + )) + })?; + let elapsed = guard.stop_and_record(); + info!(elapsed, "Persisted {} events", len); + Ok(()) + } + + async fn persist_displays( + &self, + display_updates: BTreeMap, + ) -> Result<(), IndexerError> { + if display_updates.is_empty() { + return Ok(()); + } + self.persist_display_updates(display_updates.values().cloned().collect::>()) + .await + } + + async fn persist_packages(&self, packages: Vec) -> Result<(), IndexerError> { + if packages.is_empty() { + return Ok(()); + } + self.persist_packages(packages).await + } + + async fn persist_event_indices(&self, indices: Vec) -> Result<(), IndexerError> { + if indices.is_empty() { + return Ok(()); + } + let len = indices.len(); + let guard = self + .metrics + .checkpoint_db_commit_latency_event_indices + .start_timer(); + let chunks = chunk!(indices, self.config.parallel_chunk_size); + + let futures = chunks + .into_iter() + .map(|chunk| self.persist_event_indices_chunk(chunk)) + .collect::>(); + futures::future::join_all(futures) + .await + .into_iter() + .collect::, _>>() + .map_err(|e| { + IndexerError::PostgresWriteError(format!( + "Failed to persist all event_indices chunks: {:?}", + e + )) + }) + .tap_ok(|_| { + let elapsed = guard.stop_and_record(); + info!(elapsed, "Persisted {} event_indices chunks", len); + }) + .tap_err(|e| tracing::error!("Failed to persist all event_indices chunks: {:?}", e))?; + Ok(()) + } + + async fn persist_tx_indices(&self, indices: Vec) -> Result<(), IndexerError> { + if indices.is_empty() { + return Ok(()); + } + let len = indices.len(); + let guard = self + .metrics + .checkpoint_db_commit_latency_tx_indices + .start_timer(); + let chunks = chunk!(indices, self.config.parallel_chunk_size); + + let futures = chunks + .into_iter() + .map(|chunk| self.persist_tx_indices_chunk(chunk)) + .collect::>(); + futures::future::join_all(futures) + .await + .into_iter() + .collect::, _>>() + .map_err(|e| { + IndexerError::PostgresWriteError(format!( + "Failed to persist all tx_indices chunks: {:?}", + e + )) + }) + .tap_ok(|_| { + let elapsed = guard.stop_and_record(); + info!(elapsed, "Persisted {} tx_indices chunks", len); + }) + .tap_err(|e| tracing::error!("Failed to persist all tx_indices chunks: {:?}", e))?; + Ok(()) + } + + async fn persist_epoch(&self, epoch: EpochToCommit) -> Result<(), IndexerError> { + self.persist_epoch(epoch).await + } + + async fn advance_epoch(&self, epoch: EpochToCommit) -> Result<(), IndexerError> { + self.advance_epoch(epoch).await + } + + async fn prune_epoch(&self, epoch: u64) -> Result<(), IndexerError> { + let (mut min_cp, max_cp) = match self.get_checkpoint_range_for_epoch(epoch).await? { + (min_cp, Some(max_cp)) => Ok((min_cp, max_cp)), + _ => Err(IndexerError::PostgresReadError(format!( + "Failed to get checkpoint range for epoch {}", + epoch + ))), + }?; + + // NOTE: for disaster recovery, min_cp is the min cp of the current epoch, which is likely + // partially pruned already. min_prunable_cp is the min cp to be pruned. + // By std::cmp::max, we will resume the pruning process from the next checkpoint, instead of + // the first cp of the current epoch. + let min_prunable_cp = self.get_min_prunable_checkpoint().await?; + min_cp = std::cmp::max(min_cp, min_prunable_cp); + for cp in min_cp..=max_cp { + // NOTE: the order of pruning tables is crucial: + // 1. prune checkpoints table, checkpoints table is the source table of available range, + // we prune it first to make sure that we always have full data for checkpoints within the available range; + // 2. 
then prune tx_* tables; + // 3. then prune pruner_cp_watermark table, which is the checkpoint pruning watermark table and also tx seq source + // of a checkpoint to prune tx_* tables; + // 4. lastly we prune epochs table when all checkpoints of the epoch have been pruned. + info!( + "Pruning checkpoint {} of epoch {} (min_prunable_cp: {})", + cp, epoch, min_prunable_cp + ); + self.prune_checkpoints_table(cp).await?; + + let (min_tx, max_tx) = self.get_transaction_range_for_checkpoint(cp).await?; + self.prune_tx_indices_table(min_tx, max_tx).await?; + info!( + "Pruned transactions for checkpoint {} from tx {} to tx {}", + cp, min_tx, max_tx + ); + self.prune_event_indices_table(min_tx, max_tx).await?; + info!( + "Pruned events of transactions for checkpoint {} from tx {} to tx {}", + cp, min_tx, max_tx + ); + self.metrics.last_pruned_transaction.set(max_tx as i64); + + self.prune_cp_tx_table(cp).await?; + info!("Pruned checkpoint {} of epoch {}", cp, epoch); + self.metrics.last_pruned_checkpoint.set(cp as i64); + } + + Ok(()) + } + + async fn upload_display(&self, epoch_number: u64) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + let mut connection = self.pool.get().await?; + let mut buffer = Cursor::new(Vec::new()); + { + let mut writer = Writer::from_writer(&mut buffer); + let displays = display::table + .load::(&mut connection) + .await + .map_err(Into::into) + .context("Failed to get display from database")?; + info!("Read {} displays", displays.len()); + writer + .write_record(["object_type", "id", "version", "bcs"]) + .map_err(|_| { + IndexerError::GcsError("Failed to write display to csv".to_string()) + })?; + for display in displays { + writer + .write_record(&[ + display.object_type, + hex::encode(display.id), + display.version.to_string(), + hex::encode(display.bcs), + ]) + .map_err(|_| IndexerError::GcsError("Failed to write to csv".to_string()))?; + } + writer + .flush() + .map_err(|_| IndexerError::GcsError("Failed to flush csv".to_string()))?; + } + + if let (Some(cred_path), Some(bucket)) = ( + self.config.gcs_cred_path.clone(), + self.config.gcs_display_bucket.clone(), + ) { + let remote_store_config = ObjectStoreConfig { + object_store: Some(ObjectStoreType::GCS), + bucket: Some(bucket), + google_service_account: Some(cred_path), + object_store_connection_limit: 200, + no_sign_request: false, + ..Default::default() + }; + let remote_store = remote_store_config.make().map_err(|e| { + IndexerError::GcsError(format!("Failed to make GCS remote store: {}", e)) + })?; + let path = Path::from(format!("display_{}.csv", epoch_number).as_str()); + put(&remote_store, &path, buffer.into_inner().into()) + .await + .map_err(|e| IndexerError::GcsError(format!("Failed to put to GCS: {}", e)))?; + } else { + warn!("Either GCS cred path or bucket is not set, skipping display upload."); + } + Ok(()) + } + + async fn restore_display(&self, bytes: bytes::Bytes) -> Result<(), IndexerError> { + let cursor = Cursor::new(bytes); + let mut csv_reader = ReaderBuilder::new().has_headers(true).from_reader(cursor); + let displays = csv_reader + .deserialize() + .collect::, csv::Error>>() + .map_err(|e| { + IndexerError::GcsError(format!("Failed to deserialize display records: {}", e)) + })?; + self.persist_display_updates(displays).await + } + + async fn get_network_total_transactions_by_end_of_epoch( + &self, + epoch: u64, + ) -> Result, IndexerError> { + self.get_network_total_transactions_by_end_of_epoch(epoch) + .await + } + + /// Persist protocol configs and feature flags until the 
protocol version for the latest epoch + /// we have stored in the db, inclusive. + async fn persist_protocol_configs_and_feature_flags( + &self, + chain_id: Vec, + ) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + + let chain_id = ChainIdentifier::from( + CheckpointDigest::try_from(chain_id).expect("Unable to convert chain id"), + ); + + let mut all_configs = vec![]; + let mut all_flags = vec![]; + + let (start_version, end_version) = self.get_protocol_version_index_range().await?; + info!( + "Persisting protocol configs with start_version: {}, end_version: {}", + start_version, end_version + ); + + // Gather all protocol configs and feature flags for all versions between start and end. + for version in start_version..=end_version { + let protocol_configs = ProtocolConfig::get_for_version_if_supported( + (version as u64).into(), + chain_id.chain(), + ) + .ok_or(IndexerError::GenericError(format!( + "Unable to fetch protocol version {} and chain {:?}", + version, + chain_id.chain() + )))?; + let configs_vec = protocol_configs + .attr_map() + .into_iter() + .map(|(k, v)| StoredProtocolConfig { + protocol_version: version, + config_name: k, + config_value: v.map(|v| v.to_string()), + }) + .collect::>(); + all_configs.extend(configs_vec); + + let feature_flags = protocol_configs + .feature_map() + .into_iter() + .map(|(k, v)| StoredFeatureFlag { + protocol_version: version, + flag_name: k, + flag_value: v, + }) + .collect::>(); + all_flags.extend(feature_flags); + } + + // Now insert all of them into the db. + // TODO: right now the size of these updates is manageable but later we may consider batching. + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + for config_chunk in all_configs.chunks(PG_COMMIT_CHUNK_SIZE_INTRA_DB_TX) { + diesel::insert_into(protocol_configs::table) + .values(config_chunk) + .on_conflict_do_nothing() + .execute(conn) + .await + .map_err(IndexerError::from) + .context("Failed to write to protocol_configs table")?; + } + + diesel::insert_into(feature_flags::table) + .values(all_flags.clone()) + .on_conflict_do_nothing() + .execute(conn) + .await + .map_err(IndexerError::from) + .context("Failed to write to feature_flags table")?; + Ok::<(), IndexerError>(()) + } + .scope_boxed() + }) + .await?; + Ok(()) + } + + async fn persist_chain_identifier( + &self, + checkpoint_digest: Vec, + ) -> Result<(), IndexerError> { + use diesel_async::RunQueryDsl; + + transaction_with_retry(&self.pool, PG_DB_COMMIT_SLEEP_DURATION, |conn| { + async { + diesel::insert_into(chain_identifier::table) + .values(StoredChainIdentifier { checkpoint_digest }) + .on_conflict_do_nothing() + .execute(conn) + .await + .map_err(IndexerError::from) + .context("failed to write to chain_identifier table")?; + Ok::<(), IndexerError>(()) + } + .scope_boxed() + }) + .await?; + Ok(()) + } + + async fn persist_raw_checkpoints( + &self, + checkpoints: Vec, + ) -> Result<(), IndexerError> { + self.persist_raw_checkpoints_impl(&checkpoints).await + } + + async fn update_watermarks_upper_bound( + &self, + watermark: CommitterWatermark, + ) -> Result<(), IndexerError> + where + E::Iterator: Iterator>, + { + self.update_watermarks_upper_bound::(watermark).await + } + + async fn update_watermarks_lower_bound( + &self, + watermarks: Vec<(PrunableTable, u64)>, + ) -> Result<(), IndexerError> { + self.update_watermarks_lower_bound(watermarks).await + } + + async fn get_watermarks(&self) -> Result<(Vec, i64), IndexerError> { + self.get_watermarks().await + } +} + +fn 
make_objects_history_to_commit( + tx_object_changes: Vec, +) -> Vec { + let deleted_objects: Vec = tx_object_changes + .clone() + .into_iter() + .flat_map(|changes| changes.deleted_objects) + .map(|o| o.into()) + .collect(); + let mutated_objects: Vec = tx_object_changes + .into_iter() + .flat_map(|changes| changes.changed_objects) + .map(|o| o.into()) + .collect(); + deleted_objects.into_iter().chain(mutated_objects).collect() +} + +// Partition object changes into deletions and mutations, +// within partition of mutations or deletions, retain the latest with highest version; +// For overlappings of mutations and deletions, only keep one with higher version. +// This is necessary b/c after this step, DB commit will be done in parallel and not in order. +fn retain_latest_indexed_objects( + tx_object_changes: Vec, +) -> (Vec, Vec) { + // Only the last deleted / mutated object will be in the map, + // b/c tx_object_changes are in order and versions always increment, + let (mutations, deletions) = tx_object_changes + .into_iter() + .flat_map(|change| { + change + .changed_objects + .into_iter() + .map(Either::Left) + .chain( + change + .deleted_objects + .into_iter() + .map(Either::Right), + ) + }) + .fold( + (HashMap::::new(), HashMap::::new()), + |(mut mutations, mut deletions), either_change| { + match either_change { + // Remove mutation / deletion with a following deletion / mutation, + // b/c following deletion / mutation always has a higher version. + // Technically, assertions below are not required, double check just in case. + Either::Left(mutation) => { + let id = mutation.object.id(); + let mutation_version = mutation.object.version(); + if let Some(existing) = deletions.remove(&id) { + assert!( + existing.object_version < mutation_version.value(), + "Mutation version ({:?}) should be greater than existing deletion version ({:?}) for object {:?}", + mutation_version, + existing.object_version, + id + ); + } + if let Some(existing) = mutations.insert(id, mutation) { + assert!( + existing.object.version() < mutation_version, + "Mutation version ({:?}) should be greater than existing mutation version ({:?}) for object {:?}", + mutation_version, + existing.object.version(), + id + ); + } + } + Either::Right(deletion) => { + let id = deletion.object_id; + let deletion_version = deletion.object_version; + if let Some(existing) = mutations.remove(&id) { + assert!( + existing.object.version().value() < deletion_version, + "Deletion version ({:?}) should be greater than existing mutation version ({:?}) for object {:?}", + deletion_version, + existing.object.version(), + id + ); + } + if let Some(existing) = deletions.insert(id, deletion) { + assert!( + existing.object_version < deletion_version, + "Deletion version ({:?}) should be greater than existing deletion version ({:?}) for object {:?}", + deletion_version, + existing.object_version, + id + ); + } + } + } + (mutations, deletions) + }, + ); + ( + mutations.into_values().collect(), + deletions.into_values().collect(), + ) +} diff --git a/crates/sui-mvr-indexer/src/store/pg_partition_manager.rs b/crates/sui-mvr-indexer/src/store/pg_partition_manager.rs new file mode 100644 index 0000000000000..876a1b9c56146 --- /dev/null +++ b/crates/sui-mvr-indexer/src/store/pg_partition_manager.rs @@ -0,0 +1,224 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use diesel::sql_types::{BigInt, VarChar}; +use diesel::QueryableByName; +use diesel_async::scoped_futures::ScopedFutureExt; +use std::collections::{BTreeMap, HashMap}; +use std::time::Duration; +use tracing::{error, info}; + +use crate::database::ConnectionPool; +use crate::errors::IndexerError; +use crate::handlers::EpochToCommit; +use crate::models::epoch::StoredEpochInfo; +use crate::store::transaction_with_retry; + +const GET_PARTITION_SQL: &str = r" +SELECT parent.relname AS table_name, + MIN(CAST(SUBSTRING(child.relname FROM '\d+$') AS BIGINT)) AS first_partition, + MAX(CAST(SUBSTRING(child.relname FROM '\d+$') AS BIGINT)) AS last_partition +FROM pg_inherits + JOIN pg_class parent ON pg_inherits.inhparent = parent.oid + JOIN pg_class child ON pg_inherits.inhrelid = child.oid + JOIN pg_namespace nmsp_parent ON nmsp_parent.oid = parent.relnamespace + JOIN pg_namespace nmsp_child ON nmsp_child.oid = child.relnamespace +WHERE parent.relkind = 'p' +GROUP BY table_name; +"; + +#[derive(Clone)] +pub struct PgPartitionManager { + pool: ConnectionPool, + + partition_strategies: HashMap<&'static str, PgPartitionStrategy>, +} + +#[derive(Clone, Copy)] +pub enum PgPartitionStrategy { + CheckpointSequenceNumber, + TxSequenceNumber, + ObjectId, +} + +impl PgPartitionStrategy { + pub fn is_epoch_partitioned(&self) -> bool { + matches!( + self, + Self::CheckpointSequenceNumber | Self::TxSequenceNumber + ) + } +} + +#[derive(Clone, Debug)] +pub struct EpochPartitionData { + last_epoch: u64, + next_epoch: u64, + last_epoch_start_cp: u64, + next_epoch_start_cp: u64, + last_epoch_start_tx: u64, + next_epoch_start_tx: u64, +} + +impl EpochPartitionData { + pub fn compose_data(epoch: EpochToCommit, last_db_epoch: StoredEpochInfo) -> Self { + let last_epoch = last_db_epoch.epoch as u64; + let last_epoch_start_cp = last_db_epoch.first_checkpoint_id as u64; + let next_epoch = epoch.new_epoch_id(); + let next_epoch_start_cp = epoch.new_epoch_first_checkpoint_id(); + let next_epoch_start_tx = epoch.new_epoch_first_tx_sequence_number(); + let last_epoch_start_tx = + next_epoch_start_tx - epoch.last_epoch_total_transactions().unwrap(); + + Self { + last_epoch, + next_epoch, + last_epoch_start_cp, + next_epoch_start_cp, + last_epoch_start_tx, + next_epoch_start_tx, + } + } +} + +impl PgPartitionManager { + pub fn new(pool: ConnectionPool) -> Result { + let mut partition_strategies = HashMap::new(); + partition_strategies.insert("events", PgPartitionStrategy::TxSequenceNumber); + partition_strategies.insert("transactions", PgPartitionStrategy::TxSequenceNumber); + partition_strategies.insert("objects_version", PgPartitionStrategy::ObjectId); + let manager = Self { + pool, + partition_strategies, + }; + Ok(manager) + } + + pub async fn get_table_partitions(&self) -> Result, IndexerError> { + #[derive(QueryableByName, Debug, Clone)] + struct PartitionedTable { + #[diesel(sql_type = VarChar)] + table_name: String, + #[diesel(sql_type = BigInt)] + first_partition: i64, + #[diesel(sql_type = BigInt)] + last_partition: i64, + } + + let mut connection = self.pool.get().await?; + + Ok( + diesel_async::RunQueryDsl::load(diesel::sql_query(GET_PARTITION_SQL), &mut connection) + .await? + .into_iter() + .map(|table: PartitionedTable| { + ( + table.table_name, + (table.first_partition as u64, table.last_partition as u64), + ) + }) + .collect(), + ) + } + + /// Tries to fetch the partitioning strategy for the given partitioned table. 
Defaults to + /// `CheckpointSequenceNumber` as the majority of our tables are partitioned on an epoch's + /// checkpoints today. + pub fn get_strategy(&self, table_name: &str) -> PgPartitionStrategy { + self.partition_strategies + .get(table_name) + .copied() + .unwrap_or(PgPartitionStrategy::CheckpointSequenceNumber) + } + + pub fn determine_epoch_partition_range( + &self, + table_name: &str, + data: &EpochPartitionData, + ) -> Option<(u64, u64)> { + match self.get_strategy(table_name) { + PgPartitionStrategy::CheckpointSequenceNumber => { + Some((data.last_epoch_start_cp, data.next_epoch_start_cp)) + } + PgPartitionStrategy::TxSequenceNumber => { + Some((data.last_epoch_start_tx, data.next_epoch_start_tx)) + } + PgPartitionStrategy::ObjectId => None, + } + } + + pub async fn advance_epoch( + &self, + table: String, + last_partition: u64, + data: &EpochPartitionData, + ) -> Result<(), IndexerError> { + let Some(partition_range) = self.determine_epoch_partition_range(&table, data) else { + return Ok(()); + }; + if data.next_epoch == 0 { + tracing::info!("Epoch 0 partition has been created in the initial setup."); + return Ok(()); + } + if last_partition == data.last_epoch { + transaction_with_retry(&self.pool, Duration::from_secs(10), |conn| { + async { + diesel_async::RunQueryDsl::execute( + diesel::sql_query("CALL advance_partition($1, $2, $3, $4, $5)") + .bind::(table.clone()) + .bind::(data.last_epoch as i64) + .bind::(data.next_epoch as i64) + .bind::(partition_range.0 as i64) + .bind::(partition_range.1 as i64), + conn, + ) + .await?; + + Ok(()) + } + .scope_boxed() + }) + .await?; + + info!( + "Advanced epoch partition for table {} from {} to {}, prev partition upper bound {}", + table, last_partition, data.next_epoch, partition_range.0 + ); + } else if last_partition != data.next_epoch { + // skip when the partition is already advanced once, which is possible when indexer + // crashes and restarts; error otherwise. + error!( + "Epoch partition for table {} is not in sync with the last epoch {}.", + table, data.last_epoch + ); + } else { + info!( + "Epoch has been advanced to {} already, skipping.", + data.next_epoch + ); + } + Ok(()) + } + + pub async fn drop_table_partition( + &self, + table: String, + partition: u64, + ) -> Result<(), IndexerError> { + transaction_with_retry(&self.pool, Duration::from_secs(10), |conn| { + async { + diesel_async::RunQueryDsl::execute( + diesel::sql_query("CALL drop_partition($1, $2)") + .bind::(table.clone()) + .bind::(partition as i64), + conn, + ) + .await?; + Ok(()) + } + .scope_boxed() + }) + .await?; + Ok(()) + } +} diff --git a/crates/sui-mvr-indexer/src/store/query.rs b/crates/sui-mvr-indexer/src/store/query.rs new file mode 100644 index 0000000000000..93d57b298044d --- /dev/null +++ b/crates/sui-mvr-indexer/src/store/query.rs @@ -0,0 +1,329 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use sui_json_rpc_types::SuiObjectDataFilter; +use sui_types::base_types::ObjectID; + +pub trait DBFilter { + fn to_objects_history_sql(&self, cursor: Option, limit: usize, columns: Vec<&str>) + -> String; + fn to_latest_objects_sql(&self, cursor: Option, limit: usize, columns: Vec<&str>) -> String; +} + +impl DBFilter for SuiObjectDataFilter { + fn to_objects_history_sql( + &self, + cursor: Option, + limit: usize, + columns: Vec<&str>, + ) -> String { + let inner_clauses = to_clauses(self); + let inner_clauses = if let Some(inner_clauses) = inner_clauses { + format!("\n AND {inner_clauses}") + } else { + "".to_string() + }; + let outer_clauses = to_outer_clauses(self); + let outer_clauses = if let Some(outer_clauses) = outer_clauses { + format!("\nAND {outer_clauses}") + } else { + "".to_string() + }; + let cursor = if let Some(cursor) = cursor { + format!("\n AND o.object_id > '{cursor}'") + } else { + "".to_string() + }; + + let columns = columns + .iter() + .map(|c| format!("t1.{c}")) + .collect::>() + .join(", "); + // NOTE: order by checkpoint DESC so that whenever a row from checkpoint is available, + // we will pick that over the one from fast-path, which has checkpoint of -1. + format!( + "SELECT {columns} +FROM (SELECT DISTINCT ON (o.object_id) * + FROM objects_history o + WHERE o.checkpoint <= $1{cursor}{inner_clauses} + ORDER BY o.object_id, version, o.checkpoint DESC) AS t1 +WHERE t1.object_status NOT IN ('deleted', 'wrapped', 'unwrapped_then_deleted'){outer_clauses} +LIMIT {limit};" + ) + } + + fn to_latest_objects_sql( + &self, + cursor: Option, + limit: usize, + columns: Vec<&str>, + ) -> String { + let columns = columns + .iter() + .map(|c| format!("o.{c}")) + .collect::>() + .join(", "); + + let cursor = if let Some(cursor) = cursor { + format!(" AND o.object_id > '{cursor}'") + } else { + "".to_string() + }; + + let inner_clauses = to_latest_objects_clauses(self); + let inner_clauses = if let Some(inner_clauses) = inner_clauses { + format!(" AND {inner_clauses}") + } else { + "".to_string() + }; + + format!( + "SELECT {columns} +FROM objects o WHERE o.object_status NOT IN ('deleted', 'wrapped', 'unwrapped_then_deleted'){cursor}{inner_clauses} +LIMIT {limit};" + ) + } +} + +fn to_latest_objects_clauses(filter: &SuiObjectDataFilter) -> Option { + match filter { + SuiObjectDataFilter::AddressOwner(a) => Some(format!( + "(o.owner_type = 'address_owner' AND o.owner_address = '{a}')" + )), + _ => None, + } +} + +fn to_clauses(filter: &SuiObjectDataFilter) -> Option { + match filter { + SuiObjectDataFilter::MatchAll(sub_filters) => { + let sub_filters = sub_filters.iter().flat_map(to_clauses).collect::>(); + if sub_filters.is_empty() { + None + } else if sub_filters.len() == 1 { + Some(sub_filters[0].to_string()) + } else { + Some(format!("({})", sub_filters.join(" AND "))) + } + } + SuiObjectDataFilter::MatchAny(sub_filters) => { + let sub_filters = sub_filters.iter().flat_map(to_clauses).collect::>(); + if sub_filters.is_empty() { + // Any default to false + Some("FALSE".to_string()) + } else if sub_filters.len() == 1 { + Some(sub_filters[0].to_string()) + } else { + Some(format!("({})", sub_filters.join(" OR "))) + } + } + SuiObjectDataFilter::MatchNone(sub_filters) => { + let sub_filters = sub_filters.iter().flat_map(to_clauses).collect::>(); + if sub_filters.is_empty() { + None + } else { + Some(format!("NOT ({})", sub_filters.join(" OR "))) + } + } + SuiObjectDataFilter::Package(p) => Some(format!("o.object_type LIKE '{}::%'", 
p.to_hex_literal())), + SuiObjectDataFilter::MoveModule { package, module } => Some(format!( + "o.object_type LIKE '{}::{}::%'", + package.to_hex_literal(), + module + )), + SuiObjectDataFilter::StructType(s) => { + // If people do not provide type_params, we will match all type_params + // e.g. `0x2::coin::Coin` can match `0x2::coin::Coin<0x2::sui::SUI>` + if s.type_params.is_empty() { + Some(format!("o.object_type LIKE '{s}%'")) + } else { + Some(format!("o.object_type = '{s}'")) + } + }, + SuiObjectDataFilter::AddressOwner(a) => { + Some(format!("((o.owner_type = 'address_owner' AND o.owner_address = '{a}') OR (o.old_owner_type = 'address_owner' AND o.old_owner_address = '{a}'))")) + } + SuiObjectDataFilter::ObjectOwner(o) => { + Some(format!("((o.owner_type = 'object_owner' AND o.owner_address = '{o}') OR (o.old_owner_type = 'object_owner' AND o.old_owner_address = '{o}'))")) + } + SuiObjectDataFilter::ObjectId(id) => { + Some(format!("o.object_id = '{id}'")) + } + SuiObjectDataFilter::ObjectIds(ids) => { + if ids.is_empty() { + None + } else { + let ids = ids + .iter() + .map(|o| o.to_string()) + .collect::>() + .join(", "); + Some(format!("o.object_id IN '{ids}'")) + } + } + SuiObjectDataFilter::Version(v) => Some(format!("o.version = {v}")), + } +} + +fn to_outer_clauses(filter: &SuiObjectDataFilter) -> Option { + match filter { + SuiObjectDataFilter::MatchNone(sub_filters) => { + let sub_filters = sub_filters + .iter() + .flat_map(to_outer_clauses) + .collect::>(); + if sub_filters.is_empty() { + None + } else { + Some(format!("NOT ({})", sub_filters.join(" OR "))) + } + } + SuiObjectDataFilter::MatchAll(sub_filters) => { + let sub_filters = sub_filters + .iter() + .flat_map(to_outer_clauses) + .collect::>(); + if sub_filters.is_empty() { + None + } else if sub_filters.len() == 1 { + Some(sub_filters[0].to_string()) + } else { + Some(format!("({})", sub_filters.join(" AND "))) + } + } + SuiObjectDataFilter::MatchAny(sub_filters) => { + let sub_filters = sub_filters + .iter() + .flat_map(to_outer_clauses) + .collect::>(); + if sub_filters.is_empty() { + None + } else if sub_filters.len() == 1 { + Some(sub_filters[0].to_string()) + } else { + Some(format!("({})", sub_filters.join(" OR "))) + } + } + SuiObjectDataFilter::AddressOwner(a) => Some(format!("t1.owner_address = '{a}'")), + _ => None, + } +} + +#[cfg(test)] +mod test { + use std::str::FromStr; + + use move_core_types::ident_str; + + use sui_json_rpc_types::SuiObjectDataFilter; + use sui_types::base_types::{ObjectID, SuiAddress}; + use sui_types::parse_sui_struct_tag; + + use crate::store::query::DBFilter; + + #[test] + fn test_address_filter() { + let address = SuiAddress::from_str( + "0x92dd4d9b0150c251661d821583ef078024ae9e9ee11063e216500861eec7f381", + ) + .unwrap(); + let filter = SuiObjectDataFilter::AddressOwner(address); + + let expected_sql = "SELECT t1.* +FROM (SELECT DISTINCT ON (o.object_id) * + FROM objects_history o + WHERE o.checkpoint <= $1 + AND ((o.owner_type = 'address_owner' AND o.owner_address = '0x92dd4d9b0150c251661d821583ef078024ae9e9ee11063e216500861eec7f381') OR (o.old_owner_type = 'address_owner' AND o.old_owner_address = '0x92dd4d9b0150c251661d821583ef078024ae9e9ee11063e216500861eec7f381')) + ORDER BY o.object_id, version, o.checkpoint DESC) AS t1 +WHERE t1.object_status NOT IN ('deleted', 'wrapped', 'unwrapped_then_deleted') +AND t1.owner_address = '0x92dd4d9b0150c251661d821583ef078024ae9e9ee11063e216500861eec7f381' +LIMIT 100;"; + assert_eq!( + expected_sql, + filter.to_objects_history_sql(None, 
100, vec!["*"]) + ); + } + + #[test] + fn test_move_module_filter() { + let filter = SuiObjectDataFilter::MoveModule { + package: ObjectID::from_str( + "0x485d947e293f07e659127dc5196146b49cdf2efbe4b233f4d293fc56aff2aa17", + ) + .unwrap(), + module: ident_str!("test_module").into(), + }; + let expected_sql = "SELECT t1.* +FROM (SELECT DISTINCT ON (o.object_id) * + FROM objects_history o + WHERE o.checkpoint <= $1 + AND o.object_type LIKE '0x485d947e293f07e659127dc5196146b49cdf2efbe4b233f4d293fc56aff2aa17::test_module::%' + ORDER BY o.object_id, version, o.checkpoint DESC) AS t1 +WHERE t1.object_status NOT IN ('deleted', 'wrapped', 'unwrapped_then_deleted') +LIMIT 100;"; + assert_eq!( + expected_sql, + filter.to_objects_history_sql(None, 100, vec!["*"]) + ); + } + + #[test] + fn test_empty_all_filter() { + let filter = SuiObjectDataFilter::MatchAll(vec![]); + let expected_sql = "SELECT t1.* +FROM (SELECT DISTINCT ON (o.object_id) * + FROM objects_history o + WHERE o.checkpoint <= $1 + ORDER BY o.object_id, version, o.checkpoint DESC) AS t1 +WHERE t1.object_status NOT IN ('deleted', 'wrapped', 'unwrapped_then_deleted') +LIMIT 100;"; + assert_eq!( + expected_sql, + filter.to_objects_history_sql(None, 100, vec!["*"]) + ); + } + + #[test] + fn test_empty_any_filter() { + let filter = SuiObjectDataFilter::MatchAny(vec![]); + let expected_sql = "SELECT t1.* +FROM (SELECT DISTINCT ON (o.object_id) * + FROM objects_history o + WHERE o.checkpoint <= $1 + AND FALSE + ORDER BY o.object_id, version, o.checkpoint DESC) AS t1 +WHERE t1.object_status NOT IN ('deleted', 'wrapped', 'unwrapped_then_deleted') +LIMIT 100;"; + assert_eq!( + expected_sql, + filter.to_objects_history_sql(None, 100, vec!["*"]) + ); + } + + #[test] + fn test_all_filter() { + let filter = SuiObjectDataFilter::MatchAll(vec![ + SuiObjectDataFilter::ObjectId( + ObjectID::from_str( + "0xef9fb75a7b3d4cb5551ef0b08c83528b94d5f5cd8be28b1d08a87dbbf3731738", + ) + .unwrap(), + ), + SuiObjectDataFilter::StructType(parse_sui_struct_tag("0x2::test::Test").unwrap()), + ]); + + let expected_sql = "SELECT t1.* +FROM (SELECT DISTINCT ON (o.object_id) * + FROM objects_history o + WHERE o.checkpoint <= $1 + AND (o.object_id = '0xef9fb75a7b3d4cb5551ef0b08c83528b94d5f5cd8be28b1d08a87dbbf3731738' AND o.object_type LIKE '0x2::test::Test%') + ORDER BY o.object_id, version, o.checkpoint DESC) AS t1 +WHERE t1.object_status NOT IN ('deleted', 'wrapped', 'unwrapped_then_deleted') +LIMIT 100;"; + assert_eq!( + expected_sql, + filter.to_objects_history_sql(None, 100, vec!["*"]) + ); + } +} diff --git a/crates/sui-mvr-indexer/src/system_package_task.rs b/crates/sui-mvr-indexer/src/system_package_task.rs new file mode 100644 index 0000000000000..8c2d6586f72d5 --- /dev/null +++ b/crates/sui-mvr-indexer/src/system_package_task.rs @@ -0,0 +1,66 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::indexer_reader::IndexerReader; +use std::time::Duration; +use sui_types::SYSTEM_PACKAGE_ADDRESSES; +use tokio_util::sync::CancellationToken; + +/// Background task responsible for evicting system packages from the package resolver's cache after +/// detecting an epoch boundary. +pub(crate) struct SystemPackageTask { + /// Holds the DB connection and also the package resolver to evict packages from. + reader: IndexerReader, + /// Signal to cancel the task. + cancel: CancellationToken, + /// Interval to sleep for between checks. 
+ interval: Duration, +} + +impl SystemPackageTask { + pub(crate) fn new( + reader: IndexerReader, + cancel: CancellationToken, + interval: Duration, + ) -> Self { + Self { + reader, + cancel, + interval, + } + } + + pub(crate) async fn run(&self) { + let mut last_epoch: i64 = 0; + loop { + tokio::select! { + _ = self.cancel.cancelled() => { + tracing::info!( + "Shutdown signal received, terminating system package eviction task" + ); + return; + } + _ = tokio::time::sleep(self.interval) => { + let next_epoch = match self.reader.get_latest_epoch_info_from_db().await { + Ok(epoch) => epoch.epoch, + Err(e) => { + tracing::error!("Failed to fetch latest epoch: {:?}", e); + continue; + } + }; + + if next_epoch > last_epoch { + last_epoch = next_epoch; + tracing::info!( + "Detected epoch boundary, evicting system packages from cache" + ); + self.reader + .package_resolver() + .package_store() + .evict(SYSTEM_PACKAGE_ADDRESSES.iter().copied()); + } + } + } + } + } +} diff --git a/crates/sui-mvr-indexer/src/tempdb.rs b/crates/sui-mvr-indexer/src/tempdb.rs new file mode 100644 index 0000000000000..d63f34a02a3de --- /dev/null +++ b/crates/sui-mvr-indexer/src/tempdb.rs @@ -0,0 +1,343 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use anyhow::anyhow; +use anyhow::Context; +use anyhow::Result; +use std::fs::OpenOptions; +use std::{ + path::{Path, PathBuf}, + process::{Child, Command}, + time::{Duration, Instant}, +}; +use tracing::trace; +use url::Url; + +/// A temporary, local postgres database +pub struct TempDb { + database: LocalDatabase, + + // Directory used for the ephemeral database. + // + // On drop the directory will be cleaned an its contents deleted. + // + // NOTE: This needs to be the last entry in this struct so that the database is dropped before + // and has a chance to gracefully shutdown before the directory is deleted. + dir: tempfile::TempDir, +} + +impl TempDb { + /// Create and start a new temporary postgres database. + /// + /// A fresh database will be initialized in a temporary directory that will be cleandup on drop. + /// The running `postgres` service will be serving traffic on an available, os-assigned port. 
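For orientation, a hypothetical usage sketch of the TempDb type introduced in this file follows; the function name is made up, and the only assumption is that the `initdb` and `postgres` binaries are on PATH, as the code below already requires.

fn tempdb_usage_example() -> anyhow::Result<()> {
    // Initializes a fresh data directory and starts `postgres` on an OS-assigned port.
    let db = TempDb::new()?;
    // Connection URL of the form postgres://postgres:postgrespw@localhost:<port>/postgres.
    let url = db.database().url().clone();
    println!("temporary database listening at {url}");
    // Dropping `db` stops the server (via `pg_ctl stop`) and removes the temp directory.
    Ok(())
}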
+ pub fn new() -> Result { + let dir = tempfile::TempDir::new()?; + let port = get_available_port(); + + let database = LocalDatabase::new_initdb(dir.path().to_owned(), port)?; + + Ok(Self { dir, database }) + } + + pub fn database(&self) -> &LocalDatabase { + &self.database + } + + pub fn database_mut(&mut self) -> &mut LocalDatabase { + &mut self.database + } + + pub fn dir(&self) -> &Path { + self.dir.path() + } +} + +#[derive(Debug)] +struct PostgresProcess { + dir: PathBuf, + inner: Child, +} + +impl PostgresProcess { + fn start(dir: PathBuf, port: u16) -> Result { + let child = Command::new("postgres") + // Set the data directory to use + .arg("-D") + .arg(&dir) + // Set the port to listen for incoming connections + .args(["-p", &port.to_string()]) + // Disable creating and listening on a UDS + .args(["-c", "unix_socket_directories="]) + // pipe stdout and stderr to files located in the data directory + .stdout( + OpenOptions::new() + .create(true) + .append(true) + .open(dir.join("stdout"))?, + ) + .stderr( + OpenOptions::new() + .create(true) + .append(true) + .open(dir.join("stderr"))?, + ) + .spawn() + .context("command not found: postgres")?; + + Ok(Self { dir, inner: child }) + } + + // https://www.postgresql.org/docs/16/app-pg-ctl.html + fn pg_ctl_stop(&mut self) -> Result<()> { + let output = Command::new("pg_ctl") + .arg("stop") + .arg("-D") + .arg(&self.dir) + .arg("-mfast") + .output() + .context("command not found: pg_ctl")?; + + if output.status.success() { + Ok(()) + } else { + Err(anyhow!("couldn't shut down postgres")) + } + } + + fn dump_stdout_stderr(&self) -> Result<(String, String)> { + let stdout = std::fs::read_to_string(self.dir.join("stdout"))?; + let stderr = std::fs::read_to_string(self.dir.join("stderr"))?; + + Ok((stdout, stderr)) + } +} + +impl Drop for PostgresProcess { + // When the Process struct goes out of scope we need to kill the child process + fn drop(&mut self) { + tracing::error!("dropping postgres"); + // check if the process has already been terminated + match self.inner.try_wait() { + // The child process has already terminated, perhaps due to a crash + Ok(Some(_)) => {} + + // The process is still running so we need to attempt to kill it + _ => { + if self.pg_ctl_stop().is_err() { + // Couldn't gracefully stop server so we'll just kill it + self.inner.kill().expect("postgres couldn't be killed"); + } + self.inner.wait().unwrap(); + } + } + + // Dump the contents of stdout/stderr if TRACE is enabled + if tracing::event_enabled!(tracing::Level::TRACE) { + if let Ok((stdout, stderr)) = self.dump_stdout_stderr() { + trace!("stdout: {stdout}"); + trace!("stderr: {stderr}"); + } + } + } +} + +/// Local instance of a `postgres` server. +/// +/// See for more info. +pub struct LocalDatabase { + dir: PathBuf, + port: u16, + url: Url, + process: Option, +} + +impl LocalDatabase { + /// Start a local `postgres` database service. + /// + /// `dir`: The location of the on-disk postgres database. The database must already exist at + /// the provided path. If you instead want to create a new database see `Self::new_initdb`. + /// + /// `port`: The port to listen for incoming connection on. + pub fn new(dir: PathBuf, port: u16) -> Result { + let url = format!( + "postgres://postgres:postgrespw@localhost:{port}/{db_name}", + db_name = "postgres" + ) + .parse() + .unwrap(); + let mut db = Self { + dir, + port, + url, + process: None, + }; + db.start()?; + Ok(db) + } + + /// Initialize and start a local `postgres` database service. 
+ /// + /// Unlike `Self::new`, this will initialize a clean database at the provided path. + pub fn new_initdb(dir: PathBuf, port: u16) -> Result { + initdb(&dir)?; + Self::new(dir, port) + } + + /// Return the url used to connect to the database + pub fn url(&self) -> &Url { + &self.url + } + + fn start(&mut self) -> Result<()> { + if self.process.is_none() { + self.process = Some(PostgresProcess::start(self.dir.clone(), self.port)?); + self.wait_till_ready() + .map_err(|e| anyhow!("unable to start postgres: {e:?}"))?; + } + + Ok(()) + } + + fn health_check(&mut self) -> Result<(), HealthCheckError> { + if let Some(p) = &mut self.process { + match p.inner.try_wait() { + // This would mean the child process has crashed + Ok(Some(_)) => Err(HealthCheckError::NotRunning), + + // This is the case where the process is still running + Ok(None) => pg_isready(self.port), + + // Some other unknown error + Err(e) => Err(HealthCheckError::Unknown(e.to_string())), + } + } else { + Err(HealthCheckError::NotRunning) + } + } + + fn wait_till_ready(&mut self) -> Result<(), HealthCheckError> { + let start = Instant::now(); + + while start.elapsed() < Duration::from_secs(10) { + match self.health_check() { + Ok(()) => return Ok(()), + Err(HealthCheckError::NotReady) => {} + Err(HealthCheckError::NotRunning | HealthCheckError::Unknown(_)) => break, + } + + std::thread::sleep(Duration::from_millis(50)); + } + + Err(HealthCheckError::Unknown( + "timeout reached when waiting for service to be ready".to_owned(), + )) + } +} + +#[derive(Debug)] +enum HealthCheckError { + NotRunning, + NotReady, + #[allow(unused)] + Unknown(String), +} + +/// Run the postgres `pg_isready` command to get the status of database +/// +/// See for more info +fn pg_isready(port: u16) -> Result<(), HealthCheckError> { + let output = Command::new("pg_isready") + .arg("--host=localhost") + .arg("-p") + .arg(port.to_string()) + .arg("--username=postgres") + .output() + .map_err(|e| HealthCheckError::Unknown(format!("command not found: pg_ctl: {e}")))?; + + trace!("pg_isready code: {:?}", output.status.code()); + trace!("pg_isready output: {}", output.stderr.escape_ascii()); + trace!("pg_isready output: {}", output.stdout.escape_ascii()); + if output.status.success() { + Ok(()) + } else { + Err(HealthCheckError::NotReady) + } +} + +/// Run the postgres `initdb` command to initialize a database at the provided path +/// +/// See for more info +fn initdb(dir: &Path) -> Result<()> { + let output = Command::new("initdb") + .arg("-D") + .arg(dir) + .arg("--no-instructions") + .arg("--username=postgres") + .output() + .context("command not found: initdb")?; + + if output.status.success() { + Ok(()) + } else { + Err(anyhow!( + "unable to initialize database: {:?}", + String::from_utf8(output.stderr) + )) + } +} + +/// Return an ephemeral, available port. On unix systems, the port returned will be in the +/// TIME_WAIT state ensuring that the OS won't hand out this port for some grace period. +/// Callers should be able to bind to this port given they use SO_REUSEADDR. 
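A small, hypothetical illustration of the contract described above; the function name is invented, and it assumes (as the comment suggests) that the listener is created with SO_REUSEADDR, which std's TcpListener typically sets on Unix.

fn bind_reserved_port_example() -> std::io::Result<std::net::TcpListener> {
    let port = get_available_port();
    // On Unix, `TcpListener::bind` typically sets SO_REUSEADDR, so re-binding a port
    // that is still in TIME_WAIT is expected to succeed.
    std::net::TcpListener::bind(("127.0.0.1", port))
}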
+pub fn get_available_port() -> u16 { + const MAX_PORT_RETRIES: u32 = 1000; + + for _ in 0..MAX_PORT_RETRIES { + if let Ok(port) = get_ephemeral_port() { + return port; + } + } + + panic!("Error: could not find an available port"); +} + +fn get_ephemeral_port() -> std::io::Result { + // Request a random available port from the OS + let listener = std::net::TcpListener::bind(("127.0.0.1", 0))?; + let addr = listener.local_addr()?; + + // Create and accept a connection (which we'll promptly drop) in order to force the port + // into the TIME_WAIT state, ensuring that the port will be reserved from some limited + // amount of time (roughly 60s on some Linux systems) + let _sender = std::net::TcpStream::connect(addr)?; + let _incoming = listener.accept()?; + + Ok(addr.port()) +} + +#[cfg(test)] +mod test { + #[tokio::test] + async fn smoketest() { + use crate::database::Connection; + use crate::tempdb::TempDb; + use diesel_async::RunQueryDsl; + + telemetry_subscribers::init_for_testing(); + + let db = TempDb::new().unwrap(); + println!("dir: {:?}", db.dir.path()); + + let url = db.database.url(); + println!("url: {}", url.as_str()); + let mut connection = Connection::dedicated(url).await.unwrap(); + + // Run a simple query to verify the db can properly be queried + let resp = diesel::sql_query("SELECT datname FROM pg_database") + .execute(&mut connection) + .await + .unwrap(); + println!("resp: {:?}", resp); + } +} diff --git a/crates/sui-mvr-indexer/src/test_utils.rs b/crates/sui-mvr-indexer/src/test_utils.rs new file mode 100644 index 0000000000000..431d0dc5854bc --- /dev/null +++ b/crates/sui-mvr-indexer/src/test_utils.rs @@ -0,0 +1,341 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use mysten_metrics::init_metrics; +use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; + +use simulacrum::Simulacrum; +use std::net::SocketAddr; +use std::path::PathBuf; +use std::sync::Arc; +use std::time::Duration; +use sui_json_rpc_types::SuiTransactionBlockResponse; + +use crate::config::{IngestionConfig, RetentionConfig, SnapshotLagConfig, UploadOptions}; +use crate::database::Connection; +use crate::database::ConnectionPool; +use crate::db::ConnectionPoolConfig; +use crate::errors::IndexerError; +use crate::indexer::Indexer; +use crate::store::PgIndexerStore; +use crate::tempdb::get_available_port; +use crate::tempdb::TempDb; +use crate::IndexerMetrics; + +/// Wrapper over `Indexer::start_reader` to make it easier to configure an indexer jsonrpc reader +/// for testing. 
+pub async fn start_indexer_jsonrpc_for_testing( + db_url: String, + fullnode_url: String, + json_rpc_url: String, + cancel: Option, +) -> (JoinHandle>, CancellationToken) { + let token = cancel.unwrap_or_default(); + + // Reduce the connection pool size to 10 for testing + // to prevent maxing out + let pool_config = ConnectionPoolConfig { + pool_size: 5, + connection_timeout: Duration::from_secs(10), + statement_timeout: Duration::from_secs(30), + }; + + println!("db_url: {db_url}"); + println!("pool_config: {pool_config:?}"); + + let registry = prometheus::Registry::default(); + init_metrics(®istry); + + let pool = ConnectionPool::new(db_url.parse().unwrap(), pool_config) + .await + .unwrap(); + + let handle = { + let config = crate::config::JsonRpcConfig { + name_service_options: crate::config::NameServiceOptions::default(), + rpc_address: json_rpc_url.parse().unwrap(), + rpc_client_url: fullnode_url, + }; + let token_clone = token.clone(); + tokio::spawn( + async move { Indexer::start_reader(&config, ®istry, pool, token_clone).await }, + ) + }; + + (handle, token) +} + +/// Wrapper over `Indexer::start_writer_with_config` to make it easier to configure an indexer +/// writer for testing. If the config options are null, default values that have historically worked +/// for testing will be used. +pub async fn start_indexer_writer_for_testing( + db_url: String, + snapshot_config: Option, + retention_config: Option, + data_ingestion_path: Option, + cancel: Option, + start_checkpoint: Option, + end_checkpoint: Option, +) -> ( + PgIndexerStore, + JoinHandle>, + CancellationToken, +) { + let token = cancel.unwrap_or_default(); + let snapshot_config = snapshot_config.unwrap_or(SnapshotLagConfig { + snapshot_min_lag: 5, + sleep_duration: 0, + }); + + // Reduce the connection pool size to 10 for testing to prevent maxing out + let pool_config = ConnectionPoolConfig { + pool_size: 5, + connection_timeout: Duration::from_secs(10), + statement_timeout: Duration::from_secs(30), + }; + + println!("db_url: {db_url}"); + println!("pool_config: {pool_config:?}"); + println!("{data_ingestion_path:?}"); + + let registry = prometheus::Registry::default(); + init_metrics(®istry); + let indexer_metrics = IndexerMetrics::new(®istry); + + let pool = ConnectionPool::new(db_url.parse().unwrap(), pool_config) + .await + .unwrap(); + let store = PgIndexerStore::new( + pool.clone(), + UploadOptions::default(), + indexer_metrics.clone(), + ); + + let handle = { + let connection = Connection::dedicated(&db_url.parse().unwrap()) + .await + .unwrap(); + crate::db::reset_database(connection).await.unwrap(); + + let store_clone = store.clone(); + let mut ingestion_config = IngestionConfig { + start_checkpoint, + end_checkpoint, + ..Default::default() + }; + ingestion_config.sources.data_ingestion_path = data_ingestion_path; + let token_clone = token.clone(); + + tokio::spawn(async move { + Indexer::start_writer( + ingestion_config, + store_clone, + indexer_metrics, + snapshot_config, + retention_config, + token_clone, + None, + ) + .await + }) + }; + + (store, handle, token) +} + +#[derive(Clone)] +pub struct SuiTransactionBlockResponseBuilder<'a> { + response: SuiTransactionBlockResponse, + full_response: &'a SuiTransactionBlockResponse, +} + +impl<'a> SuiTransactionBlockResponseBuilder<'a> { + pub fn new(full_response: &'a SuiTransactionBlockResponse) -> Self { + Self { + response: SuiTransactionBlockResponse::default(), + full_response, + } + } + + pub fn with_input(mut self) -> Self { + self.response = 
SuiTransactionBlockResponse { + transaction: self.full_response.transaction.clone(), + ..self.response + }; + self + } + + pub fn with_raw_input(mut self) -> Self { + self.response = SuiTransactionBlockResponse { + raw_transaction: self.full_response.raw_transaction.clone(), + ..self.response + }; + self + } + + pub fn with_effects(mut self) -> Self { + self.response = SuiTransactionBlockResponse { + effects: self.full_response.effects.clone(), + ..self.response + }; + self + } + + pub fn with_events(mut self) -> Self { + self.response = SuiTransactionBlockResponse { + events: self.full_response.events.clone(), + ..self.response + }; + self + } + + pub fn with_balance_changes(mut self) -> Self { + self.response = SuiTransactionBlockResponse { + balance_changes: self.full_response.balance_changes.clone(), + ..self.response + }; + self + } + + pub fn with_object_changes(mut self) -> Self { + self.response = SuiTransactionBlockResponse { + object_changes: self.full_response.object_changes.clone(), + ..self.response + }; + self + } + + pub fn with_input_and_changes(mut self) -> Self { + self.response = SuiTransactionBlockResponse { + transaction: self.full_response.transaction.clone(), + balance_changes: self.full_response.balance_changes.clone(), + object_changes: self.full_response.object_changes.clone(), + ..self.response + }; + self + } + + pub fn build(self) -> SuiTransactionBlockResponse { + SuiTransactionBlockResponse { + transaction: self.response.transaction, + raw_transaction: self.response.raw_transaction, + effects: self.response.effects, + events: self.response.events, + balance_changes: self.response.balance_changes, + object_changes: self.response.object_changes, + // Use full response for any fields that aren't showable + ..self.full_response.clone() + } + } +} + +/// Set up a test indexer fetching from a REST endpoint served by the given Simulacrum. 
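A brief usage note for the `SuiTransactionBlockResponseBuilder` defined above, before the `set_up` helper that follows: each `with_*` call copies one field out of the full response, and `build` falls back to the full response for the fields that are not individually showable. A hedged sketch, where `full` is assumed to be a response obtained elsewhere (e.g. from a fullnode query):

// Illustrative sketch only; not part of the diff.
use sui_json_rpc_types::SuiTransactionBlockResponse;

use crate::test_utils::SuiTransactionBlockResponseBuilder;

fn trimmed(full: &SuiTransactionBlockResponse) -> SuiTransactionBlockResponse {
    // Keep only the input transaction and effects; other showable fields stay at
    // their defaults, while non-showable fields (digest, checkpoint, ...) come from `full`.
    SuiTransactionBlockResponseBuilder::new(full)
        .with_input()
        .with_effects()
        .build()
}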
+pub async fn set_up( + sim: Arc, + data_ingestion_path: PathBuf, +) -> ( + JoinHandle<()>, + PgIndexerStore, + JoinHandle>, + TempDb, +) { + let database = TempDb::new().unwrap(); + let server_url: SocketAddr = format!("127.0.0.1:{}", get_available_port()) + .parse() + .unwrap(); + + let server_handle = tokio::spawn(async move { + sui_rest_api::RestService::new_without_version(sim) + .start_service(server_url) + .await; + }); + // Starts indexer + let (pg_store, pg_handle, _) = start_indexer_writer_for_testing( + database.database().url().as_str().to_owned(), + None, + None, + Some(data_ingestion_path), + None, /* cancel */ + None, /* start_checkpoint */ + None, /* end_checkpoint */ + ) + .await; + (server_handle, pg_store, pg_handle, database) +} + +pub async fn set_up_with_start_and_end_checkpoints( + sim: Arc, + data_ingestion_path: PathBuf, + start_checkpoint: u64, + end_checkpoint: u64, +) -> ( + JoinHandle<()>, + PgIndexerStore, + JoinHandle>, + TempDb, +) { + let database = TempDb::new().unwrap(); + let server_url: SocketAddr = format!("127.0.0.1:{}", get_available_port()) + .parse() + .unwrap(); + let server_handle = tokio::spawn(async move { + sui_rest_api::RestService::new_without_version(sim) + .start_service(server_url) + .await; + }); + // Starts indexer + let (pg_store, pg_handle, _) = start_indexer_writer_for_testing( + database.database().url().as_str().to_owned(), + None, + None, + Some(data_ingestion_path), + None, /* cancel */ + Some(start_checkpoint), + Some(end_checkpoint), + ) + .await; + (server_handle, pg_store, pg_handle, database) +} + +/// Wait for the indexer to catch up to the given checkpoint sequence number. +pub async fn wait_for_checkpoint( + pg_store: &PgIndexerStore, + checkpoint_sequence_number: u64, +) -> Result<(), IndexerError> { + tokio::time::timeout(Duration::from_secs(30), async { + while { + let cp_opt = pg_store + .get_latest_checkpoint_sequence_number() + .await + .unwrap(); + cp_opt.is_none() || (cp_opt.unwrap() < checkpoint_sequence_number) + } { + tokio::time::sleep(Duration::from_millis(100)).await; + } + }) + .await + .expect("Timeout waiting for indexer to catchup to checkpoint"); + Ok(()) +} + +/// Wait for the indexer to catch up to the given checkpoint sequence number for objects snapshot. +pub async fn wait_for_objects_snapshot( + pg_store: &PgIndexerStore, + checkpoint_sequence_number: u64, +) -> Result<(), IndexerError> { + tokio::time::timeout(Duration::from_secs(30), async { + while { + let cp_opt = pg_store + .get_latest_object_snapshot_checkpoint_sequence_number() + .await + .unwrap(); + cp_opt.is_none() || (cp_opt.unwrap() < checkpoint_sequence_number) + } { + tokio::time::sleep(Duration::from_millis(100)).await; + } + }) + .await + .expect("Timeout waiting for indexer to catchup to checkpoint for objects snapshot"); + Ok(()) +} diff --git a/crates/sui-mvr-indexer/src/types.rs b/crates/sui-mvr-indexer/src/types.rs new file mode 100644 index 0000000000000..6c88e3d27641a --- /dev/null +++ b/crates/sui-mvr-indexer/src/types.rs @@ -0,0 +1,671 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0
+
+use move_core_types::language_storage::StructTag;
+use rand::Rng;
+use serde::{Deserialize, Serialize};
+use serde_with::serde_as;
+use sui_json_rpc_types::{
+    ObjectChange, SuiTransactionBlockResponse, SuiTransactionBlockResponseOptions,
+};
+use sui_types::base_types::{ObjectDigest, SequenceNumber};
+use sui_types::base_types::{ObjectID, SuiAddress};
+use sui_types::crypto::AggregateAuthoritySignature;
+use sui_types::digests::TransactionDigest;
+use sui_types::dynamic_field::DynamicFieldType;
+use sui_types::effects::TransactionEffects;
+use sui_types::messages_checkpoint::{
+    CertifiedCheckpointSummary, CheckpointCommitment, CheckpointContents, CheckpointDigest,
+    CheckpointSequenceNumber, EndOfEpochData,
+};
+use sui_types::move_package::MovePackage;
+use sui_types::object::{Object, Owner};
+use sui_types::sui_serde::SuiStructTag;
+use sui_types::transaction::SenderSignedData;
+
+use crate::errors::IndexerError;
+
+pub type IndexerResult<T> = Result<T, IndexerError>;
+
+#[derive(Debug, Default)]
+pub struct IndexedCheckpoint {
+    // TODO: A lot of fields are now redundant with certified_checkpoint and checkpoint_contents.
+    pub sequence_number: u64,
+    pub checkpoint_digest: CheckpointDigest,
+    pub epoch: u64,
+    pub tx_digests: Vec<TransactionDigest>,
+    pub network_total_transactions: u64,
+    pub previous_checkpoint_digest: Option<CheckpointDigest>,
+    pub timestamp_ms: u64,
+    pub total_gas_cost: i64, // total gas cost could be negative
+    pub computation_cost: u64,
+    pub storage_cost: u64,
+    pub storage_rebate: u64,
+    pub non_refundable_storage_fee: u64,
+    pub checkpoint_commitments: Vec<CheckpointCommitment>,
+    pub validator_signature: AggregateAuthoritySignature,
+    pub successful_tx_num: usize,
+    pub end_of_epoch_data: Option<EndOfEpochData>,
+    pub end_of_epoch: bool,
+    pub min_tx_sequence_number: u64,
+    pub max_tx_sequence_number: u64,
+    // FIXME: Remove the Default derive and make these fields mandatory.
+    pub certified_checkpoint: Option<CertifiedCheckpointSummary>,
+    pub checkpoint_contents: Option<CheckpointContents>,
+}
+
+impl IndexedCheckpoint {
+    pub fn from_sui_checkpoint(
+        checkpoint: &CertifiedCheckpointSummary,
+        contents: &CheckpointContents,
+        successful_tx_num: usize,
+    ) -> Self {
+        let total_gas_cost = checkpoint.epoch_rolling_gas_cost_summary.computation_cost as i64
+            + checkpoint.epoch_rolling_gas_cost_summary.storage_cost as i64
+            - checkpoint.epoch_rolling_gas_cost_summary.storage_rebate as i64;
+        let tx_digests = contents.iter().map(|t| t.transaction).collect::<Vec<_>>();
+        let max_tx_sequence_number = checkpoint.network_total_transactions - 1;
+        // NOTE: + 1u64 first to avoid subtraction with overflow
+        let min_tx_sequence_number = max_tx_sequence_number + 1u64 - tx_digests.len() as u64;
+        let auth_sig = &checkpoint.auth_sig().signature;
+        Self {
+            sequence_number: checkpoint.sequence_number,
+            checkpoint_digest: *checkpoint.digest(),
+            epoch: checkpoint.epoch,
+            tx_digests,
+            previous_checkpoint_digest: checkpoint.previous_digest,
+            end_of_epoch_data: checkpoint.end_of_epoch_data.clone(),
+            end_of_epoch: checkpoint.end_of_epoch_data.clone().is_some(),
+            total_gas_cost,
+            computation_cost: checkpoint.epoch_rolling_gas_cost_summary.computation_cost,
+            storage_cost: checkpoint.epoch_rolling_gas_cost_summary.storage_cost,
+            storage_rebate: checkpoint.epoch_rolling_gas_cost_summary.storage_rebate,
+            non_refundable_storage_fee: checkpoint
+                .epoch_rolling_gas_cost_summary
+                .non_refundable_storage_fee,
+            successful_tx_num,
+            network_total_transactions: checkpoint.network_total_transactions,
+            timestamp_ms: checkpoint.timestamp_ms,
+            validator_signature: auth_sig.clone(),
+            checkpoint_commitments: checkpoint.checkpoint_commitments.clone(),
+            min_tx_sequence_number,
+            max_tx_sequence_number,
+            certified_checkpoint: Some(checkpoint.clone()),
+            checkpoint_contents: Some(contents.clone()),
+        }
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct IndexedEvent {
+    pub tx_sequence_number: u64,
+    pub event_sequence_number: u64,
+    pub checkpoint_sequence_number: u64,
+    pub transaction_digest: TransactionDigest,
+    pub sender: SuiAddress,
+    pub package: ObjectID,
+    pub module: String,
+    pub event_type: String,
+    pub event_type_package: ObjectID,
+    pub event_type_module: String,
+    /// Struct name of the event, without type parameters.
+    pub event_type_name: String,
+    pub bcs: Vec<u8>,
+    pub timestamp_ms: u64,
+}
+
+impl IndexedEvent {
+    pub fn from_event(
+        tx_sequence_number: u64,
+        event_sequence_number: u64,
+        checkpoint_sequence_number: u64,
+        transaction_digest: TransactionDigest,
+        event: &sui_types::event::Event,
+        timestamp_ms: u64,
+    ) -> Self {
+        Self {
+            tx_sequence_number,
+            event_sequence_number,
+            checkpoint_sequence_number,
+            transaction_digest,
+            sender: event.sender,
+            package: event.package_id,
+            module: event.transaction_module.to_string(),
+            event_type: event.type_.to_canonical_string(/* with_prefix */ true),
+            event_type_package: event.type_.address.into(),
+            event_type_module: event.type_.module.to_string(),
+            event_type_name: event.type_.name.to_string(),
+            bcs: event.contents.clone(),
+            timestamp_ms,
+        }
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct EventIndex {
+    pub tx_sequence_number: u64,
+    pub event_sequence_number: u64,
+    pub sender: SuiAddress,
+    pub emit_package: ObjectID,
+    pub emit_module: String,
+    pub type_package: ObjectID,
+    pub type_module: String,
+    /// Struct name of the event, without type parameters.
+    pub type_name: String,
+    /// Type instantiation of the event, with type name and type parameters, if any.
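To make the `type_instantiation` field declared on the next line concrete: `EventIndex::from_event` below derives it by splitting the event type's canonical string on the first two `::` separators and keeping the remainder, i.e. the struct name together with its type parameters. A self-contained sketch of that parsing step, using a made-up event type purely as an example:

// Illustrative sketch of how `type_instantiation` is derived; the event type is made up.
fn main() {
    let canonical = "0x0000000000000000000000000000000000000000000000000000000000000002::coin::CurrencyCreated<0x2::sui::SUI>";
    // Keep everything after the second "::": the struct name plus its type parameters.
    let type_instantiation = canonical.splitn(3, "::").collect::<Vec<_>>()[2].to_string();
    assert_eq!(type_instantiation, "CurrencyCreated<0x2::sui::SUI>");
}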
+ pub type_instantiation: String, +} + +// for ingestion test +impl EventIndex { + pub fn random() -> Self { + let mut rng = rand::thread_rng(); + EventIndex { + tx_sequence_number: rng.gen(), + event_sequence_number: rng.gen(), + sender: SuiAddress::random_for_testing_only(), + emit_package: ObjectID::random(), + emit_module: rng.gen::().to_string(), + type_package: ObjectID::random(), + type_module: rng.gen::().to_string(), + type_name: rng.gen::().to_string(), + type_instantiation: rng.gen::().to_string(), + } + } +} + +impl EventIndex { + pub fn from_event( + tx_sequence_number: u64, + event_sequence_number: u64, + event: &sui_types::event::Event, + ) -> Self { + let type_instantiation = event + .type_ + .to_canonical_string(/* with_prefix */ true) + .splitn(3, "::") + .collect::>()[2] + .to_string(); + Self { + tx_sequence_number, + event_sequence_number, + sender: event.sender, + emit_package: event.package_id, + emit_module: event.transaction_module.to_string(), + type_package: event.type_.address.into(), + type_module: event.type_.module.to_string(), + type_name: event.type_.name.to_string(), + type_instantiation, + } + } +} + +#[derive(Debug, Copy, Clone)] +pub enum OwnerType { + Immutable = 0, + Address = 1, + Object = 2, + Shared = 3, +} + +pub enum ObjectStatus { + Active = 0, + WrappedOrDeleted = 1, +} + +impl TryFrom for ObjectStatus { + type Error = IndexerError; + + fn try_from(value: i16) -> Result { + Ok(match value { + 0 => ObjectStatus::Active, + 1 => ObjectStatus::WrappedOrDeleted, + value => { + return Err(IndexerError::PersistentStorageDataCorruptionError(format!( + "{value} as ObjectStatus" + ))) + } + }) + } +} + +impl TryFrom for OwnerType { + type Error = IndexerError; + + fn try_from(value: i16) -> Result { + Ok(match value { + 0 => OwnerType::Immutable, + 1 => OwnerType::Address, + 2 => OwnerType::Object, + 3 => OwnerType::Shared, + value => { + return Err(IndexerError::PersistentStorageDataCorruptionError(format!( + "{value} as OwnerType" + ))) + } + }) + } +} + +// Returns owner_type, owner_address +pub fn owner_to_owner_info(owner: &Owner) -> (OwnerType, Option) { + match owner { + Owner::AddressOwner(address) => (OwnerType::Address, Some(*address)), + Owner::ObjectOwner(address) => (OwnerType::Object, Some(*address)), + Owner::Shared { .. 
} => (OwnerType::Shared, None), + Owner::Immutable => (OwnerType::Immutable, None), + } +} + +#[derive(Debug, Copy, Clone)] +pub enum DynamicFieldKind { + DynamicField = 0, + DynamicObject = 1, +} + +#[derive(Clone, Debug)] +pub struct IndexedObject { + pub checkpoint_sequence_number: CheckpointSequenceNumber, + pub object: Object, + pub df_kind: Option, +} + +impl IndexedObject { + pub fn random() -> Self { + let mut rng = rand::thread_rng(); + let random_address = SuiAddress::random_for_testing_only(); + IndexedObject { + checkpoint_sequence_number: rng.gen(), + object: Object::with_owner_for_testing(random_address), + df_kind: { + let random_value = rng.gen_range(0..3); + match random_value { + 0 => Some(DynamicFieldType::DynamicField), + 1 => Some(DynamicFieldType::DynamicObject), + _ => None, + } + }, + } + } +} + +impl IndexedObject { + pub fn from_object( + checkpoint_sequence_number: CheckpointSequenceNumber, + object: Object, + df_kind: Option, + ) -> Self { + Self { + checkpoint_sequence_number, + object, + df_kind, + } + } +} + +#[derive(Clone, Debug)] +pub struct IndexedDeletedObject { + pub object_id: ObjectID, + pub object_version: u64, + pub checkpoint_sequence_number: u64, +} + +impl IndexedDeletedObject { + pub fn random() -> Self { + let mut rng = rand::thread_rng(); + IndexedDeletedObject { + object_id: ObjectID::random(), + object_version: rng.gen(), + checkpoint_sequence_number: rng.gen(), + } + } +} + +#[derive(Debug)] +pub struct IndexedPackage { + pub package_id: ObjectID, + pub move_package: MovePackage, + pub checkpoint_sequence_number: u64, +} + +#[derive(Debug, Clone)] +pub enum TransactionKind { + SystemTransaction = 0, + ProgrammableTransaction = 1, +} + +#[derive(Debug, Clone)] +pub struct IndexedTransaction { + pub tx_sequence_number: u64, + pub tx_digest: TransactionDigest, + pub sender_signed_data: SenderSignedData, + pub effects: TransactionEffects, + pub checkpoint_sequence_number: u64, + pub timestamp_ms: u64, + pub object_changes: Vec, + pub balance_change: Vec, + pub events: Vec, + pub transaction_kind: TransactionKind, + pub successful_tx_num: u64, +} + +#[derive(Debug, Clone)] +pub struct TxIndex { + pub tx_sequence_number: u64, + pub tx_kind: TransactionKind, + pub transaction_digest: TransactionDigest, + pub checkpoint_sequence_number: u64, + pub input_objects: Vec, + pub changed_objects: Vec, + pub affected_objects: Vec, + pub payers: Vec, + pub sender: SuiAddress, + pub recipients: Vec, + pub move_calls: Vec<(ObjectID, String, String)>, +} + +impl TxIndex { + pub fn random() -> Self { + let mut rng = rand::thread_rng(); + TxIndex { + tx_sequence_number: rng.gen(), + tx_kind: if rng.gen_bool(0.5) { + TransactionKind::SystemTransaction + } else { + TransactionKind::ProgrammableTransaction + }, + transaction_digest: TransactionDigest::random(), + checkpoint_sequence_number: rng.gen(), + input_objects: (0..1000).map(|_| ObjectID::random()).collect(), + changed_objects: (0..1000).map(|_| ObjectID::random()).collect(), + affected_objects: (0..1000).map(|_| ObjectID::random()).collect(), + payers: (0..rng.gen_range(0..100)) + .map(|_| SuiAddress::random_for_testing_only()) + .collect(), + sender: SuiAddress::random_for_testing_only(), + recipients: (0..rng.gen_range(0..1000)) + .map(|_| SuiAddress::random_for_testing_only()) + .collect(), + move_calls: (0..rng.gen_range(0..1000)) + .map(|_| { + ( + ObjectID::random(), + rng.gen::().to_string(), + rng.gen::().to_string(), + ) + }) + .collect(), + } + } +} + +// ObjectChange is not bcs deserializable, 
IndexedObjectChange is. +#[serde_as] +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +pub enum IndexedObjectChange { + Published { + package_id: ObjectID, + version: SequenceNumber, + digest: ObjectDigest, + modules: Vec, + }, + Transferred { + sender: SuiAddress, + recipient: Owner, + #[serde_as(as = "SuiStructTag")] + object_type: StructTag, + object_id: ObjectID, + version: SequenceNumber, + digest: ObjectDigest, + }, + /// Object mutated. + Mutated { + sender: SuiAddress, + owner: Owner, + #[serde_as(as = "SuiStructTag")] + object_type: StructTag, + object_id: ObjectID, + version: SequenceNumber, + previous_version: SequenceNumber, + digest: ObjectDigest, + }, + /// Delete object + Deleted { + sender: SuiAddress, + #[serde_as(as = "SuiStructTag")] + object_type: StructTag, + object_id: ObjectID, + version: SequenceNumber, + }, + /// Wrapped object + Wrapped { + sender: SuiAddress, + #[serde_as(as = "SuiStructTag")] + object_type: StructTag, + object_id: ObjectID, + version: SequenceNumber, + }, + /// New object creation + Created { + sender: SuiAddress, + owner: Owner, + #[serde_as(as = "SuiStructTag")] + object_type: StructTag, + object_id: ObjectID, + version: SequenceNumber, + digest: ObjectDigest, + }, +} + +impl From for IndexedObjectChange { + fn from(oc: ObjectChange) -> Self { + match oc { + ObjectChange::Published { + package_id, + version, + digest, + modules, + } => Self::Published { + package_id, + version, + digest, + modules, + }, + ObjectChange::Transferred { + sender, + recipient, + object_type, + object_id, + version, + digest, + } => Self::Transferred { + sender, + recipient, + object_type, + object_id, + version, + digest, + }, + ObjectChange::Mutated { + sender, + owner, + object_type, + object_id, + version, + previous_version, + digest, + } => Self::Mutated { + sender, + owner, + object_type, + object_id, + version, + previous_version, + digest, + }, + ObjectChange::Deleted { + sender, + object_type, + object_id, + version, + } => Self::Deleted { + sender, + object_type, + object_id, + version, + }, + ObjectChange::Wrapped { + sender, + object_type, + object_id, + version, + } => Self::Wrapped { + sender, + object_type, + object_id, + version, + }, + ObjectChange::Created { + sender, + owner, + object_type, + object_id, + version, + digest, + } => Self::Created { + sender, + owner, + object_type, + object_id, + version, + digest, + }, + } + } +} + +impl From for ObjectChange { + fn from(val: IndexedObjectChange) -> Self { + match val { + IndexedObjectChange::Published { + package_id, + version, + digest, + modules, + } => ObjectChange::Published { + package_id, + version, + digest, + modules, + }, + IndexedObjectChange::Transferred { + sender, + recipient, + object_type, + object_id, + version, + digest, + } => ObjectChange::Transferred { + sender, + recipient, + object_type, + object_id, + version, + digest, + }, + IndexedObjectChange::Mutated { + sender, + owner, + object_type, + object_id, + version, + previous_version, + digest, + } => ObjectChange::Mutated { + sender, + owner, + object_type, + object_id, + version, + previous_version, + digest, + }, + IndexedObjectChange::Deleted { + sender, + object_type, + object_id, + version, + } => ObjectChange::Deleted { + sender, + object_type, + object_id, + version, + }, + IndexedObjectChange::Wrapped { + sender, + object_type, + object_id, + version, + } => ObjectChange::Wrapped { + sender, + object_type, + object_id, + version, + }, + IndexedObjectChange::Created { + sender, + owner, + object_type, 
+ object_id, + version, + digest, + } => ObjectChange::Created { + sender, + owner, + object_type, + object_id, + version, + digest, + }, + } + } +} + +// SuiTransactionBlockResponseWithOptions is only used on the reading path +pub struct SuiTransactionBlockResponseWithOptions { + pub response: SuiTransactionBlockResponse, + pub options: SuiTransactionBlockResponseOptions, +} + +impl From for SuiTransactionBlockResponse { + fn from(value: SuiTransactionBlockResponseWithOptions) -> Self { + let SuiTransactionBlockResponseWithOptions { response, options } = value; + + SuiTransactionBlockResponse { + digest: response.digest, + transaction: options.show_input.then_some(response.transaction).flatten(), + raw_transaction: options + .show_raw_input + .then_some(response.raw_transaction) + .unwrap_or_default(), + effects: options.show_effects.then_some(response.effects).flatten(), + events: options.show_events.then_some(response.events).flatten(), + object_changes: options + .show_object_changes + .then_some(response.object_changes) + .flatten(), + balance_changes: options + .show_balance_changes + .then_some(response.balance_changes) + .flatten(), + timestamp_ms: response.timestamp_ms, + confirmed_local_execution: response.confirmed_local_execution, + checkpoint: response.checkpoint, + errors: vec![], + raw_effects: options + .show_raw_effects + .then_some(response.raw_effects) + .unwrap_or_default(), + } + } +} diff --git a/crates/sui-mvr-indexer/tests/ingestion_tests.rs b/crates/sui-mvr-indexer/tests/ingestion_tests.rs new file mode 100644 index 0000000000000..351b243594e4b --- /dev/null +++ b/crates/sui-mvr-indexer/tests/ingestion_tests.rs @@ -0,0 +1,242 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 +use std::sync::Arc; + +use diesel::ExpressionMethods; +use diesel::QueryDsl; +use diesel_async::RunQueryDsl; +use simulacrum::Simulacrum; +use sui_mvr_indexer::errors::IndexerError; +use sui_mvr_indexer::handlers::TransactionObjectChangesToCommit; +use sui_mvr_indexer::models::{checkpoints::StoredCheckpoint, objects::StoredObjectSnapshot}; +use sui_mvr_indexer::schema::{checkpoints, objects_snapshot}; +use sui_mvr_indexer::store::indexer_store::IndexerStore; +use sui_mvr_indexer::test_utils::{ + set_up, set_up_with_start_and_end_checkpoints, wait_for_checkpoint, wait_for_objects_snapshot, +}; +use sui_mvr_indexer::types::EventIndex; +use sui_mvr_indexer::types::IndexedDeletedObject; +use sui_mvr_indexer::types::IndexedObject; +use sui_mvr_indexer::types::TxIndex; +use sui_types::base_types::SuiAddress; +use tempfile::tempdir; + +#[tokio::test] +pub async fn test_checkpoint_range_ingestion() -> Result<(), IndexerError> { + let tempdir = tempdir().unwrap(); + let mut sim = Simulacrum::new(); + let data_ingestion_path = tempdir.path().to_path_buf(); + sim.set_data_ingestion_path(data_ingestion_path.clone()); + + // Create multiple checkpoints + for _ in 0..10 { + let transfer_recipient = SuiAddress::random_for_testing_only(); + let (transaction, _) = sim.transfer_txn(transfer_recipient); + let (_, err) = sim.execute_transaction(transaction).unwrap(); + assert!(err.is_none()); + sim.create_checkpoint(); + } + + // Set up indexer with specific start and end checkpoints + let start_checkpoint = 2; + let end_checkpoint = 4; + let (_, pg_store, _, _database) = set_up_with_start_and_end_checkpoints( + Arc::new(sim), + data_ingestion_path, + start_checkpoint, + end_checkpoint, + ) + .await; + + // Wait for the indexer to catch up to the end checkpoint + 
wait_for_checkpoint(&pg_store, end_checkpoint).await?; + + // Verify that only checkpoints within the specified range were ingested + let mut connection = pg_store.pool().dedicated_connection().await.unwrap(); + let checkpoint_count: i64 = checkpoints::table + .count() + .get_result(&mut connection) + .await + .expect("Failed to count checkpoints"); + assert_eq!(checkpoint_count, 3, "Expected 3 checkpoints to be ingested"); + + // Verify the range of ingested checkpoints + let min_checkpoint = checkpoints::table + .select(diesel::dsl::min(checkpoints::sequence_number)) + .first::>(&mut connection) + .await + .expect("Failed to get min checkpoint") + .expect("Min checkpoint should be Some"); + let max_checkpoint = checkpoints::table + .select(diesel::dsl::max(checkpoints::sequence_number)) + .first::>(&mut connection) + .await + .expect("Failed to get max checkpoint") + .expect("Max checkpoint should be Some"); + assert_eq!( + min_checkpoint, start_checkpoint as i64, + "Minimum ingested checkpoint should be {}", + start_checkpoint + ); + assert_eq!( + max_checkpoint, end_checkpoint as i64, + "Maximum ingested checkpoint should be {}", + end_checkpoint + ); + + Ok(()) +} + +#[tokio::test] +pub async fn test_objects_snapshot() -> Result<(), IndexerError> { + let tempdir = tempdir().unwrap(); + let mut sim = Simulacrum::new(); + let data_ingestion_path = tempdir.path().to_path_buf(); + sim.set_data_ingestion_path(data_ingestion_path.clone()); + + // Run 10 transfer transactions and create 10 checkpoints + let mut last_transaction = None; + let total_checkpoint_sequence_number = 7usize; + for _ in 0..total_checkpoint_sequence_number { + let transfer_recipient = SuiAddress::random_for_testing_only(); + let (transaction, _) = sim.transfer_txn(transfer_recipient); + let (_, err) = sim.execute_transaction(transaction.clone()).unwrap(); + assert!(err.is_none()); + last_transaction = Some(transaction); + let _ = sim.create_checkpoint(); + } + + let (_, pg_store, _, _database) = set_up(Arc::new(sim), data_ingestion_path).await; + + // Wait for objects snapshot at checkpoint max_expected_checkpoint_sequence_number + let max_expected_checkpoint_sequence_number = total_checkpoint_sequence_number - 5; + wait_for_objects_snapshot(&pg_store, max_expected_checkpoint_sequence_number as u64).await?; + + let mut connection = pg_store.pool().dedicated_connection().await.unwrap(); + // Get max checkpoint_sequence_number from objects_snapshot table and assert it's expected + let max_checkpoint_sequence_number = objects_snapshot::table + .select(objects_snapshot::checkpoint_sequence_number) + .order(objects_snapshot::checkpoint_sequence_number.desc()) + .limit(1) + .first::(&mut connection) + .await + .expect("Failed to read max checkpoint_sequence_number from objects_snapshot"); + assert_eq!( + max_checkpoint_sequence_number, + max_expected_checkpoint_sequence_number as i64 + ); + + // Get the object state at max_expected_checkpoint_sequence_number and assert. 
+ let last_tx = last_transaction.unwrap(); + let obj_id = last_tx.gas()[0].0; + let gas_owner_id = last_tx.sender_address(); + + let snapshot_object = objects_snapshot::table + .filter(objects_snapshot::object_id.eq(obj_id.to_vec())) + .filter( + objects_snapshot::checkpoint_sequence_number + .eq(max_expected_checkpoint_sequence_number as i64), + ) + .first::(&mut connection) + .await + .expect("Failed reading object from objects_snapshot"); + // Assert that the object state is as expected at checkpoint max_expected_checkpoint_sequence_number + assert_eq!(snapshot_object.object_id, obj_id.to_vec()); + assert_eq!( + snapshot_object.checkpoint_sequence_number, + max_expected_checkpoint_sequence_number as i64 + ); + assert_eq!(snapshot_object.owner_type, Some(1)); + assert_eq!(snapshot_object.owner_id, Some(gas_owner_id.to_vec())); + Ok(()) +} + +#[tokio::test] +pub async fn test_objects_ingestion() -> Result<(), IndexerError> { + let tempdir = tempdir().unwrap(); + let mut sim = Simulacrum::new(); + let data_ingestion_path = tempdir.path().to_path_buf(); + sim.set_data_ingestion_path(data_ingestion_path.clone()); + + let (_, pg_store, _, _database) = set_up(Arc::new(sim), data_ingestion_path).await; + + let mut objects = Vec::new(); + for _ in 0..1000 { + objects.push(TransactionObjectChangesToCommit { + changed_objects: vec![IndexedObject::random()], + deleted_objects: vec![IndexedDeletedObject::random()], + }); + } + pg_store.persist_objects(objects).await?; + Ok(()) +} + +// test insert large batch of tx_indices +#[tokio::test] +pub async fn test_insert_large_batch_tx_indices() -> Result<(), IndexerError> { + let tempdir = tempdir().unwrap(); + let mut sim = Simulacrum::new(); + let data_ingestion_path = tempdir.path().to_path_buf(); + sim.set_data_ingestion_path(data_ingestion_path.clone()); + + let (_, pg_store, _, _database) = set_up(Arc::new(sim), data_ingestion_path).await; + + let mut v = Vec::new(); + for _ in 0..1000 { + v.push(TxIndex::random()); + } + pg_store.persist_tx_indices(v).await?; + Ok(()) +} + +// test insert large batch of event_indices +#[tokio::test] +pub async fn test_insert_large_batch_event_indices() -> Result<(), IndexerError> { + let tempdir = tempdir().unwrap(); + let mut sim = Simulacrum::new(); + let data_ingestion_path = tempdir.path().to_path_buf(); + sim.set_data_ingestion_path(data_ingestion_path.clone()); + + let (_, pg_store, _, _database) = set_up(Arc::new(sim), data_ingestion_path).await; + + let mut v = Vec::new(); + for _ in 0..1000 { + v.push(EventIndex::random()); + } + pg_store.persist_event_indices(v).await?; + Ok(()) +} + +#[tokio::test] +pub async fn test_epoch_boundary() -> Result<(), IndexerError> { + println!("test_epoch_boundary"); + let tempdir = tempdir().unwrap(); + let mut sim = Simulacrum::new(); + let data_ingestion_path = tempdir.path().to_path_buf(); + sim.set_data_ingestion_path(data_ingestion_path.clone()); + + let transfer_recipient = SuiAddress::random_for_testing_only(); + let (transaction, _) = sim.transfer_txn(transfer_recipient); + let (_, err) = sim.execute_transaction(transaction.clone()).unwrap(); + assert!(err.is_none()); + + sim.create_checkpoint(); // checkpoint 1 + sim.advance_epoch(true); // checkpoint 2 and epoch 1 + + let (transaction, _) = sim.transfer_txn(transfer_recipient); + let (_, err) = sim.execute_transaction(transaction.clone()).unwrap(); + sim.create_checkpoint(); // checkpoint 3 + assert!(err.is_none()); + + let (_, pg_store, _, _database) = set_up(Arc::new(sim), data_ingestion_path).await; + 
wait_for_checkpoint(&pg_store, 3).await?; + let mut connection = pg_store.pool().dedicated_connection().await.unwrap(); + let db_checkpoint: StoredCheckpoint = checkpoints::table + .order(checkpoints::sequence_number.desc()) + .first::(&mut connection) + .await + .expect("Failed reading checkpoint from PostgresDB"); + assert_eq!(db_checkpoint.sequence_number, 3); + assert_eq!(db_checkpoint.epoch, 1); + Ok(()) +} diff --git a/crates/sui-mvr-indexer/tests/json_rpc_tests.rs b/crates/sui-mvr-indexer/tests/json_rpc_tests.rs new file mode 100644 index 0000000000000..15e501a5f0aa2 --- /dev/null +++ b/crates/sui-mvr-indexer/tests/json_rpc_tests.rs @@ -0,0 +1,243 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use std::path::PathBuf; + +use sui_json_rpc_api::{CoinReadApiClient, IndexerApiClient, ReadApiClient}; +use sui_json_rpc_types::{ + CoinPage, EventFilter, SuiObjectDataOptions, SuiObjectResponse, SuiObjectResponseQuery, +}; +use sui_swarm_config::genesis_config::DEFAULT_GAS_AMOUNT; +use sui_test_transaction_builder::publish_package; +use sui_types::{event::EventID, transaction::CallArg}; +use test_cluster::TestClusterBuilder; + +#[tokio::test] +async fn test_get_owned_objects() -> Result<(), anyhow::Error> { + let cluster = TestClusterBuilder::new() + .with_indexer_backed_rpc() + .build() + .await; + + let http_client = cluster.rpc_client(); + let address = cluster.get_address_0(); + + let data_option = SuiObjectDataOptions::new().with_owner(); + let objects = http_client + .get_owned_objects( + address, + Some(SuiObjectResponseQuery::new_with_options( + data_option.clone(), + )), + None, + None, + ) + .await? + .data; + let fullnode_objects = cluster + .fullnode_handle + .rpc_client + .get_owned_objects( + address, + Some(SuiObjectResponseQuery::new_with_options( + data_option.clone(), + )), + None, + None, + ) + .await? + .data; + assert_eq!(5, objects.len()); + // TODO: right now we compare the results from indexer and fullnode, but as we deprecate fullnode rpc, + // we should change this to compare the results with the object id/digest from genesis potentially. + assert_eq!(objects, fullnode_objects); + + for obj in &objects { + let oref = obj.clone().into_object().unwrap(); + let result = http_client + .get_object(oref.object_id, Some(data_option.clone())) + .await?; + assert!( + matches!(result, SuiObjectResponse { data: Some(object), .. } if oref.object_id == object.object_id && object.owner.unwrap().get_owner_address()? == address) + ); + } + + // Multiget objectIDs test + let object_ids: Vec<_> = objects + .iter() + .map(|o| o.object().unwrap().object_id) + .collect(); + + let object_resp = http_client + .multi_get_objects(object_ids.clone(), None) + .await?; + let fullnode_object_resp = cluster + .fullnode_handle + .rpc_client + .multi_get_objects(object_ids, None) + .await?; + assert_eq!(5, object_resp.len()); + // TODO: right now we compare the results from indexer and fullnode, but as we deprecate fullnode rpc, + // we should change this to compare the results with the object id/digest from genesis potentially. 
+ assert_eq!(object_resp, fullnode_object_resp); + Ok(()) +} + +#[tokio::test] +async fn test_get_coins() -> Result<(), anyhow::Error> { + let cluster = TestClusterBuilder::new() + .with_indexer_backed_rpc() + .build() + .await; + let http_client = cluster.rpc_client(); + let address = cluster.get_address_0(); + + let result: CoinPage = http_client.get_coins(address, None, None, None).await?; + assert_eq!(5, result.data.len()); + assert!(!result.has_next_page); + + // We should get 0 coins for a non-existent coin type. + let result: CoinPage = http_client + .get_coins(address, Some("0x2::sui::TestCoin".into()), None, None) + .await?; + assert_eq!(0, result.data.len()); + + // We should get all the 5 coins for SUI with the right balance. + let result: CoinPage = http_client + .get_coins(address, Some("0x2::sui::SUI".into()), None, None) + .await?; + assert_eq!(5, result.data.len()); + assert_eq!(result.data[0].balance, DEFAULT_GAS_AMOUNT); + assert!(!result.has_next_page); + + // When we have more than 3 coins, we should get a next page. + let result: CoinPage = http_client + .get_coins(address, Some("0x2::sui::SUI".into()), None, Some(3)) + .await?; + assert_eq!(3, result.data.len()); + assert!(result.has_next_page); + + // We should get the remaining 2 coins with the next page. + let result: CoinPage = http_client + .get_coins( + address, + Some("0x2::sui::SUI".into()), + result.next_cursor, + Some(3), + ) + .await?; + assert_eq!(2, result.data.len(), "{:?}", result); + assert!(!result.has_next_page); + + // No more coins after the last page. + let result: CoinPage = http_client + .get_coins( + address, + Some("0x2::sui::SUI".into()), + result.next_cursor, + None, + ) + .await?; + assert_eq!(0, result.data.len(), "{:?}", result); + assert!(!result.has_next_page); + + Ok(()) +} + +#[tokio::test] +async fn test_events() -> Result<(), anyhow::Error> { + let cluster = TestClusterBuilder::new() + .with_indexer_backed_rpc() + .build() + .await; + + // publish package + let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + path.push("tests/move_test_code"); + let move_package = publish_package(&cluster.wallet, path).await.0; + + // execute a transaction to generate events + let function = "emit_3"; + let arguments = vec![CallArg::Pure(bcs::to_bytes(&5u64).unwrap())]; + let transaction = cluster + .test_transaction_builder() + .await + .move_call(move_package, "events_queries", function, arguments) + .build(); + let signed_transaction = cluster.wallet.sign_transaction(&transaction); + cluster.execute_transaction(signed_transaction).await; + + // query for events + let http_client = cluster.rpc_client(); + + // start with ascending order + let event_filter = EventFilter::All([]); + let mut cursor: Option = None; + let mut limit = None; + let mut descending_order = Some(false); + let result = http_client + .query_events(event_filter.clone(), cursor, limit, descending_order) + .await?; + assert_eq!(3, result.data.len()); + assert!(!result.has_next_page); + let forward_paginated_events = result.data; + + // Fetch the initial event + limit = Some(1); + let result = http_client + .query_events(event_filter.clone(), cursor, limit, descending_order) + .await?; + assert_eq!(1, result.data.len()); + assert!(result.has_next_page); + assert_eq!(forward_paginated_events[0], result.data[0]); + + // Fetch remaining events + cursor = result.next_cursor; + limit = None; + let result = http_client + .query_events(event_filter.clone(), cursor, limit, descending_order) + .await?; + assert_eq!(2, 
result.data.len()); + assert_eq!(forward_paginated_events[1..], result.data[..]); + + // now descending order - make sure to reset parameters + cursor = None; + descending_order = Some(true); + limit = None; + let result = http_client + .query_events(event_filter.clone(), cursor, limit, descending_order) + .await?; + assert_eq!(3, result.data.len()); + assert!(!result.has_next_page); + let backward_paginated_events = result.data; + + // Fetch the initial event + limit = Some(1); + let result = http_client + .query_events(event_filter.clone(), cursor, limit, descending_order) + .await?; + assert_eq!(1, result.data.len()); + assert!(result.has_next_page); + assert_eq!(backward_paginated_events[0], result.data[0]); + assert_eq!(forward_paginated_events[2], result.data[0]); + + // Fetch remaining events + cursor = result.next_cursor; + limit = None; + let result = http_client + .query_events(event_filter.clone(), cursor, limit, descending_order) + .await?; + assert_eq!(2, result.data.len()); + assert_eq!(backward_paginated_events[1..], result.data[..]); + + // check that the forward and backward paginated events are in reverse order + assert_eq!( + forward_paginated_events + .into_iter() + .rev() + .collect::>(), + backward_paginated_events + ); + + Ok(()) +} diff --git a/crates/sui-mvr-indexer/tests/move_test_code/Move.toml b/crates/sui-mvr-indexer/tests/move_test_code/Move.toml new file mode 100644 index 0000000000000..09e9e50f000f0 --- /dev/null +++ b/crates/sui-mvr-indexer/tests/move_test_code/Move.toml @@ -0,0 +1,10 @@ +[package] +name = "move_test_code" +version = "0.0.1" +edition = "2024.beta" + +[dependencies] +Sui = { local = "../../../sui-framework/packages/sui-framework" } + +[addresses] +move_test_code = "0x0" diff --git a/crates/sui-mvr-indexer/tests/move_test_code/sources/events.move b/crates/sui-mvr-indexer/tests/move_test_code/sources/events.move new file mode 100644 index 0000000000000..f32cc7fe109f3 --- /dev/null +++ b/crates/sui-mvr-indexer/tests/move_test_code/sources/events.move @@ -0,0 +1,26 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + + +module move_test_code::events_queries { + use sui::event; + + public struct EventA has copy, drop { + new_value: u64 + } + + public entry fun emit_1(value: u64) { + event::emit(EventA { new_value: value }) + } + + public entry fun emit_2(value: u64) { + event::emit(EventA { new_value: value }); + event::emit(EventA { new_value: value + 1}) + } + + public entry fun emit_3(value: u64) { + event::emit(EventA { new_value: value }); + event::emit(EventA { new_value: value + 1}); + event::emit(EventA { new_value: value + 2}); + } +} diff --git a/crates/sui-mvr-indexer/tests/read_api_tests.rs b/crates/sui-mvr-indexer/tests/read_api_tests.rs new file mode 100644 index 0000000000000..d17b431888f01 --- /dev/null +++ b/crates/sui-mvr-indexer/tests/read_api_tests.rs @@ -0,0 +1,50 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use jsonrpsee::core::RpcResult; +use simulacrum::Simulacrum; +use std::sync::Arc; +use sui_json_rpc_api::ReadApiServer; +use sui_mvr_indexer::apis::read_api::ReadApi; +use sui_mvr_indexer::indexer_reader::IndexerReader; +use sui_mvr_indexer::test_utils::{set_up, wait_for_checkpoint}; +use tempfile::tempdir; + +#[tokio::test] +async fn test_checkpoint_apis() -> RpcResult<()> { + let tempdir = tempdir().unwrap(); + let mut sim = Simulacrum::new(); + let data_ingestion_path = tempdir.path().to_path_buf(); + sim.set_data_ingestion_path(data_ingestion_path.clone()); + sim.create_checkpoint(); + sim.create_checkpoint(); + + let (_, pg_store, _, _database) = set_up(Arc::new(sim), data_ingestion_path).await; + wait_for_checkpoint(&pg_store, 2).await.unwrap(); + + // Test get_latest_checkpoint_sequence_number + let read_api = ReadApi::new(IndexerReader::new(pg_store.pool())); + let latest_checkpoint = read_api.get_latest_checkpoint_sequence_number().await?; + assert_eq!(latest_checkpoint.into_inner(), 2); + + // Test get_checkpoint + let checkpoint_id = sui_json_rpc_types::CheckpointId::SequenceNumber(1); + let checkpoint = read_api.get_checkpoint(checkpoint_id).await?; + assert_eq!(checkpoint.sequence_number, 1); + + // Test get_checkpoints + let checkpoints = read_api.get_checkpoints(None, Some(10), false).await?; + assert_eq!(checkpoints.data.len(), 3); // 0, 1, 2 + assert!(!checkpoints.has_next_page); + assert_eq!(checkpoints.next_cursor, Some(2.into())); + + let checkpoints = read_api + .get_checkpoints(Some(2.into()), Some(2), true) + .await?; + assert_eq!(checkpoints.data.len(), 2); + assert!(!checkpoints.has_next_page); + assert_eq!(checkpoints.next_cursor, Some(0.into())); + assert_eq!(checkpoints.data[0].sequence_number, 1); + assert_eq!(checkpoints.data[1].sequence_number, 0); + Ok(()) +} diff --git a/crates/sui-network/build.rs b/crates/sui-network/build.rs index 3b6926e0fae15..f082ba5f2d24a 100644 --- a/crates/sui-network/build.rs +++ b/crates/sui-network/build.rs @@ -141,15 +141,6 @@ fn build_anemo_services(out_dir: &Path) { let discovery = anemo_build::manual::Service::builder() .name("Discovery") .package("sui") - .method( - anemo_build::manual::Method::builder() - .name("get_known_peers") - .route_name("GetKnownPeers") - .request_type("()") - .response_type("crate::discovery::GetKnownPeersResponse") - .codec_path(codec_path) - .build(), - ) .method( anemo_build::manual::Method::builder() .name("get_known_peers_v2") diff --git a/crates/sui-network/src/discovery/builder.rs b/crates/sui-network/src/discovery/builder.rs index ef56e208f9567..b18c65ba9efe5 100644 --- a/crates/sui-network/src/discovery/builder.rs +++ b/crates/sui-network/src/discovery/builder.rs @@ -58,7 +58,7 @@ impl Builder { // Apply rate limits from configuration as needed. 
if let Some(limit) = discovery_config.get_known_peers_rate_limit { - discovery_server = discovery_server.add_layer_for_get_known_peers( + discovery_server = discovery_server.add_layer_for_get_known_peers_v2( InboundRequestLayer::new(rate_limit::RateLimitLayer::new( governor::Quota::per_second(limit), rate_limit::WaitMode::Block, diff --git a/crates/sui-network/src/discovery/mod.rs b/crates/sui-network/src/discovery/mod.rs index d701c4f3fb412..a63bc9ca8ca5b 100644 --- a/crates/sui-network/src/discovery/mod.rs +++ b/crates/sui-network/src/discovery/mod.rs @@ -47,7 +47,6 @@ pub use generated::{ discovery_client::DiscoveryClient, discovery_server::{Discovery, DiscoveryServer}, }; -pub use server::GetKnownPeersResponse; pub use server::GetKnownPeersResponseV2; use self::metrics::Metrics; @@ -268,7 +267,6 @@ impl DiscoveryEventLoop { // Query the new node for any peers self.tasks.spawn(query_peer_for_their_known_peers( peer, - self.discovery_config.clone(), self.state.clone(), self.metrics.clone(), self.allowlisted_peers.clone(), @@ -424,7 +422,6 @@ async fn try_to_connect_to_seed_peers( async fn query_peer_for_their_known_peers( peer: Peer, - config: Arc, state: Arc>, metrics: Metrics, allowlisted_peers: Arc>>, @@ -432,50 +429,24 @@ async fn query_peer_for_their_known_peers( let mut client = DiscoveryClient::new(peer); let request = Request::new(()).with_timeout(TIMEOUT); - let found_peers = if config.enable_node_info_signatures() { - client - .get_known_peers_v2(request) - .await - .ok() - .map(Response::into_inner) - .map( - |GetKnownPeersResponseV2 { - own_info, - mut known_peers, - }| { - if !own_info.addresses.is_empty() { - known_peers.push(own_info) - } - known_peers - }, - ) - } else { - client - .get_known_peers(request) - .await - .ok() - .map(Response::into_inner) - .map( - |GetKnownPeersResponse { - own_info, - mut known_peers, - }| { - if !own_info.addresses.is_empty() { - known_peers.push(own_info) - } - known_peers - .into_iter() - .map(|info| { - // SignedNodeInfo with fake default signatures will only work if - // signature verification is disabled. 
- SignedNodeInfo::new_from_data_and_sig(info, Ed25519Signature::default()) - }) - .collect() - }, - ) - }; + let found_peers = client + .get_known_peers_v2(request) + .await + .ok() + .map(Response::into_inner) + .map( + |GetKnownPeersResponseV2 { + own_info, + mut known_peers, + }| { + if !own_info.addresses.is_empty() { + known_peers.push(own_info) + } + known_peers + }, + ); if let Some(found_peers) = found_peers { - update_known_peers(config, state, metrics, found_peers, allowlisted_peers); + update_known_peers(state, metrics, found_peers, allowlisted_peers); } } @@ -494,57 +465,27 @@ async fn query_connected_peers_for_their_known_peers( .flat_map(|id| network.peer(id)) .choose_multiple(&mut rand::thread_rng(), config.peers_to_query()); - let enable_node_info_signatures = config.enable_node_info_signatures(); let found_peers = peers_to_query .into_iter() .map(DiscoveryClient::new) .map(|mut client| async move { let request = Request::new(()).with_timeout(TIMEOUT); - if enable_node_info_signatures { - client - .get_known_peers_v2(request) - .await - .ok() - .map(Response::into_inner) - .map( - |GetKnownPeersResponseV2 { - own_info, - mut known_peers, - }| { - if !own_info.addresses.is_empty() { - known_peers.push(own_info) - } - known_peers - }, - ) - } else { - client - .get_known_peers(request) - .await - .ok() - .map(Response::into_inner) - .map( - |GetKnownPeersResponse { - own_info, - mut known_peers, - }| { - if !own_info.addresses.is_empty() { - known_peers.push(own_info) - } - known_peers - .into_iter() - .map(|info| { - // SignedNodeInfo with fake default signatures will only work if - // signature verification is disabled. - SignedNodeInfo::new_from_data_and_sig( - info, - Ed25519Signature::default(), - ) - }) - .collect() - }, - ) - } + client + .get_known_peers_v2(request) + .await + .ok() + .map(Response::into_inner) + .map( + |GetKnownPeersResponseV2 { + own_info, + mut known_peers, + }| { + if !own_info.addresses.is_empty() { + known_peers.push(own_info) + } + known_peers + }, + ) }) .pipe(futures::stream::iter) .buffer_unordered(config.peers_to_query()) @@ -553,11 +494,10 @@ async fn query_connected_peers_for_their_known_peers( .collect::>() .await; - update_known_peers(config, state, metrics, found_peers, allowlisted_peers); + update_known_peers(state, metrics, found_peers, allowlisted_peers); } fn update_known_peers( - config: Arc, state: Arc>, metrics: Metrics, found_peers: Vec, @@ -602,24 +542,22 @@ fn update_known_peers( { continue; } - if config.enable_node_info_signatures() { - let Ok(public_key) = Ed25519PublicKey::from_bytes(&peer_info.peer_id.0) else { - debug_fatal!( - // This should never happen. - "Failed to convert anemo PeerId {:?} to Ed25519PublicKey", - peer_info.peer_id - ); - continue; - }; - let msg = bcs::to_bytes(peer_info.data()).expect("BCS serialization should not fail"); - if let Err(e) = public_key.verify(&msg, peer_info.auth_sig()) { - info!( - "Discovery failed to verify signature for NodeInfo for peer {:?}: {e:?}", - peer_info.peer_id - ); - // TODO: consider denylisting the source of bad NodeInfo from future requests. - continue; - } + let Ok(public_key) = Ed25519PublicKey::from_bytes(&peer_info.peer_id.0) else { + debug_fatal!( + // This should never happen. 
+ "Failed to convert anemo PeerId {:?} to Ed25519PublicKey", + peer_info.peer_id + ); + continue; + }; + let msg = bcs::to_bytes(peer_info.data()).expect("BCS serialization should not fail"); + if let Err(e) = public_key.verify(&msg, peer_info.auth_sig()) { + info!( + "Discovery failed to verify signature for NodeInfo for peer {:?}: {e:?}", + peer_info.peer_id + ); + // TODO: consider denylisting the source of bad NodeInfo from future requests. + continue; } let peer = VerifiedSignedNodeInfo::new_from_verified(peer_info); diff --git a/crates/sui-network/src/discovery/server.rs b/crates/sui-network/src/discovery/server.rs index 1a1993273689a..20535a3d6dc4f 100644 --- a/crates/sui-network/src/discovery/server.rs +++ b/crates/sui-network/src/discovery/server.rs @@ -1,7 +1,7 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use super::{Discovery, NodeInfo, SignedNodeInfo, State, MAX_PEERS_TO_SEND}; +use super::{Discovery, SignedNodeInfo, State, MAX_PEERS_TO_SEND}; use anemo::{Request, Response}; use rand::seq::IteratorRandom; use serde::{Deserialize, Serialize}; @@ -10,12 +10,6 @@ use std::{ sync::{Arc, RwLock}, }; -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct GetKnownPeersResponse { - pub own_info: NodeInfo, - pub known_peers: Vec, -} - #[derive(Clone, Debug, Serialize, Deserialize)] pub struct GetKnownPeersResponseV2 { pub own_info: SignedNodeInfo, @@ -28,21 +22,6 @@ pub(super) struct Server { #[anemo::async_trait] impl Discovery for Server { - async fn get_known_peers( - &self, - request: Request<()>, - ) -> Result, anemo::rpc::Status> { - let resp = self.get_known_peers_v2(request).await?; - Ok(resp.map(|body| GetKnownPeersResponse { - own_info: body.own_info.into_data(), - known_peers: body - .known_peers - .into_iter() - .map(|e| e.into_data()) - .collect(), - })) - } - async fn get_known_peers_v2( &self, _request: Request<()>, diff --git a/crates/sui-network/src/discovery/tests.rs b/crates/sui-network/src/discovery/tests.rs index 01704e7600f9d..5fdf479bea27a 100644 --- a/crates/sui-network/src/discovery/tests.rs +++ b/crates/sui-network/src/discovery/tests.rs @@ -19,7 +19,10 @@ async fn get_known_peers() -> Result<()> { .build_internal(); // Err when own_info not set - server.get_known_peers(Request::new(())).await.unwrap_err(); + server + .get_known_peers_v2(Request::new(())) + .await + .unwrap_err(); // Normal response with our_info let our_info = NodeInfo { @@ -33,11 +36,11 @@ async fn get_known_peers() -> Result<()> { Ed25519Signature::default(), )); let response = server - .get_known_peers(Request::new(())) + .get_known_peers_v2(Request::new(())) .await .unwrap() .into_inner(); - assert_eq!(response.own_info, our_info); + assert_eq!(response.own_info.data(), &our_info); assert!(response.known_peers.is_empty()); // Normal response with some known peers @@ -55,27 +58,32 @@ async fn get_known_peers() -> Result<()> { )), ); let response = server - .get_known_peers(Request::new(())) + .get_known_peers_v2(Request::new(())) .await .unwrap() .into_inner(); - assert_eq!(response.own_info, our_info); - assert_eq!(response.known_peers, vec![other_peer]); + assert_eq!(response.own_info.data(), &our_info); + assert_eq!( + response + .known_peers + .into_iter() + .map(|peer| peer.into_data()) + .collect::>(), + vec![other_peer] + ); Ok(()) } #[tokio::test] async fn make_connection_to_seed_peer() -> Result<()> { - let config = P2pConfig::default().set_discovery_config(DiscoveryConfig { - enable_node_info_signatures: Some(true), - 
..DiscoveryConfig::default() - }); - let (builder, server) = Builder::new(create_test_channel().1).config(config).build(); + let mut config = P2pConfig::default(); + let (builder, server) = Builder::new(create_test_channel().1) + .config(config.clone()) + .build(); let (network_1, key_1) = build_network_and_key(|router| router.add_rpc_service(server)); let (_event_loop_1, _handle_1) = builder.build(network_1.clone(), key_1); - let mut config = P2pConfig::default(); config.seed_peers.push(SeedPeer { peer_id: None, address: format!("/dns/localhost/udp/{}", network_1.local_addr().port()).parse()?, @@ -103,15 +111,13 @@ async fn make_connection_to_seed_peer() -> Result<()> { #[tokio::test] async fn make_connection_to_seed_peer_with_peer_id() -> Result<()> { - let config = P2pConfig::default().set_discovery_config(DiscoveryConfig { - enable_node_info_signatures: Some(true), - ..DiscoveryConfig::default() - }); - let (builder, server) = Builder::new(create_test_channel().1).config(config).build(); + let mut config = P2pConfig::default(); + let (builder, server) = Builder::new(create_test_channel().1) + .config(config.clone()) + .build(); let (network_1, key_1) = build_network_and_key(|router| router.add_rpc_service(server)); let (_event_loop_1, _handle_1) = builder.build(network_1.clone(), key_1); - let mut config = P2pConfig::default(); config.seed_peers.push(SeedPeer { peer_id: Some(network_1.peer_id()), address: format!("/dns/localhost/udp/{}", network_1.local_addr().port()).parse()?, @@ -140,15 +146,13 @@ async fn make_connection_to_seed_peer_with_peer_id() -> Result<()> { #[tokio::test(flavor = "current_thread", start_paused = true)] async fn three_nodes_can_connect_via_discovery() -> Result<()> { // Setup the peer that will be the seed for the other two - let config = P2pConfig::default().set_discovery_config(DiscoveryConfig { - enable_node_info_signatures: Some(true), - ..DiscoveryConfig::default() - }); - let (builder, server) = Builder::new(create_test_channel().1).config(config).build(); + let mut config = P2pConfig::default(); + let (builder, server) = Builder::new(create_test_channel().1) + .config(config.clone()) + .build(); let (network_1, key_1) = build_network_and_key(|router| router.add_rpc_service(server)); let (event_loop_1, _handle_1) = builder.build(network_1.clone(), key_1); - let mut config = P2pConfig::default(); config.seed_peers.push(SeedPeer { peer_id: Some(network_1.peer_id()), address: format!("/dns/localhost/udp/{}", network_1.local_addr().port()).parse()?, @@ -207,10 +211,7 @@ async fn three_nodes_can_connect_via_discovery() -> Result<()> { #[tokio::test(flavor = "current_thread", start_paused = true)] async fn peers_are_added_from_reconfig_channel() -> Result<()> { let (tx_1, rx_1) = create_test_channel(); - let config = P2pConfig::default().set_discovery_config(DiscoveryConfig { - enable_node_info_signatures: Some(true), - ..DiscoveryConfig::default() - }); + let config = P2pConfig::default(); let (builder, server) = Builder::new(rx_1).config(config.clone()).build(); let (network_1, key_1) = build_network_and_key(|router| router.add_rpc_service(server)); let (event_loop_1, _handle_1) = builder.build(network_1.clone(), key_1); @@ -299,7 +300,6 @@ async fn test_access_types() { let default_discovery_config = DiscoveryConfig { target_concurrent_connections: Some(100), interval_period_ms: Some(1000), - enable_node_info_signatures: Some(true), ..Default::default() }; let default_p2p_config = P2pConfig { @@ -310,7 +310,6 @@ async fn test_access_types() { 
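// Editor's aside (not part of the diff): the discovery changes above make signature checking of
// peer-provided NodeInfo unconditional in update_known_peers. Below is a minimal, hedged sketch of
// that check; the helper name and parameters are illustrative, while Ed25519PublicKey::from_bytes,
// verify, and bcs::to_bytes mirror the calls visible in the diff.
use fastcrypto::ed25519::{Ed25519PublicKey, Ed25519Signature};
use fastcrypto::traits::{ToFromBytes, VerifyingKey};

fn verify_signed_node_info_sketch<T: serde::Serialize>(
    peer_id_bytes: &[u8],        // anemo PeerId bytes, assumed to be the peer's Ed25519 public key
    node_info: &T,               // the NodeInfo payload (SignedNodeInfo::data())
    auth_sig: &Ed25519Signature, // SignedNodeInfo::auth_sig()
) -> bool {
    // Reject peers whose PeerId does not decode to a valid public key.
    let Ok(public_key) = Ed25519PublicKey::from_bytes(peer_id_bytes) else {
        return false;
    };
    // The signature covers the BCS encoding of the NodeInfo payload.
    let Ok(msg) = bcs::to_bytes(node_info) else {
        return false;
    };
    public_key.verify(&msg, auth_sig).is_ok()
}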
target_concurrent_connections: Some(100), interval_period_ms: Some(1000), access_type: Some(AccessType::Private), - enable_node_info_signatures: Some(true), ..Default::default() }; diff --git a/crates/sui-node/src/lib.rs b/crates/sui-node/src/lib.rs index e0fc567bffda6..9fd3db906fd60 100644 --- a/crates/sui-node/src/lib.rs +++ b/crates/sui-node/src/lib.rs @@ -13,6 +13,7 @@ use arc_swap::ArcSwap; use fastcrypto_zkp::bn254::zk_login::JwkId; use fastcrypto_zkp::bn254::zk_login::OIDCProvider; use futures::TryFutureExt; +use mysten_network::server::SUI_TLS_SERVER_NAME; use prometheus::Registry; use std::collections::{BTreeSet, HashMap, HashSet}; use std::fmt; @@ -45,10 +46,8 @@ use sui_types::messages_consensus::AuthorityCapabilitiesV2; use sui_types::sui_system_state::SuiSystemState; use tap::tap::TapFallible; use tokio::runtime::Handle; -use tokio::sync::broadcast; -use tokio::sync::mpsc; -use tokio::sync::{watch, Mutex}; -use tokio::task::JoinHandle; +use tokio::sync::{broadcast, mpsc, watch, Mutex}; +use tokio::task::{JoinHandle, JoinSet}; use tower::ServiceBuilder; use tracing::{debug, error, warn}; use tracing::{error_span, info, Instrument}; @@ -150,10 +149,8 @@ pub struct ValidatorComponents { consensus_manager: ConsensusManager, consensus_store_pruner: ConsensusStorePruner, consensus_adapter: Arc, - // dropping this will eventually stop checkpoint tasks. The receiver side of this channel - // is copied into each checkpoint service task, and they are listening to any change to this - // channel. When the sender is dropped, a change is triggered and those tasks will exit. - checkpoint_service_exit: watch::Sender<()>, + // Keeping the handle to the checkpoint service tasks to shut them down during reconfiguration. + checkpoint_service_tasks: JoinSet<()>, checkpoint_metrics: Arc, sui_tx_validator_metrics: Arc, } @@ -1289,7 +1286,7 @@ impl SuiNode { sui_node_metrics: Arc, sui_tx_validator_metrics: Arc, ) -> Result { - let (checkpoint_service, checkpoint_service_exit) = Self::start_checkpoint_service( + let (checkpoint_service, checkpoint_service_tasks) = Self::start_checkpoint_service( config, consensus_adapter.clone(), checkpoint_store, @@ -1375,7 +1372,7 @@ impl SuiNode { consensus_manager, consensus_store_pruner, consensus_adapter, - checkpoint_service_exit, + checkpoint_service_tasks, checkpoint_metrics, sui_tx_validator_metrics, }) @@ -1390,7 +1387,7 @@ impl SuiNode { state_sync_handle: state_sync::Handle, accumulator: Weak, checkpoint_metrics: Arc, - ) -> (Arc, watch::Sender<()>) { + ) -> (Arc, JoinSet<()>) { let epoch_start_timestamp_ms = epoch_store.epoch_start_state().epoch_start_timestamp_ms(); let epoch_duration_ms = epoch_store.epoch_start_state().epoch_duration_ms(); @@ -1477,8 +1474,13 @@ impl SuiNode { server_builder = server_builder.add_service(ValidatorServer::new(validator_service)); + let tls_config = sui_tls::create_rustls_server_config( + config.network_key_pair().copy().private(), + SUI_TLS_SERVER_NAME.to_string(), + sui_tls::AllowAll, + ); let server = server_builder - .bind(config.network_address()) + .bind(config.network_address(), Some(tls_config)) .await .map_err(|err| anyhow!(err.to_string()))?; let local_addr = server.local_addr(); @@ -1684,16 +1686,28 @@ impl SuiNode { consensus_manager, consensus_store_pruner, consensus_adapter, - checkpoint_service_exit, + mut checkpoint_service_tasks, checkpoint_metrics, sui_tx_validator_metrics, }) = self.validator_components.lock().await.take() { info!("Reconfiguring the validator."); - // Stop the old checkpoint service. 
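// Editor's aside (not part of the diff): the reconfiguration code below replaces the old
// watch-channel exit signal with a JoinSet<()> holding the checkpoint service tasks. A minimal,
// hedged sketch of that shutdown pattern follows; the function name and task contents are
// illustrative, and only the JoinSet calls mirror the lines added below.
use tokio::task::JoinSet;

async fn shutdown_checkpoint_tasks_sketch(mut tasks: JoinSet<()>) {
    // Abort everything first; waiting for a graceful finish is not safe here because the
    // checkpoint builder may be blocked on transactions after peers' consensus has shut down.
    tasks.abort_all();
    // Drain join results so the tasks are fully retired before reconfiguration proceeds.
    while let Some(result) = tasks.join_next().await {
        if let Err(err) = result {
            if err.is_panic() {
                // Re-raise panics from the aborted tasks rather than swallowing them.
                std::panic::resume_unwind(err.into_panic());
            }
            // Plain cancellation errors are expected after abort_all() and can be logged or ignored.
        }
    }
}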
- drop(checkpoint_service_exit); + // Cancel the old checkpoint service tasks. + // Waiting for checkpoint builder to finish gracefully is not possible, because it + // may wait on transactions while consensus on peers have already shut down. + checkpoint_service_tasks.abort_all(); + while let Some(result) = checkpoint_service_tasks.join_next().await { + if let Err(err) = result { + if err.is_panic() { + std::panic::resume_unwind(err.into_panic()); + } + warn!("Error in checkpoint service task: {:?}", err); + } + } + info!("Checkpoint service has shut down."); consensus_manager.shutdown().await; + info!("Consensus has shut down."); let new_epoch_store = self .reconfigure_state( @@ -1704,6 +1718,7 @@ impl SuiNode { accumulator.clone(), ) .await; + info!("Epoch store finished reconfiguration."); // No other components should be holding a strong reference to state accumulator // at this point. Confirm here before we swap in the new accumulator. diff --git a/crates/sui-node/src/main.rs b/crates/sui-node/src/main.rs index 167d08ddbf3b7..26db36f28d810 100644 --- a/crates/sui-node/src/main.rs +++ b/crates/sui-node/src/main.rs @@ -47,8 +47,8 @@ fn main() { // TODO: re-enable after we figure out how to eliminate crashes in prod because of this. // ProtocolConfig::poison_get_for_min_version(); - move_vm_profiler::gas_profiler_feature_enabled! { - panic!("Cannot run the sui-node binary with gas-profiler feature enabled"); + move_vm_profiler::tracing_feature_enabled! { + panic!("Cannot run the sui-node binary with tracing feature enabled"); } let args = Args::parse(); diff --git a/crates/sui-open-rpc/Cargo.toml b/crates/sui-open-rpc/Cargo.toml index e6000fe18d0e2..0326794bec69e 100644 --- a/crates/sui-open-rpc/Cargo.toml +++ b/crates/sui-open-rpc/Cargo.toml @@ -21,7 +21,7 @@ anyhow.workspace = true clap.workspace = true pretty_assertions.workspace = true tokio = { workspace = true, features = ["full"] } -fastcrypto = { workspace = true } +fastcrypto.workspace = true sui-json-rpc.workspace = true sui-json-rpc-api.workspace = true sui-json-rpc-types.workspace = true diff --git a/crates/sui-open-rpc/spec/openrpc.json b/crates/sui-open-rpc/spec/openrpc.json index 251b8df3073d9..add1b298f6e2d 100644 --- a/crates/sui-open-rpc/spec/openrpc.json +++ b/crates/sui-open-rpc/spec/openrpc.json @@ -12,7 +12,7 @@ "name": "Apache-2.0", "url": "https://raw.githubusercontent.com/MystenLabs/sui/main/LICENSE" }, - "version": "1.36.1" + "version": "1.37.1" }, "methods": [ { @@ -1293,7 +1293,7 @@ "name": "Result", "value": { "minSupportedProtocolVersion": "1", - "maxSupportedProtocolVersion": "65", + "maxSupportedProtocolVersion": "68", "protocolVersion": "6", "featureFlags": { "accept_zklogin_in_multisig": false, @@ -1310,6 +1310,7 @@ "disable_invariant_violation_check_in_swap_loc": false, "disallow_adding_abilities_on_upgrade": false, "disallow_change_struct_type_params_on_upgrade": false, + "disallow_new_modules_in_deps_only_packages": false, "enable_coin_deny_list": false, "enable_coin_deny_list_v2": false, "enable_effects_v2": false, @@ -1353,6 +1354,7 @@ "soft_bundle": false, "throughput_aware_consensus_submission": false, "txn_base_cost_as_multiplier": false, + "uncompressed_g1_group_elements": false, "upgraded_multisig_supported": false, "validate_identifier_inputs": false, "verify_legacy_zklogin_address": false, @@ -1592,6 +1594,7 @@ "u64": "2" }, "execution_version": null, + "gas_budget_based_txn_cost_absolute_cap_commit_count": null, "gas_budget_based_txn_cost_cap_factor": null, "gas_model_version": { "u64": "5" 
@@ -1630,6 +1633,7 @@ "group_ops_bls12381_g1_msm_base_cost_per_input": null, "group_ops_bls12381_g1_mul_cost": null, "group_ops_bls12381_g1_sub_cost": null, + "group_ops_bls12381_g1_to_uncompressed_g1_cost": null, "group_ops_bls12381_g2_add_cost": null, "group_ops_bls12381_g2_div_cost": null, "group_ops_bls12381_g2_hash_to_base_cost": null, @@ -1648,6 +1652,10 @@ "group_ops_bls12381_scalar_div_cost": null, "group_ops_bls12381_scalar_mul_cost": null, "group_ops_bls12381_scalar_sub_cost": null, + "group_ops_bls12381_uncompressed_g1_sum_base_cost": null, + "group_ops_bls12381_uncompressed_g1_sum_cost_per_term": null, + "group_ops_bls12381_uncompressed_g1_sum_max_terms": null, + "group_ops_bls12381_uncompressed_g1_to_g1_cost": null, "hash_blake2b256_cost_base": { "u64": "52" }, @@ -1681,6 +1689,7 @@ "hmac_hmac_sha3_256_input_cost_per_byte": { "u64": "2" }, + "max_accumulated_randomness_txn_cost_per_object_in_mysticeti_commit": null, "max_accumulated_txn_cost_per_object_in_mysticeti_commit": null, "max_accumulated_txn_cost_per_object_in_narwhal_commit": null, "max_age_of_jwk_in_epochs": null, @@ -1811,6 +1820,7 @@ "max_tx_size_bytes": { "u64": "131072" }, + "max_txn_cost_overage_per_object_in_commit": null, "max_type_argument_depth": { "u32": "16" }, @@ -10714,7 +10724,7 @@ "TransactionFilter": { "oneOf": [ { - "description": "Query by checkpoint.", + "description": "CURRENTLY NOT SUPPORTED. Query by checkpoint.", "type": "object", "required": [ "Checkpoint" @@ -10850,7 +10860,7 @@ "additionalProperties": false }, { - "description": "Query txs that have a given address as sender or recipient.", + "description": "CURRENTLY NOT SUPPORTED. Query txs that have a given address as sender or recipient.", "type": "object", "required": [ "FromOrToAddress" diff --git a/crates/sui-protocol-config/src/lib.rs b/crates/sui-protocol-config/src/lib.rs index 835463599f7b8..caccb8d70d662 100644 --- a/crates/sui-protocol-config/src/lib.rs +++ b/crates/sui-protocol-config/src/lib.rs @@ -18,7 +18,7 @@ use tracing::{info, warn}; /// The minimum and maximum protocol versions supported by this build. const MIN_PROTOCOL_VERSION: u64 = 1; -const MAX_PROTOCOL_VERSION: u64 = 65; +const MAX_PROTOCOL_VERSION: u64 = 68; // Record history of protocol version allocations here: // @@ -185,7 +185,16 @@ const MAX_PROTOCOL_VERSION: u64 = 65; // Add feature flag for Mysticeti fastpath. // Version 62: Makes the event's sending module package upgrade-aware. // Version 63: Enable gas based congestion control in consensus commit. -// Version 64: Switch to distributed vote scoring in consensus in mainnet +// Version 64: Revert congestion control change. +// Version 65: Enable distributed vote scoring in mainnet. +// Version 66: Revert distributed vote scoring in mainnet. +// Framework fix for fungible staking book-keeping. +// Version 67: Re-enable distributed vote scoring in mainnet. +// Version 68: Add G1Uncompressed group to group ops. +// Update to Move stdlib. +// Enable gas based congestion control with overage. +// Further reduce minimum number of random beacon shares. +// Disallow adding new modules in `deps-only` packages. #[derive(Copy, Clone, Debug, Hash, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] pub struct ProtocolVersion(u64); @@ -548,6 +557,13 @@ struct FeatureFlags { // Makes the event's sending module version-aware. 
#[serde(skip_serializing_if = "is_false")] relocate_event_module: bool, + + // Enable uncompressed group elements in BLS123-81 G1 + #[serde(skip_serializing_if = "is_false")] + uncompressed_g1_group_elements: bool, + + #[serde(skip_serializing_if = "is_false")] + disallow_new_modules_in_deps_only_packages: bool, } fn is_false(b: &bool) -> bool { @@ -1137,6 +1153,11 @@ pub struct ProtocolConfig { group_ops_bls12381_g2_msm_base_cost_per_input: Option, group_ops_bls12381_msm_max_len: Option, group_ops_bls12381_pairing_cost: Option, + group_ops_bls12381_g1_to_uncompressed_g1_cost: Option, + group_ops_bls12381_uncompressed_g1_to_g1_cost: Option, + group_ops_bls12381_uncompressed_g1_sum_base_cost: Option, + group_ops_bls12381_uncompressed_g1_sum_cost_per_term: Option, + group_ops_bls12381_uncompressed_g1_sum_max_terms: Option, // hmac::hmac_sha3_256 hmac_hmac_sha3_256_cost_base: Option, @@ -1235,16 +1256,17 @@ pub struct ProtocolConfig { /// The maximum number of transactions included in a consensus block. consensus_max_num_transactions_in_block: Option, - /// The max accumulated txn execution cost per object in a Narwhal commit. Transactions - /// in a checkpoint will be deferred once their touch shared objects hit this limit. - /// This config is meant to be used when consensus protocol is Narwhal, where each - /// consensus commit corresponding to 1 checkpoint (or 2 if randomness is enabled) + /// DEPRECATED. Do not use. max_accumulated_txn_cost_per_object_in_narwhal_commit: Option, /// The max number of consensus rounds a transaction can be deferred due to shared object congestion. /// Transactions will be cancelled after this many rounds. max_deferral_rounds_for_congestion_control: Option, + /// If >0, congestion control will allow up to one transaction per object to exceed + /// the configured maximum accumulated cost by the given amount. + max_txn_cost_overage_per_object_in_commit: Option, + /// Minimum interval of commit timestamps between consecutive checkpoints. min_checkpoint_interval_ms: Option, @@ -1260,11 +1282,16 @@ pub struct ProtocolConfig { bridge_should_try_to_finalize_committee: Option, /// The max accumulated txn execution cost per object in a mysticeti. Transactions - /// in a commit will be deferred once their touch shared objects hit this limit. + /// in a commit will be deferred once their touch shared objects hit this limit, + /// unless the selected congestion control mode allows overage. /// This config plays the same role as `max_accumulated_txn_cost_per_object_in_narwhal_commit` /// but for mysticeti commits due to that mysticeti has higher commit rate. max_accumulated_txn_cost_per_object_in_mysticeti_commit: Option, + /// As above, but separate per-commit budget for transactions that use randomness. + /// If not configured, uses the setting for `max_accumulated_txn_cost_per_object_in_mysticeti_commit`. + max_accumulated_randomness_txn_cost_per_object_in_mysticeti_commit: Option, + /// Configures the garbage collection depth for consensus. When is unset or `0` then the garbage collection /// is disabled. consensus_gc_depth: Option, @@ -1273,6 +1300,10 @@ pub struct ProtocolConfig { /// object congestion control strategy. Basically the max transaction cost is calculated as /// (num of input object + num of commands) * this factor. gas_budget_based_txn_cost_cap_factor: Option, + + /// Adds an absolute cap on the maximum transaction cost when using TotalGasBudgetWithCap at + /// the given multiple of the per-commit budget. 
+ gas_budget_based_txn_cost_absolute_cap_commit_count: Option, } // feature flags @@ -1628,6 +1659,15 @@ impl ProtocolConfig { pub fn relocate_event_module(&self) -> bool { self.feature_flags.relocate_event_module } + + pub fn uncompressed_g1_group_elements(&self) -> bool { + self.feature_flags.uncompressed_g1_group_elements + } + + pub fn disallow_new_modules_in_deps_only_packages(&self) -> bool { + self.feature_flags + .disallow_new_modules_in_deps_only_packages + } } #[cfg(not(msim))] @@ -2048,6 +2088,11 @@ impl ProtocolConfig { group_ops_bls12381_g2_msm_base_cost_per_input: None, group_ops_bls12381_msm_max_len: None, group_ops_bls12381_pairing_cost: None, + group_ops_bls12381_g1_to_uncompressed_g1_cost: None, + group_ops_bls12381_uncompressed_g1_to_g1_cost: None, + group_ops_bls12381_uncompressed_g1_sum_base_cost: None, + group_ops_bls12381_uncompressed_g1_sum_cost_per_term: None, + group_ops_bls12381_uncompressed_g1_sum_max_terms: None, // zklogin::check_zklogin_id check_zklogin_id_cost_base: None, @@ -2132,6 +2177,8 @@ impl ProtocolConfig { max_deferral_rounds_for_congestion_control: None, + max_txn_cost_overage_per_object_in_commit: None, + min_checkpoint_interval_ms: None, checkpoint_summary_version_specific_data: None, @@ -2142,9 +2189,13 @@ impl ProtocolConfig { max_accumulated_txn_cost_per_object_in_mysticeti_commit: None, + max_accumulated_randomness_txn_cost_per_object_in_mysticeti_commit: None, + consensus_gc_depth: None, gas_budget_based_txn_cost_cap_factor: None, + + gas_budget_based_txn_cost_absolute_cap_commit_count: None, // When adding a new constant, set it to None in the earliest version, like this: // new_constant: None, }; @@ -2844,6 +2895,43 @@ impl ProtocolConfig { cfg.feature_flags .consensus_distributed_vote_scoring_strategy = true; } + 66 => { + if chain == Chain::Mainnet { + // Revert the distributed vote scoring for mainnet (for one protocol upgrade) + cfg.feature_flags + .consensus_distributed_vote_scoring_strategy = false; + } + } + 67 => { + // Enable it once again. + cfg.feature_flags + .consensus_distributed_vote_scoring_strategy = true; + } + 68 => { + cfg.group_ops_bls12381_g1_to_uncompressed_g1_cost = Some(26); + cfg.group_ops_bls12381_uncompressed_g1_to_g1_cost = Some(52); + cfg.group_ops_bls12381_uncompressed_g1_sum_base_cost = Some(26); + cfg.group_ops_bls12381_uncompressed_g1_sum_cost_per_term = Some(13); + cfg.group_ops_bls12381_uncompressed_g1_sum_max_terms = Some(2000); + + if chain != Chain::Mainnet && chain != Chain::Testnet { + cfg.feature_flags.uncompressed_g1_group_elements = true; + } + + cfg.feature_flags.per_object_congestion_control_mode = + PerObjectCongestionControlMode::TotalGasBudgetWithCap; + cfg.gas_budget_based_txn_cost_cap_factor = Some(400_000); + cfg.max_accumulated_txn_cost_per_object_in_mysticeti_commit = Some(18_500_000); + cfg.max_accumulated_randomness_txn_cost_per_object_in_mysticeti_commit = + Some(3_700_000); // 20% of above + cfg.max_txn_cost_overage_per_object_in_commit = Some(u64::MAX); + cfg.gas_budget_based_txn_cost_absolute_cap_commit_count = Some(50); + + // Further reduce minimum number of random beacon shares. + cfg.random_beacon_reduction_lower_bound = Some(500); + + cfg.feature_flags.disallow_new_modules_in_deps_only_packages = true; + } // Use this template when making changes: // // // modify an existing constant. 
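Editor's note on the version 68 values set above: the congestion-control knobs interact, and the
sketch below shows one plausible reading of how a transaction's assessed cost is bounded under
TotalGasBudgetWithCap, based only on the doc comments in this diff. The function is illustrative,
not the node's implementation.

fn assessed_cost_sketch(gas_budget: u64, num_input_objects: u64, num_commands: u64) -> u64 {
    // Values configured for protocol version 68 in the hunk above.
    let cap_factor = 400_000u64;           // gas_budget_based_txn_cost_cap_factor
    let per_commit_budget = 18_500_000u64; // max_accumulated_txn_cost_per_object_in_mysticeti_commit
    let absolute_cap_commits = 50u64;      // gas_budget_based_txn_cost_absolute_cap_commit_count

    // Cap derived from transaction shape: (num of input objects + num of commands) * factor.
    let shape_cap = (num_input_objects + num_commands) * cap_factor;
    // Absolute cap expressed as a multiple of the per-commit budget: 50 * 18_500_000 = 925_000_000.
    let absolute_cap = absolute_cap_commits * per_commit_budget;

    // Under TotalGasBudgetWithCap the charged cost is the gas budget, bounded by both caps.
    gas_budget.min(shape_cap).min(absolute_cap)
}

Per the doc comments above, setting max_txn_cost_overage_per_object_in_commit to u64::MAX lets one
transaction per object exceed the remaining per-object budget by any amount in a commit, and
randomness-using transactions are metered against the separate 3_700_000 budget.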
@@ -3009,6 +3097,11 @@ impl ProtocolConfig { pub fn set_gc_depth_for_testing(&mut self, val: u32) { self.consensus_gc_depth = Some(val); } + + pub fn set_disallow_new_modules_in_deps_only_packages_for_testing(&mut self, val: bool) { + self.feature_flags + .disallow_new_modules_in_deps_only_packages = val; + } } type OverrideFn = dyn Fn(ProtocolVersion, ProtocolConfig) -> ProtocolConfig + Send; diff --git a/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Mainnet_version_66.snap b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Mainnet_version_66.snap new file mode 100644 index 0000000000000..07a447b25d844 --- /dev/null +++ b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Mainnet_version_66.snap @@ -0,0 +1,329 @@ +--- +source: crates/sui-protocol-config/src/lib.rs +expression: "ProtocolConfig::get_for_version(cur, *chain_id)" +--- +version: 66 +feature_flags: + package_upgrades: true + commit_root_state_digest: true + advance_epoch_start_time_in_safe_mode: true + loaded_child_objects_fixed: true + missing_type_is_compatibility_error: true + scoring_decision_with_validity_cutoff: true + consensus_order_end_of_epoch_last: true + disallow_adding_abilities_on_upgrade: true + disable_invariant_violation_check_in_swap_loc: true + advance_to_highest_supported_protocol_version: true + ban_entry_init: true + package_digest_hash_module: true + disallow_change_struct_type_params_on_upgrade: true + no_extraneous_module_bytes: true + narwhal_versioned_metadata: true + zklogin_auth: true + consensus_transaction_ordering: ByGasPrice + simplified_unwrap_then_delete: true + upgraded_multisig_supported: true + txn_base_cost_as_multiplier: true + shared_object_deletion: true + narwhal_new_leader_election_schedule: true + loaded_child_object_format: true + enable_jwk_consensus_updates: true + end_of_epoch_transaction_supported: true + simple_conservation_checks: true + loaded_child_object_format_type: true + receive_objects: true + random_beacon: true + bridge: true + enable_effects_v2: true + narwhal_certificate_v2: true + verify_legacy_zklogin_address: true + recompute_has_public_transfer_in_execution: true + accept_zklogin_in_multisig: true + include_consensus_digest_in_prologue: true + hardened_otw_check: true + allow_receiving_object_id: true + enable_coin_deny_list: true + enable_group_ops_native_functions: true + reject_mutable_random_on_entry_functions: true + per_object_congestion_control_mode: TotalTxCount + consensus_choice: Mysticeti + consensus_network: Tonic + zklogin_max_epoch_upper_bound_delta: 30 + mysticeti_leader_scoring_and_schedule: true + reshare_at_same_initial_version: true + resolve_abort_locations_to_package_id: true + mysticeti_use_committed_subdag_digest: true + record_consensus_determined_version_assignments_in_prologue: true + fresh_vm_on_framework_upgrade: true + prepend_prologue_tx_in_consensus_commit_in_checkpoints: true + mysticeti_num_leaders_per_round: 1 + soft_bundle: true + enable_coin_deny_list_v2: true + rethrow_serialization_type_layout_errors: true + consensus_round_prober: true + validate_identifier_inputs: true + relocate_event_module: true +max_tx_size_bytes: 131072 +max_input_objects: 2048 +max_size_written_objects: 5000000 +max_size_written_objects_system_tx: 50000000 +max_serialized_tx_effects_size_bytes: 524288 +max_serialized_tx_effects_size_bytes_system_tx: 8388608 +max_gas_payment_objects: 256 +max_modules_in_publish: 64 +max_package_dependencies: 32 +max_arguments: 512 +max_type_arguments: 16 
+max_type_argument_depth: 16 +max_pure_argument_size: 16384 +max_programmable_tx_commands: 1024 +move_binary_format_version: 7 +min_move_binary_format_version: 6 +binary_module_handles: 100 +binary_struct_handles: 300 +binary_function_handles: 1500 +binary_function_instantiations: 750 +binary_signatures: 1000 +binary_constant_pool: 4000 +binary_identifiers: 10000 +binary_address_identifiers: 100 +binary_struct_defs: 200 +binary_struct_def_instantiations: 100 +binary_function_defs: 1000 +binary_field_handles: 500 +binary_field_instantiations: 250 +binary_friend_decls: 100 +max_move_object_size: 256000 +max_move_package_size: 102400 +max_publish_or_upgrade_per_ptb: 5 +max_tx_gas: 50000000000 +max_gas_price: 100000 +max_gas_computation_bucket: 5000000 +gas_rounding_step: 1000 +max_loop_depth: 5 +max_generic_instantiation_length: 32 +max_function_parameters: 128 +max_basic_blocks: 1024 +max_value_stack_size: 1024 +max_type_nodes: 256 +max_push_size: 10000 +max_struct_definitions: 200 +max_function_definitions: 1000 +max_fields_in_struct: 32 +max_dependency_depth: 100 +max_num_event_emit: 1024 +max_num_new_move_object_ids: 2048 +max_num_new_move_object_ids_system_tx: 32768 +max_num_deleted_move_object_ids: 2048 +max_num_deleted_move_object_ids_system_tx: 32768 +max_num_transferred_move_object_ids: 2048 +max_num_transferred_move_object_ids_system_tx: 32768 +max_event_emit_size: 256000 +max_event_emit_size_total: 65536000 +max_move_vector_len: 262144 +max_move_identifier_len: 128 +max_move_value_depth: 128 +max_back_edges_per_function: 10000 +max_back_edges_per_module: 10000 +max_verifier_meter_ticks_per_function: 16000000 +max_meter_ticks_per_module: 16000000 +max_meter_ticks_per_package: 16000000 +object_runtime_max_num_cached_objects: 1000 +object_runtime_max_num_cached_objects_system_tx: 16000 +object_runtime_max_num_store_entries: 1000 +object_runtime_max_num_store_entries_system_tx: 16000 +base_tx_cost_fixed: 1000 +package_publish_cost_fixed: 1000 +base_tx_cost_per_byte: 0 +package_publish_cost_per_byte: 80 +obj_access_cost_read_per_byte: 15 +obj_access_cost_mutate_per_byte: 40 +obj_access_cost_delete_per_byte: 40 +obj_access_cost_verify_per_byte: 200 +max_type_to_layout_nodes: 512 +gas_model_version: 8 +obj_data_cost_refundable: 100 +obj_metadata_cost_non_refundable: 50 +storage_rebate_rate: 9900 +storage_fund_reinvest_rate: 500 +reward_slashing_rate: 10000 +storage_gas_price: 76 +max_transactions_per_checkpoint: 10000 +max_checkpoint_size_bytes: 31457280 +buffer_stake_for_protocol_upgrade_bps: 5000 +address_from_bytes_cost_base: 52 +address_to_u256_cost_base: 52 +address_from_u256_cost_base: 52 +config_read_setting_impl_cost_base: 100 +config_read_setting_impl_cost_per_byte: 40 +dynamic_field_hash_type_and_key_cost_base: 100 +dynamic_field_hash_type_and_key_type_cost_per_byte: 2 +dynamic_field_hash_type_and_key_value_cost_per_byte: 2 +dynamic_field_hash_type_and_key_type_tag_cost_per_byte: 2 +dynamic_field_add_child_object_cost_base: 100 +dynamic_field_add_child_object_type_cost_per_byte: 10 +dynamic_field_add_child_object_value_cost_per_byte: 10 +dynamic_field_add_child_object_struct_tag_cost_per_byte: 10 +dynamic_field_borrow_child_object_cost_base: 100 +dynamic_field_borrow_child_object_child_ref_cost_per_byte: 10 +dynamic_field_borrow_child_object_type_cost_per_byte: 10 +dynamic_field_remove_child_object_cost_base: 100 +dynamic_field_remove_child_object_child_cost_per_byte: 2 +dynamic_field_remove_child_object_type_cost_per_byte: 2 +dynamic_field_has_child_object_cost_base: 100 
+dynamic_field_has_child_object_with_ty_cost_base: 100 +dynamic_field_has_child_object_with_ty_type_cost_per_byte: 2 +dynamic_field_has_child_object_with_ty_type_tag_cost_per_byte: 2 +event_emit_cost_base: 52 +event_emit_value_size_derivation_cost_per_byte: 2 +event_emit_tag_size_derivation_cost_per_byte: 5 +event_emit_output_cost_per_byte: 10 +object_borrow_uid_cost_base: 52 +object_delete_impl_cost_base: 52 +object_record_new_uid_cost_base: 52 +transfer_transfer_internal_cost_base: 52 +transfer_freeze_object_cost_base: 52 +transfer_share_object_cost_base: 52 +transfer_receive_object_cost_base: 52 +tx_context_derive_id_cost_base: 52 +types_is_one_time_witness_cost_base: 52 +types_is_one_time_witness_type_tag_cost_per_byte: 2 +types_is_one_time_witness_type_cost_per_byte: 2 +validator_validate_metadata_cost_base: 52 +validator_validate_metadata_data_cost_per_byte: 2 +crypto_invalid_arguments_cost: 100 +bls12381_bls12381_min_sig_verify_cost_base: 52 +bls12381_bls12381_min_sig_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_sig_verify_msg_cost_per_block: 2 +bls12381_bls12381_min_pk_verify_cost_base: 52 +bls12381_bls12381_min_pk_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_pk_verify_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_keccak256_cost_base: 52 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_sha256_cost_base: 52 +ecdsa_k1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_k1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_k1_decompress_pubkey_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_k1_secp256k1_verify_sha256_cost_base: 52 +ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_keccak256_cost_base: 52 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_sha256_cost_base: 52 +ecdsa_r1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_keccak256_cost_base: 52 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_sha256_cost_base: 52 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_block: 2 +ecvrf_ecvrf_verify_cost_base: 52 +ecvrf_ecvrf_verify_alpha_string_cost_per_byte: 2 +ecvrf_ecvrf_verify_alpha_string_cost_per_block: 2 +ed25519_ed25519_verify_cost_base: 52 +ed25519_ed25519_verify_msg_cost_per_byte: 2 +ed25519_ed25519_verify_msg_cost_per_block: 2 +groth16_prepare_verifying_key_bls12381_cost_base: 52 +groth16_prepare_verifying_key_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bn254_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_public_input_cost_per_byte: 2 +hash_blake2b256_cost_base: 52 +hash_blake2b256_data_cost_per_byte: 2 +hash_blake2b256_data_cost_per_block: 2 +hash_keccak256_cost_base: 52 +hash_keccak256_data_cost_per_byte: 2 +hash_keccak256_data_cost_per_block: 2 +group_ops_bls12381_decode_scalar_cost: 52 +group_ops_bls12381_decode_g1_cost: 52 +group_ops_bls12381_decode_g2_cost: 52 
+group_ops_bls12381_decode_gt_cost: 52 +group_ops_bls12381_scalar_add_cost: 52 +group_ops_bls12381_g1_add_cost: 52 +group_ops_bls12381_g2_add_cost: 52 +group_ops_bls12381_gt_add_cost: 52 +group_ops_bls12381_scalar_sub_cost: 52 +group_ops_bls12381_g1_sub_cost: 52 +group_ops_bls12381_g2_sub_cost: 52 +group_ops_bls12381_gt_sub_cost: 52 +group_ops_bls12381_scalar_mul_cost: 52 +group_ops_bls12381_g1_mul_cost: 52 +group_ops_bls12381_g2_mul_cost: 52 +group_ops_bls12381_gt_mul_cost: 52 +group_ops_bls12381_scalar_div_cost: 52 +group_ops_bls12381_g1_div_cost: 52 +group_ops_bls12381_g2_div_cost: 52 +group_ops_bls12381_gt_div_cost: 52 +group_ops_bls12381_g1_hash_to_base_cost: 52 +group_ops_bls12381_g2_hash_to_base_cost: 52 +group_ops_bls12381_g1_hash_to_cost_per_byte: 2 +group_ops_bls12381_g2_hash_to_cost_per_byte: 2 +group_ops_bls12381_g1_msm_base_cost: 52 +group_ops_bls12381_g2_msm_base_cost: 52 +group_ops_bls12381_g1_msm_base_cost_per_input: 52 +group_ops_bls12381_g2_msm_base_cost_per_input: 52 +group_ops_bls12381_msm_max_len: 32 +group_ops_bls12381_pairing_cost: 52 +hmac_hmac_sha3_256_cost_base: 52 +hmac_hmac_sha3_256_input_cost_per_byte: 2 +hmac_hmac_sha3_256_input_cost_per_block: 2 +check_zklogin_id_cost_base: 200 +check_zklogin_issuer_cost_base: 200 +bcs_per_byte_serialized_cost: 2 +bcs_legacy_min_output_size_cost: 1 +bcs_failure_cost: 52 +hash_sha2_256_base_cost: 52 +hash_sha2_256_per_byte_cost: 2 +hash_sha2_256_legacy_min_input_len_cost: 1 +hash_sha3_256_base_cost: 52 +hash_sha3_256_per_byte_cost: 2 +hash_sha3_256_legacy_min_input_len_cost: 1 +type_name_get_base_cost: 52 +type_name_get_per_byte_cost: 2 +string_check_utf8_base_cost: 52 +string_check_utf8_per_byte_cost: 2 +string_is_char_boundary_base_cost: 52 +string_sub_string_base_cost: 52 +string_sub_string_per_byte_cost: 2 +string_index_of_base_cost: 52 +string_index_of_per_byte_pattern_cost: 2 +string_index_of_per_byte_searched_cost: 2 +vector_empty_base_cost: 52 +vector_length_base_cost: 52 +vector_push_back_base_cost: 52 +vector_push_back_legacy_per_abstract_memory_unit_cost: 2 +vector_borrow_base_cost: 52 +vector_pop_back_base_cost: 52 +vector_destroy_empty_base_cost: 52 +vector_swap_base_cost: 52 +debug_print_base_cost: 52 +debug_print_stack_trace_base_cost: 52 +execution_version: 3 +consensus_bad_nodes_stake_threshold: 20 +max_jwk_votes_per_validator_per_epoch: 240 +max_age_of_jwk_in_epochs: 1 +random_beacon_reduction_allowed_delta: 800 +random_beacon_reduction_lower_bound: 700 +random_beacon_dkg_timeout_round: 3000 +random_beacon_min_round_interval_ms: 500 +random_beacon_dkg_version: 1 +consensus_max_transaction_size_bytes: 262144 +consensus_max_transactions_in_block_bytes: 524288 +consensus_max_num_transactions_in_block: 512 +max_accumulated_txn_cost_per_object_in_narwhal_commit: 40 +max_deferral_rounds_for_congestion_control: 10 +min_checkpoint_interval_ms: 200 +checkpoint_summary_version_specific_data: 1 +max_soft_bundle_size: 5 +bridge_should_try_to_finalize_committee: true +max_accumulated_txn_cost_per_object_in_mysticeti_commit: 3 +gas_budget_based_txn_cost_cap_factor: 400000 + diff --git a/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Mainnet_version_67.snap b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Mainnet_version_67.snap new file mode 100644 index 0000000000000..f050b4e16302d --- /dev/null +++ b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Mainnet_version_67.snap @@ -0,0 +1,330 @@ +--- +source: crates/sui-protocol-config/src/lib.rs +expression: 
"ProtocolConfig::get_for_version(cur, *chain_id)" +--- +version: 67 +feature_flags: + package_upgrades: true + commit_root_state_digest: true + advance_epoch_start_time_in_safe_mode: true + loaded_child_objects_fixed: true + missing_type_is_compatibility_error: true + scoring_decision_with_validity_cutoff: true + consensus_order_end_of_epoch_last: true + disallow_adding_abilities_on_upgrade: true + disable_invariant_violation_check_in_swap_loc: true + advance_to_highest_supported_protocol_version: true + ban_entry_init: true + package_digest_hash_module: true + disallow_change_struct_type_params_on_upgrade: true + no_extraneous_module_bytes: true + narwhal_versioned_metadata: true + zklogin_auth: true + consensus_transaction_ordering: ByGasPrice + simplified_unwrap_then_delete: true + upgraded_multisig_supported: true + txn_base_cost_as_multiplier: true + shared_object_deletion: true + narwhal_new_leader_election_schedule: true + loaded_child_object_format: true + enable_jwk_consensus_updates: true + end_of_epoch_transaction_supported: true + simple_conservation_checks: true + loaded_child_object_format_type: true + receive_objects: true + random_beacon: true + bridge: true + enable_effects_v2: true + narwhal_certificate_v2: true + verify_legacy_zklogin_address: true + recompute_has_public_transfer_in_execution: true + accept_zklogin_in_multisig: true + include_consensus_digest_in_prologue: true + hardened_otw_check: true + allow_receiving_object_id: true + enable_coin_deny_list: true + enable_group_ops_native_functions: true + reject_mutable_random_on_entry_functions: true + per_object_congestion_control_mode: TotalTxCount + consensus_choice: Mysticeti + consensus_network: Tonic + zklogin_max_epoch_upper_bound_delta: 30 + mysticeti_leader_scoring_and_schedule: true + reshare_at_same_initial_version: true + resolve_abort_locations_to_package_id: true + mysticeti_use_committed_subdag_digest: true + record_consensus_determined_version_assignments_in_prologue: true + fresh_vm_on_framework_upgrade: true + prepend_prologue_tx_in_consensus_commit_in_checkpoints: true + mysticeti_num_leaders_per_round: 1 + soft_bundle: true + enable_coin_deny_list_v2: true + rethrow_serialization_type_layout_errors: true + consensus_distributed_vote_scoring_strategy: true + consensus_round_prober: true + validate_identifier_inputs: true + relocate_event_module: true +max_tx_size_bytes: 131072 +max_input_objects: 2048 +max_size_written_objects: 5000000 +max_size_written_objects_system_tx: 50000000 +max_serialized_tx_effects_size_bytes: 524288 +max_serialized_tx_effects_size_bytes_system_tx: 8388608 +max_gas_payment_objects: 256 +max_modules_in_publish: 64 +max_package_dependencies: 32 +max_arguments: 512 +max_type_arguments: 16 +max_type_argument_depth: 16 +max_pure_argument_size: 16384 +max_programmable_tx_commands: 1024 +move_binary_format_version: 7 +min_move_binary_format_version: 6 +binary_module_handles: 100 +binary_struct_handles: 300 +binary_function_handles: 1500 +binary_function_instantiations: 750 +binary_signatures: 1000 +binary_constant_pool: 4000 +binary_identifiers: 10000 +binary_address_identifiers: 100 +binary_struct_defs: 200 +binary_struct_def_instantiations: 100 +binary_function_defs: 1000 +binary_field_handles: 500 +binary_field_instantiations: 250 +binary_friend_decls: 100 +max_move_object_size: 256000 +max_move_package_size: 102400 +max_publish_or_upgrade_per_ptb: 5 +max_tx_gas: 50000000000 +max_gas_price: 100000 +max_gas_computation_bucket: 5000000 +gas_rounding_step: 1000 +max_loop_depth: 
5 +max_generic_instantiation_length: 32 +max_function_parameters: 128 +max_basic_blocks: 1024 +max_value_stack_size: 1024 +max_type_nodes: 256 +max_push_size: 10000 +max_struct_definitions: 200 +max_function_definitions: 1000 +max_fields_in_struct: 32 +max_dependency_depth: 100 +max_num_event_emit: 1024 +max_num_new_move_object_ids: 2048 +max_num_new_move_object_ids_system_tx: 32768 +max_num_deleted_move_object_ids: 2048 +max_num_deleted_move_object_ids_system_tx: 32768 +max_num_transferred_move_object_ids: 2048 +max_num_transferred_move_object_ids_system_tx: 32768 +max_event_emit_size: 256000 +max_event_emit_size_total: 65536000 +max_move_vector_len: 262144 +max_move_identifier_len: 128 +max_move_value_depth: 128 +max_back_edges_per_function: 10000 +max_back_edges_per_module: 10000 +max_verifier_meter_ticks_per_function: 16000000 +max_meter_ticks_per_module: 16000000 +max_meter_ticks_per_package: 16000000 +object_runtime_max_num_cached_objects: 1000 +object_runtime_max_num_cached_objects_system_tx: 16000 +object_runtime_max_num_store_entries: 1000 +object_runtime_max_num_store_entries_system_tx: 16000 +base_tx_cost_fixed: 1000 +package_publish_cost_fixed: 1000 +base_tx_cost_per_byte: 0 +package_publish_cost_per_byte: 80 +obj_access_cost_read_per_byte: 15 +obj_access_cost_mutate_per_byte: 40 +obj_access_cost_delete_per_byte: 40 +obj_access_cost_verify_per_byte: 200 +max_type_to_layout_nodes: 512 +gas_model_version: 8 +obj_data_cost_refundable: 100 +obj_metadata_cost_non_refundable: 50 +storage_rebate_rate: 9900 +storage_fund_reinvest_rate: 500 +reward_slashing_rate: 10000 +storage_gas_price: 76 +max_transactions_per_checkpoint: 10000 +max_checkpoint_size_bytes: 31457280 +buffer_stake_for_protocol_upgrade_bps: 5000 +address_from_bytes_cost_base: 52 +address_to_u256_cost_base: 52 +address_from_u256_cost_base: 52 +config_read_setting_impl_cost_base: 100 +config_read_setting_impl_cost_per_byte: 40 +dynamic_field_hash_type_and_key_cost_base: 100 +dynamic_field_hash_type_and_key_type_cost_per_byte: 2 +dynamic_field_hash_type_and_key_value_cost_per_byte: 2 +dynamic_field_hash_type_and_key_type_tag_cost_per_byte: 2 +dynamic_field_add_child_object_cost_base: 100 +dynamic_field_add_child_object_type_cost_per_byte: 10 +dynamic_field_add_child_object_value_cost_per_byte: 10 +dynamic_field_add_child_object_struct_tag_cost_per_byte: 10 +dynamic_field_borrow_child_object_cost_base: 100 +dynamic_field_borrow_child_object_child_ref_cost_per_byte: 10 +dynamic_field_borrow_child_object_type_cost_per_byte: 10 +dynamic_field_remove_child_object_cost_base: 100 +dynamic_field_remove_child_object_child_cost_per_byte: 2 +dynamic_field_remove_child_object_type_cost_per_byte: 2 +dynamic_field_has_child_object_cost_base: 100 +dynamic_field_has_child_object_with_ty_cost_base: 100 +dynamic_field_has_child_object_with_ty_type_cost_per_byte: 2 +dynamic_field_has_child_object_with_ty_type_tag_cost_per_byte: 2 +event_emit_cost_base: 52 +event_emit_value_size_derivation_cost_per_byte: 2 +event_emit_tag_size_derivation_cost_per_byte: 5 +event_emit_output_cost_per_byte: 10 +object_borrow_uid_cost_base: 52 +object_delete_impl_cost_base: 52 +object_record_new_uid_cost_base: 52 +transfer_transfer_internal_cost_base: 52 +transfer_freeze_object_cost_base: 52 +transfer_share_object_cost_base: 52 +transfer_receive_object_cost_base: 52 +tx_context_derive_id_cost_base: 52 +types_is_one_time_witness_cost_base: 52 +types_is_one_time_witness_type_tag_cost_per_byte: 2 +types_is_one_time_witness_type_cost_per_byte: 2 
+validator_validate_metadata_cost_base: 52 +validator_validate_metadata_data_cost_per_byte: 2 +crypto_invalid_arguments_cost: 100 +bls12381_bls12381_min_sig_verify_cost_base: 52 +bls12381_bls12381_min_sig_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_sig_verify_msg_cost_per_block: 2 +bls12381_bls12381_min_pk_verify_cost_base: 52 +bls12381_bls12381_min_pk_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_pk_verify_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_keccak256_cost_base: 52 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_sha256_cost_base: 52 +ecdsa_k1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_k1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_k1_decompress_pubkey_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_k1_secp256k1_verify_sha256_cost_base: 52 +ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_keccak256_cost_base: 52 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_sha256_cost_base: 52 +ecdsa_r1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_keccak256_cost_base: 52 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_sha256_cost_base: 52 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_block: 2 +ecvrf_ecvrf_verify_cost_base: 52 +ecvrf_ecvrf_verify_alpha_string_cost_per_byte: 2 +ecvrf_ecvrf_verify_alpha_string_cost_per_block: 2 +ed25519_ed25519_verify_cost_base: 52 +ed25519_ed25519_verify_msg_cost_per_byte: 2 +ed25519_ed25519_verify_msg_cost_per_block: 2 +groth16_prepare_verifying_key_bls12381_cost_base: 52 +groth16_prepare_verifying_key_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bn254_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_public_input_cost_per_byte: 2 +hash_blake2b256_cost_base: 52 +hash_blake2b256_data_cost_per_byte: 2 +hash_blake2b256_data_cost_per_block: 2 +hash_keccak256_cost_base: 52 +hash_keccak256_data_cost_per_byte: 2 +hash_keccak256_data_cost_per_block: 2 +group_ops_bls12381_decode_scalar_cost: 52 +group_ops_bls12381_decode_g1_cost: 52 +group_ops_bls12381_decode_g2_cost: 52 +group_ops_bls12381_decode_gt_cost: 52 +group_ops_bls12381_scalar_add_cost: 52 +group_ops_bls12381_g1_add_cost: 52 +group_ops_bls12381_g2_add_cost: 52 +group_ops_bls12381_gt_add_cost: 52 +group_ops_bls12381_scalar_sub_cost: 52 +group_ops_bls12381_g1_sub_cost: 52 +group_ops_bls12381_g2_sub_cost: 52 +group_ops_bls12381_gt_sub_cost: 52 +group_ops_bls12381_scalar_mul_cost: 52 +group_ops_bls12381_g1_mul_cost: 52 +group_ops_bls12381_g2_mul_cost: 52 +group_ops_bls12381_gt_mul_cost: 52 +group_ops_bls12381_scalar_div_cost: 52 +group_ops_bls12381_g1_div_cost: 52 +group_ops_bls12381_g2_div_cost: 52 +group_ops_bls12381_gt_div_cost: 52 +group_ops_bls12381_g1_hash_to_base_cost: 52 +group_ops_bls12381_g2_hash_to_base_cost: 52 +group_ops_bls12381_g1_hash_to_cost_per_byte: 2 
+group_ops_bls12381_g2_hash_to_cost_per_byte: 2 +group_ops_bls12381_g1_msm_base_cost: 52 +group_ops_bls12381_g2_msm_base_cost: 52 +group_ops_bls12381_g1_msm_base_cost_per_input: 52 +group_ops_bls12381_g2_msm_base_cost_per_input: 52 +group_ops_bls12381_msm_max_len: 32 +group_ops_bls12381_pairing_cost: 52 +hmac_hmac_sha3_256_cost_base: 52 +hmac_hmac_sha3_256_input_cost_per_byte: 2 +hmac_hmac_sha3_256_input_cost_per_block: 2 +check_zklogin_id_cost_base: 200 +check_zklogin_issuer_cost_base: 200 +bcs_per_byte_serialized_cost: 2 +bcs_legacy_min_output_size_cost: 1 +bcs_failure_cost: 52 +hash_sha2_256_base_cost: 52 +hash_sha2_256_per_byte_cost: 2 +hash_sha2_256_legacy_min_input_len_cost: 1 +hash_sha3_256_base_cost: 52 +hash_sha3_256_per_byte_cost: 2 +hash_sha3_256_legacy_min_input_len_cost: 1 +type_name_get_base_cost: 52 +type_name_get_per_byte_cost: 2 +string_check_utf8_base_cost: 52 +string_check_utf8_per_byte_cost: 2 +string_is_char_boundary_base_cost: 52 +string_sub_string_base_cost: 52 +string_sub_string_per_byte_cost: 2 +string_index_of_base_cost: 52 +string_index_of_per_byte_pattern_cost: 2 +string_index_of_per_byte_searched_cost: 2 +vector_empty_base_cost: 52 +vector_length_base_cost: 52 +vector_push_back_base_cost: 52 +vector_push_back_legacy_per_abstract_memory_unit_cost: 2 +vector_borrow_base_cost: 52 +vector_pop_back_base_cost: 52 +vector_destroy_empty_base_cost: 52 +vector_swap_base_cost: 52 +debug_print_base_cost: 52 +debug_print_stack_trace_base_cost: 52 +execution_version: 3 +consensus_bad_nodes_stake_threshold: 20 +max_jwk_votes_per_validator_per_epoch: 240 +max_age_of_jwk_in_epochs: 1 +random_beacon_reduction_allowed_delta: 800 +random_beacon_reduction_lower_bound: 700 +random_beacon_dkg_timeout_round: 3000 +random_beacon_min_round_interval_ms: 500 +random_beacon_dkg_version: 1 +consensus_max_transaction_size_bytes: 262144 +consensus_max_transactions_in_block_bytes: 524288 +consensus_max_num_transactions_in_block: 512 +max_accumulated_txn_cost_per_object_in_narwhal_commit: 40 +max_deferral_rounds_for_congestion_control: 10 +min_checkpoint_interval_ms: 200 +checkpoint_summary_version_specific_data: 1 +max_soft_bundle_size: 5 +bridge_should_try_to_finalize_committee: true +max_accumulated_txn_cost_per_object_in_mysticeti_commit: 3 +gas_budget_based_txn_cost_cap_factor: 400000 + diff --git a/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Mainnet_version_68.snap b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Mainnet_version_68.snap new file mode 100644 index 0000000000000..492aebef1a352 --- /dev/null +++ b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Mainnet_version_68.snap @@ -0,0 +1,339 @@ +--- +source: crates/sui-protocol-config/src/lib.rs +expression: "ProtocolConfig::get_for_version(cur, *chain_id)" +--- +version: 68 +feature_flags: + package_upgrades: true + commit_root_state_digest: true + advance_epoch_start_time_in_safe_mode: true + loaded_child_objects_fixed: true + missing_type_is_compatibility_error: true + scoring_decision_with_validity_cutoff: true + consensus_order_end_of_epoch_last: true + disallow_adding_abilities_on_upgrade: true + disable_invariant_violation_check_in_swap_loc: true + advance_to_highest_supported_protocol_version: true + ban_entry_init: true + package_digest_hash_module: true + disallow_change_struct_type_params_on_upgrade: true + no_extraneous_module_bytes: true + narwhal_versioned_metadata: true + zklogin_auth: true + consensus_transaction_ordering: ByGasPrice + 
simplified_unwrap_then_delete: true + upgraded_multisig_supported: true + txn_base_cost_as_multiplier: true + shared_object_deletion: true + narwhal_new_leader_election_schedule: true + loaded_child_object_format: true + enable_jwk_consensus_updates: true + end_of_epoch_transaction_supported: true + simple_conservation_checks: true + loaded_child_object_format_type: true + receive_objects: true + random_beacon: true + bridge: true + enable_effects_v2: true + narwhal_certificate_v2: true + verify_legacy_zklogin_address: true + recompute_has_public_transfer_in_execution: true + accept_zklogin_in_multisig: true + include_consensus_digest_in_prologue: true + hardened_otw_check: true + allow_receiving_object_id: true + enable_coin_deny_list: true + enable_group_ops_native_functions: true + reject_mutable_random_on_entry_functions: true + per_object_congestion_control_mode: TotalGasBudgetWithCap + consensus_choice: Mysticeti + consensus_network: Tonic + zklogin_max_epoch_upper_bound_delta: 30 + mysticeti_leader_scoring_and_schedule: true + reshare_at_same_initial_version: true + resolve_abort_locations_to_package_id: true + mysticeti_use_committed_subdag_digest: true + record_consensus_determined_version_assignments_in_prologue: true + fresh_vm_on_framework_upgrade: true + prepend_prologue_tx_in_consensus_commit_in_checkpoints: true + mysticeti_num_leaders_per_round: 1 + soft_bundle: true + enable_coin_deny_list_v2: true + rethrow_serialization_type_layout_errors: true + consensus_distributed_vote_scoring_strategy: true + consensus_round_prober: true + validate_identifier_inputs: true + relocate_event_module: true + disallow_new_modules_in_deps_only_packages: true +max_tx_size_bytes: 131072 +max_input_objects: 2048 +max_size_written_objects: 5000000 +max_size_written_objects_system_tx: 50000000 +max_serialized_tx_effects_size_bytes: 524288 +max_serialized_tx_effects_size_bytes_system_tx: 8388608 +max_gas_payment_objects: 256 +max_modules_in_publish: 64 +max_package_dependencies: 32 +max_arguments: 512 +max_type_arguments: 16 +max_type_argument_depth: 16 +max_pure_argument_size: 16384 +max_programmable_tx_commands: 1024 +move_binary_format_version: 7 +min_move_binary_format_version: 6 +binary_module_handles: 100 +binary_struct_handles: 300 +binary_function_handles: 1500 +binary_function_instantiations: 750 +binary_signatures: 1000 +binary_constant_pool: 4000 +binary_identifiers: 10000 +binary_address_identifiers: 100 +binary_struct_defs: 200 +binary_struct_def_instantiations: 100 +binary_function_defs: 1000 +binary_field_handles: 500 +binary_field_instantiations: 250 +binary_friend_decls: 100 +max_move_object_size: 256000 +max_move_package_size: 102400 +max_publish_or_upgrade_per_ptb: 5 +max_tx_gas: 50000000000 +max_gas_price: 100000 +max_gas_computation_bucket: 5000000 +gas_rounding_step: 1000 +max_loop_depth: 5 +max_generic_instantiation_length: 32 +max_function_parameters: 128 +max_basic_blocks: 1024 +max_value_stack_size: 1024 +max_type_nodes: 256 +max_push_size: 10000 +max_struct_definitions: 200 +max_function_definitions: 1000 +max_fields_in_struct: 32 +max_dependency_depth: 100 +max_num_event_emit: 1024 +max_num_new_move_object_ids: 2048 +max_num_new_move_object_ids_system_tx: 32768 +max_num_deleted_move_object_ids: 2048 +max_num_deleted_move_object_ids_system_tx: 32768 +max_num_transferred_move_object_ids: 2048 +max_num_transferred_move_object_ids_system_tx: 32768 +max_event_emit_size: 256000 +max_event_emit_size_total: 65536000 +max_move_vector_len: 262144 +max_move_identifier_len: 128 
+max_move_value_depth: 128 +max_back_edges_per_function: 10000 +max_back_edges_per_module: 10000 +max_verifier_meter_ticks_per_function: 16000000 +max_meter_ticks_per_module: 16000000 +max_meter_ticks_per_package: 16000000 +object_runtime_max_num_cached_objects: 1000 +object_runtime_max_num_cached_objects_system_tx: 16000 +object_runtime_max_num_store_entries: 1000 +object_runtime_max_num_store_entries_system_tx: 16000 +base_tx_cost_fixed: 1000 +package_publish_cost_fixed: 1000 +base_tx_cost_per_byte: 0 +package_publish_cost_per_byte: 80 +obj_access_cost_read_per_byte: 15 +obj_access_cost_mutate_per_byte: 40 +obj_access_cost_delete_per_byte: 40 +obj_access_cost_verify_per_byte: 200 +max_type_to_layout_nodes: 512 +gas_model_version: 8 +obj_data_cost_refundable: 100 +obj_metadata_cost_non_refundable: 50 +storage_rebate_rate: 9900 +storage_fund_reinvest_rate: 500 +reward_slashing_rate: 10000 +storage_gas_price: 76 +max_transactions_per_checkpoint: 10000 +max_checkpoint_size_bytes: 31457280 +buffer_stake_for_protocol_upgrade_bps: 5000 +address_from_bytes_cost_base: 52 +address_to_u256_cost_base: 52 +address_from_u256_cost_base: 52 +config_read_setting_impl_cost_base: 100 +config_read_setting_impl_cost_per_byte: 40 +dynamic_field_hash_type_and_key_cost_base: 100 +dynamic_field_hash_type_and_key_type_cost_per_byte: 2 +dynamic_field_hash_type_and_key_value_cost_per_byte: 2 +dynamic_field_hash_type_and_key_type_tag_cost_per_byte: 2 +dynamic_field_add_child_object_cost_base: 100 +dynamic_field_add_child_object_type_cost_per_byte: 10 +dynamic_field_add_child_object_value_cost_per_byte: 10 +dynamic_field_add_child_object_struct_tag_cost_per_byte: 10 +dynamic_field_borrow_child_object_cost_base: 100 +dynamic_field_borrow_child_object_child_ref_cost_per_byte: 10 +dynamic_field_borrow_child_object_type_cost_per_byte: 10 +dynamic_field_remove_child_object_cost_base: 100 +dynamic_field_remove_child_object_child_cost_per_byte: 2 +dynamic_field_remove_child_object_type_cost_per_byte: 2 +dynamic_field_has_child_object_cost_base: 100 +dynamic_field_has_child_object_with_ty_cost_base: 100 +dynamic_field_has_child_object_with_ty_type_cost_per_byte: 2 +dynamic_field_has_child_object_with_ty_type_tag_cost_per_byte: 2 +event_emit_cost_base: 52 +event_emit_value_size_derivation_cost_per_byte: 2 +event_emit_tag_size_derivation_cost_per_byte: 5 +event_emit_output_cost_per_byte: 10 +object_borrow_uid_cost_base: 52 +object_delete_impl_cost_base: 52 +object_record_new_uid_cost_base: 52 +transfer_transfer_internal_cost_base: 52 +transfer_freeze_object_cost_base: 52 +transfer_share_object_cost_base: 52 +transfer_receive_object_cost_base: 52 +tx_context_derive_id_cost_base: 52 +types_is_one_time_witness_cost_base: 52 +types_is_one_time_witness_type_tag_cost_per_byte: 2 +types_is_one_time_witness_type_cost_per_byte: 2 +validator_validate_metadata_cost_base: 52 +validator_validate_metadata_data_cost_per_byte: 2 +crypto_invalid_arguments_cost: 100 +bls12381_bls12381_min_sig_verify_cost_base: 52 +bls12381_bls12381_min_sig_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_sig_verify_msg_cost_per_block: 2 +bls12381_bls12381_min_pk_verify_cost_base: 52 +bls12381_bls12381_min_pk_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_pk_verify_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_keccak256_cost_base: 52 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_sha256_cost_base: 52 +ecdsa_k1_ecrecover_sha256_msg_cost_per_byte: 2 
+ecdsa_k1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_k1_decompress_pubkey_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_k1_secp256k1_verify_sha256_cost_base: 52 +ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_keccak256_cost_base: 52 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_sha256_cost_base: 52 +ecdsa_r1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_keccak256_cost_base: 52 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_sha256_cost_base: 52 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_block: 2 +ecvrf_ecvrf_verify_cost_base: 52 +ecvrf_ecvrf_verify_alpha_string_cost_per_byte: 2 +ecvrf_ecvrf_verify_alpha_string_cost_per_block: 2 +ed25519_ed25519_verify_cost_base: 52 +ed25519_ed25519_verify_msg_cost_per_byte: 2 +ed25519_ed25519_verify_msg_cost_per_block: 2 +groth16_prepare_verifying_key_bls12381_cost_base: 52 +groth16_prepare_verifying_key_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bn254_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_public_input_cost_per_byte: 2 +hash_blake2b256_cost_base: 52 +hash_blake2b256_data_cost_per_byte: 2 +hash_blake2b256_data_cost_per_block: 2 +hash_keccak256_cost_base: 52 +hash_keccak256_data_cost_per_byte: 2 +hash_keccak256_data_cost_per_block: 2 +group_ops_bls12381_decode_scalar_cost: 52 +group_ops_bls12381_decode_g1_cost: 52 +group_ops_bls12381_decode_g2_cost: 52 +group_ops_bls12381_decode_gt_cost: 52 +group_ops_bls12381_scalar_add_cost: 52 +group_ops_bls12381_g1_add_cost: 52 +group_ops_bls12381_g2_add_cost: 52 +group_ops_bls12381_gt_add_cost: 52 +group_ops_bls12381_scalar_sub_cost: 52 +group_ops_bls12381_g1_sub_cost: 52 +group_ops_bls12381_g2_sub_cost: 52 +group_ops_bls12381_gt_sub_cost: 52 +group_ops_bls12381_scalar_mul_cost: 52 +group_ops_bls12381_g1_mul_cost: 52 +group_ops_bls12381_g2_mul_cost: 52 +group_ops_bls12381_gt_mul_cost: 52 +group_ops_bls12381_scalar_div_cost: 52 +group_ops_bls12381_g1_div_cost: 52 +group_ops_bls12381_g2_div_cost: 52 +group_ops_bls12381_gt_div_cost: 52 +group_ops_bls12381_g1_hash_to_base_cost: 52 +group_ops_bls12381_g2_hash_to_base_cost: 52 +group_ops_bls12381_g1_hash_to_cost_per_byte: 2 +group_ops_bls12381_g2_hash_to_cost_per_byte: 2 +group_ops_bls12381_g1_msm_base_cost: 52 +group_ops_bls12381_g2_msm_base_cost: 52 +group_ops_bls12381_g1_msm_base_cost_per_input: 52 +group_ops_bls12381_g2_msm_base_cost_per_input: 52 +group_ops_bls12381_msm_max_len: 32 +group_ops_bls12381_pairing_cost: 52 +group_ops_bls12381_g1_to_uncompressed_g1_cost: 26 +group_ops_bls12381_uncompressed_g1_to_g1_cost: 52 +group_ops_bls12381_uncompressed_g1_sum_base_cost: 26 +group_ops_bls12381_uncompressed_g1_sum_cost_per_term: 13 +group_ops_bls12381_uncompressed_g1_sum_max_terms: 2000 +hmac_hmac_sha3_256_cost_base: 52 +hmac_hmac_sha3_256_input_cost_per_byte: 2 +hmac_hmac_sha3_256_input_cost_per_block: 2 
+check_zklogin_id_cost_base: 200 +check_zklogin_issuer_cost_base: 200 +bcs_per_byte_serialized_cost: 2 +bcs_legacy_min_output_size_cost: 1 +bcs_failure_cost: 52 +hash_sha2_256_base_cost: 52 +hash_sha2_256_per_byte_cost: 2 +hash_sha2_256_legacy_min_input_len_cost: 1 +hash_sha3_256_base_cost: 52 +hash_sha3_256_per_byte_cost: 2 +hash_sha3_256_legacy_min_input_len_cost: 1 +type_name_get_base_cost: 52 +type_name_get_per_byte_cost: 2 +string_check_utf8_base_cost: 52 +string_check_utf8_per_byte_cost: 2 +string_is_char_boundary_base_cost: 52 +string_sub_string_base_cost: 52 +string_sub_string_per_byte_cost: 2 +string_index_of_base_cost: 52 +string_index_of_per_byte_pattern_cost: 2 +string_index_of_per_byte_searched_cost: 2 +vector_empty_base_cost: 52 +vector_length_base_cost: 52 +vector_push_back_base_cost: 52 +vector_push_back_legacy_per_abstract_memory_unit_cost: 2 +vector_borrow_base_cost: 52 +vector_pop_back_base_cost: 52 +vector_destroy_empty_base_cost: 52 +vector_swap_base_cost: 52 +debug_print_base_cost: 52 +debug_print_stack_trace_base_cost: 52 +execution_version: 3 +consensus_bad_nodes_stake_threshold: 20 +max_jwk_votes_per_validator_per_epoch: 240 +max_age_of_jwk_in_epochs: 1 +random_beacon_reduction_allowed_delta: 800 +random_beacon_reduction_lower_bound: 500 +random_beacon_dkg_timeout_round: 3000 +random_beacon_min_round_interval_ms: 500 +random_beacon_dkg_version: 1 +consensus_max_transaction_size_bytes: 262144 +consensus_max_transactions_in_block_bytes: 524288 +consensus_max_num_transactions_in_block: 512 +max_accumulated_txn_cost_per_object_in_narwhal_commit: 40 +max_deferral_rounds_for_congestion_control: 10 +max_txn_cost_overage_per_object_in_commit: 18446744073709551615 +min_checkpoint_interval_ms: 200 +checkpoint_summary_version_specific_data: 1 +max_soft_bundle_size: 5 +bridge_should_try_to_finalize_committee: true +max_accumulated_txn_cost_per_object_in_mysticeti_commit: 18500000 +max_accumulated_randomness_txn_cost_per_object_in_mysticeti_commit: 3700000 +gas_budget_based_txn_cost_cap_factor: 400000 +gas_budget_based_txn_cost_absolute_cap_commit_count: 50 + diff --git a/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Testnet_version_66.snap b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Testnet_version_66.snap new file mode 100644 index 0000000000000..4d141e8b8de23 --- /dev/null +++ b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Testnet_version_66.snap @@ -0,0 +1,330 @@ +--- +source: crates/sui-protocol-config/src/lib.rs +expression: "ProtocolConfig::get_for_version(cur, *chain_id)" +--- +version: 66 +feature_flags: + package_upgrades: true + commit_root_state_digest: true + advance_epoch_start_time_in_safe_mode: true + loaded_child_objects_fixed: true + missing_type_is_compatibility_error: true + scoring_decision_with_validity_cutoff: true + consensus_order_end_of_epoch_last: true + disallow_adding_abilities_on_upgrade: true + disable_invariant_violation_check_in_swap_loc: true + advance_to_highest_supported_protocol_version: true + ban_entry_init: true + package_digest_hash_module: true + disallow_change_struct_type_params_on_upgrade: true + no_extraneous_module_bytes: true + narwhal_versioned_metadata: true + zklogin_auth: true + consensus_transaction_ordering: ByGasPrice + simplified_unwrap_then_delete: true + upgraded_multisig_supported: true + txn_base_cost_as_multiplier: true + shared_object_deletion: true + narwhal_new_leader_election_schedule: true + loaded_child_object_format: true + 
enable_jwk_consensus_updates: true + end_of_epoch_transaction_supported: true + simple_conservation_checks: true + loaded_child_object_format_type: true + receive_objects: true + random_beacon: true + bridge: true + enable_effects_v2: true + narwhal_certificate_v2: true + verify_legacy_zklogin_address: true + recompute_has_public_transfer_in_execution: true + accept_zklogin_in_multisig: true + include_consensus_digest_in_prologue: true + hardened_otw_check: true + allow_receiving_object_id: true + enable_coin_deny_list: true + enable_group_ops_native_functions: true + reject_mutable_random_on_entry_functions: true + per_object_congestion_control_mode: TotalTxCount + consensus_choice: Mysticeti + consensus_network: Tonic + zklogin_max_epoch_upper_bound_delta: 30 + mysticeti_leader_scoring_and_schedule: true + reshare_at_same_initial_version: true + resolve_abort_locations_to_package_id: true + mysticeti_use_committed_subdag_digest: true + record_consensus_determined_version_assignments_in_prologue: true + fresh_vm_on_framework_upgrade: true + prepend_prologue_tx_in_consensus_commit_in_checkpoints: true + mysticeti_num_leaders_per_round: 1 + soft_bundle: true + enable_coin_deny_list_v2: true + rethrow_serialization_type_layout_errors: true + consensus_distributed_vote_scoring_strategy: true + consensus_round_prober: true + validate_identifier_inputs: true + relocate_event_module: true +max_tx_size_bytes: 131072 +max_input_objects: 2048 +max_size_written_objects: 5000000 +max_size_written_objects_system_tx: 50000000 +max_serialized_tx_effects_size_bytes: 524288 +max_serialized_tx_effects_size_bytes_system_tx: 8388608 +max_gas_payment_objects: 256 +max_modules_in_publish: 64 +max_package_dependencies: 32 +max_arguments: 512 +max_type_arguments: 16 +max_type_argument_depth: 16 +max_pure_argument_size: 16384 +max_programmable_tx_commands: 1024 +move_binary_format_version: 7 +min_move_binary_format_version: 6 +binary_module_handles: 100 +binary_struct_handles: 300 +binary_function_handles: 1500 +binary_function_instantiations: 750 +binary_signatures: 1000 +binary_constant_pool: 4000 +binary_identifiers: 10000 +binary_address_identifiers: 100 +binary_struct_defs: 200 +binary_struct_def_instantiations: 100 +binary_function_defs: 1000 +binary_field_handles: 500 +binary_field_instantiations: 250 +binary_friend_decls: 100 +max_move_object_size: 256000 +max_move_package_size: 102400 +max_publish_or_upgrade_per_ptb: 5 +max_tx_gas: 50000000000 +max_gas_price: 100000 +max_gas_computation_bucket: 5000000 +gas_rounding_step: 1000 +max_loop_depth: 5 +max_generic_instantiation_length: 32 +max_function_parameters: 128 +max_basic_blocks: 1024 +max_value_stack_size: 1024 +max_type_nodes: 256 +max_push_size: 10000 +max_struct_definitions: 200 +max_function_definitions: 1000 +max_fields_in_struct: 32 +max_dependency_depth: 100 +max_num_event_emit: 1024 +max_num_new_move_object_ids: 2048 +max_num_new_move_object_ids_system_tx: 32768 +max_num_deleted_move_object_ids: 2048 +max_num_deleted_move_object_ids_system_tx: 32768 +max_num_transferred_move_object_ids: 2048 +max_num_transferred_move_object_ids_system_tx: 32768 +max_event_emit_size: 256000 +max_event_emit_size_total: 65536000 +max_move_vector_len: 262144 +max_move_identifier_len: 128 +max_move_value_depth: 128 +max_back_edges_per_function: 10000 +max_back_edges_per_module: 10000 +max_verifier_meter_ticks_per_function: 16000000 +max_meter_ticks_per_module: 16000000 +max_meter_ticks_per_package: 16000000 +object_runtime_max_num_cached_objects: 1000 
+object_runtime_max_num_cached_objects_system_tx: 16000 +object_runtime_max_num_store_entries: 1000 +object_runtime_max_num_store_entries_system_tx: 16000 +base_tx_cost_fixed: 1000 +package_publish_cost_fixed: 1000 +base_tx_cost_per_byte: 0 +package_publish_cost_per_byte: 80 +obj_access_cost_read_per_byte: 15 +obj_access_cost_mutate_per_byte: 40 +obj_access_cost_delete_per_byte: 40 +obj_access_cost_verify_per_byte: 200 +max_type_to_layout_nodes: 512 +gas_model_version: 8 +obj_data_cost_refundable: 100 +obj_metadata_cost_non_refundable: 50 +storage_rebate_rate: 9900 +storage_fund_reinvest_rate: 500 +reward_slashing_rate: 10000 +storage_gas_price: 76 +max_transactions_per_checkpoint: 10000 +max_checkpoint_size_bytes: 31457280 +buffer_stake_for_protocol_upgrade_bps: 5000 +address_from_bytes_cost_base: 52 +address_to_u256_cost_base: 52 +address_from_u256_cost_base: 52 +config_read_setting_impl_cost_base: 100 +config_read_setting_impl_cost_per_byte: 40 +dynamic_field_hash_type_and_key_cost_base: 100 +dynamic_field_hash_type_and_key_type_cost_per_byte: 2 +dynamic_field_hash_type_and_key_value_cost_per_byte: 2 +dynamic_field_hash_type_and_key_type_tag_cost_per_byte: 2 +dynamic_field_add_child_object_cost_base: 100 +dynamic_field_add_child_object_type_cost_per_byte: 10 +dynamic_field_add_child_object_value_cost_per_byte: 10 +dynamic_field_add_child_object_struct_tag_cost_per_byte: 10 +dynamic_field_borrow_child_object_cost_base: 100 +dynamic_field_borrow_child_object_child_ref_cost_per_byte: 10 +dynamic_field_borrow_child_object_type_cost_per_byte: 10 +dynamic_field_remove_child_object_cost_base: 100 +dynamic_field_remove_child_object_child_cost_per_byte: 2 +dynamic_field_remove_child_object_type_cost_per_byte: 2 +dynamic_field_has_child_object_cost_base: 100 +dynamic_field_has_child_object_with_ty_cost_base: 100 +dynamic_field_has_child_object_with_ty_type_cost_per_byte: 2 +dynamic_field_has_child_object_with_ty_type_tag_cost_per_byte: 2 +event_emit_cost_base: 52 +event_emit_value_size_derivation_cost_per_byte: 2 +event_emit_tag_size_derivation_cost_per_byte: 5 +event_emit_output_cost_per_byte: 10 +object_borrow_uid_cost_base: 52 +object_delete_impl_cost_base: 52 +object_record_new_uid_cost_base: 52 +transfer_transfer_internal_cost_base: 52 +transfer_freeze_object_cost_base: 52 +transfer_share_object_cost_base: 52 +transfer_receive_object_cost_base: 52 +tx_context_derive_id_cost_base: 52 +types_is_one_time_witness_cost_base: 52 +types_is_one_time_witness_type_tag_cost_per_byte: 2 +types_is_one_time_witness_type_cost_per_byte: 2 +validator_validate_metadata_cost_base: 52 +validator_validate_metadata_data_cost_per_byte: 2 +crypto_invalid_arguments_cost: 100 +bls12381_bls12381_min_sig_verify_cost_base: 52 +bls12381_bls12381_min_sig_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_sig_verify_msg_cost_per_block: 2 +bls12381_bls12381_min_pk_verify_cost_base: 52 +bls12381_bls12381_min_pk_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_pk_verify_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_keccak256_cost_base: 52 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_sha256_cost_base: 52 +ecdsa_k1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_k1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_k1_decompress_pubkey_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_k1_secp256k1_verify_sha256_cost_base: 52 
+ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_keccak256_cost_base: 52 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_sha256_cost_base: 52 +ecdsa_r1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_keccak256_cost_base: 52 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_sha256_cost_base: 52 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_block: 2 +ecvrf_ecvrf_verify_cost_base: 52 +ecvrf_ecvrf_verify_alpha_string_cost_per_byte: 2 +ecvrf_ecvrf_verify_alpha_string_cost_per_block: 2 +ed25519_ed25519_verify_cost_base: 52 +ed25519_ed25519_verify_msg_cost_per_byte: 2 +ed25519_ed25519_verify_msg_cost_per_block: 2 +groth16_prepare_verifying_key_bls12381_cost_base: 52 +groth16_prepare_verifying_key_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bn254_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_public_input_cost_per_byte: 2 +hash_blake2b256_cost_base: 52 +hash_blake2b256_data_cost_per_byte: 2 +hash_blake2b256_data_cost_per_block: 2 +hash_keccak256_cost_base: 52 +hash_keccak256_data_cost_per_byte: 2 +hash_keccak256_data_cost_per_block: 2 +group_ops_bls12381_decode_scalar_cost: 52 +group_ops_bls12381_decode_g1_cost: 52 +group_ops_bls12381_decode_g2_cost: 52 +group_ops_bls12381_decode_gt_cost: 52 +group_ops_bls12381_scalar_add_cost: 52 +group_ops_bls12381_g1_add_cost: 52 +group_ops_bls12381_g2_add_cost: 52 +group_ops_bls12381_gt_add_cost: 52 +group_ops_bls12381_scalar_sub_cost: 52 +group_ops_bls12381_g1_sub_cost: 52 +group_ops_bls12381_g2_sub_cost: 52 +group_ops_bls12381_gt_sub_cost: 52 +group_ops_bls12381_scalar_mul_cost: 52 +group_ops_bls12381_g1_mul_cost: 52 +group_ops_bls12381_g2_mul_cost: 52 +group_ops_bls12381_gt_mul_cost: 52 +group_ops_bls12381_scalar_div_cost: 52 +group_ops_bls12381_g1_div_cost: 52 +group_ops_bls12381_g2_div_cost: 52 +group_ops_bls12381_gt_div_cost: 52 +group_ops_bls12381_g1_hash_to_base_cost: 52 +group_ops_bls12381_g2_hash_to_base_cost: 52 +group_ops_bls12381_g1_hash_to_cost_per_byte: 2 +group_ops_bls12381_g2_hash_to_cost_per_byte: 2 +group_ops_bls12381_g1_msm_base_cost: 52 +group_ops_bls12381_g2_msm_base_cost: 52 +group_ops_bls12381_g1_msm_base_cost_per_input: 52 +group_ops_bls12381_g2_msm_base_cost_per_input: 52 +group_ops_bls12381_msm_max_len: 32 +group_ops_bls12381_pairing_cost: 52 +hmac_hmac_sha3_256_cost_base: 52 +hmac_hmac_sha3_256_input_cost_per_byte: 2 +hmac_hmac_sha3_256_input_cost_per_block: 2 +check_zklogin_id_cost_base: 200 +check_zklogin_issuer_cost_base: 200 +bcs_per_byte_serialized_cost: 2 +bcs_legacy_min_output_size_cost: 1 +bcs_failure_cost: 52 +hash_sha2_256_base_cost: 52 +hash_sha2_256_per_byte_cost: 2 +hash_sha2_256_legacy_min_input_len_cost: 1 +hash_sha3_256_base_cost: 52 +hash_sha3_256_per_byte_cost: 2 +hash_sha3_256_legacy_min_input_len_cost: 1 +type_name_get_base_cost: 52 +type_name_get_per_byte_cost: 2 +string_check_utf8_base_cost: 52 +string_check_utf8_per_byte_cost: 2 +string_is_char_boundary_base_cost: 52 +string_sub_string_base_cost: 52 
+string_sub_string_per_byte_cost: 2 +string_index_of_base_cost: 52 +string_index_of_per_byte_pattern_cost: 2 +string_index_of_per_byte_searched_cost: 2 +vector_empty_base_cost: 52 +vector_length_base_cost: 52 +vector_push_back_base_cost: 52 +vector_push_back_legacy_per_abstract_memory_unit_cost: 2 +vector_borrow_base_cost: 52 +vector_pop_back_base_cost: 52 +vector_destroy_empty_base_cost: 52 +vector_swap_base_cost: 52 +debug_print_base_cost: 52 +debug_print_stack_trace_base_cost: 52 +execution_version: 3 +consensus_bad_nodes_stake_threshold: 20 +max_jwk_votes_per_validator_per_epoch: 240 +max_age_of_jwk_in_epochs: 1 +random_beacon_reduction_allowed_delta: 800 +random_beacon_reduction_lower_bound: 700 +random_beacon_dkg_timeout_round: 3000 +random_beacon_min_round_interval_ms: 500 +random_beacon_dkg_version: 1 +consensus_max_transaction_size_bytes: 262144 +consensus_max_transactions_in_block_bytes: 524288 +consensus_max_num_transactions_in_block: 512 +max_accumulated_txn_cost_per_object_in_narwhal_commit: 40 +max_deferral_rounds_for_congestion_control: 10 +min_checkpoint_interval_ms: 200 +checkpoint_summary_version_specific_data: 1 +max_soft_bundle_size: 5 +bridge_should_try_to_finalize_committee: true +max_accumulated_txn_cost_per_object_in_mysticeti_commit: 3 +gas_budget_based_txn_cost_cap_factor: 400000 + diff --git a/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Testnet_version_67.snap b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Testnet_version_67.snap new file mode 100644 index 0000000000000..f050b4e16302d --- /dev/null +++ b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Testnet_version_67.snap @@ -0,0 +1,330 @@ +--- +source: crates/sui-protocol-config/src/lib.rs +expression: "ProtocolConfig::get_for_version(cur, *chain_id)" +--- +version: 67 +feature_flags: + package_upgrades: true + commit_root_state_digest: true + advance_epoch_start_time_in_safe_mode: true + loaded_child_objects_fixed: true + missing_type_is_compatibility_error: true + scoring_decision_with_validity_cutoff: true + consensus_order_end_of_epoch_last: true + disallow_adding_abilities_on_upgrade: true + disable_invariant_violation_check_in_swap_loc: true + advance_to_highest_supported_protocol_version: true + ban_entry_init: true + package_digest_hash_module: true + disallow_change_struct_type_params_on_upgrade: true + no_extraneous_module_bytes: true + narwhal_versioned_metadata: true + zklogin_auth: true + consensus_transaction_ordering: ByGasPrice + simplified_unwrap_then_delete: true + upgraded_multisig_supported: true + txn_base_cost_as_multiplier: true + shared_object_deletion: true + narwhal_new_leader_election_schedule: true + loaded_child_object_format: true + enable_jwk_consensus_updates: true + end_of_epoch_transaction_supported: true + simple_conservation_checks: true + loaded_child_object_format_type: true + receive_objects: true + random_beacon: true + bridge: true + enable_effects_v2: true + narwhal_certificate_v2: true + verify_legacy_zklogin_address: true + recompute_has_public_transfer_in_execution: true + accept_zklogin_in_multisig: true + include_consensus_digest_in_prologue: true + hardened_otw_check: true + allow_receiving_object_id: true + enable_coin_deny_list: true + enable_group_ops_native_functions: true + reject_mutable_random_on_entry_functions: true + per_object_congestion_control_mode: TotalTxCount + consensus_choice: Mysticeti + consensus_network: Tonic + zklogin_max_epoch_upper_bound_delta: 30 + 
mysticeti_leader_scoring_and_schedule: true + reshare_at_same_initial_version: true + resolve_abort_locations_to_package_id: true + mysticeti_use_committed_subdag_digest: true + record_consensus_determined_version_assignments_in_prologue: true + fresh_vm_on_framework_upgrade: true + prepend_prologue_tx_in_consensus_commit_in_checkpoints: true + mysticeti_num_leaders_per_round: 1 + soft_bundle: true + enable_coin_deny_list_v2: true + rethrow_serialization_type_layout_errors: true + consensus_distributed_vote_scoring_strategy: true + consensus_round_prober: true + validate_identifier_inputs: true + relocate_event_module: true +max_tx_size_bytes: 131072 +max_input_objects: 2048 +max_size_written_objects: 5000000 +max_size_written_objects_system_tx: 50000000 +max_serialized_tx_effects_size_bytes: 524288 +max_serialized_tx_effects_size_bytes_system_tx: 8388608 +max_gas_payment_objects: 256 +max_modules_in_publish: 64 +max_package_dependencies: 32 +max_arguments: 512 +max_type_arguments: 16 +max_type_argument_depth: 16 +max_pure_argument_size: 16384 +max_programmable_tx_commands: 1024 +move_binary_format_version: 7 +min_move_binary_format_version: 6 +binary_module_handles: 100 +binary_struct_handles: 300 +binary_function_handles: 1500 +binary_function_instantiations: 750 +binary_signatures: 1000 +binary_constant_pool: 4000 +binary_identifiers: 10000 +binary_address_identifiers: 100 +binary_struct_defs: 200 +binary_struct_def_instantiations: 100 +binary_function_defs: 1000 +binary_field_handles: 500 +binary_field_instantiations: 250 +binary_friend_decls: 100 +max_move_object_size: 256000 +max_move_package_size: 102400 +max_publish_or_upgrade_per_ptb: 5 +max_tx_gas: 50000000000 +max_gas_price: 100000 +max_gas_computation_bucket: 5000000 +gas_rounding_step: 1000 +max_loop_depth: 5 +max_generic_instantiation_length: 32 +max_function_parameters: 128 +max_basic_blocks: 1024 +max_value_stack_size: 1024 +max_type_nodes: 256 +max_push_size: 10000 +max_struct_definitions: 200 +max_function_definitions: 1000 +max_fields_in_struct: 32 +max_dependency_depth: 100 +max_num_event_emit: 1024 +max_num_new_move_object_ids: 2048 +max_num_new_move_object_ids_system_tx: 32768 +max_num_deleted_move_object_ids: 2048 +max_num_deleted_move_object_ids_system_tx: 32768 +max_num_transferred_move_object_ids: 2048 +max_num_transferred_move_object_ids_system_tx: 32768 +max_event_emit_size: 256000 +max_event_emit_size_total: 65536000 +max_move_vector_len: 262144 +max_move_identifier_len: 128 +max_move_value_depth: 128 +max_back_edges_per_function: 10000 +max_back_edges_per_module: 10000 +max_verifier_meter_ticks_per_function: 16000000 +max_meter_ticks_per_module: 16000000 +max_meter_ticks_per_package: 16000000 +object_runtime_max_num_cached_objects: 1000 +object_runtime_max_num_cached_objects_system_tx: 16000 +object_runtime_max_num_store_entries: 1000 +object_runtime_max_num_store_entries_system_tx: 16000 +base_tx_cost_fixed: 1000 +package_publish_cost_fixed: 1000 +base_tx_cost_per_byte: 0 +package_publish_cost_per_byte: 80 +obj_access_cost_read_per_byte: 15 +obj_access_cost_mutate_per_byte: 40 +obj_access_cost_delete_per_byte: 40 +obj_access_cost_verify_per_byte: 200 +max_type_to_layout_nodes: 512 +gas_model_version: 8 +obj_data_cost_refundable: 100 +obj_metadata_cost_non_refundable: 50 +storage_rebate_rate: 9900 +storage_fund_reinvest_rate: 500 +reward_slashing_rate: 10000 +storage_gas_price: 76 +max_transactions_per_checkpoint: 10000 +max_checkpoint_size_bytes: 31457280 +buffer_stake_for_protocol_upgrade_bps: 5000 
+address_from_bytes_cost_base: 52 +address_to_u256_cost_base: 52 +address_from_u256_cost_base: 52 +config_read_setting_impl_cost_base: 100 +config_read_setting_impl_cost_per_byte: 40 +dynamic_field_hash_type_and_key_cost_base: 100 +dynamic_field_hash_type_and_key_type_cost_per_byte: 2 +dynamic_field_hash_type_and_key_value_cost_per_byte: 2 +dynamic_field_hash_type_and_key_type_tag_cost_per_byte: 2 +dynamic_field_add_child_object_cost_base: 100 +dynamic_field_add_child_object_type_cost_per_byte: 10 +dynamic_field_add_child_object_value_cost_per_byte: 10 +dynamic_field_add_child_object_struct_tag_cost_per_byte: 10 +dynamic_field_borrow_child_object_cost_base: 100 +dynamic_field_borrow_child_object_child_ref_cost_per_byte: 10 +dynamic_field_borrow_child_object_type_cost_per_byte: 10 +dynamic_field_remove_child_object_cost_base: 100 +dynamic_field_remove_child_object_child_cost_per_byte: 2 +dynamic_field_remove_child_object_type_cost_per_byte: 2 +dynamic_field_has_child_object_cost_base: 100 +dynamic_field_has_child_object_with_ty_cost_base: 100 +dynamic_field_has_child_object_with_ty_type_cost_per_byte: 2 +dynamic_field_has_child_object_with_ty_type_tag_cost_per_byte: 2 +event_emit_cost_base: 52 +event_emit_value_size_derivation_cost_per_byte: 2 +event_emit_tag_size_derivation_cost_per_byte: 5 +event_emit_output_cost_per_byte: 10 +object_borrow_uid_cost_base: 52 +object_delete_impl_cost_base: 52 +object_record_new_uid_cost_base: 52 +transfer_transfer_internal_cost_base: 52 +transfer_freeze_object_cost_base: 52 +transfer_share_object_cost_base: 52 +transfer_receive_object_cost_base: 52 +tx_context_derive_id_cost_base: 52 +types_is_one_time_witness_cost_base: 52 +types_is_one_time_witness_type_tag_cost_per_byte: 2 +types_is_one_time_witness_type_cost_per_byte: 2 +validator_validate_metadata_cost_base: 52 +validator_validate_metadata_data_cost_per_byte: 2 +crypto_invalid_arguments_cost: 100 +bls12381_bls12381_min_sig_verify_cost_base: 52 +bls12381_bls12381_min_sig_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_sig_verify_msg_cost_per_block: 2 +bls12381_bls12381_min_pk_verify_cost_base: 52 +bls12381_bls12381_min_pk_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_pk_verify_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_keccak256_cost_base: 52 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_sha256_cost_base: 52 +ecdsa_k1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_k1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_k1_decompress_pubkey_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_k1_secp256k1_verify_sha256_cost_base: 52 +ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_keccak256_cost_base: 52 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_sha256_cost_base: 52 +ecdsa_r1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_keccak256_cost_base: 52 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_sha256_cost_base: 52 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_block: 2 +ecvrf_ecvrf_verify_cost_base: 52 
+ecvrf_ecvrf_verify_alpha_string_cost_per_byte: 2 +ecvrf_ecvrf_verify_alpha_string_cost_per_block: 2 +ed25519_ed25519_verify_cost_base: 52 +ed25519_ed25519_verify_msg_cost_per_byte: 2 +ed25519_ed25519_verify_msg_cost_per_block: 2 +groth16_prepare_verifying_key_bls12381_cost_base: 52 +groth16_prepare_verifying_key_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bn254_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_public_input_cost_per_byte: 2 +hash_blake2b256_cost_base: 52 +hash_blake2b256_data_cost_per_byte: 2 +hash_blake2b256_data_cost_per_block: 2 +hash_keccak256_cost_base: 52 +hash_keccak256_data_cost_per_byte: 2 +hash_keccak256_data_cost_per_block: 2 +group_ops_bls12381_decode_scalar_cost: 52 +group_ops_bls12381_decode_g1_cost: 52 +group_ops_bls12381_decode_g2_cost: 52 +group_ops_bls12381_decode_gt_cost: 52 +group_ops_bls12381_scalar_add_cost: 52 +group_ops_bls12381_g1_add_cost: 52 +group_ops_bls12381_g2_add_cost: 52 +group_ops_bls12381_gt_add_cost: 52 +group_ops_bls12381_scalar_sub_cost: 52 +group_ops_bls12381_g1_sub_cost: 52 +group_ops_bls12381_g2_sub_cost: 52 +group_ops_bls12381_gt_sub_cost: 52 +group_ops_bls12381_scalar_mul_cost: 52 +group_ops_bls12381_g1_mul_cost: 52 +group_ops_bls12381_g2_mul_cost: 52 +group_ops_bls12381_gt_mul_cost: 52 +group_ops_bls12381_scalar_div_cost: 52 +group_ops_bls12381_g1_div_cost: 52 +group_ops_bls12381_g2_div_cost: 52 +group_ops_bls12381_gt_div_cost: 52 +group_ops_bls12381_g1_hash_to_base_cost: 52 +group_ops_bls12381_g2_hash_to_base_cost: 52 +group_ops_bls12381_g1_hash_to_cost_per_byte: 2 +group_ops_bls12381_g2_hash_to_cost_per_byte: 2 +group_ops_bls12381_g1_msm_base_cost: 52 +group_ops_bls12381_g2_msm_base_cost: 52 +group_ops_bls12381_g1_msm_base_cost_per_input: 52 +group_ops_bls12381_g2_msm_base_cost_per_input: 52 +group_ops_bls12381_msm_max_len: 32 +group_ops_bls12381_pairing_cost: 52 +hmac_hmac_sha3_256_cost_base: 52 +hmac_hmac_sha3_256_input_cost_per_byte: 2 +hmac_hmac_sha3_256_input_cost_per_block: 2 +check_zklogin_id_cost_base: 200 +check_zklogin_issuer_cost_base: 200 +bcs_per_byte_serialized_cost: 2 +bcs_legacy_min_output_size_cost: 1 +bcs_failure_cost: 52 +hash_sha2_256_base_cost: 52 +hash_sha2_256_per_byte_cost: 2 +hash_sha2_256_legacy_min_input_len_cost: 1 +hash_sha3_256_base_cost: 52 +hash_sha3_256_per_byte_cost: 2 +hash_sha3_256_legacy_min_input_len_cost: 1 +type_name_get_base_cost: 52 +type_name_get_per_byte_cost: 2 +string_check_utf8_base_cost: 52 +string_check_utf8_per_byte_cost: 2 +string_is_char_boundary_base_cost: 52 +string_sub_string_base_cost: 52 +string_sub_string_per_byte_cost: 2 +string_index_of_base_cost: 52 +string_index_of_per_byte_pattern_cost: 2 +string_index_of_per_byte_searched_cost: 2 +vector_empty_base_cost: 52 +vector_length_base_cost: 52 +vector_push_back_base_cost: 52 +vector_push_back_legacy_per_abstract_memory_unit_cost: 2 +vector_borrow_base_cost: 52 +vector_pop_back_base_cost: 52 +vector_destroy_empty_base_cost: 52 +vector_swap_base_cost: 52 +debug_print_base_cost: 52 +debug_print_stack_trace_base_cost: 52 +execution_version: 3 +consensus_bad_nodes_stake_threshold: 20 +max_jwk_votes_per_validator_per_epoch: 240 +max_age_of_jwk_in_epochs: 1 +random_beacon_reduction_allowed_delta: 800 +random_beacon_reduction_lower_bound: 700 +random_beacon_dkg_timeout_round: 3000 
+random_beacon_min_round_interval_ms: 500 +random_beacon_dkg_version: 1 +consensus_max_transaction_size_bytes: 262144 +consensus_max_transactions_in_block_bytes: 524288 +consensus_max_num_transactions_in_block: 512 +max_accumulated_txn_cost_per_object_in_narwhal_commit: 40 +max_deferral_rounds_for_congestion_control: 10 +min_checkpoint_interval_ms: 200 +checkpoint_summary_version_specific_data: 1 +max_soft_bundle_size: 5 +bridge_should_try_to_finalize_committee: true +max_accumulated_txn_cost_per_object_in_mysticeti_commit: 3 +gas_budget_based_txn_cost_cap_factor: 400000 + diff --git a/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Testnet_version_68.snap b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Testnet_version_68.snap new file mode 100644 index 0000000000000..492aebef1a352 --- /dev/null +++ b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__Testnet_version_68.snap @@ -0,0 +1,339 @@ +--- +source: crates/sui-protocol-config/src/lib.rs +expression: "ProtocolConfig::get_for_version(cur, *chain_id)" +--- +version: 68 +feature_flags: + package_upgrades: true + commit_root_state_digest: true + advance_epoch_start_time_in_safe_mode: true + loaded_child_objects_fixed: true + missing_type_is_compatibility_error: true + scoring_decision_with_validity_cutoff: true + consensus_order_end_of_epoch_last: true + disallow_adding_abilities_on_upgrade: true + disable_invariant_violation_check_in_swap_loc: true + advance_to_highest_supported_protocol_version: true + ban_entry_init: true + package_digest_hash_module: true + disallow_change_struct_type_params_on_upgrade: true + no_extraneous_module_bytes: true + narwhal_versioned_metadata: true + zklogin_auth: true + consensus_transaction_ordering: ByGasPrice + simplified_unwrap_then_delete: true + upgraded_multisig_supported: true + txn_base_cost_as_multiplier: true + shared_object_deletion: true + narwhal_new_leader_election_schedule: true + loaded_child_object_format: true + enable_jwk_consensus_updates: true + end_of_epoch_transaction_supported: true + simple_conservation_checks: true + loaded_child_object_format_type: true + receive_objects: true + random_beacon: true + bridge: true + enable_effects_v2: true + narwhal_certificate_v2: true + verify_legacy_zklogin_address: true + recompute_has_public_transfer_in_execution: true + accept_zklogin_in_multisig: true + include_consensus_digest_in_prologue: true + hardened_otw_check: true + allow_receiving_object_id: true + enable_coin_deny_list: true + enable_group_ops_native_functions: true + reject_mutable_random_on_entry_functions: true + per_object_congestion_control_mode: TotalGasBudgetWithCap + consensus_choice: Mysticeti + consensus_network: Tonic + zklogin_max_epoch_upper_bound_delta: 30 + mysticeti_leader_scoring_and_schedule: true + reshare_at_same_initial_version: true + resolve_abort_locations_to_package_id: true + mysticeti_use_committed_subdag_digest: true + record_consensus_determined_version_assignments_in_prologue: true + fresh_vm_on_framework_upgrade: true + prepend_prologue_tx_in_consensus_commit_in_checkpoints: true + mysticeti_num_leaders_per_round: 1 + soft_bundle: true + enable_coin_deny_list_v2: true + rethrow_serialization_type_layout_errors: true + consensus_distributed_vote_scoring_strategy: true + consensus_round_prober: true + validate_identifier_inputs: true + relocate_event_module: true + disallow_new_modules_in_deps_only_packages: true +max_tx_size_bytes: 131072 +max_input_objects: 2048 +max_size_written_objects: 
5000000 +max_size_written_objects_system_tx: 50000000 +max_serialized_tx_effects_size_bytes: 524288 +max_serialized_tx_effects_size_bytes_system_tx: 8388608 +max_gas_payment_objects: 256 +max_modules_in_publish: 64 +max_package_dependencies: 32 +max_arguments: 512 +max_type_arguments: 16 +max_type_argument_depth: 16 +max_pure_argument_size: 16384 +max_programmable_tx_commands: 1024 +move_binary_format_version: 7 +min_move_binary_format_version: 6 +binary_module_handles: 100 +binary_struct_handles: 300 +binary_function_handles: 1500 +binary_function_instantiations: 750 +binary_signatures: 1000 +binary_constant_pool: 4000 +binary_identifiers: 10000 +binary_address_identifiers: 100 +binary_struct_defs: 200 +binary_struct_def_instantiations: 100 +binary_function_defs: 1000 +binary_field_handles: 500 +binary_field_instantiations: 250 +binary_friend_decls: 100 +max_move_object_size: 256000 +max_move_package_size: 102400 +max_publish_or_upgrade_per_ptb: 5 +max_tx_gas: 50000000000 +max_gas_price: 100000 +max_gas_computation_bucket: 5000000 +gas_rounding_step: 1000 +max_loop_depth: 5 +max_generic_instantiation_length: 32 +max_function_parameters: 128 +max_basic_blocks: 1024 +max_value_stack_size: 1024 +max_type_nodes: 256 +max_push_size: 10000 +max_struct_definitions: 200 +max_function_definitions: 1000 +max_fields_in_struct: 32 +max_dependency_depth: 100 +max_num_event_emit: 1024 +max_num_new_move_object_ids: 2048 +max_num_new_move_object_ids_system_tx: 32768 +max_num_deleted_move_object_ids: 2048 +max_num_deleted_move_object_ids_system_tx: 32768 +max_num_transferred_move_object_ids: 2048 +max_num_transferred_move_object_ids_system_tx: 32768 +max_event_emit_size: 256000 +max_event_emit_size_total: 65536000 +max_move_vector_len: 262144 +max_move_identifier_len: 128 +max_move_value_depth: 128 +max_back_edges_per_function: 10000 +max_back_edges_per_module: 10000 +max_verifier_meter_ticks_per_function: 16000000 +max_meter_ticks_per_module: 16000000 +max_meter_ticks_per_package: 16000000 +object_runtime_max_num_cached_objects: 1000 +object_runtime_max_num_cached_objects_system_tx: 16000 +object_runtime_max_num_store_entries: 1000 +object_runtime_max_num_store_entries_system_tx: 16000 +base_tx_cost_fixed: 1000 +package_publish_cost_fixed: 1000 +base_tx_cost_per_byte: 0 +package_publish_cost_per_byte: 80 +obj_access_cost_read_per_byte: 15 +obj_access_cost_mutate_per_byte: 40 +obj_access_cost_delete_per_byte: 40 +obj_access_cost_verify_per_byte: 200 +max_type_to_layout_nodes: 512 +gas_model_version: 8 +obj_data_cost_refundable: 100 +obj_metadata_cost_non_refundable: 50 +storage_rebate_rate: 9900 +storage_fund_reinvest_rate: 500 +reward_slashing_rate: 10000 +storage_gas_price: 76 +max_transactions_per_checkpoint: 10000 +max_checkpoint_size_bytes: 31457280 +buffer_stake_for_protocol_upgrade_bps: 5000 +address_from_bytes_cost_base: 52 +address_to_u256_cost_base: 52 +address_from_u256_cost_base: 52 +config_read_setting_impl_cost_base: 100 +config_read_setting_impl_cost_per_byte: 40 +dynamic_field_hash_type_and_key_cost_base: 100 +dynamic_field_hash_type_and_key_type_cost_per_byte: 2 +dynamic_field_hash_type_and_key_value_cost_per_byte: 2 +dynamic_field_hash_type_and_key_type_tag_cost_per_byte: 2 +dynamic_field_add_child_object_cost_base: 100 +dynamic_field_add_child_object_type_cost_per_byte: 10 +dynamic_field_add_child_object_value_cost_per_byte: 10 +dynamic_field_add_child_object_struct_tag_cost_per_byte: 10 +dynamic_field_borrow_child_object_cost_base: 100 
+dynamic_field_borrow_child_object_child_ref_cost_per_byte: 10 +dynamic_field_borrow_child_object_type_cost_per_byte: 10 +dynamic_field_remove_child_object_cost_base: 100 +dynamic_field_remove_child_object_child_cost_per_byte: 2 +dynamic_field_remove_child_object_type_cost_per_byte: 2 +dynamic_field_has_child_object_cost_base: 100 +dynamic_field_has_child_object_with_ty_cost_base: 100 +dynamic_field_has_child_object_with_ty_type_cost_per_byte: 2 +dynamic_field_has_child_object_with_ty_type_tag_cost_per_byte: 2 +event_emit_cost_base: 52 +event_emit_value_size_derivation_cost_per_byte: 2 +event_emit_tag_size_derivation_cost_per_byte: 5 +event_emit_output_cost_per_byte: 10 +object_borrow_uid_cost_base: 52 +object_delete_impl_cost_base: 52 +object_record_new_uid_cost_base: 52 +transfer_transfer_internal_cost_base: 52 +transfer_freeze_object_cost_base: 52 +transfer_share_object_cost_base: 52 +transfer_receive_object_cost_base: 52 +tx_context_derive_id_cost_base: 52 +types_is_one_time_witness_cost_base: 52 +types_is_one_time_witness_type_tag_cost_per_byte: 2 +types_is_one_time_witness_type_cost_per_byte: 2 +validator_validate_metadata_cost_base: 52 +validator_validate_metadata_data_cost_per_byte: 2 +crypto_invalid_arguments_cost: 100 +bls12381_bls12381_min_sig_verify_cost_base: 52 +bls12381_bls12381_min_sig_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_sig_verify_msg_cost_per_block: 2 +bls12381_bls12381_min_pk_verify_cost_base: 52 +bls12381_bls12381_min_pk_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_pk_verify_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_keccak256_cost_base: 52 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_sha256_cost_base: 52 +ecdsa_k1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_k1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_k1_decompress_pubkey_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_k1_secp256k1_verify_sha256_cost_base: 52 +ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_keccak256_cost_base: 52 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_sha256_cost_base: 52 +ecdsa_r1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_keccak256_cost_base: 52 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_sha256_cost_base: 52 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_block: 2 +ecvrf_ecvrf_verify_cost_base: 52 +ecvrf_ecvrf_verify_alpha_string_cost_per_byte: 2 +ecvrf_ecvrf_verify_alpha_string_cost_per_block: 2 +ed25519_ed25519_verify_cost_base: 52 +ed25519_ed25519_verify_msg_cost_per_byte: 2 +ed25519_ed25519_verify_msg_cost_per_block: 2 +groth16_prepare_verifying_key_bls12381_cost_base: 52 +groth16_prepare_verifying_key_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bn254_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_public_input_cost_per_byte: 2 
+hash_blake2b256_cost_base: 52 +hash_blake2b256_data_cost_per_byte: 2 +hash_blake2b256_data_cost_per_block: 2 +hash_keccak256_cost_base: 52 +hash_keccak256_data_cost_per_byte: 2 +hash_keccak256_data_cost_per_block: 2 +group_ops_bls12381_decode_scalar_cost: 52 +group_ops_bls12381_decode_g1_cost: 52 +group_ops_bls12381_decode_g2_cost: 52 +group_ops_bls12381_decode_gt_cost: 52 +group_ops_bls12381_scalar_add_cost: 52 +group_ops_bls12381_g1_add_cost: 52 +group_ops_bls12381_g2_add_cost: 52 +group_ops_bls12381_gt_add_cost: 52 +group_ops_bls12381_scalar_sub_cost: 52 +group_ops_bls12381_g1_sub_cost: 52 +group_ops_bls12381_g2_sub_cost: 52 +group_ops_bls12381_gt_sub_cost: 52 +group_ops_bls12381_scalar_mul_cost: 52 +group_ops_bls12381_g1_mul_cost: 52 +group_ops_bls12381_g2_mul_cost: 52 +group_ops_bls12381_gt_mul_cost: 52 +group_ops_bls12381_scalar_div_cost: 52 +group_ops_bls12381_g1_div_cost: 52 +group_ops_bls12381_g2_div_cost: 52 +group_ops_bls12381_gt_div_cost: 52 +group_ops_bls12381_g1_hash_to_base_cost: 52 +group_ops_bls12381_g2_hash_to_base_cost: 52 +group_ops_bls12381_g1_hash_to_cost_per_byte: 2 +group_ops_bls12381_g2_hash_to_cost_per_byte: 2 +group_ops_bls12381_g1_msm_base_cost: 52 +group_ops_bls12381_g2_msm_base_cost: 52 +group_ops_bls12381_g1_msm_base_cost_per_input: 52 +group_ops_bls12381_g2_msm_base_cost_per_input: 52 +group_ops_bls12381_msm_max_len: 32 +group_ops_bls12381_pairing_cost: 52 +group_ops_bls12381_g1_to_uncompressed_g1_cost: 26 +group_ops_bls12381_uncompressed_g1_to_g1_cost: 52 +group_ops_bls12381_uncompressed_g1_sum_base_cost: 26 +group_ops_bls12381_uncompressed_g1_sum_cost_per_term: 13 +group_ops_bls12381_uncompressed_g1_sum_max_terms: 2000 +hmac_hmac_sha3_256_cost_base: 52 +hmac_hmac_sha3_256_input_cost_per_byte: 2 +hmac_hmac_sha3_256_input_cost_per_block: 2 +check_zklogin_id_cost_base: 200 +check_zklogin_issuer_cost_base: 200 +bcs_per_byte_serialized_cost: 2 +bcs_legacy_min_output_size_cost: 1 +bcs_failure_cost: 52 +hash_sha2_256_base_cost: 52 +hash_sha2_256_per_byte_cost: 2 +hash_sha2_256_legacy_min_input_len_cost: 1 +hash_sha3_256_base_cost: 52 +hash_sha3_256_per_byte_cost: 2 +hash_sha3_256_legacy_min_input_len_cost: 1 +type_name_get_base_cost: 52 +type_name_get_per_byte_cost: 2 +string_check_utf8_base_cost: 52 +string_check_utf8_per_byte_cost: 2 +string_is_char_boundary_base_cost: 52 +string_sub_string_base_cost: 52 +string_sub_string_per_byte_cost: 2 +string_index_of_base_cost: 52 +string_index_of_per_byte_pattern_cost: 2 +string_index_of_per_byte_searched_cost: 2 +vector_empty_base_cost: 52 +vector_length_base_cost: 52 +vector_push_back_base_cost: 52 +vector_push_back_legacy_per_abstract_memory_unit_cost: 2 +vector_borrow_base_cost: 52 +vector_pop_back_base_cost: 52 +vector_destroy_empty_base_cost: 52 +vector_swap_base_cost: 52 +debug_print_base_cost: 52 +debug_print_stack_trace_base_cost: 52 +execution_version: 3 +consensus_bad_nodes_stake_threshold: 20 +max_jwk_votes_per_validator_per_epoch: 240 +max_age_of_jwk_in_epochs: 1 +random_beacon_reduction_allowed_delta: 800 +random_beacon_reduction_lower_bound: 500 +random_beacon_dkg_timeout_round: 3000 +random_beacon_min_round_interval_ms: 500 +random_beacon_dkg_version: 1 +consensus_max_transaction_size_bytes: 262144 +consensus_max_transactions_in_block_bytes: 524288 +consensus_max_num_transactions_in_block: 512 +max_accumulated_txn_cost_per_object_in_narwhal_commit: 40 +max_deferral_rounds_for_congestion_control: 10 +max_txn_cost_overage_per_object_in_commit: 18446744073709551615 +min_checkpoint_interval_ms: 200 
+checkpoint_summary_version_specific_data: 1 +max_soft_bundle_size: 5 +bridge_should_try_to_finalize_committee: true +max_accumulated_txn_cost_per_object_in_mysticeti_commit: 18500000 +max_accumulated_randomness_txn_cost_per_object_in_mysticeti_commit: 3700000 +gas_budget_based_txn_cost_cap_factor: 400000 +gas_budget_based_txn_cost_absolute_cap_commit_count: 50 + diff --git a/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__version_66.snap b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__version_66.snap new file mode 100644 index 0000000000000..747c437d6bbee --- /dev/null +++ b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__version_66.snap @@ -0,0 +1,340 @@ +--- +source: crates/sui-protocol-config/src/lib.rs +expression: "ProtocolConfig::get_for_version(cur, *chain_id)" +--- +version: 66 +feature_flags: + package_upgrades: true + commit_root_state_digest: true + advance_epoch_start_time_in_safe_mode: true + loaded_child_objects_fixed: true + missing_type_is_compatibility_error: true + scoring_decision_with_validity_cutoff: true + consensus_order_end_of_epoch_last: true + disallow_adding_abilities_on_upgrade: true + disable_invariant_violation_check_in_swap_loc: true + advance_to_highest_supported_protocol_version: true + ban_entry_init: true + package_digest_hash_module: true + disallow_change_struct_type_params_on_upgrade: true + no_extraneous_module_bytes: true + narwhal_versioned_metadata: true + zklogin_auth: true + consensus_transaction_ordering: ByGasPrice + simplified_unwrap_then_delete: true + upgraded_multisig_supported: true + txn_base_cost_as_multiplier: true + shared_object_deletion: true + narwhal_new_leader_election_schedule: true + loaded_child_object_format: true + enable_jwk_consensus_updates: true + end_of_epoch_transaction_supported: true + simple_conservation_checks: true + loaded_child_object_format_type: true + receive_objects: true + random_beacon: true + bridge: true + enable_effects_v2: true + narwhal_certificate_v2: true + verify_legacy_zklogin_address: true + recompute_has_public_transfer_in_execution: true + accept_zklogin_in_multisig: true + include_consensus_digest_in_prologue: true + hardened_otw_check: true + allow_receiving_object_id: true + enable_poseidon: true + enable_coin_deny_list: true + enable_group_ops_native_functions: true + enable_group_ops_native_function_msm: true + reject_mutable_random_on_entry_functions: true + per_object_congestion_control_mode: TotalTxCount + consensus_choice: Mysticeti + consensus_network: Tonic + zklogin_max_epoch_upper_bound_delta: 30 + mysticeti_leader_scoring_and_schedule: true + reshare_at_same_initial_version: true + resolve_abort_locations_to_package_id: true + mysticeti_use_committed_subdag_digest: true + enable_vdf: true + record_consensus_determined_version_assignments_in_prologue: true + fresh_vm_on_framework_upgrade: true + prepend_prologue_tx_in_consensus_commit_in_checkpoints: true + mysticeti_num_leaders_per_round: 1 + soft_bundle: true + enable_coin_deny_list_v2: true + passkey_auth: true + authority_capabilities_v2: true + rethrow_serialization_type_layout_errors: true + consensus_distributed_vote_scoring_strategy: true + consensus_round_prober: true + validate_identifier_inputs: true + mysticeti_fastpath: true + relocate_event_module: true +max_tx_size_bytes: 131072 +max_input_objects: 2048 +max_size_written_objects: 5000000 +max_size_written_objects_system_tx: 50000000 +max_serialized_tx_effects_size_bytes: 524288 
+max_serialized_tx_effects_size_bytes_system_tx: 8388608 +max_gas_payment_objects: 256 +max_modules_in_publish: 64 +max_package_dependencies: 32 +max_arguments: 512 +max_type_arguments: 16 +max_type_argument_depth: 16 +max_pure_argument_size: 16384 +max_programmable_tx_commands: 1024 +move_binary_format_version: 7 +min_move_binary_format_version: 6 +binary_module_handles: 100 +binary_struct_handles: 300 +binary_function_handles: 1500 +binary_function_instantiations: 750 +binary_signatures: 1000 +binary_constant_pool: 4000 +binary_identifiers: 10000 +binary_address_identifiers: 100 +binary_struct_defs: 200 +binary_struct_def_instantiations: 100 +binary_function_defs: 1000 +binary_field_handles: 500 +binary_field_instantiations: 250 +binary_friend_decls: 100 +max_move_object_size: 256000 +max_move_package_size: 102400 +max_publish_or_upgrade_per_ptb: 5 +max_tx_gas: 50000000000 +max_gas_price: 100000 +max_gas_computation_bucket: 5000000 +gas_rounding_step: 1000 +max_loop_depth: 5 +max_generic_instantiation_length: 32 +max_function_parameters: 128 +max_basic_blocks: 1024 +max_value_stack_size: 1024 +max_type_nodes: 256 +max_push_size: 10000 +max_struct_definitions: 200 +max_function_definitions: 1000 +max_fields_in_struct: 32 +max_dependency_depth: 100 +max_num_event_emit: 1024 +max_num_new_move_object_ids: 2048 +max_num_new_move_object_ids_system_tx: 32768 +max_num_deleted_move_object_ids: 2048 +max_num_deleted_move_object_ids_system_tx: 32768 +max_num_transferred_move_object_ids: 2048 +max_num_transferred_move_object_ids_system_tx: 32768 +max_event_emit_size: 256000 +max_event_emit_size_total: 65536000 +max_move_vector_len: 262144 +max_move_identifier_len: 128 +max_move_value_depth: 128 +max_back_edges_per_function: 10000 +max_back_edges_per_module: 10000 +max_verifier_meter_ticks_per_function: 16000000 +max_meter_ticks_per_module: 16000000 +max_meter_ticks_per_package: 16000000 +object_runtime_max_num_cached_objects: 1000 +object_runtime_max_num_cached_objects_system_tx: 16000 +object_runtime_max_num_store_entries: 1000 +object_runtime_max_num_store_entries_system_tx: 16000 +base_tx_cost_fixed: 1000 +package_publish_cost_fixed: 1000 +base_tx_cost_per_byte: 0 +package_publish_cost_per_byte: 80 +obj_access_cost_read_per_byte: 15 +obj_access_cost_mutate_per_byte: 40 +obj_access_cost_delete_per_byte: 40 +obj_access_cost_verify_per_byte: 200 +max_type_to_layout_nodes: 512 +gas_model_version: 8 +obj_data_cost_refundable: 100 +obj_metadata_cost_non_refundable: 50 +storage_rebate_rate: 9900 +storage_fund_reinvest_rate: 500 +reward_slashing_rate: 10000 +storage_gas_price: 76 +max_transactions_per_checkpoint: 10000 +max_checkpoint_size_bytes: 31457280 +buffer_stake_for_protocol_upgrade_bps: 5000 +address_from_bytes_cost_base: 52 +address_to_u256_cost_base: 52 +address_from_u256_cost_base: 52 +config_read_setting_impl_cost_base: 100 +config_read_setting_impl_cost_per_byte: 40 +dynamic_field_hash_type_and_key_cost_base: 100 +dynamic_field_hash_type_and_key_type_cost_per_byte: 2 +dynamic_field_hash_type_and_key_value_cost_per_byte: 2 +dynamic_field_hash_type_and_key_type_tag_cost_per_byte: 2 +dynamic_field_add_child_object_cost_base: 100 +dynamic_field_add_child_object_type_cost_per_byte: 10 +dynamic_field_add_child_object_value_cost_per_byte: 10 +dynamic_field_add_child_object_struct_tag_cost_per_byte: 10 +dynamic_field_borrow_child_object_cost_base: 100 +dynamic_field_borrow_child_object_child_ref_cost_per_byte: 10 +dynamic_field_borrow_child_object_type_cost_per_byte: 10 
+dynamic_field_remove_child_object_cost_base: 100 +dynamic_field_remove_child_object_child_cost_per_byte: 2 +dynamic_field_remove_child_object_type_cost_per_byte: 2 +dynamic_field_has_child_object_cost_base: 100 +dynamic_field_has_child_object_with_ty_cost_base: 100 +dynamic_field_has_child_object_with_ty_type_cost_per_byte: 2 +dynamic_field_has_child_object_with_ty_type_tag_cost_per_byte: 2 +event_emit_cost_base: 52 +event_emit_value_size_derivation_cost_per_byte: 2 +event_emit_tag_size_derivation_cost_per_byte: 5 +event_emit_output_cost_per_byte: 10 +object_borrow_uid_cost_base: 52 +object_delete_impl_cost_base: 52 +object_record_new_uid_cost_base: 52 +transfer_transfer_internal_cost_base: 52 +transfer_freeze_object_cost_base: 52 +transfer_share_object_cost_base: 52 +transfer_receive_object_cost_base: 52 +tx_context_derive_id_cost_base: 52 +types_is_one_time_witness_cost_base: 52 +types_is_one_time_witness_type_tag_cost_per_byte: 2 +types_is_one_time_witness_type_cost_per_byte: 2 +validator_validate_metadata_cost_base: 52 +validator_validate_metadata_data_cost_per_byte: 2 +crypto_invalid_arguments_cost: 100 +bls12381_bls12381_min_sig_verify_cost_base: 52 +bls12381_bls12381_min_sig_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_sig_verify_msg_cost_per_block: 2 +bls12381_bls12381_min_pk_verify_cost_base: 52 +bls12381_bls12381_min_pk_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_pk_verify_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_keccak256_cost_base: 52 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_sha256_cost_base: 52 +ecdsa_k1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_k1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_k1_decompress_pubkey_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_k1_secp256k1_verify_sha256_cost_base: 52 +ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_keccak256_cost_base: 52 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_sha256_cost_base: 52 +ecdsa_r1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_keccak256_cost_base: 52 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_sha256_cost_base: 52 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_block: 2 +ecvrf_ecvrf_verify_cost_base: 52 +ecvrf_ecvrf_verify_alpha_string_cost_per_byte: 2 +ecvrf_ecvrf_verify_alpha_string_cost_per_block: 2 +ed25519_ed25519_verify_cost_base: 52 +ed25519_ed25519_verify_msg_cost_per_byte: 2 +ed25519_ed25519_verify_msg_cost_per_block: 2 +groth16_prepare_verifying_key_bls12381_cost_base: 52 +groth16_prepare_verifying_key_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bn254_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_public_input_cost_per_byte: 2 +hash_blake2b256_cost_base: 52 +hash_blake2b256_data_cost_per_byte: 2 +hash_blake2b256_data_cost_per_block: 2 +hash_keccak256_cost_base: 52 
+hash_keccak256_data_cost_per_byte: 2 +hash_keccak256_data_cost_per_block: 2 +poseidon_bn254_cost_base: 260 +poseidon_bn254_cost_per_block: 10 +group_ops_bls12381_decode_scalar_cost: 52 +group_ops_bls12381_decode_g1_cost: 52 +group_ops_bls12381_decode_g2_cost: 52 +group_ops_bls12381_decode_gt_cost: 52 +group_ops_bls12381_scalar_add_cost: 52 +group_ops_bls12381_g1_add_cost: 52 +group_ops_bls12381_g2_add_cost: 52 +group_ops_bls12381_gt_add_cost: 52 +group_ops_bls12381_scalar_sub_cost: 52 +group_ops_bls12381_g1_sub_cost: 52 +group_ops_bls12381_g2_sub_cost: 52 +group_ops_bls12381_gt_sub_cost: 52 +group_ops_bls12381_scalar_mul_cost: 52 +group_ops_bls12381_g1_mul_cost: 52 +group_ops_bls12381_g2_mul_cost: 52 +group_ops_bls12381_gt_mul_cost: 52 +group_ops_bls12381_scalar_div_cost: 52 +group_ops_bls12381_g1_div_cost: 52 +group_ops_bls12381_g2_div_cost: 52 +group_ops_bls12381_gt_div_cost: 52 +group_ops_bls12381_g1_hash_to_base_cost: 52 +group_ops_bls12381_g2_hash_to_base_cost: 52 +group_ops_bls12381_g1_hash_to_cost_per_byte: 2 +group_ops_bls12381_g2_hash_to_cost_per_byte: 2 +group_ops_bls12381_g1_msm_base_cost: 52 +group_ops_bls12381_g2_msm_base_cost: 52 +group_ops_bls12381_g1_msm_base_cost_per_input: 52 +group_ops_bls12381_g2_msm_base_cost_per_input: 52 +group_ops_bls12381_msm_max_len: 32 +group_ops_bls12381_pairing_cost: 52 +hmac_hmac_sha3_256_cost_base: 52 +hmac_hmac_sha3_256_input_cost_per_byte: 2 +hmac_hmac_sha3_256_input_cost_per_block: 2 +check_zklogin_id_cost_base: 200 +check_zklogin_issuer_cost_base: 200 +vdf_verify_vdf_cost: 1500 +vdf_hash_to_input_cost: 100 +bcs_per_byte_serialized_cost: 2 +bcs_legacy_min_output_size_cost: 1 +bcs_failure_cost: 52 +hash_sha2_256_base_cost: 52 +hash_sha2_256_per_byte_cost: 2 +hash_sha2_256_legacy_min_input_len_cost: 1 +hash_sha3_256_base_cost: 52 +hash_sha3_256_per_byte_cost: 2 +hash_sha3_256_legacy_min_input_len_cost: 1 +type_name_get_base_cost: 52 +type_name_get_per_byte_cost: 2 +string_check_utf8_base_cost: 52 +string_check_utf8_per_byte_cost: 2 +string_is_char_boundary_base_cost: 52 +string_sub_string_base_cost: 52 +string_sub_string_per_byte_cost: 2 +string_index_of_base_cost: 52 +string_index_of_per_byte_pattern_cost: 2 +string_index_of_per_byte_searched_cost: 2 +vector_empty_base_cost: 52 +vector_length_base_cost: 52 +vector_push_back_base_cost: 52 +vector_push_back_legacy_per_abstract_memory_unit_cost: 2 +vector_borrow_base_cost: 52 +vector_pop_back_base_cost: 52 +vector_destroy_empty_base_cost: 52 +vector_swap_base_cost: 52 +debug_print_base_cost: 52 +debug_print_stack_trace_base_cost: 52 +execution_version: 3 +consensus_bad_nodes_stake_threshold: 20 +max_jwk_votes_per_validator_per_epoch: 240 +max_age_of_jwk_in_epochs: 1 +random_beacon_reduction_allowed_delta: 800 +random_beacon_reduction_lower_bound: 700 +random_beacon_dkg_timeout_round: 3000 +random_beacon_min_round_interval_ms: 500 +random_beacon_dkg_version: 1 +consensus_max_transaction_size_bytes: 262144 +consensus_max_transactions_in_block_bytes: 524288 +consensus_max_num_transactions_in_block: 512 +max_accumulated_txn_cost_per_object_in_narwhal_commit: 40 +max_deferral_rounds_for_congestion_control: 10 +min_checkpoint_interval_ms: 200 +checkpoint_summary_version_specific_data: 1 +max_soft_bundle_size: 5 +bridge_should_try_to_finalize_committee: true +max_accumulated_txn_cost_per_object_in_mysticeti_commit: 3 +gas_budget_based_txn_cost_cap_factor: 400000 + diff --git a/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__version_67.snap 
b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__version_67.snap new file mode 100644 index 0000000000000..f7710f5f46b34 --- /dev/null +++ b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__version_67.snap @@ -0,0 +1,340 @@ +--- +source: crates/sui-protocol-config/src/lib.rs +expression: "ProtocolConfig::get_for_version(cur, *chain_id)" +--- +version: 67 +feature_flags: + package_upgrades: true + commit_root_state_digest: true + advance_epoch_start_time_in_safe_mode: true + loaded_child_objects_fixed: true + missing_type_is_compatibility_error: true + scoring_decision_with_validity_cutoff: true + consensus_order_end_of_epoch_last: true + disallow_adding_abilities_on_upgrade: true + disable_invariant_violation_check_in_swap_loc: true + advance_to_highest_supported_protocol_version: true + ban_entry_init: true + package_digest_hash_module: true + disallow_change_struct_type_params_on_upgrade: true + no_extraneous_module_bytes: true + narwhal_versioned_metadata: true + zklogin_auth: true + consensus_transaction_ordering: ByGasPrice + simplified_unwrap_then_delete: true + upgraded_multisig_supported: true + txn_base_cost_as_multiplier: true + shared_object_deletion: true + narwhal_new_leader_election_schedule: true + loaded_child_object_format: true + enable_jwk_consensus_updates: true + end_of_epoch_transaction_supported: true + simple_conservation_checks: true + loaded_child_object_format_type: true + receive_objects: true + random_beacon: true + bridge: true + enable_effects_v2: true + narwhal_certificate_v2: true + verify_legacy_zklogin_address: true + recompute_has_public_transfer_in_execution: true + accept_zklogin_in_multisig: true + include_consensus_digest_in_prologue: true + hardened_otw_check: true + allow_receiving_object_id: true + enable_poseidon: true + enable_coin_deny_list: true + enable_group_ops_native_functions: true + enable_group_ops_native_function_msm: true + reject_mutable_random_on_entry_functions: true + per_object_congestion_control_mode: TotalTxCount + consensus_choice: Mysticeti + consensus_network: Tonic + zklogin_max_epoch_upper_bound_delta: 30 + mysticeti_leader_scoring_and_schedule: true + reshare_at_same_initial_version: true + resolve_abort_locations_to_package_id: true + mysticeti_use_committed_subdag_digest: true + enable_vdf: true + record_consensus_determined_version_assignments_in_prologue: true + fresh_vm_on_framework_upgrade: true + prepend_prologue_tx_in_consensus_commit_in_checkpoints: true + mysticeti_num_leaders_per_round: 1 + soft_bundle: true + enable_coin_deny_list_v2: true + passkey_auth: true + authority_capabilities_v2: true + rethrow_serialization_type_layout_errors: true + consensus_distributed_vote_scoring_strategy: true + consensus_round_prober: true + validate_identifier_inputs: true + mysticeti_fastpath: true + relocate_event_module: true +max_tx_size_bytes: 131072 +max_input_objects: 2048 +max_size_written_objects: 5000000 +max_size_written_objects_system_tx: 50000000 +max_serialized_tx_effects_size_bytes: 524288 +max_serialized_tx_effects_size_bytes_system_tx: 8388608 +max_gas_payment_objects: 256 +max_modules_in_publish: 64 +max_package_dependencies: 32 +max_arguments: 512 +max_type_arguments: 16 +max_type_argument_depth: 16 +max_pure_argument_size: 16384 +max_programmable_tx_commands: 1024 +move_binary_format_version: 7 +min_move_binary_format_version: 6 +binary_module_handles: 100 +binary_struct_handles: 300 +binary_function_handles: 1500 +binary_function_instantiations: 750 
+binary_signatures: 1000 +binary_constant_pool: 4000 +binary_identifiers: 10000 +binary_address_identifiers: 100 +binary_struct_defs: 200 +binary_struct_def_instantiations: 100 +binary_function_defs: 1000 +binary_field_handles: 500 +binary_field_instantiations: 250 +binary_friend_decls: 100 +max_move_object_size: 256000 +max_move_package_size: 102400 +max_publish_or_upgrade_per_ptb: 5 +max_tx_gas: 50000000000 +max_gas_price: 100000 +max_gas_computation_bucket: 5000000 +gas_rounding_step: 1000 +max_loop_depth: 5 +max_generic_instantiation_length: 32 +max_function_parameters: 128 +max_basic_blocks: 1024 +max_value_stack_size: 1024 +max_type_nodes: 256 +max_push_size: 10000 +max_struct_definitions: 200 +max_function_definitions: 1000 +max_fields_in_struct: 32 +max_dependency_depth: 100 +max_num_event_emit: 1024 +max_num_new_move_object_ids: 2048 +max_num_new_move_object_ids_system_tx: 32768 +max_num_deleted_move_object_ids: 2048 +max_num_deleted_move_object_ids_system_tx: 32768 +max_num_transferred_move_object_ids: 2048 +max_num_transferred_move_object_ids_system_tx: 32768 +max_event_emit_size: 256000 +max_event_emit_size_total: 65536000 +max_move_vector_len: 262144 +max_move_identifier_len: 128 +max_move_value_depth: 128 +max_back_edges_per_function: 10000 +max_back_edges_per_module: 10000 +max_verifier_meter_ticks_per_function: 16000000 +max_meter_ticks_per_module: 16000000 +max_meter_ticks_per_package: 16000000 +object_runtime_max_num_cached_objects: 1000 +object_runtime_max_num_cached_objects_system_tx: 16000 +object_runtime_max_num_store_entries: 1000 +object_runtime_max_num_store_entries_system_tx: 16000 +base_tx_cost_fixed: 1000 +package_publish_cost_fixed: 1000 +base_tx_cost_per_byte: 0 +package_publish_cost_per_byte: 80 +obj_access_cost_read_per_byte: 15 +obj_access_cost_mutate_per_byte: 40 +obj_access_cost_delete_per_byte: 40 +obj_access_cost_verify_per_byte: 200 +max_type_to_layout_nodes: 512 +gas_model_version: 8 +obj_data_cost_refundable: 100 +obj_metadata_cost_non_refundable: 50 +storage_rebate_rate: 9900 +storage_fund_reinvest_rate: 500 +reward_slashing_rate: 10000 +storage_gas_price: 76 +max_transactions_per_checkpoint: 10000 +max_checkpoint_size_bytes: 31457280 +buffer_stake_for_protocol_upgrade_bps: 5000 +address_from_bytes_cost_base: 52 +address_to_u256_cost_base: 52 +address_from_u256_cost_base: 52 +config_read_setting_impl_cost_base: 100 +config_read_setting_impl_cost_per_byte: 40 +dynamic_field_hash_type_and_key_cost_base: 100 +dynamic_field_hash_type_and_key_type_cost_per_byte: 2 +dynamic_field_hash_type_and_key_value_cost_per_byte: 2 +dynamic_field_hash_type_and_key_type_tag_cost_per_byte: 2 +dynamic_field_add_child_object_cost_base: 100 +dynamic_field_add_child_object_type_cost_per_byte: 10 +dynamic_field_add_child_object_value_cost_per_byte: 10 +dynamic_field_add_child_object_struct_tag_cost_per_byte: 10 +dynamic_field_borrow_child_object_cost_base: 100 +dynamic_field_borrow_child_object_child_ref_cost_per_byte: 10 +dynamic_field_borrow_child_object_type_cost_per_byte: 10 +dynamic_field_remove_child_object_cost_base: 100 +dynamic_field_remove_child_object_child_cost_per_byte: 2 +dynamic_field_remove_child_object_type_cost_per_byte: 2 +dynamic_field_has_child_object_cost_base: 100 +dynamic_field_has_child_object_with_ty_cost_base: 100 +dynamic_field_has_child_object_with_ty_type_cost_per_byte: 2 +dynamic_field_has_child_object_with_ty_type_tag_cost_per_byte: 2 +event_emit_cost_base: 52 +event_emit_value_size_derivation_cost_per_byte: 2 
+event_emit_tag_size_derivation_cost_per_byte: 5 +event_emit_output_cost_per_byte: 10 +object_borrow_uid_cost_base: 52 +object_delete_impl_cost_base: 52 +object_record_new_uid_cost_base: 52 +transfer_transfer_internal_cost_base: 52 +transfer_freeze_object_cost_base: 52 +transfer_share_object_cost_base: 52 +transfer_receive_object_cost_base: 52 +tx_context_derive_id_cost_base: 52 +types_is_one_time_witness_cost_base: 52 +types_is_one_time_witness_type_tag_cost_per_byte: 2 +types_is_one_time_witness_type_cost_per_byte: 2 +validator_validate_metadata_cost_base: 52 +validator_validate_metadata_data_cost_per_byte: 2 +crypto_invalid_arguments_cost: 100 +bls12381_bls12381_min_sig_verify_cost_base: 52 +bls12381_bls12381_min_sig_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_sig_verify_msg_cost_per_block: 2 +bls12381_bls12381_min_pk_verify_cost_base: 52 +bls12381_bls12381_min_pk_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_pk_verify_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_keccak256_cost_base: 52 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_sha256_cost_base: 52 +ecdsa_k1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_k1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_k1_decompress_pubkey_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_k1_secp256k1_verify_sha256_cost_base: 52 +ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_keccak256_cost_base: 52 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_sha256_cost_base: 52 +ecdsa_r1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_keccak256_cost_base: 52 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_sha256_cost_base: 52 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_block: 2 +ecvrf_ecvrf_verify_cost_base: 52 +ecvrf_ecvrf_verify_alpha_string_cost_per_byte: 2 +ecvrf_ecvrf_verify_alpha_string_cost_per_block: 2 +ed25519_ed25519_verify_cost_base: 52 +ed25519_ed25519_verify_msg_cost_per_byte: 2 +ed25519_ed25519_verify_msg_cost_per_block: 2 +groth16_prepare_verifying_key_bls12381_cost_base: 52 +groth16_prepare_verifying_key_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bn254_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_public_input_cost_per_byte: 2 +hash_blake2b256_cost_base: 52 +hash_blake2b256_data_cost_per_byte: 2 +hash_blake2b256_data_cost_per_block: 2 +hash_keccak256_cost_base: 52 +hash_keccak256_data_cost_per_byte: 2 +hash_keccak256_data_cost_per_block: 2 +poseidon_bn254_cost_base: 260 +poseidon_bn254_cost_per_block: 10 +group_ops_bls12381_decode_scalar_cost: 52 +group_ops_bls12381_decode_g1_cost: 52 +group_ops_bls12381_decode_g2_cost: 52 +group_ops_bls12381_decode_gt_cost: 52 +group_ops_bls12381_scalar_add_cost: 52 +group_ops_bls12381_g1_add_cost: 52 +group_ops_bls12381_g2_add_cost: 52 +group_ops_bls12381_gt_add_cost: 52 
+group_ops_bls12381_scalar_sub_cost: 52 +group_ops_bls12381_g1_sub_cost: 52 +group_ops_bls12381_g2_sub_cost: 52 +group_ops_bls12381_gt_sub_cost: 52 +group_ops_bls12381_scalar_mul_cost: 52 +group_ops_bls12381_g1_mul_cost: 52 +group_ops_bls12381_g2_mul_cost: 52 +group_ops_bls12381_gt_mul_cost: 52 +group_ops_bls12381_scalar_div_cost: 52 +group_ops_bls12381_g1_div_cost: 52 +group_ops_bls12381_g2_div_cost: 52 +group_ops_bls12381_gt_div_cost: 52 +group_ops_bls12381_g1_hash_to_base_cost: 52 +group_ops_bls12381_g2_hash_to_base_cost: 52 +group_ops_bls12381_g1_hash_to_cost_per_byte: 2 +group_ops_bls12381_g2_hash_to_cost_per_byte: 2 +group_ops_bls12381_g1_msm_base_cost: 52 +group_ops_bls12381_g2_msm_base_cost: 52 +group_ops_bls12381_g1_msm_base_cost_per_input: 52 +group_ops_bls12381_g2_msm_base_cost_per_input: 52 +group_ops_bls12381_msm_max_len: 32 +group_ops_bls12381_pairing_cost: 52 +hmac_hmac_sha3_256_cost_base: 52 +hmac_hmac_sha3_256_input_cost_per_byte: 2 +hmac_hmac_sha3_256_input_cost_per_block: 2 +check_zklogin_id_cost_base: 200 +check_zklogin_issuer_cost_base: 200 +vdf_verify_vdf_cost: 1500 +vdf_hash_to_input_cost: 100 +bcs_per_byte_serialized_cost: 2 +bcs_legacy_min_output_size_cost: 1 +bcs_failure_cost: 52 +hash_sha2_256_base_cost: 52 +hash_sha2_256_per_byte_cost: 2 +hash_sha2_256_legacy_min_input_len_cost: 1 +hash_sha3_256_base_cost: 52 +hash_sha3_256_per_byte_cost: 2 +hash_sha3_256_legacy_min_input_len_cost: 1 +type_name_get_base_cost: 52 +type_name_get_per_byte_cost: 2 +string_check_utf8_base_cost: 52 +string_check_utf8_per_byte_cost: 2 +string_is_char_boundary_base_cost: 52 +string_sub_string_base_cost: 52 +string_sub_string_per_byte_cost: 2 +string_index_of_base_cost: 52 +string_index_of_per_byte_pattern_cost: 2 +string_index_of_per_byte_searched_cost: 2 +vector_empty_base_cost: 52 +vector_length_base_cost: 52 +vector_push_back_base_cost: 52 +vector_push_back_legacy_per_abstract_memory_unit_cost: 2 +vector_borrow_base_cost: 52 +vector_pop_back_base_cost: 52 +vector_destroy_empty_base_cost: 52 +vector_swap_base_cost: 52 +debug_print_base_cost: 52 +debug_print_stack_trace_base_cost: 52 +execution_version: 3 +consensus_bad_nodes_stake_threshold: 20 +max_jwk_votes_per_validator_per_epoch: 240 +max_age_of_jwk_in_epochs: 1 +random_beacon_reduction_allowed_delta: 800 +random_beacon_reduction_lower_bound: 700 +random_beacon_dkg_timeout_round: 3000 +random_beacon_min_round_interval_ms: 500 +random_beacon_dkg_version: 1 +consensus_max_transaction_size_bytes: 262144 +consensus_max_transactions_in_block_bytes: 524288 +consensus_max_num_transactions_in_block: 512 +max_accumulated_txn_cost_per_object_in_narwhal_commit: 40 +max_deferral_rounds_for_congestion_control: 10 +min_checkpoint_interval_ms: 200 +checkpoint_summary_version_specific_data: 1 +max_soft_bundle_size: 5 +bridge_should_try_to_finalize_committee: true +max_accumulated_txn_cost_per_object_in_mysticeti_commit: 3 +gas_budget_based_txn_cost_cap_factor: 400000 + diff --git a/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__version_68.snap b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__version_68.snap new file mode 100644 index 0000000000000..68e55955524b4 --- /dev/null +++ b/crates/sui-protocol-config/src/snapshots/sui_protocol_config__test__version_68.snap @@ -0,0 +1,350 @@ +--- +source: crates/sui-protocol-config/src/lib.rs +expression: "ProtocolConfig::get_for_version(cur, *chain_id)" +--- +version: 68 +feature_flags: + package_upgrades: true + commit_root_state_digest: true + 
advance_epoch_start_time_in_safe_mode: true + loaded_child_objects_fixed: true + missing_type_is_compatibility_error: true + scoring_decision_with_validity_cutoff: true + consensus_order_end_of_epoch_last: true + disallow_adding_abilities_on_upgrade: true + disable_invariant_violation_check_in_swap_loc: true + advance_to_highest_supported_protocol_version: true + ban_entry_init: true + package_digest_hash_module: true + disallow_change_struct_type_params_on_upgrade: true + no_extraneous_module_bytes: true + narwhal_versioned_metadata: true + zklogin_auth: true + consensus_transaction_ordering: ByGasPrice + simplified_unwrap_then_delete: true + upgraded_multisig_supported: true + txn_base_cost_as_multiplier: true + shared_object_deletion: true + narwhal_new_leader_election_schedule: true + loaded_child_object_format: true + enable_jwk_consensus_updates: true + end_of_epoch_transaction_supported: true + simple_conservation_checks: true + loaded_child_object_format_type: true + receive_objects: true + random_beacon: true + bridge: true + enable_effects_v2: true + narwhal_certificate_v2: true + verify_legacy_zklogin_address: true + recompute_has_public_transfer_in_execution: true + accept_zklogin_in_multisig: true + include_consensus_digest_in_prologue: true + hardened_otw_check: true + allow_receiving_object_id: true + enable_poseidon: true + enable_coin_deny_list: true + enable_group_ops_native_functions: true + enable_group_ops_native_function_msm: true + reject_mutable_random_on_entry_functions: true + per_object_congestion_control_mode: TotalGasBudgetWithCap + consensus_choice: Mysticeti + consensus_network: Tonic + zklogin_max_epoch_upper_bound_delta: 30 + mysticeti_leader_scoring_and_schedule: true + reshare_at_same_initial_version: true + resolve_abort_locations_to_package_id: true + mysticeti_use_committed_subdag_digest: true + enable_vdf: true + record_consensus_determined_version_assignments_in_prologue: true + fresh_vm_on_framework_upgrade: true + prepend_prologue_tx_in_consensus_commit_in_checkpoints: true + mysticeti_num_leaders_per_round: 1 + soft_bundle: true + enable_coin_deny_list_v2: true + passkey_auth: true + authority_capabilities_v2: true + rethrow_serialization_type_layout_errors: true + consensus_distributed_vote_scoring_strategy: true + consensus_round_prober: true + validate_identifier_inputs: true + mysticeti_fastpath: true + relocate_event_module: true + uncompressed_g1_group_elements: true + disallow_new_modules_in_deps_only_packages: true +max_tx_size_bytes: 131072 +max_input_objects: 2048 +max_size_written_objects: 5000000 +max_size_written_objects_system_tx: 50000000 +max_serialized_tx_effects_size_bytes: 524288 +max_serialized_tx_effects_size_bytes_system_tx: 8388608 +max_gas_payment_objects: 256 +max_modules_in_publish: 64 +max_package_dependencies: 32 +max_arguments: 512 +max_type_arguments: 16 +max_type_argument_depth: 16 +max_pure_argument_size: 16384 +max_programmable_tx_commands: 1024 +move_binary_format_version: 7 +min_move_binary_format_version: 6 +binary_module_handles: 100 +binary_struct_handles: 300 +binary_function_handles: 1500 +binary_function_instantiations: 750 +binary_signatures: 1000 +binary_constant_pool: 4000 +binary_identifiers: 10000 +binary_address_identifiers: 100 +binary_struct_defs: 200 +binary_struct_def_instantiations: 100 +binary_function_defs: 1000 +binary_field_handles: 500 +binary_field_instantiations: 250 +binary_friend_decls: 100 +max_move_object_size: 256000 +max_move_package_size: 102400 +max_publish_or_upgrade_per_ptb: 5 
+max_tx_gas: 50000000000 +max_gas_price: 100000 +max_gas_computation_bucket: 5000000 +gas_rounding_step: 1000 +max_loop_depth: 5 +max_generic_instantiation_length: 32 +max_function_parameters: 128 +max_basic_blocks: 1024 +max_value_stack_size: 1024 +max_type_nodes: 256 +max_push_size: 10000 +max_struct_definitions: 200 +max_function_definitions: 1000 +max_fields_in_struct: 32 +max_dependency_depth: 100 +max_num_event_emit: 1024 +max_num_new_move_object_ids: 2048 +max_num_new_move_object_ids_system_tx: 32768 +max_num_deleted_move_object_ids: 2048 +max_num_deleted_move_object_ids_system_tx: 32768 +max_num_transferred_move_object_ids: 2048 +max_num_transferred_move_object_ids_system_tx: 32768 +max_event_emit_size: 256000 +max_event_emit_size_total: 65536000 +max_move_vector_len: 262144 +max_move_identifier_len: 128 +max_move_value_depth: 128 +max_back_edges_per_function: 10000 +max_back_edges_per_module: 10000 +max_verifier_meter_ticks_per_function: 16000000 +max_meter_ticks_per_module: 16000000 +max_meter_ticks_per_package: 16000000 +object_runtime_max_num_cached_objects: 1000 +object_runtime_max_num_cached_objects_system_tx: 16000 +object_runtime_max_num_store_entries: 1000 +object_runtime_max_num_store_entries_system_tx: 16000 +base_tx_cost_fixed: 1000 +package_publish_cost_fixed: 1000 +base_tx_cost_per_byte: 0 +package_publish_cost_per_byte: 80 +obj_access_cost_read_per_byte: 15 +obj_access_cost_mutate_per_byte: 40 +obj_access_cost_delete_per_byte: 40 +obj_access_cost_verify_per_byte: 200 +max_type_to_layout_nodes: 512 +gas_model_version: 8 +obj_data_cost_refundable: 100 +obj_metadata_cost_non_refundable: 50 +storage_rebate_rate: 9900 +storage_fund_reinvest_rate: 500 +reward_slashing_rate: 10000 +storage_gas_price: 76 +max_transactions_per_checkpoint: 10000 +max_checkpoint_size_bytes: 31457280 +buffer_stake_for_protocol_upgrade_bps: 5000 +address_from_bytes_cost_base: 52 +address_to_u256_cost_base: 52 +address_from_u256_cost_base: 52 +config_read_setting_impl_cost_base: 100 +config_read_setting_impl_cost_per_byte: 40 +dynamic_field_hash_type_and_key_cost_base: 100 +dynamic_field_hash_type_and_key_type_cost_per_byte: 2 +dynamic_field_hash_type_and_key_value_cost_per_byte: 2 +dynamic_field_hash_type_and_key_type_tag_cost_per_byte: 2 +dynamic_field_add_child_object_cost_base: 100 +dynamic_field_add_child_object_type_cost_per_byte: 10 +dynamic_field_add_child_object_value_cost_per_byte: 10 +dynamic_field_add_child_object_struct_tag_cost_per_byte: 10 +dynamic_field_borrow_child_object_cost_base: 100 +dynamic_field_borrow_child_object_child_ref_cost_per_byte: 10 +dynamic_field_borrow_child_object_type_cost_per_byte: 10 +dynamic_field_remove_child_object_cost_base: 100 +dynamic_field_remove_child_object_child_cost_per_byte: 2 +dynamic_field_remove_child_object_type_cost_per_byte: 2 +dynamic_field_has_child_object_cost_base: 100 +dynamic_field_has_child_object_with_ty_cost_base: 100 +dynamic_field_has_child_object_with_ty_type_cost_per_byte: 2 +dynamic_field_has_child_object_with_ty_type_tag_cost_per_byte: 2 +event_emit_cost_base: 52 +event_emit_value_size_derivation_cost_per_byte: 2 +event_emit_tag_size_derivation_cost_per_byte: 5 +event_emit_output_cost_per_byte: 10 +object_borrow_uid_cost_base: 52 +object_delete_impl_cost_base: 52 +object_record_new_uid_cost_base: 52 +transfer_transfer_internal_cost_base: 52 +transfer_freeze_object_cost_base: 52 +transfer_share_object_cost_base: 52 +transfer_receive_object_cost_base: 52 +tx_context_derive_id_cost_base: 52 +types_is_one_time_witness_cost_base: 
52 +types_is_one_time_witness_type_tag_cost_per_byte: 2 +types_is_one_time_witness_type_cost_per_byte: 2 +validator_validate_metadata_cost_base: 52 +validator_validate_metadata_data_cost_per_byte: 2 +crypto_invalid_arguments_cost: 100 +bls12381_bls12381_min_sig_verify_cost_base: 52 +bls12381_bls12381_min_sig_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_sig_verify_msg_cost_per_block: 2 +bls12381_bls12381_min_pk_verify_cost_base: 52 +bls12381_bls12381_min_pk_verify_msg_cost_per_byte: 2 +bls12381_bls12381_min_pk_verify_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_keccak256_cost_base: 52 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_k1_ecrecover_sha256_cost_base: 52 +ecdsa_k1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_k1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_k1_decompress_pubkey_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_cost_base: 52 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_k1_secp256k1_verify_sha256_cost_base: 52 +ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_k1_secp256k1_verify_sha256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_keccak256_cost_base: 52 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_keccak256_msg_cost_per_block: 2 +ecdsa_r1_ecrecover_sha256_cost_base: 52 +ecdsa_r1_ecrecover_sha256_msg_cost_per_byte: 2 +ecdsa_r1_ecrecover_sha256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_keccak256_cost_base: 52 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_keccak256_msg_cost_per_block: 2 +ecdsa_r1_secp256r1_verify_sha256_cost_base: 52 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_byte: 2 +ecdsa_r1_secp256r1_verify_sha256_msg_cost_per_block: 2 +ecvrf_ecvrf_verify_cost_base: 52 +ecvrf_ecvrf_verify_alpha_string_cost_per_byte: 2 +ecvrf_ecvrf_verify_alpha_string_cost_per_block: 2 +ed25519_ed25519_verify_cost_base: 52 +ed25519_ed25519_verify_msg_cost_per_byte: 2 +ed25519_ed25519_verify_msg_cost_per_block: 2 +groth16_prepare_verifying_key_bls12381_cost_base: 52 +groth16_prepare_verifying_key_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_base: 52 +groth16_verify_groth16_proof_internal_bls12381_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_bn254_cost_base: 52 +groth16_verify_groth16_proof_internal_bn254_cost_per_public_input: 2 +groth16_verify_groth16_proof_internal_public_input_cost_per_byte: 2 +hash_blake2b256_cost_base: 52 +hash_blake2b256_data_cost_per_byte: 2 +hash_blake2b256_data_cost_per_block: 2 +hash_keccak256_cost_base: 52 +hash_keccak256_data_cost_per_byte: 2 +hash_keccak256_data_cost_per_block: 2 +poseidon_bn254_cost_base: 260 +poseidon_bn254_cost_per_block: 10 +group_ops_bls12381_decode_scalar_cost: 52 +group_ops_bls12381_decode_g1_cost: 52 +group_ops_bls12381_decode_g2_cost: 52 +group_ops_bls12381_decode_gt_cost: 52 +group_ops_bls12381_scalar_add_cost: 52 +group_ops_bls12381_g1_add_cost: 52 +group_ops_bls12381_g2_add_cost: 52 +group_ops_bls12381_gt_add_cost: 52 +group_ops_bls12381_scalar_sub_cost: 52 +group_ops_bls12381_g1_sub_cost: 52 +group_ops_bls12381_g2_sub_cost: 52 +group_ops_bls12381_gt_sub_cost: 52 +group_ops_bls12381_scalar_mul_cost: 52 +group_ops_bls12381_g1_mul_cost: 52 +group_ops_bls12381_g2_mul_cost: 52 +group_ops_bls12381_gt_mul_cost: 52 +group_ops_bls12381_scalar_div_cost: 52 +group_ops_bls12381_g1_div_cost: 52 +group_ops_bls12381_g2_div_cost: 52 +group_ops_bls12381_gt_div_cost: 52 
+group_ops_bls12381_g1_hash_to_base_cost: 52 +group_ops_bls12381_g2_hash_to_base_cost: 52 +group_ops_bls12381_g1_hash_to_cost_per_byte: 2 +group_ops_bls12381_g2_hash_to_cost_per_byte: 2 +group_ops_bls12381_g1_msm_base_cost: 52 +group_ops_bls12381_g2_msm_base_cost: 52 +group_ops_bls12381_g1_msm_base_cost_per_input: 52 +group_ops_bls12381_g2_msm_base_cost_per_input: 52 +group_ops_bls12381_msm_max_len: 32 +group_ops_bls12381_pairing_cost: 52 +group_ops_bls12381_g1_to_uncompressed_g1_cost: 26 +group_ops_bls12381_uncompressed_g1_to_g1_cost: 52 +group_ops_bls12381_uncompressed_g1_sum_base_cost: 26 +group_ops_bls12381_uncompressed_g1_sum_cost_per_term: 13 +group_ops_bls12381_uncompressed_g1_sum_max_terms: 2000 +hmac_hmac_sha3_256_cost_base: 52 +hmac_hmac_sha3_256_input_cost_per_byte: 2 +hmac_hmac_sha3_256_input_cost_per_block: 2 +check_zklogin_id_cost_base: 200 +check_zklogin_issuer_cost_base: 200 +vdf_verify_vdf_cost: 1500 +vdf_hash_to_input_cost: 100 +bcs_per_byte_serialized_cost: 2 +bcs_legacy_min_output_size_cost: 1 +bcs_failure_cost: 52 +hash_sha2_256_base_cost: 52 +hash_sha2_256_per_byte_cost: 2 +hash_sha2_256_legacy_min_input_len_cost: 1 +hash_sha3_256_base_cost: 52 +hash_sha3_256_per_byte_cost: 2 +hash_sha3_256_legacy_min_input_len_cost: 1 +type_name_get_base_cost: 52 +type_name_get_per_byte_cost: 2 +string_check_utf8_base_cost: 52 +string_check_utf8_per_byte_cost: 2 +string_is_char_boundary_base_cost: 52 +string_sub_string_base_cost: 52 +string_sub_string_per_byte_cost: 2 +string_index_of_base_cost: 52 +string_index_of_per_byte_pattern_cost: 2 +string_index_of_per_byte_searched_cost: 2 +vector_empty_base_cost: 52 +vector_length_base_cost: 52 +vector_push_back_base_cost: 52 +vector_push_back_legacy_per_abstract_memory_unit_cost: 2 +vector_borrow_base_cost: 52 +vector_pop_back_base_cost: 52 +vector_destroy_empty_base_cost: 52 +vector_swap_base_cost: 52 +debug_print_base_cost: 52 +debug_print_stack_trace_base_cost: 52 +execution_version: 3 +consensus_bad_nodes_stake_threshold: 20 +max_jwk_votes_per_validator_per_epoch: 240 +max_age_of_jwk_in_epochs: 1 +random_beacon_reduction_allowed_delta: 800 +random_beacon_reduction_lower_bound: 500 +random_beacon_dkg_timeout_round: 3000 +random_beacon_min_round_interval_ms: 500 +random_beacon_dkg_version: 1 +consensus_max_transaction_size_bytes: 262144 +consensus_max_transactions_in_block_bytes: 524288 +consensus_max_num_transactions_in_block: 512 +max_accumulated_txn_cost_per_object_in_narwhal_commit: 40 +max_deferral_rounds_for_congestion_control: 10 +max_txn_cost_overage_per_object_in_commit: 18446744073709551615 +min_checkpoint_interval_ms: 200 +checkpoint_summary_version_specific_data: 1 +max_soft_bundle_size: 5 +bridge_should_try_to_finalize_committee: true +max_accumulated_txn_cost_per_object_in_mysticeti_commit: 18500000 +max_accumulated_randomness_txn_cost_per_object_in_mysticeti_commit: 3700000 +gas_budget_based_txn_cost_cap_factor: 400000 +gas_budget_based_txn_cost_absolute_cap_commit_count: 50 + diff --git a/crates/sui-proxy/src/peers.rs b/crates/sui-proxy/src/peers.rs index b139865f3a4fa..aec70efa490df 100644 --- a/crates/sui-proxy/src/peers.rs +++ b/crates/sui-proxy/src/peers.rs @@ -404,14 +404,22 @@ async fn extract_bridge( } }; // Parse the URL - let mut bridge_url = match Url::parse(&url_str) { + let bridge_url = match Url::parse(&url_str) { Ok(url) => url, Err(_) => { warn!(url_str, "Unable to parse http_rest_url"); return None; } }; - bridge_url.set_path("/metrics_pub_key"); + + // Append "metrics_pub_key" to the path + let bridge_url 
= match append_path_segment(bridge_url, "metrics_pub_key") { + Some(url) => url, + None => { + warn!(url_str, "Unable to append path segment to URL"); + return None; + } + }; // Use the host portion of the http_rest_url as the "name" let bridge_host = match bridge_url.host_str() { @@ -524,6 +532,11 @@ fn fallback_to_cached_key( } } +fn append_path_segment(mut url: Url, segment: &str) -> Option<Url> { + url.path_segments_mut().ok()?.pop_if_empty().push(segment); + Some(url) +} + #[cfg(test)] mod tests { use super::*; @@ -647,4 +660,62 @@ mod tests { "Cache should still contain the original key" ); } + + #[test] + fn test_append_path_segment() { + let test_cases = vec![ + ( + "https://example.com", + "metrics_pub_key", + "https://example.com/metrics_pub_key", + ), + ( + "https://example.com/api", + "metrics_pub_key", + "https://example.com/api/metrics_pub_key", + ), + ( + "https://example.com/", + "metrics_pub_key", + "https://example.com/metrics_pub_key", + ), + ( + "https://example.com/api/", + "metrics_pub_key", + "https://example.com/api/metrics_pub_key", + ), + ( + "https://example.com:8080", + "metrics_pub_key", + "https://example.com:8080/metrics_pub_key", + ), + ( + "https://example.com?param=value", + "metrics_pub_key", + "https://example.com/metrics_pub_key?param=value", + ), + ( + "https://example.com:8080/api/v1?param=value", + "metrics_pub_key", + "https://example.com:8080/api/v1/metrics_pub_key?param=value", + ), + ]; + + for (input_url, segment, expected_output) in test_cases { + let url = Url::parse(input_url).unwrap(); + let result = append_path_segment(url, segment); + assert!( + result.is_some(), + "Failed to append segment for URL: {}", + input_url + ); + let result_url = result.unwrap(); + assert_eq!( + result_url.as_str(), + expected_output, + "Unexpected result for input URL: {}", + input_url + ); + } + } } diff --git a/crates/sui-replay/src/data_fetcher.rs b/crates/sui-replay/src/data_fetcher.rs index 897e9cb511a6f..f7d5fcfcb0cc5 100644 --- a/crates/sui-replay/src/data_fetcher.rs +++ b/crates/sui-replay/src/data_fetcher.rs @@ -6,7 +6,7 @@ use crate::types::EPOCH_CHANGE_STRUCT_TAG; use async_trait::async_trait; use futures::future::join_all; use lru::LruCache; -use move_core_types::parser::parse_struct_tag; +use move_core_types::language_storage::StructTag; use parking_lot::RwLock; use rand::Rng; use std::collections::BTreeMap; @@ -568,7 +568,7 @@ impl DataFetcher for RemoteFetcher { reverse: bool, ) -> Result<Vec<SuiEvent>, ReplayEngineError> { let struct_tag_str = EPOCH_CHANGE_STRUCT_TAG.to_string(); - let struct_tag = parse_struct_tag(&struct_tag_str)?; + let struct_tag = StructTag::from_str(&struct_tag_str)?; let mut epoch_change_events: Vec<SuiEvent> = vec![]; let mut has_next_page = true; diff --git a/crates/sui-replay/src/replay.rs b/crates/sui-replay/src/replay.rs index b41e0eddf3b0e..6f140a122c57d 100644 --- a/crates/sui-replay/src/replay.rs +++ b/crates/sui-replay/src/replay.rs @@ -473,8 +473,13 @@ impl LocalExec { objs: Vec<Object>, protocol_version: u64, ) -> Result<Vec<Object>, ReplayEngineError> { - let syst_packages = self.system_package_versions_for_protocol_version(protocol_version)?; - let syst_packages_objs = self.multi_download(&syst_packages).await?; + let syst_packages_objs = if self.protocol_version.is_some_and(|i| i < 0) { + BuiltInFramework::genesis_objects().collect() + } else { + let syst_packages = + self.system_package_versions_for_protocol_version(protocol_version)?; + self.multi_download(&syst_packages).await? 
+ }; // Download latest version of all packages that are not system packages // This is okay since the versions can never change @@ -707,17 +712,6 @@ impl LocalExec { expensive_safety_check_config: ExpensiveSafetyCheckConfig, ) -> Result { let tx_digest = &tx_info.tx_digest; - // TODO: Support system transactions. - if tx_info.sender_signed_data.transaction_data().is_system_tx() { - warn!( - "System TX replay not supported: {}, skipping transaction", - tx_digest - ); - return Err(ReplayEngineError::TransactionNotSupported { - digest: *tx_digest, - reason: "System transaction".to_string(), - }); - } // Before protocol version 16, the generation of effects depends on the wrapped tombstones. // It is not possible to retrieve such data for replay. if tx_info.protocol_version.as_u64() < 16 { @@ -759,30 +753,32 @@ impl LocalExec { let expensive_checks = true; let transaction_kind = override_transaction_kind.unwrap_or(tx_info.kind.clone()); let certificate_deny_set = HashSet::new(); - let (inner_store, gas_status, effects, result) = if let Ok(gas_status) = SuiGasStatus::new( - tx_info.gas_budget, - tx_info.gas_price, - tx_info.reference_gas_price, - protocol_config, - ) { - executor.execute_transaction_to_effects( - &self, + let gas_status = if tx_info.kind.is_system_tx() { + SuiGasStatus::new_unmetered() + } else { + SuiGasStatus::new( + tx_info.gas_budget, + tx_info.gas_price, + tx_info.reference_gas_price, protocol_config, - metrics.clone(), - expensive_checks, - &certificate_deny_set, - &tx_info.executed_epoch, - tx_info.epoch_start_timestamp, - CheckedInputObjects::new_for_replay(input_objects.clone()), - tx_info.gas.clone(), - gas_status, - transaction_kind.clone(), - tx_info.sender, - *tx_digest, ) - } else { - unreachable!("Transaction was valid so gas status must be valid"); + .expect("Failed to create gas status") }; + let (inner_store, gas_status, effects, result) = executor.execute_transaction_to_effects( + &self, + protocol_config, + metrics.clone(), + expensive_checks, + &certificate_deny_set, + &tx_info.executed_epoch, + tx_info.epoch_start_timestamp, + CheckedInputObjects::new_for_replay(input_objects.clone()), + tx_info.gas.clone(), + gas_status, + transaction_kind.clone(), + tx_info.sender, + *tx_digest, + ); if let Err(err) = self.pretty_print_for_tracing( &gas_status, @@ -1798,7 +1794,11 @@ impl LocalExec { self.multi_download_and_store(&shared_refs).await?; // Download gas (although this should already be in cache from modified at versions?) 
- let gas_refs: Vec<_> = tx_info.gas.iter().map(|w| (w.0, w.1)).collect(); + let gas_refs: Vec<_> = tx_info + .gas + .iter() + .filter_map(|w| (w.0 != ObjectID::ZERO).then_some((w.0, w.1))) + .collect(); self.multi_download_and_store(&gas_refs).await?; // Fetch the input objects we know from the raw transaction diff --git a/crates/sui-rest-api/Cargo.toml b/crates/sui-rest-api/Cargo.toml index 3f6412fa11af6..249c5569b7e6b 100644 --- a/crates/sui-rest-api/Cargo.toml +++ b/crates/sui-rest-api/Cargo.toml @@ -27,6 +27,7 @@ prometheus.workspace = true openapiv3 = { git = "https://github.com/bmwill/openapiv3.git", rev = "ca4b4845b7c159a39f5c68ad8f7f76cb6f4d6963" } schemars.workspace = true documented = "0.6.0" +prost.workspace = true fastcrypto.workspace = true sui-types.workspace = true @@ -37,3 +38,4 @@ move-core-types.workspace = true [dev-dependencies] diffy = "0.3" +prost-build = "0.13.3" diff --git a/crates/sui-rest-api/openapi/openapi.json b/crates/sui-rest-api/openapi/openapi.json index 494555500fef9..3e264a26e6c3a 100644 --- a/crates/sui-rest-api/openapi/openapi.json +++ b/crates/sui-rest-api/openapi/openapi.json @@ -77,6 +77,150 @@ } } }, + "/checkpoints": { + "get": { + "tags": [ + "Checkpoint" + ], + "description": "[![stable](https://img.shields.io/badge/api-stable-53b576?style=for-the-badge)](#)\n\nList Checkpoints\n\nRequest a page of checkpoints, and optionally their contents, ordered by\n`CheckpointSequenceNumber`.\n\nIf the requested page is below the Node's `lowest_available_checkpoint`, a 410 will be\nreturned.", + "operationId": "List Checkpoints", + "parameters": [ + { + "in": "query", + "name": "contents", + "description": "Request `CheckpointContents` be included in the response", + "schema": { + "description": "Request `CheckpointContents` be included in the response", + "default": false, + "type": "boolean" + }, + "style": "form" + }, + { + "in": "query", + "name": "direction", + "description": "The direction to paginate in.\n\nDefaults to `descending` if not provided.", + "schema": { + "description": "The direction to paginate in.\n\nDefaults to `descending` if not provided.", + "allOf": [ + { + "$ref": "#/components/schemas/Direction" + } + ] + }, + "style": "form" + }, + { + "in": "query", + "name": "limit", + "description": "Page size limit for the response.\n\nDefaults to `50` if not provided with a maximum page size of `100`.", + "schema": { + "description": "Page size limit for the response.\n\nDefaults to `50` if not provided with a maximum page size of `100`.", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "style": "form" + }, + { + "in": "query", + "name": "start", + "description": "The checkpoint to start listing from.\n\nDefaults to the latest checkpoint if not provided.", + "schema": { + "description": "The checkpoint to start listing from.\n\nDefaults to the latest checkpoint if not provided.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "style": "form" + } + ], + "responses": { + "200": { + "description": "", + "headers": { + "x-sui-cursor": { + "style": "simple", + "schema": { + "type": "string" + } + } + }, + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/CheckpointResponse" + } + } + }, + "application/bcs": {}, + "application/x-protobuf": {} + } + }, + "410": { + "description": "" + }, + "500": { + "description": "" + } + } + } + }, + "/checkpoints/{checkpoint}": { + "get": { + "tags": [ + "Checkpoint" + ], + "description": 
"[![stable](https://img.shields.io/badge/api-stable-53b576?style=for-the-badge)](#)\n\nFetch a Checkpoint\n\nFetch a checkpoint either by `CheckpointSequenceNumber` (checkpoint height) or by\n`CheckpointDigest` and optionally request its contents.\n\nIf the checkpoint has been pruned and is not available, a 410 will be returned.", + "operationId": "Get Checkpoint", + "parameters": [ + { + "in": "path", + "name": "checkpoint", + "required": true, + "schema": { + "$ref": "#/components/schemas/CheckpointId" + }, + "style": "simple" + }, + { + "in": "query", + "name": "contents", + "description": "Request `CheckpointContents` be included in the response", + "schema": { + "description": "Request `CheckpointContents` be included in the response", + "default": false, + "type": "boolean" + }, + "style": "form" + } + ], + "responses": { + "200": { + "description": "", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CheckpointResponse" + } + }, + "application/bcs": {} + } + }, + "404": { + "description": "" + }, + "410": { + "description": "" + }, + "500": { + "description": "" + } + } + } + }, "/accounts/{account}/objects": { "get": { "tags": [ @@ -162,9 +306,10 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/Object" + "$ref": "#/components/schemas/ObjectResponse" } }, + "application/x-protobuf": {}, "application/bcs": {} } }, @@ -209,9 +354,10 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/Object" + "$ref": "#/components/schemas/ObjectResponse" } }, + "application/x-protobuf": {}, "application/bcs": {} } }, @@ -282,90 +428,20 @@ } } }, - "/checkpoints": { - "get": { - "tags": [ - "Checkpoint" - ], - "description": "[![unstable](https://img.shields.io/badge/api-unstable-red?style=for-the-badge)](#) _Api subject to change; use at your own risk_\n\n", - "operationId": "ListCheckpoints", - "parameters": [ - { - "in": "query", - "name": "direction", - "schema": { - "$ref": "#/components/schemas/Direction" - }, - "style": "form" - }, - { - "in": "query", - "name": "limit", - "schema": { - "type": "integer", - "format": "uint32", - "minimum": 0.0 - }, - "style": "form" - }, - { - "in": "query", - "name": "start", - "description": "The checkpoint to start listing from.\n\nDefaults to the latest checkpoint if not provided.", - "schema": { - "description": "The checkpoint to start listing from.\n\nDefaults to the latest checkpoint if not provided.", - "type": "integer", - "format": "uint64", - "minimum": 0.0 - }, - "style": "form" - } - ], - "responses": { - "200": { - "description": "", - "headers": { - "x-sui-cursor": { - "style": "simple", - "schema": { - "type": "string" - } - } - }, - "content": { - "application/json": { - "schema": { - "type": "array", - "items": { - "$ref": "#/components/schemas/SignedCheckpointSummary" - } - } - }, - "application/bcs": {} - } - }, - "410": { - "description": "" - } - } - } - }, - "/checkpoints/{checkpoint}": { + "/checkpoints/{checkpoint}/full": { "get": { "tags": [ "Checkpoint" ], - "description": "[![unstable](https://img.shields.io/badge/api-unstable-red?style=for-the-badge)](#) _Api subject to change; use at your own risk_\n\n", - "operationId": "GetCheckpoint", + "description": "[![unstable](https://img.shields.io/badge/api-unstable-red?style=for-the-badge)](#) _Api subject to change; use at your own risk_\n\nFetch a Full Checkpoint\n\nRequest a checkpoint and all data associated with it including:\n- CheckpointSummary\n- Validator Signature\n- 
CheckpointContents\n- Transactions, Effects, Events, as well as all input and output objects\n\nIf the requested checkpoint is below the Node's `lowest_available_checkpoint_objects`, a 410\nwill be returned.", + "operationId": "Get Full Checkpoint", "parameters": [ { "in": "path", "name": "checkpoint", "required": true, "schema": { - "type": "integer", - "format": "uint64", - "minimum": 0.0 + "$ref": "#/components/schemas/CheckpointId" }, "style": "simple" } @@ -374,11 +450,6 @@ "200": { "description": "", "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/SignedCheckpointSummary" - } - }, "application/bcs": {} } }, @@ -387,6 +458,9 @@ }, "410": { "description": "" + }, + "500": { + "description": "" } } } @@ -418,6 +492,7 @@ "$ref": "#/components/schemas/TransactionResponse" } }, + "application/x-protobuf": {}, "application/bcs": {} } }, @@ -482,6 +557,7 @@ } } }, + "application/x-protobuf": {}, "application/bcs": {} } }, @@ -556,6 +632,7 @@ "$ref": "#/components/schemas/TransactionExecutionResponse" } }, + "application/x-protobuf": {}, "application/bcs": {} } } @@ -591,6 +668,7 @@ "$ref": "#/components/schemas/ValidatorCommittee" } }, + "application/x-protobuf": {}, "application/bcs": {} } }, @@ -616,6 +694,7 @@ "$ref": "#/components/schemas/ValidatorCommittee" } }, + "application/x-protobuf": {}, "application/bcs": {} } } @@ -806,6 +885,7 @@ "$ref": "#/components/schemas/TransactionSimulationResponse" } }, + "application/x-protobuf": {}, "application/bcs": {} } } @@ -883,6 +963,7 @@ "$ref": "#/components/schemas/ResolveTransactionResponse" } }, + "application/x-protobuf": {}, "application/bcs": {} } } @@ -1224,12 +1305,65 @@ } ] }, + "CheckpointContents": { + "type": "array", + "items": { + "$ref": "#/components/schemas/CheckpointTransactionInfo" + } + }, "CheckpointContentsDigest": { "$ref": "#/components/schemas/Digest" }, "CheckpointDigest": { "$ref": "#/components/schemas/Digest" }, + "CheckpointId": { + "anyOf": [ + { + "title": "SequenceNumber", + "description": "Sequence number or height of a Checkpoint", + "examples": [ + 0 + ], + "type": "string", + "format": "u64" + }, + { + "title": "Digest", + "description": "Base58 encoded 32-byte digest of a Checkpoint", + "examples": [ + "4btiuiMPvEENsttpZC7CZ53DruC3MAgfznDbASZ7DR6S" + ], + "allOf": [ + { + "$ref": "#/components/schemas/CheckpointDigest" + } + ] + } + ] + }, + "CheckpointResponse": { + "type": "object", + "required": [ + "digest", + "signature", + "summary" + ], + "properties": { + "contents": { + "$ref": "#/components/schemas/CheckpointContents" + }, + "digest": { + "$ref": "#/components/schemas/CheckpointDigest" + }, + "signature": { + "$ref": "#/components/schemas/ValidatorAggregatedSignature" + }, + "summary": { + "$ref": "#/components/schemas/CheckpointSummary" + } + } + }, "CheckpointSummary": { "type": "object", "required": [ @@ -1297,6 +1431,28 @@ } } }, + "CheckpointTransactionInfo": { + "type": "object", + "required": [ + "effects", + "signatures", + "transaction" + ], + "properties": { + "effects": { + "$ref": "#/components/schemas/TransactionEffectsDigest" + }, + "signatures": { + "type": "array", + "items": { + "$ref": "#/components/schemas/UserSignature" + } + }, + "transaction": { + "$ref": "#/components/schemas/TransactionDigest" + } + } + }, "CircomG1": { "description": "A G1 point in BN254 serialized as a vector of three strings which is the canonical decimal representation of the projective coordinates in Fq.", "type": "array", @@ -3728,6 +3884,21 @@ } } }, + 
"ObjectResponse": { + "type": "object", + "required": [ + "digest", + "object" + ], + "properties": { + "digest": { + "$ref": "#/components/schemas/ObjectDigest" + }, + "object": { + "$ref": "#/components/schemas/Object" + } + } + }, "Owner": { "oneOf": [ { @@ -3996,21 +4167,6 @@ "type": "string", "format": "base64" }, - "SignedCheckpointSummary": { - "type": "object", - "required": [ - "checkpoint", - "signature" - ], - "properties": { - "checkpoint": { - "$ref": "#/components/schemas/CheckpointSummary" - }, - "signature": { - "$ref": "#/components/schemas/ValidatorAggregatedSignature" - } - } - }, "SimpleSignature": { "oneOf": [ { @@ -4659,6 +4815,9 @@ } ] }, + "TransactionEffectsDigest": { + "$ref": "#/components/schemas/Digest" + }, "TransactionEvents": { "type": "array", "items": { diff --git a/crates/sui-rest-api/proto/rest.proto b/crates/sui-rest-api/proto/rest.proto new file mode 100644 index 0000000000000..099efa57e81bc --- /dev/null +++ b/crates/sui-rest-api/proto/rest.proto @@ -0,0 +1,156 @@ +syntax = "proto3"; + +package sui.rest; + +// Sui `TransactionData` type serialized as Bcs +message Transaction { + bytes transaction = 1; +} + +// Sui `TransactionEffects` type serialized as Bcs +message TransactionEffects { + bytes effects = 1; +} + +// Sui `TransactionEvents` type serialized as Bcs +message TransactionEvents { + bytes events = 1; +} + +// Sui `Object` type serialized as Bcs +message Object { + bytes object = 1; +} + +// Sui `CheckpointSummary` type serialized as Bcs +message CheckpointSummary { + bytes summary = 1; +} + +// Sui `CheckpointContents` type serialized as Bcs +message CheckpointContents { + bytes contents = 1; +} + +// Sui `UserSignature` type serialized as bytes +message UserSignature { + bytes signature = 1; +} + +// Sui `ValidatorAggregatedSignature` type serialized as Bcs +message ValidatorAggregatedSignature { + bytes signature = 1; +} + +message GetTransactionResponse { + // The digest of this transaction + bytes digest = 1; + optional Transaction transaction = 2; + repeated UserSignature signatures = 3; + optional TransactionEffects effects = 4; + optional TransactionEvents events = 5; + optional uint64 checkpoint = 6; + optional uint64 timestamp_ms = 7; +} + +message GetObjectResponse { + // The digest of this object + bytes digest = 1; + optional Object object = 2; +} + +message GetCheckpointResponse { + // The digest of this CheckpointSummary + bytes digest = 1; + optional CheckpointSummary summary = 2; + optional ValidatorAggregatedSignature signature = 3; + optional CheckpointContents contents = 4; +} + +message FullCheckpoint { + optional CheckpointSummary summary = 1; + optional ValidatorAggregatedSignature signature = 2; + optional CheckpointContents contents = 3; + repeated CheckpointTransaction transactions = 4; +} + +message CheckpointTransaction { + optional Transaction transaction = 1; + repeated UserSignature signatures = 2; + optional TransactionEffects effects = 3; + optional TransactionEvents events = 4; + repeated Object input_objects = 5; + repeated Object output_objects = 6; +} + +message ListCheckpointResponse { + repeated GetCheckpointResponse checkpoints = 1; +} + +message ListTransactionsResponse { + repeated GetTransactionResponse transactions = 1; +} + +message Address { + bytes address = 1; +} + +message TypeTag { + string type_tag = 1; +} + +message I128 { + bytes little_endian_bytes = 1; +} + +message BalanceChange { + Address address = 1; + TypeTag coin_type = 2; + I128 amount = 3; +} + +message EffectsFinality { + 
optional ValidatorAggregatedSignature signature = 1; + optional uint64 checkpoint = 2; +} + +message TransactionExecutionResponse { + optional TransactionEffects effects = 1; + optional EffectsFinality finality = 2; + optional TransactionEvents events = 3; + repeated BalanceChange balance_changes = 4; + repeated Object input_objects = 5; + repeated Object output_objects = 6; +} + +message TransactionSimulationResponse { + optional TransactionEffects effects = 1; + optional TransactionEvents events = 2; + repeated BalanceChange balance_changes = 3; + repeated Object input_objects = 4; + repeated Object output_objects = 5; +} + +message ResolveTransactionResponse { + optional Transaction transaction = 1; + optional TransactionSimulationResponse simulation = 2; +} + +message ExecuteTransactionRequest { + optional Transaction transaction = 1; + repeated UserSignature signatures = 2; +} + +message SimulateTransactionRequest { + optional Transaction transaction = 1; +} + +message ValidatorCommittee { + uint64 epoch = 1; + repeated ValidatorCommitteeMember members = 2; +} + +message ValidatorCommitteeMember { + bytes public_key = 1; + uint64 stake = 2; +} diff --git a/crates/sui-rest-api/src/accept.rs b/crates/sui-rest-api/src/accept.rs index 193158f1520c0..fbb93606c95c5 100644 --- a/crates/sui-rest-api/src/accept.rs +++ b/crates/sui-rest-api/src/accept.rs @@ -8,6 +8,7 @@ use mime::Mime; // include type information // "application/x.sui.+bcs" pub const APPLICATION_BCS: &str = "application/bcs"; +pub const APPLICATION_PROTOBUF: &str = "application/x-protobuf"; /// `Accept` header, defined in [RFC7231](http://tools.ietf.org/html/rfc7231#section-5.3.2) #[derive(Debug, Clone)] @@ -51,6 +52,7 @@ where pub enum AcceptFormat { Json, Bcs, + // Protobuf, } #[axum::async_trait] @@ -80,6 +82,42 @@ where } } +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +pub enum AcceptJsonProtobufBcs { + Json, + Protobuf, + Bcs, +} + +#[axum::async_trait] +impl axum::extract::FromRequestParts for AcceptJsonProtobufBcs +where + S: Send + Sync, +{ + type Rejection = std::convert::Infallible; + + async fn from_request_parts( + parts: &mut http::request::Parts, + s: &S, + ) -> Result { + let accept = Accept::from_request_parts(parts, s).await?; + + for mime in accept.0 { + let essence = mime.essence_str(); + + if essence == mime::APPLICATION_JSON.essence_str() { + return Ok(Self::Json); + } else if essence == APPLICATION_PROTOBUF { + return Ok(Self::Protobuf); + } else if essence == APPLICATION_BCS { + return Ok(Self::Bcs); + } + } + + Ok(Self::Json) + } +} + #[cfg(test)] mod tests { use std::str::FromStr; diff --git a/crates/sui-rest-api/src/checkpoints.rs b/crates/sui-rest-api/src/checkpoints.rs index 425828d398c6a..b3f887fd817c2 100644 --- a/crates/sui-rest-api/src/checkpoints.rs +++ b/crates/sui-rest-api/src/checkpoints.rs @@ -4,96 +4,38 @@ use axum::extract::Query; use axum::extract::{Path, State}; use sui_sdk_types::types::{ - CheckpointData, CheckpointDigest, CheckpointSequenceNumber, SignedCheckpointSummary, + CheckpointContents, CheckpointDigest, CheckpointSequenceNumber, CheckpointSummary, + SignedCheckpointSummary, ValidatorAggregatedSignature, }; use sui_types::storage::ReadStore; use tap::Pipe; +use crate::accept::AcceptJsonProtobufBcs; use crate::openapi::{ApiEndpoint, OperationBuilder, ResponseBuilder, RouteHandler}; +use crate::proto; +use crate::proto::ListCheckpointResponse; use crate::reader::StateReader; -use crate::Page; -use crate::{accept::AcceptFormat, response::ResponseContent, Result}; +use 
crate::response::{JsonProtobufBcs, ProtobufBcs}; +use crate::PageCursor; use crate::{Direction, RestService}; - -pub struct GetCheckpointFull; - -impl ApiEndpoint for GetCheckpointFull { - fn method(&self) -> axum::http::Method { - axum::http::Method::GET - } - - fn path(&self) -> &'static str { - "/checkpoints/{checkpoint}/full" - } - - fn hidden(&self) -> bool { - true - } - - fn operation( - &self, - generator: &mut schemars::gen::SchemaGenerator, - ) -> openapiv3::v3_1::Operation { - OperationBuilder::new() - .tag("Checkpoint") - .operation_id("GetCheckpointFull") - .path_parameter::("checkpoint", generator) - .response( - 200, - ResponseBuilder::new() - .json_content::(generator) - .bcs_content() - .build(), - ) - .response(404, ResponseBuilder::new().build()) - .response(410, ResponseBuilder::new().build()) - .build() - } - - fn handler(&self) -> RouteHandler { - RouteHandler::new(self.method(), get_checkpoint_full) - } -} - -async fn get_checkpoint_full( - Path(checkpoint_id): Path, - accept: AcceptFormat, - State(state): State, -) -> Result> { - let verified_summary = match checkpoint_id { - CheckpointId::SequenceNumber(s) => { - // Since we need object contents we need to check for the lowest available checkpoint - // with objects that hasn't been pruned - let oldest_checkpoint = state.inner().get_lowest_available_checkpoint_objects()?; - if s < oldest_checkpoint { - return Err(crate::RestError::new( - axum::http::StatusCode::GONE, - "Old checkpoints have been pruned", - )); - } - - state.inner().get_checkpoint_by_sequence_number(s) - } - CheckpointId::Digest(d) => state.inner().get_checkpoint_by_digest(&d.into()), - }? - .ok_or(CheckpointNotFoundError(checkpoint_id))?; - - let checkpoint_contents = state - .inner() - .get_checkpoint_contents_by_digest(&verified_summary.content_digest)? - .ok_or(CheckpointNotFoundError(checkpoint_id))?; - - let checkpoint_data = state - .inner() - .get_checkpoint_data(verified_summary, checkpoint_contents)?; - - match accept { - AcceptFormat::Json => ResponseContent::Json(checkpoint_data), - AcceptFormat::Bcs => ResponseContent::Bcs(checkpoint_data), - } - .pipe(Ok) +use crate::{RestError, Result}; +use documented::Documented; + +#[derive(Clone, Debug, PartialEq, serde::Serialize, serde::Deserialize, schemars::JsonSchema)] +pub struct CheckpointResponse { + pub digest: CheckpointDigest, + pub summary: CheckpointSummary, + pub signature: ValidatorAggregatedSignature, + pub contents: Option, } +/// Fetch a Checkpoint +/// +/// Fetch a checkpoint either by `CheckpointSequenceNumber` (checkpoint height) or by +/// `CheckpointDigest` and optionally request its contents. +/// +/// If the checkpoint has been pruned and is not available, a 410 will be returned. 
+#[derive(Documented)] pub struct GetCheckpoint; impl ApiEndpoint for GetCheckpoint { @@ -105,23 +47,30 @@ impl ApiEndpoint for GetCheckpoint { "/checkpoints/{checkpoint}" } + fn stable(&self) -> bool { + true + } + fn operation( &self, generator: &mut schemars::gen::SchemaGenerator, ) -> openapiv3::v3_1::Operation { OperationBuilder::new() .tag("Checkpoint") - .operation_id("GetCheckpoint") - .path_parameter::("checkpoint", generator) + .operation_id("Get Checkpoint") + .description(Self::DOCS) + .path_parameter::("checkpoint", generator) + .query_parameters::(generator) .response( 200, ResponseBuilder::new() - .json_content::(generator) + .json_content::(generator) .bcs_content() .build(), ) .response(404, ResponseBuilder::new().build()) .response(410, ResponseBuilder::new().build()) + .response(500, ResponseBuilder::new().build()) .build() } @@ -132,10 +81,14 @@ impl ApiEndpoint for GetCheckpoint { async fn get_checkpoint( Path(checkpoint_id): Path, - accept: AcceptFormat, + Query(parameters): Query, + accept: AcceptJsonProtobufBcs, State(state): State, -) -> Result> { - let summary = match checkpoint_id { +) -> Result> { + let SignedCheckpointSummary { + checkpoint, + signature, + } = match checkpoint_id { CheckpointId::SequenceNumber(s) => { let oldest_checkpoint = state.inner().get_lowest_available_checkpoint()?; if s < oldest_checkpoint { @@ -153,19 +106,53 @@ async fn get_checkpoint( .into_inner() .try_into()?; + let contents = if parameters.contents { + Some( + state + .inner() + .get_checkpoint_contents_by_sequence_number(checkpoint.sequence_number)? + .ok_or(CheckpointNotFoundError(checkpoint_id))? + .try_into()?, + ) + } else { + None + }; + + let response = CheckpointResponse { + digest: checkpoint.digest(), + summary: checkpoint, + signature, + contents, + }; + match accept { - AcceptFormat::Json => ResponseContent::Json(summary), - AcceptFormat::Bcs => ResponseContent::Bcs(summary), + AcceptJsonProtobufBcs::Json => JsonProtobufBcs::Json(response), + AcceptJsonProtobufBcs::Protobuf => JsonProtobufBcs::Protobuf(response.try_into()?), + AcceptJsonProtobufBcs::Bcs => JsonProtobufBcs::Bcs(response), } .pipe(Ok) } -#[derive(Debug, Copy, Clone, Eq, PartialEq)] +#[derive(Debug, Copy, Clone, Eq, PartialEq, schemars::JsonSchema)] +#[schemars(untagged)] pub enum CheckpointId { - SequenceNumber(CheckpointSequenceNumber), + #[schemars( + title = "SequenceNumber", + example = "CheckpointSequenceNumber::default" + )] + /// Sequence number or height of a Checkpoint + SequenceNumber(#[schemars(with = "crate::_schemars::U64")] CheckpointSequenceNumber), + #[schemars(title = "Digest", example = "example_digest")] + /// Base58 encoded 32-byte digest of a Checkpoint Digest(CheckpointDigest), } +fn example_digest() -> CheckpointDigest { + "4btiuiMPvEENsttpZC7CZ53DruC3MAgfznDbASZ7DR6S" + .parse() + .unwrap() +} + impl<'de> serde::Deserialize<'de> for CheckpointId { fn deserialize(deserializer: D) -> std::result::Result where @@ -221,6 +208,22 @@ impl From for crate::RestError { } } +/// Query parameters for the GetCheckpoint endpoint +#[derive(Debug, serde::Serialize, serde::Deserialize, schemars::JsonSchema)] +pub struct GetCheckpointQueryParameters { + /// Request `CheckpointContents` be included in the response + #[serde(default)] + pub contents: bool, +} + +/// List Checkpoints +/// +/// Request a page of checkpoints, and optionally their contents, ordered by +/// `CheckpointSequenceNumber`. 
+/// +/// If the requested page is below the Node's `lowest_available_checkpoint`, a 410 will be +/// returned. +#[derive(Documented)] pub struct ListCheckpoints; impl ApiEndpoint for ListCheckpoints { @@ -232,23 +235,30 @@ impl ApiEndpoint for ListCheckpoints { "/checkpoints" } + fn stable(&self) -> bool { + true + } + fn operation( &self, generator: &mut schemars::gen::SchemaGenerator, ) -> openapiv3::v3_1::Operation { OperationBuilder::new() .tag("Checkpoint") - .operation_id("ListCheckpoints") + .operation_id("List Checkpoints") + .description(Self::DOCS) .query_parameters::(generator) .response( 200, ResponseBuilder::new() - .json_content::>(generator) + .json_content::>(generator) .bcs_content() + .protobuf_content() .header::(crate::types::X_SUI_CURSOR, generator) .build(), ) .response(410, ResponseBuilder::new().build()) + .response(500, ResponseBuilder::new().build()) .build() } @@ -259,9 +269,12 @@ impl ApiEndpoint for ListCheckpoints { async fn list_checkpoints( Query(parameters): Query, - accept: AcceptFormat, + accept: AcceptJsonProtobufBcs, State(state): State, -) -> Result> { +) -> Result<( + PageCursor, + JsonProtobufBcs, ListCheckpointResponse, Vec>, +)> { let latest_checkpoint = state.inner().get_latest_checkpoint()?.sequence_number; let oldest_checkpoint = state.inner().get_lowest_available_checkpoint()?; let limit = parameters.limit(); @@ -281,33 +294,76 @@ async fn list_checkpoints( .map(|result| { result .map_err(Into::into) - .and_then(|(checkpoint, _contents)| { - SignedCheckpointSummary::try_from(checkpoint).map_err(Into::into) + .and_then(|(checkpoint, contents)| { + let SignedCheckpointSummary { + checkpoint, + signature, + } = checkpoint.try_into()?; + let contents = if parameters.contents { + Some(contents.try_into()?) + } else { + None + }; + Ok(CheckpointResponse { + digest: checkpoint.digest(), + summary: checkpoint, + signature, + contents, + }) }) }) .collect::>>()?; let cursor = checkpoints.last().and_then(|checkpoint| match direction { - Direction::Ascending => checkpoint.checkpoint.sequence_number.checked_add(1), - Direction::Descending => checkpoint.checkpoint.sequence_number.checked_sub(1), + Direction::Ascending => checkpoint.summary.sequence_number.checked_add(1), + Direction::Descending => { + let cursor = checkpoint.summary.sequence_number.checked_sub(1); + // If we've exhausted our available checkpoint range then there are no more pages left + if cursor < Some(oldest_checkpoint) { + None + } else { + cursor + } + } }); match accept { - AcceptFormat::Json => ResponseContent::Json(checkpoints), - AcceptFormat::Bcs => ResponseContent::Bcs(checkpoints), + AcceptJsonProtobufBcs::Json => JsonProtobufBcs::Json(checkpoints), + AcceptJsonProtobufBcs::Protobuf => JsonProtobufBcs::Protobuf(checkpoints.try_into()?), + // In order to work around compatibility issues with existing clients, keep the BCS form as + // the old format without contents + AcceptJsonProtobufBcs::Bcs => { + let checkpoints = checkpoints + .into_iter() + .map(|c| SignedCheckpointSummary { + checkpoint: c.summary, + signature: c.signature, + }) + .collect(); + JsonProtobufBcs::Bcs(checkpoints) + } } - .pipe(|entries| Page { entries, cursor }) + .pipe(|entries| (PageCursor(cursor), entries)) .pipe(Ok) } -#[derive(Debug, serde::Serialize, serde::Deserialize, schemars::JsonSchema)] +#[derive(Debug, Default, serde::Serialize, serde::Deserialize, schemars::JsonSchema)] pub struct ListCheckpointsQueryParameters { + /// Page size limit for the response. 
+ /// + /// Defaults to `50` if not provided with a maximum page size of `100`. pub limit: Option, /// The checkpoint to start listing from. /// /// Defaults to the latest checkpoint if not provided. pub start: Option, + /// The direction to paginate in. + /// + /// Defaults to `descending` if not provided. pub direction: Option, + /// Request `CheckpointContents` be included in the response + #[serde(default)] + pub contents: bool, } impl ListCheckpointsQueryParameters { @@ -325,3 +381,110 @@ impl ListCheckpointsQueryParameters { self.direction.unwrap_or(Direction::Descending) } } + +/// Fetch a Full Checkpoint +/// +/// Request a checkpoint and all data associated with it including: +/// - CheckpointSummary +/// - Validator Signature +/// - CheckpointContents +/// - Transactions, Effects, Events, as well as all input and output objects +/// +/// If the requested checkpoint is below the Node's `lowest_available_checkpoint_objects`, a 410 +/// will be returned. +#[derive(Documented)] +pub struct GetFullCheckpoint; + +impl ApiEndpoint for GetFullCheckpoint { + fn method(&self) -> axum::http::Method { + axum::http::Method::GET + } + + fn path(&self) -> &'static str { + "/checkpoints/{checkpoint}/full" + } + + fn stable(&self) -> bool { + // TODO transactions are serialized with an intent message, do we want to change this + // format to remove it (and remove user signature duplication) prior to stabalizing the + // format? + false + } + + fn operation( + &self, + generator: &mut schemars::gen::SchemaGenerator, + ) -> openapiv3::v3_1::Operation { + OperationBuilder::new() + .tag("Checkpoint") + .operation_id("Get Full Checkpoint") + .description(Self::DOCS) + .path_parameter::("checkpoint", generator) + .response(200, ResponseBuilder::new().bcs_content().build()) + .response(404, ResponseBuilder::new().build()) + .response(410, ResponseBuilder::new().build()) + .response(500, ResponseBuilder::new().build()) + .build() + } + + fn handler(&self) -> RouteHandler { + RouteHandler::new(self.method(), get_full_checkpoint) + } +} + +async fn get_full_checkpoint( + Path(checkpoint_id): Path, + accept: AcceptJsonProtobufBcs, + State(state): State, +) -> Result> +{ + match accept { + AcceptJsonProtobufBcs::Protobuf => {} + AcceptJsonProtobufBcs::Bcs => {} + _ => { + return Err(RestError::new( + axum::http::StatusCode::BAD_REQUEST, + "invalid accept type; only 'application/x-protobuf' is supported", + )) + } + } + + let verified_summary = match checkpoint_id { + CheckpointId::SequenceNumber(s) => { + // Since we need object contents we need to check for the lowest available checkpoint + // with objects that hasn't been pruned + let oldest_checkpoint = state.inner().get_lowest_available_checkpoint_objects()?; + if s < oldest_checkpoint { + return Err(crate::RestError::new( + axum::http::StatusCode::GONE, + "Old checkpoints have been pruned", + )); + } + + state.inner().get_checkpoint_by_sequence_number(s) + } + CheckpointId::Digest(d) => state.inner().get_checkpoint_by_digest(&d.into()), + }? + .ok_or(CheckpointNotFoundError(checkpoint_id))?; + + let checkpoint_contents = state + .inner() + .get_checkpoint_contents_by_digest(&verified_summary.content_digest)? 
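GetFullCheckpoint deliberately has no JSON rendering: the handler accepts only application/x-protobuf or application/bcs and answers anything else with 400, and pruning is checked against lowest_available_checkpoint_objects rather than the summary watermark. A minimal sketch of pulling the BCS payload, again with a placeholder host and sequence number:

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Full checkpoints are only served as BCS or protobuf; JSON requests get a 400.
    let response = reqwest::Client::new()
        .get("http://localhost:9000/checkpoints/123/full")
        .header(reqwest::header::ACCEPT, "application/bcs")
        .send()
        .await?
        .error_for_status()?;

    // The body is the BCS encoding of the node's full checkpoint data; this sketch
    // only inspects its size rather than deserializing it.
    let bytes = response.bytes().await?;
    println!("full checkpoint payload: {} bytes", bytes.len());
    Ok(())
}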
+ .ok_or(CheckpointNotFoundError(checkpoint_id))?; + + let checkpoint_data = state + .inner() + .get_checkpoint_data(verified_summary, checkpoint_contents)?; + + match accept { + AcceptJsonProtobufBcs::Protobuf => ProtobufBcs::Protobuf(checkpoint_data.try_into()?), + AcceptJsonProtobufBcs::Bcs => ProtobufBcs::Bcs(checkpoint_data), + _ => { + return Err(RestError::new( + axum::http::StatusCode::BAD_REQUEST, + "invalid accept type; only 'application/x-protobuf' is supported", + )) + } + } + .pipe(Ok) +} diff --git a/crates/sui-rest-api/src/client/mod.rs b/crates/sui-rest-api/src/client/mod.rs index a5582a0928662..d9cbe0f5ca7f0 100644 --- a/crates/sui-rest-api/src/client/mod.rs +++ b/crates/sui-rest-api/src/client/mod.rs @@ -51,15 +51,15 @@ impl Client { .url() .join(&format!("checkpoints/{checkpoint_sequence_number}/full"))?; - let response = self - .inner - .client() - .get(url) - .header(reqwest::header::ACCEPT, crate::APPLICATION_BCS) - .send() - .await?; - - self.inner.bcs(response).await.map(Response::into_inner) + let request = self.inner.client().get(url); + + self.inner.bcs(request).await.map(Response::into_inner) + // let proto = self + // .inner + // .protobuf::(request) + // .await? + // .into_inner(); + // proto.try_into().map_err(Into::into) } pub async fn get_checkpoint_summary( @@ -70,7 +70,14 @@ impl Client { .get_checkpoint(checkpoint_sequence_number) .await .map(Response::into_inner) - .and_then(|checkpoint| checkpoint.try_into().map_err(Into::into)) + .and_then(|checkpoint| { + sui_sdk_types::types::SignedCheckpointSummary { + checkpoint: checkpoint.summary, + signature: checkpoint.signature, + } + .try_into() + .map_err(Into::into) + }) } pub async fn get_object(&self, object_id: ObjectID) -> Result { @@ -110,18 +117,15 @@ impl Client { signatures: &transaction.inner().tx_signatures, })?; - let response = self + let request = self .inner .client() .post(url) .query(parameters) - .header(reqwest::header::ACCEPT, crate::APPLICATION_BCS) .header(reqwest::header::CONTENT_TYPE, crate::APPLICATION_BCS) - .body(body) - .send() - .await?; + .body(body); - self.inner.bcs(response).await.map(Response::into_inner) + self.inner.bcs(request).await.map(Response::into_inner) } } diff --git a/crates/sui-rest-api/src/client/sdk.rs b/crates/sui-rest-api/src/client/sdk.rs index 36702a316cc17..76d6227f0215f 100644 --- a/crates/sui-rest-api/src/client/sdk.rs +++ b/crates/sui-rest-api/src/client/sdk.rs @@ -23,6 +23,7 @@ use tap::Pipe; use crate::accounts::AccountOwnedObjectInfo; use crate::accounts::ListAccountOwnedObjectsQueryParameters; +use crate::checkpoints::CheckpointResponse; use crate::checkpoints::ListCheckpointsQueryParameters; use crate::coins::CoinInfo; use crate::health::Threshold; @@ -93,14 +94,9 @@ impl Client { pub async fn node_info(&self) -> Result> { let url = self.url().join("")?; - let response = self - .inner - .get(url) - .header(reqwest::header::ACCEPT, crate::APPLICATION_JSON) - .send() - .await?; + let request = self.inner.get(url); - self.json(response).await + self.json(request).await } pub async fn health_check(&self, threshold_seconds: Option) -> Result> { @@ -115,14 +111,9 @@ impl Client { pub async fn get_coin_info(&self, coin_type: &StructTag) -> Result> { let url = self.url().join(&format!("coins/{coin_type}"))?; - let response = self - .inner - .get(url) - .header(reqwest::header::ACCEPT, crate::APPLICATION_JSON) - .send() - .await?; + let request = self.inner.get(url); - self.json(response).await + self.json(request).await } pub async fn 
list_account_objects( @@ -132,28 +123,17 @@ impl Client { ) -> Result>> { let url = self.url().join(&format!("account/{account}/objects"))?; - let response = self - .inner - .get(url) - .query(parameters) - .header(reqwest::header::ACCEPT, crate::APPLICATION_JSON) - .send() - .await?; + let request = self.inner.get(url).query(parameters); - self.json(response).await + self.json(request).await } pub async fn get_object(&self, object_id: ObjectId) -> Result> { let url = self.url().join(&format!("objects/{object_id}"))?; - let response = self - .inner - .get(url) - .header(reqwest::header::ACCEPT, crate::APPLICATION_BCS) - .send() - .await?; + let request = self.inner.get(url); - self.bcs(response).await + self.bcs(request).await } pub async fn get_object_with_version( @@ -165,14 +145,9 @@ impl Client { .url() .join(&format!("objects/{object_id}/version/{version}"))?; - let response = self - .inner - .get(url) - .header(reqwest::header::ACCEPT, crate::APPLICATION_BCS) - .send() - .await?; + let request = self.inner.get(url); - self.bcs(response).await + self.bcs(request).await } pub async fn list_dynamic_fields( @@ -182,28 +157,17 @@ impl Client { ) -> Result>> { let url = self.url().join(&format!("objects/{object_id}"))?; - let response = self - .inner - .get(url) - .query(parameters) - .header(reqwest::header::ACCEPT, crate::APPLICATION_JSON) - .send() - .await?; + let request = self.inner.get(url).query(parameters); - self.json(response).await + self.json(request).await } pub async fn get_gas_info(&self) -> Result> { let url = self.url().join("system/gas")?; - let response = self - .inner - .get(url) - .header(reqwest::header::ACCEPT, crate::APPLICATION_JSON) - .send() - .await?; + let request = self.inner.get(url); - self.json(response).await + self.json(request).await } pub async fn get_reference_gas_price(&self) -> Result { @@ -216,14 +180,9 @@ impl Client { pub async fn get_current_protocol_config(&self) -> Result> { let url = self.url().join("system/protocol")?; - let response = self - .inner - .get(url) - .header(reqwest::header::ACCEPT, crate::APPLICATION_JSON) - .send() - .await?; + let request = self.inner.get(url); - self.json(response).await + self.json(request).await } pub async fn get_protocol_config( @@ -232,71 +191,46 @@ impl Client { ) -> Result> { let url = self.url().join(&format!("system/protocol/{version}"))?; - let response = self - .inner - .get(url) - .header(reqwest::header::ACCEPT, crate::APPLICATION_JSON) - .send() - .await?; + let request = self.inner.get(url); - self.json(response).await + self.json(request).await } pub async fn get_system_state_summary(&self) -> Result> { let url = self.url().join("system")?; - let response = self - .inner - .get(url) - .header(reqwest::header::ACCEPT, crate::APPLICATION_JSON) - .send() - .await?; + let request = self.inner.get(url); - self.json(response).await + self.json(request).await } pub async fn get_current_committee(&self) -> Result> { let url = self.url().join("system/committee")?; - let response = self - .inner - .get(url) - .header(reqwest::header::ACCEPT, crate::APPLICATION_BCS) - .send() - .await?; + let request = self.inner.get(url); - self.bcs(response).await + self.bcs(request).await } pub async fn get_committee(&self, epoch: EpochId) -> Result> { let url = self.url().join(&format!("system/committee/{epoch}"))?; - let response = self - .inner - .get(url) - .header(reqwest::header::ACCEPT, crate::APPLICATION_BCS) - .send() - .await?; + let request = self.inner.get(url); - self.bcs(response).await + 
self.bcs(request).await } pub async fn get_checkpoint( &self, checkpoint_sequence_number: CheckpointSequenceNumber, - ) -> Result> { + ) -> Result> { let url = self .url() .join(&format!("checkpoints/{checkpoint_sequence_number}"))?; - let response = self - .inner - .get(url) - .header(reqwest::header::ACCEPT, crate::APPLICATION_BCS) - .send() - .await?; + let request = self.inner.get(url); - self.bcs(response).await + self.bcs(request).await } pub async fn get_latest_checkpoint(&self) -> Result> { @@ -304,6 +238,7 @@ impl Client { limit: Some(1), start: None, direction: None, + contents: false, }; let (mut page, parts) = self.list_checkpoints(¶meters).await?.into_parts(); @@ -321,15 +256,17 @@ impl Client { ) -> Result>> { let url = self.url().join("checkpoints")?; - let response = self - .inner - .get(url) - .query(parameters) - .header(reqwest::header::ACCEPT, crate::APPLICATION_BCS) - .send() - .await?; + let request = self.inner.get(url).query(parameters); - self.bcs(response).await + self.bcs(request).await + // self.protobuf::(request) + // .await? + // .try_map(|page| { + // page.checkpoints + // .into_iter() + // .map(TryInto::try_into) + // .collect() + // }) } pub async fn get_full_checkpoint( @@ -340,14 +277,16 @@ impl Client { .url() .join(&format!("checkpoints/{checkpoint_sequence_number}/full"))?; - let response = self - .inner - .get(url) - .header(reqwest::header::ACCEPT, crate::APPLICATION_BCS) - .send() - .await?; + let request = self.inner.get(url); - self.bcs(response).await + self.bcs(request).await + // self.protobuf::(request) + // .await? + // // TODO make this more efficient and convert directly into the sui-sdk-types version + // .try_map(|proto| { + // sui_types::full_checkpoint_content::CheckpointData::try_from(proto) + // .and_then(TryInto::try_into) + // }) } pub async fn get_transaction( @@ -356,14 +295,9 @@ impl Client { ) -> Result> { let url = self.url().join(&format!("transactions/{transaction}"))?; - let response = self - .inner - .get(url) - .header(reqwest::header::ACCEPT, crate::APPLICATION_BCS) - .send() - .await?; + let request = self.inner.get(url); - self.bcs(response).await + self.bcs(request).await } pub async fn list_transactions( @@ -372,15 +306,9 @@ impl Client { ) -> Result>> { let url = self.url().join("transactions")?; - let response = self - .inner - .get(url) - .query(parameters) - .header(reqwest::header::ACCEPT, crate::APPLICATION_BCS) - .send() - .await?; + let request = self.inner.get(url).query(parameters); - self.bcs(response).await + self.bcs(request).await } pub async fn execute_transaction( @@ -392,17 +320,14 @@ impl Client { let body = bcs::to_bytes(transaction)?; - let response = self + let request = self .inner .post(url) .query(parameters) - .header(reqwest::header::ACCEPT, crate::APPLICATION_BCS) .header(reqwest::header::CONTENT_TYPE, crate::APPLICATION_BCS) - .body(body) - .send() - .await?; + .body(body); - self.bcs(response).await + self.bcs(request).await } pub async fn simulate_transaction( @@ -413,16 +338,13 @@ impl Client { let body = bcs::to_bytes(transaction)?; - let response = self + let request = self .inner .post(url) - .header(reqwest::header::ACCEPT, crate::APPLICATION_BCS) .header(reqwest::header::CONTENT_TYPE, crate::APPLICATION_BCS) - .body(body) - .send() - .await?; + .body(body); - self.bcs(response).await + self.bcs(request).await } pub async fn resolve_transaction( @@ -431,15 +353,9 @@ impl Client { ) -> Result> { let url = self.url.join("transactions/resolve")?; - let response = self - .inner - 
.post(url) - .header(reqwest::header::ACCEPT, crate::APPLICATION_BCS) - .json(unresolved_transaction) - .send() - .await?; + let request = self.inner.post(url).json(unresolved_transaction); - self.bcs(response).await + self.bcs(request).await } pub async fn resolve_transaction_with_parameters( @@ -449,16 +365,13 @@ impl Client { ) -> Result> { let url = self.url.join("transactions/resolve")?; - let response = self + let request = self .inner .post(url) .query(¶meters) - .header(reqwest::header::ACCEPT, crate::APPLICATION_BCS) - .json(unresolved_transaction) - .send() - .await?; + .json(unresolved_transaction); - self.bcs(response).await + self.bcs(request).await } async fn check_response( @@ -487,8 +400,13 @@ impl Client { async fn json( &self, - response: reqwest::Response, + request: reqwest::RequestBuilder, ) -> Result> { + let response = request + .header(reqwest::header::ACCEPT, crate::APPLICATION_JSON) + .send() + .await?; + let (response, parts) = self.check_response(response).await?; let json = response.json().await?; @@ -497,8 +415,13 @@ impl Client { pub(super) async fn bcs( &self, - response: reqwest::Response, + request: reqwest::RequestBuilder, ) -> Result> { + let response = request + .header(reqwest::header::ACCEPT, crate::APPLICATION_BCS) + .send() + .await?; + let (response, parts) = self.check_response(response).await?; let bytes = response.bytes().await?; @@ -507,6 +430,25 @@ impl Client { Err(e) => Err(Error::from_error(e).with_parts(parts)), } } + + #[allow(unused)] + pub(super) async fn protobuf( + &self, + request: reqwest::RequestBuilder, + ) -> Result> { + let response = request + .header(reqwest::header::ACCEPT, crate::APPLICATION_PROTOBUF) + .send() + .await?; + + let (response, parts) = self.check_response(response).await?; + + let bytes = response.bytes().await?; + match T::decode(bytes) { + Ok(v) => Ok(Response::new(v, parts)), + Err(e) => Err(Error::from_error(e).with_parts(parts)), + } + } } #[derive(Debug)] @@ -617,8 +559,20 @@ impl Response { where F: FnOnce(T) -> U, { - let (inner, state) = self.into_parts(); - Response::new(f(inner), state) + let (inner, parts) = self.into_parts(); + Response::new(f(inner), parts) + } + + pub fn try_map(self, f: F) -> Result> + where + F: FnOnce(T) -> Result, + E: Into, + { + let (inner, parts) = self.into_parts(); + match f(inner) { + Ok(out) => Ok(Response::new(out, parts)), + Err(e) => Err(Error::from_error(e).with_parts(parts)), + } } } diff --git a/crates/sui-rest-api/src/committee.rs b/crates/sui-rest-api/src/committee.rs index 8ec03dbb81688..25e653c9c1f7b 100644 --- a/crates/sui-rest-api/src/committee.rs +++ b/crates/sui-rest-api/src/committee.rs @@ -2,10 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - accept::AcceptFormat, + accept::AcceptJsonProtobufBcs, openapi::{ApiEndpoint, OperationBuilder, ResponseBuilder, RouteHandler}, + proto, reader::StateReader, - response::ResponseContent, + response::JsonProtobufBcs, RestService, Result, }; use axum::extract::{Path, State}; @@ -35,6 +36,7 @@ impl ApiEndpoint for GetLatestCommittee { 200, ResponseBuilder::new() .json_content::(generator) + .protobuf_content() .bcs_content() .build(), ) @@ -47,17 +49,18 @@ impl ApiEndpoint for GetLatestCommittee { } async fn get_latest_committee( - accept: AcceptFormat, + accept: AcceptJsonProtobufBcs, State(state): State, -) -> Result> { +) -> Result> { let current_epoch = state.inner().get_latest_checkpoint()?.epoch(); let committee = state .get_committee(current_epoch)? 
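The client refactor moves the Accept header out of every call site and into a small set of helpers that take a reqwest::RequestBuilder, attach the right header, send, and decode. A standalone sketch of that shape (not the crate's actual helper, which also threads response metadata through check_response):

use serde::de::DeserializeOwned;

// Attach the BCS Accept header, send the prepared request, and decode the body.
async fn send_bcs<T: DeserializeOwned>(
    request: reqwest::RequestBuilder,
) -> Result<T, Box<dyn std::error::Error>> {
    let response = request
        .header(reqwest::header::ACCEPT, "application/bcs")
        .send()
        .await?
        .error_for_status()?;
    let bytes = response.bytes().await?;
    Ok(bcs::from_bytes(&bytes)?)
}

// Usage sketch (the type name and URL are placeholders):
//   let committee: ValidatorCommittee = send_bcs(client.get(url)).await?;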
.ok_or_else(|| CommitteeNotFoundError::new(current_epoch))?; match accept { - AcceptFormat::Json => ResponseContent::Json(committee), - AcceptFormat::Bcs => ResponseContent::Bcs(committee), + AcceptJsonProtobufBcs::Json => JsonProtobufBcs::Json(committee), + AcceptJsonProtobufBcs::Protobuf => JsonProtobufBcs::Protobuf(committee.into()), + AcceptJsonProtobufBcs::Bcs => JsonProtobufBcs::Bcs(committee), } .pipe(Ok) } @@ -85,6 +88,7 @@ impl ApiEndpoint for GetCommittee { 200, ResponseBuilder::new() .json_content::(generator) + .protobuf_content() .bcs_content() .build(), ) @@ -99,16 +103,17 @@ impl ApiEndpoint for GetCommittee { async fn get_committee( Path(epoch): Path, - accept: AcceptFormat, + accept: AcceptJsonProtobufBcs, State(state): State, -) -> Result> { +) -> Result> { let committee = state .get_committee(epoch)? .ok_or_else(|| CommitteeNotFoundError::new(epoch))?; match accept { - AcceptFormat::Json => ResponseContent::Json(committee), - AcceptFormat::Bcs => ResponseContent::Bcs(committee), + AcceptJsonProtobufBcs::Json => JsonProtobufBcs::Json(committee), + AcceptJsonProtobufBcs::Protobuf => JsonProtobufBcs::Protobuf(committee.into()), + AcceptJsonProtobufBcs::Bcs => JsonProtobufBcs::Bcs(committee), } .pipe(Ok) } diff --git a/crates/sui-rest-api/src/lib.rs b/crates/sui-rest-api/src/lib.rs index b0bd1696a4f3b..c2151b5f554a2 100644 --- a/crates/sui-rest-api/src/lib.rs +++ b/crates/sui-rest-api/src/lib.rs @@ -1,7 +1,11 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use axum::{response::Redirect, routing::get, Router}; +use axum::{ + response::{Redirect, ResponseParts}, + routing::get, + Router, +}; use mysten_network::callback::CallbackLayer; use openapi::ApiEndpoint; use reader::StateReader; @@ -23,21 +27,26 @@ mod info; mod metrics; mod objects; pub mod openapi; +pub mod proto; mod reader; mod response; mod system; pub mod transactions; pub mod types; +pub use checkpoints::CheckpointResponse; +pub use checkpoints::ListCheckpointsQueryParameters; pub use client::Client; pub use error::{RestError, Result}; pub use metrics::RestMetrics; +pub use objects::ObjectResponse; pub use sui_types::full_checkpoint_content::{CheckpointData, CheckpointTransaction}; pub use transactions::ExecuteTransactionQueryParameters; pub const TEXT_PLAIN_UTF_8: &str = "text/plain; charset=utf-8"; pub const APPLICATION_BCS: &str = "application/bcs"; pub const APPLICATION_JSON: &str = "application/json"; +pub const APPLICATION_PROTOBUF: &str = "application/x-protobuf"; #[derive(Debug, Copy, Clone, serde::Serialize, serde::Deserialize, schemars::JsonSchema)] #[serde(rename_all = "lowercase")] @@ -46,12 +55,40 @@ pub enum Direction { Descending, } +impl Direction { + pub fn is_descending(self) -> bool { + matches!(self, Self::Descending) + } +} + #[derive(Debug)] pub struct Page { pub entries: response::ResponseContent>, pub cursor: Option, } +pub struct PageCursor(pub Option); + +impl axum::response::IntoResponseParts for PageCursor { + type Error = (axum::http::StatusCode, String); + + fn into_response_parts( + self, + res: ResponseParts, + ) -> std::result::Result { + self.0 + .map(|cursor| [(crate::types::X_SUI_CURSOR, cursor.to_string())]) + .into_response_parts(res) + .map_err(|e| (axum::http::StatusCode::INTERNAL_SERVER_ERROR, e.to_string())) + } +} + +impl axum::response::IntoResponse for PageCursor { + fn into_response(self) -> axum::response::Response { + (self, ()).into_response() + } +} + pub const DEFAULT_PAGE_SIZE: usize = 50; pub const MAX_PAGE_SIZE: usize = 100; @@ 
-69,14 +106,14 @@ const ENDPOINTS: &[&dyn ApiEndpoint] = &[ // stable APIs &info::GetNodeInfo, &health::HealthCheck, + &checkpoints::ListCheckpoints, + &checkpoints::GetCheckpoint, // unstable APIs &accounts::ListAccountObjects, &objects::GetObject, &objects::GetObjectWithVersion, &objects::ListDynamicFields, - &checkpoints::ListCheckpoints, - &checkpoints::GetCheckpoint, - &checkpoints::GetCheckpointFull, + &checkpoints::GetFullCheckpoint, &transactions::GetTransaction, &transactions::ListTransactions, &committee::GetCommittee, diff --git a/crates/sui-rest-api/src/objects.rs b/crates/sui-rest-api/src/objects.rs index 714f652f14a78..5bd04e0f8ad1a 100644 --- a/crates/sui-rest-api/src/objects.rs +++ b/crates/sui-rest-api/src/objects.rs @@ -2,16 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - accept::AcceptFormat, + accept::{AcceptFormat, AcceptJsonProtobufBcs}, openapi::{ApiEndpoint, OperationBuilder, ResponseBuilder, RouteHandler}, + proto::GetObjectResponse, reader::StateReader, - response::ResponseContent, + response::{JsonProtobufBcs, ResponseContent}, Page, RestError, RestService, Result, }; use axum::extract::Query; use axum::extract::{Path, State}; use serde::{Deserialize, Serialize}; -use sui_sdk_types::types::{Object, ObjectId, TypeTag, Version}; +use sui_sdk_types::types::{Object, ObjectDigest, ObjectId, TypeTag, Version}; use sui_types::sui_sdk_types_conversions::type_tag_core_to_sdk; use sui_types::{ storage::{DynamicFieldIndexInfo, DynamicFieldKey}, @@ -19,6 +20,12 @@ use sui_types::{ }; use tap::Pipe; +#[derive(Clone, Debug, PartialEq, serde::Serialize, serde::Deserialize, schemars::JsonSchema)] +pub struct ObjectResponse { + pub digest: ObjectDigest, + pub object: Object, +} + pub struct GetObject; impl ApiEndpoint for GetObject { @@ -41,7 +48,8 @@ impl ApiEndpoint for GetObject { .response( 200, ResponseBuilder::new() - .json_content::(generator) + .json_content::(generator) + .protobuf_content() .bcs_content() .build(), ) @@ -56,16 +64,24 @@ impl ApiEndpoint for GetObject { pub async fn get_object( Path(object_id): Path, - accept: AcceptFormat, + accept: AcceptJsonProtobufBcs, State(state): State, -) -> Result> { +) -> Result> { let object = state .get_object(object_id)? .ok_or_else(|| ObjectNotFoundError::new(object_id))?; + let object = ObjectResponse { + digest: object.digest(), + object, + }; + match accept { - AcceptFormat::Json => ResponseContent::Json(object), - AcceptFormat::Bcs => ResponseContent::Bcs(object), + AcceptJsonProtobufBcs::Json => JsonProtobufBcs::Json(object), + AcceptJsonProtobufBcs::Protobuf => { + JsonProtobufBcs::Protobuf(GetObjectResponse::try_from(object)?) + } + AcceptJsonProtobufBcs::Bcs => JsonProtobufBcs::Bcs(object.object), } .pipe(Ok) } @@ -93,7 +109,8 @@ impl ApiEndpoint for GetObjectWithVersion { .response( 200, ResponseBuilder::new() - .json_content::(generator) + .json_content::(generator) + .protobuf_content() .bcs_content() .build(), ) @@ -108,16 +125,24 @@ impl ApiEndpoint for GetObjectWithVersion { pub async fn get_object_with_version( Path((object_id, version)): Path<(ObjectId, Version)>, - accept: AcceptFormat, + accept: AcceptJsonProtobufBcs, State(state): State, -) -> Result> { +) -> Result> { let object = state .get_object_with_version(object_id, version)? 
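PageCursor works by implementing axum's IntoResponseParts, so a handler can return (PageCursor, body) and the cursor rides along as a header without touching the payload. An analogous, self-contained handler sketch using a plain header tuple and an assumed x-sui-cursor header name; it can be mounted on an axum Router like any other handler:

use axum::{response::IntoResponse, Json};

// Handler-shaped sketch: axum accepts a (response parts, body) tuple, so an
// optional header pair travels alongside the JSON body, which mirrors what
// PageCursor does. The cursor value here is a placeholder.
async fn list_things() -> impl IntoResponse {
    let cursor: Option<u64> = Some(42);
    let cursor_header = cursor.map(|c| [("x-sui-cursor", c.to_string())]);
    (cursor_header, Json(vec![1u64, 2, 3]))
}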
.ok_or_else(|| ObjectNotFoundError::new_with_version(object_id, version))?; + let object = ObjectResponse { + digest: object.digest(), + object, + }; + match accept { - AcceptFormat::Json => ResponseContent::Json(object), - AcceptFormat::Bcs => ResponseContent::Bcs(object), + AcceptJsonProtobufBcs::Json => JsonProtobufBcs::Json(object), + AcceptJsonProtobufBcs::Protobuf => { + JsonProtobufBcs::Protobuf(GetObjectResponse::try_from(object)?) + } + AcceptJsonProtobufBcs::Bcs => JsonProtobufBcs::Bcs(object.object), } .pipe(Ok) } diff --git a/crates/sui-rest-api/src/openapi.rs b/crates/sui-rest-api/src/openapi.rs index 424be1d2f8048..cebcaf69a9e06 100644 --- a/crates/sui-rest-api/src/openapi.rs +++ b/crates/sui-rest-api/src/openapi.rs @@ -652,6 +652,10 @@ impl ResponseBuilder { self.content(crate::APPLICATION_BCS, MediaType::default()) } + pub fn protobuf_content(&mut self) -> &mut Self { + self.content(crate::APPLICATION_PROTOBUF, MediaType::default()) + } + pub fn text_content(&mut self) -> &mut Self { self.content(mime::TEXT_PLAIN_UTF_8.as_ref(), MediaType::default()) } diff --git a/crates/sui-rest-api/src/proto/generated/sui.rest.rs b/crates/sui-rest-api/src/proto/generated/sui.rest.rs new file mode 100644 index 0000000000000..6a0f29adb7181 --- /dev/null +++ b/crates/sui-rest-api/src/proto/generated/sui.rest.rs @@ -0,0 +1,215 @@ +// This file is @generated by prost-build. +/// Sui `TransactionData` type serialized as Bcs +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Transaction { + #[prost(bytes = "bytes", tag = "1")] + pub transaction: ::prost::bytes::Bytes, +} +/// Sui `TransactionEffects` type serialized as Bcs +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TransactionEffects { + #[prost(bytes = "bytes", tag = "1")] + pub effects: ::prost::bytes::Bytes, +} +/// Sui `TransactionEvents` type serialized as Bcs +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TransactionEvents { + #[prost(bytes = "bytes", tag = "1")] + pub events: ::prost::bytes::Bytes, +} +/// Sui `Object` type serialized as Bcs +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Object { + #[prost(bytes = "bytes", tag = "1")] + pub object: ::prost::bytes::Bytes, +} +/// Sui `CheckpointSummary` type serialized as Bcs +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CheckpointSummary { + #[prost(bytes = "bytes", tag = "1")] + pub summary: ::prost::bytes::Bytes, +} +/// Sui `CheckpointContents` type serialized as Bcs +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CheckpointContents { + #[prost(bytes = "bytes", tag = "1")] + pub contents: ::prost::bytes::Bytes, +} +/// Sui `UserSignature` type serialized as bytes +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct UserSignature { + #[prost(bytes = "bytes", tag = "1")] + pub signature: ::prost::bytes::Bytes, +} +/// Sui `ValidatorAggregatedSignature` type serialized as Bcs +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ValidatorAggregatedSignature { + #[prost(bytes = "bytes", tag = "1")] + pub signature: ::prost::bytes::Bytes, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetTransactionResponse { + /// The digest of this transaction + #[prost(bytes = "bytes", tag = "1")] + pub digest: ::prost::bytes::Bytes, + #[prost(message, optional, tag = "2")] + pub transaction: ::core::option::Option, + #[prost(message, repeated, tag = "3")] + pub signatures: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "4")] + pub effects: ::core::option::Option, + #[prost(message, optional, 
tag = "5")] + pub events: ::core::option::Option, + #[prost(uint64, optional, tag = "6")] + pub checkpoint: ::core::option::Option, + #[prost(uint64, optional, tag = "7")] + pub timestamp_ms: ::core::option::Option, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetObjectResponse { + /// The digest of this object + #[prost(bytes = "bytes", tag = "1")] + pub digest: ::prost::bytes::Bytes, + #[prost(message, optional, tag = "2")] + pub object: ::core::option::Option, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetCheckpointResponse { + /// The digest of this CheckpointSummary + #[prost(bytes = "bytes", tag = "1")] + pub digest: ::prost::bytes::Bytes, + #[prost(message, optional, tag = "2")] + pub summary: ::core::option::Option, + #[prost(message, optional, tag = "3")] + pub signature: ::core::option::Option, + #[prost(message, optional, tag = "4")] + pub contents: ::core::option::Option, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct FullCheckpoint { + #[prost(message, optional, tag = "1")] + pub summary: ::core::option::Option, + #[prost(message, optional, tag = "2")] + pub signature: ::core::option::Option, + #[prost(message, optional, tag = "3")] + pub contents: ::core::option::Option, + #[prost(message, repeated, tag = "4")] + pub transactions: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CheckpointTransaction { + #[prost(message, optional, tag = "1")] + pub transaction: ::core::option::Option, + #[prost(message, repeated, tag = "2")] + pub signatures: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "3")] + pub effects: ::core::option::Option, + #[prost(message, optional, tag = "4")] + pub events: ::core::option::Option, + #[prost(message, repeated, tag = "5")] + pub input_objects: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "6")] + pub output_objects: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListCheckpointResponse { + #[prost(message, repeated, tag = "1")] + pub checkpoints: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListTransactionsResponse { + #[prost(message, repeated, tag = "1")] + pub transactions: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Address { + #[prost(bytes = "bytes", tag = "1")] + pub address: ::prost::bytes::Bytes, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TypeTag { + #[prost(string, tag = "1")] + pub type_tag: ::prost::alloc::string::String, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct I128 { + #[prost(bytes = "bytes", tag = "1")] + pub little_endian_bytes: ::prost::bytes::Bytes, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BalanceChange { + #[prost(message, optional, tag = "1")] + pub address: ::core::option::Option
, + #[prost(message, optional, tag = "2")] + pub coin_type: ::core::option::Option, + #[prost(message, optional, tag = "3")] + pub amount: ::core::option::Option, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct EffectsFinality { + #[prost(message, optional, tag = "1")] + pub signature: ::core::option::Option, + #[prost(uint64, optional, tag = "2")] + pub checkpoint: ::core::option::Option, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TransactionExecutionResponse { + #[prost(message, optional, tag = "1")] + pub effects: ::core::option::Option, + #[prost(message, optional, tag = "2")] + pub finality: ::core::option::Option, + #[prost(message, optional, tag = "3")] + pub events: ::core::option::Option, + #[prost(message, repeated, tag = "4")] + pub balance_changes: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "5")] + pub input_objects: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "6")] + pub output_objects: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TransactionSimulationResponse { + #[prost(message, optional, tag = "1")] + pub effects: ::core::option::Option, + #[prost(message, optional, tag = "2")] + pub events: ::core::option::Option, + #[prost(message, repeated, tag = "3")] + pub balance_changes: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "4")] + pub input_objects: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "5")] + pub output_objects: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ResolveTransactionResponse { + #[prost(message, optional, tag = "1")] + pub transaction: ::core::option::Option, + #[prost(message, optional, tag = "2")] + pub simulation: ::core::option::Option, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ExecuteTransactionRequest { + #[prost(message, optional, tag = "1")] + pub transaction: ::core::option::Option, + #[prost(message, repeated, tag = "2")] + pub signatures: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SimulateTransactionRequest { + #[prost(message, optional, tag = "1")] + pub transaction: ::core::option::Option, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ValidatorCommittee { + #[prost(uint64, tag = "1")] + pub epoch: u64, + #[prost(message, repeated, tag = "2")] + pub members: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ValidatorCommitteeMember { + #[prost(bytes = "bytes", tag = "1")] + pub public_key: ::prost::bytes::Bytes, + #[prost(uint64, tag = "2")] + pub stake: u64, +} diff --git a/crates/sui-rest-api/src/proto/mod.rs b/crates/sui-rest-api/src/proto/mod.rs new file mode 100644 index 0000000000000..546cd7f0d01b0 --- /dev/null +++ b/crates/sui-rest-api/src/proto/mod.rs @@ -0,0 +1,1196 @@ +// Copyright (c) Mysten Labs, Inc. 
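Nearly all of these generated messages share one shape: a single bytes field carrying the BCS encoding of the corresponding Sui type, with the conversions below doing bcs::to_bytes and bcs::from_bytes at the boundary. A self-contained round-trip sketch of that wrapping, using a stand-in payload type instead of a real Sui type:

use prost::Message;

// Stand-in for a BCS-serializable domain type (the real code wraps Sui types).
#[derive(serde::Serialize, serde::Deserialize, Debug, PartialEq)]
struct Payload {
    sequence_number: u64,
}

// Shape of the generated wrappers: one bytes field carrying a BCS blob.
#[derive(Clone, PartialEq, ::prost::Message)]
struct BcsEnvelope {
    #[prost(bytes = "bytes", tag = "1")]
    payload: ::prost::bytes::Bytes,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let value = Payload { sequence_number: 42 };

    // Domain type -> BCS bytes -> protobuf envelope -> wire bytes.
    let envelope = BcsEnvelope {
        payload: bcs::to_bytes(&value)?.into(),
    };
    let wire = envelope.encode_to_vec();

    // Wire bytes -> envelope -> BCS bytes -> domain type.
    let decoded = BcsEnvelope::decode(wire.as_slice())?;
    let roundtrip: Payload = bcs::from_bytes(&decoded.payload)?;
    assert_eq!(value, roundtrip);
    Ok(())
}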
+// SPDX-License-Identifier: Apache-2.0 + +#[path = "generated/sui.rest.rs"] +mod generated; +pub use generated::*; +use tap::Pipe; + +// +// Transaction +// + +impl TryFrom<&sui_sdk_types::types::Transaction> for Transaction { + type Error = bcs::Error; + + fn try_from(value: &sui_sdk_types::types::Transaction) -> Result { + bcs::to_bytes(&value).map(|bytes| Self { + transaction: bytes.into(), + }) + } +} + +impl TryFrom<&Transaction> for sui_sdk_types::types::Transaction { + type Error = bcs::Error; + + fn try_from(value: &Transaction) -> Result { + bcs::from_bytes(&value.transaction) + } +} + +impl TryFrom<&sui_types::transaction::TransactionData> for Transaction { + type Error = bcs::Error; + + fn try_from(value: &sui_types::transaction::TransactionData) -> Result { + bcs::to_bytes(&value).map(|bytes| Self { + transaction: bytes.into(), + }) + } +} + +impl TryFrom<&Transaction> for sui_types::transaction::TransactionData { + type Error = bcs::Error; + + fn try_from(value: &Transaction) -> Result { + bcs::from_bytes(&value.transaction) + } +} + +// +// TransactionEffects +// + +impl TryFrom<&sui_sdk_types::types::TransactionEffects> for TransactionEffects { + type Error = bcs::Error; + + fn try_from(value: &sui_sdk_types::types::TransactionEffects) -> Result { + bcs::to_bytes(&value).map(|bytes| Self { + effects: bytes.into(), + }) + } +} + +impl TryFrom<&TransactionEffects> for sui_sdk_types::types::TransactionEffects { + type Error = bcs::Error; + + fn try_from(value: &TransactionEffects) -> Result { + bcs::from_bytes(&value.effects) + } +} + +impl TryFrom<&sui_types::effects::TransactionEffects> for TransactionEffects { + type Error = bcs::Error; + + fn try_from(value: &sui_types::effects::TransactionEffects) -> Result { + bcs::to_bytes(&value).map(|bytes| Self { + effects: bytes.into(), + }) + } +} + +impl TryFrom<&TransactionEffects> for sui_types::effects::TransactionEffects { + type Error = bcs::Error; + + fn try_from(value: &TransactionEffects) -> Result { + bcs::from_bytes(&value.effects) + } +} + +// +// TransactionEvents +// + +impl TryFrom<&sui_sdk_types::types::TransactionEvents> for TransactionEvents { + type Error = bcs::Error; + + fn try_from(value: &sui_sdk_types::types::TransactionEvents) -> Result { + bcs::to_bytes(&value).map(|bytes| Self { + events: bytes.into(), + }) + } +} + +impl TryFrom<&TransactionEvents> for sui_sdk_types::types::TransactionEvents { + type Error = bcs::Error; + + fn try_from(value: &TransactionEvents) -> Result { + bcs::from_bytes(&value.events) + } +} + +impl TryFrom<&sui_types::effects::TransactionEvents> for TransactionEvents { + type Error = bcs::Error; + + fn try_from(value: &sui_types::effects::TransactionEvents) -> Result { + bcs::to_bytes(&value).map(|bytes| Self { + events: bytes.into(), + }) + } +} + +impl TryFrom<&TransactionEvents> for sui_types::effects::TransactionEvents { + type Error = bcs::Error; + + fn try_from(value: &TransactionEvents) -> Result { + bcs::from_bytes(&value.events) + } +} + +// +// Object +// + +impl TryFrom<&sui_sdk_types::types::Object> for Object { + type Error = bcs::Error; + + fn try_from(value: &sui_sdk_types::types::Object) -> Result { + bcs::to_bytes(&value).map(|bytes| Self { + object: bytes.into(), + }) + } +} + +impl TryFrom<&Object> for sui_sdk_types::types::Object { + type Error = bcs::Error; + + fn try_from(value: &Object) -> Result { + bcs::from_bytes(&value.object) + } +} + +impl TryFrom<&sui_types::object::Object> for Object { + type Error = bcs::Error; + + fn try_from(value: 
&sui_types::object::Object) -> Result { + bcs::to_bytes(&value).map(|bytes| Self { + object: bytes.into(), + }) + } +} + +impl TryFrom<&Object> for sui_types::object::Object { + type Error = bcs::Error; + + fn try_from(value: &Object) -> Result { + bcs::from_bytes(&value.object) + } +} + +// +// CheckpointSummary +// + +impl TryFrom<&sui_sdk_types::types::CheckpointSummary> for CheckpointSummary { + type Error = bcs::Error; + + fn try_from(value: &sui_sdk_types::types::CheckpointSummary) -> Result { + bcs::to_bytes(&value).map(|bytes| Self { + summary: bytes.into(), + }) + } +} + +impl TryFrom<&CheckpointSummary> for sui_sdk_types::types::CheckpointSummary { + type Error = bcs::Error; + + fn try_from(value: &CheckpointSummary) -> Result { + bcs::from_bytes(&value.summary) + } +} + +impl TryFrom<&sui_types::messages_checkpoint::CheckpointSummary> for CheckpointSummary { + type Error = bcs::Error; + + fn try_from( + value: &sui_types::messages_checkpoint::CheckpointSummary, + ) -> Result { + bcs::to_bytes(&value).map(|bytes| Self { + summary: bytes.into(), + }) + } +} + +impl TryFrom<&CheckpointSummary> for sui_types::messages_checkpoint::CheckpointSummary { + type Error = bcs::Error; + + fn try_from(value: &CheckpointSummary) -> Result { + bcs::from_bytes(&value.summary) + } +} + +// +// CheckpointContents +// + +impl TryFrom<&sui_sdk_types::types::CheckpointContents> for CheckpointContents { + type Error = bcs::Error; + + fn try_from(value: &sui_sdk_types::types::CheckpointContents) -> Result { + bcs::to_bytes(&value).map(|bytes| Self { + contents: bytes.into(), + }) + } +} + +impl TryFrom<&CheckpointContents> for sui_sdk_types::types::CheckpointContents { + type Error = bcs::Error; + + fn try_from(value: &CheckpointContents) -> Result { + bcs::from_bytes(&value.contents) + } +} + +impl TryFrom<&sui_types::messages_checkpoint::CheckpointContents> for CheckpointContents { + type Error = bcs::Error; + + fn try_from( + value: &sui_types::messages_checkpoint::CheckpointContents, + ) -> Result { + bcs::to_bytes(&value).map(|bytes| Self { + contents: bytes.into(), + }) + } +} + +impl TryFrom<&CheckpointContents> for sui_types::messages_checkpoint::CheckpointContents { + type Error = bcs::Error; + + fn try_from(value: &CheckpointContents) -> Result { + bcs::from_bytes(&value.contents) + } +} + +// +// ValidatorAggregatedSignature +// + +impl TryFrom<&sui_sdk_types::types::ValidatorAggregatedSignature> for ValidatorAggregatedSignature { + type Error = bcs::Error; + + fn try_from( + value: &sui_sdk_types::types::ValidatorAggregatedSignature, + ) -> Result { + bcs::to_bytes(&value).map(|bytes| Self { + signature: bytes.into(), + }) + } +} + +impl TryFrom<&ValidatorAggregatedSignature> for sui_sdk_types::types::ValidatorAggregatedSignature { + type Error = bcs::Error; + + fn try_from(value: &ValidatorAggregatedSignature) -> Result { + bcs::from_bytes(&value.signature) + } +} + +impl TryFrom<&sui_types::crypto::AuthorityStrongQuorumSignInfo> for ValidatorAggregatedSignature { + type Error = bcs::Error; + + fn try_from( + value: &sui_types::crypto::AuthorityStrongQuorumSignInfo, + ) -> Result { + bcs::to_bytes(&value).map(|bytes| Self { + signature: bytes.into(), + }) + } +} + +impl TryFrom<&ValidatorAggregatedSignature> for sui_types::crypto::AuthorityStrongQuorumSignInfo { + type Error = bcs::Error; + + fn try_from(value: &ValidatorAggregatedSignature) -> Result { + bcs::from_bytes(&value.signature) + } +} + +// +// UserSignature +// + +impl TryFrom<&sui_sdk_types::types::UserSignature> for 
UserSignature { + type Error = bcs::Error; + + fn try_from(value: &sui_sdk_types::types::UserSignature) -> Result { + Ok(Self { + signature: value.to_bytes().into(), + }) + } +} + +impl TryFrom<&UserSignature> for sui_sdk_types::types::UserSignature { + type Error = bcs::Error; + + fn try_from(value: &UserSignature) -> Result { + Self::from_bytes(&value.signature).map_err(|e| bcs::Error::Custom(e.to_string())) + } +} + +impl TryFrom<&sui_types::signature::GenericSignature> for UserSignature { + type Error = bcs::Error; + + fn try_from(value: &sui_types::signature::GenericSignature) -> Result { + Ok(Self { + signature: sui_types::crypto::ToFromBytes::as_bytes(value) + .to_vec() + .into(), + }) + } +} + +impl TryFrom<&UserSignature> for sui_types::signature::GenericSignature { + type Error = bcs::Error; + + fn try_from(value: &UserSignature) -> Result { + sui_types::crypto::ToFromBytes::from_bytes(&value.signature) + .map_err(|e| bcs::Error::Custom(e.to_string())) + } +} + +// +// GetObjectResponse +// + +impl TryFrom for GetObjectResponse { + type Error = bcs::Error; + + fn try_from(value: crate::objects::ObjectResponse) -> Result { + Ok(Self { + digest: value.digest.as_bytes().to_vec().into(), + object: Some(Object::try_from(&value.object)?), + }) + } +} + +impl TryFrom for crate::objects::ObjectResponse { + type Error = bcs::Error; + + fn try_from(value: GetObjectResponse) -> Result { + Ok(Self { + digest: sui_sdk_types::types::ObjectDigest::from_bytes(&value.digest) + .map_err(|e| bcs::Error::Custom(e.to_string()))?, + object: value + .object + .ok_or_else(|| bcs::Error::Custom("missing object".into()))? + .pipe_ref(TryInto::try_into)?, + }) + } +} + +// +// GetCheckpointResponse +// + +impl TryFrom for GetCheckpointResponse { + type Error = bcs::Error; + + fn try_from(c: crate::checkpoints::CheckpointResponse) -> Result { + Ok(Self { + digest: c.digest.as_bytes().to_vec().into(), + summary: Some(CheckpointSummary::try_from(&c.summary)?), + signature: Some(ValidatorAggregatedSignature::try_from(&c.signature)?), + contents: c + .contents + .as_ref() + .map(CheckpointContents::try_from) + .transpose()?, + }) + } +} + +impl TryFrom for crate::checkpoints::CheckpointResponse { + type Error = bcs::Error; + + fn try_from(value: GetCheckpointResponse) -> Result { + let summary = value + .summary + .ok_or_else(|| bcs::Error::Custom("missing summary".into()))? + .pipe_ref(TryInto::try_into)?; + let signature = value + .signature + .ok_or_else(|| bcs::Error::Custom("missing signature".into()))? 
+ .pipe_ref(TryInto::try_into)?; + + let contents = value.contents.as_ref().map(TryInto::try_into).transpose()?; + + Ok(Self { + digest: sui_sdk_types::types::CheckpointDigest::from_bytes(&value.digest) + .map_err(|e| bcs::Error::Custom(e.to_string()))?, + summary, + signature, + contents, + }) + } +} + +impl TryFrom> for ListCheckpointResponse { + type Error = bcs::Error; + fn try_from(value: Vec) -> Result { + let checkpoints = value + .into_iter() + .map(TryInto::try_into) + .collect::>()?; + + Ok(Self { checkpoints }) + } +} + +// +// GetTransactionResponse +// + +impl TryFrom for GetTransactionResponse { + type Error = bcs::Error; + + fn try_from(value: crate::transactions::TransactionResponse) -> Result { + Ok(Self { + digest: value.digest.as_bytes().to_vec().into(), + transaction: Some(Transaction::try_from(&value.transaction)?), + signatures: value + .signatures + .iter() + .map(UserSignature::try_from) + .collect::>()?, + effects: Some(TransactionEffects::try_from(&value.effects)?), + events: value + .events + .as_ref() + .map(TransactionEvents::try_from) + .transpose()?, + checkpoint: value.checkpoint, + timestamp_ms: value.timestamp_ms, + }) + } +} + +impl TryFrom for crate::transactions::TransactionResponse { + type Error = bcs::Error; + + fn try_from(value: GetTransactionResponse) -> Result { + Ok(Self { + digest: sui_sdk_types::types::TransactionDigest::from_bytes(&value.digest) + .map_err(|e| bcs::Error::Custom(e.to_string()))?, + transaction: value + .transaction + .ok_or_else(|| bcs::Error::Custom("missing transaction".into()))? + .pipe_ref(TryInto::try_into)?, + signatures: value + .signatures + .iter() + .map(TryInto::try_into) + .collect::>()?, + effects: value + .effects + .ok_or_else(|| bcs::Error::Custom("missing effects".into()))? + .pipe_ref(TryInto::try_into)?, + events: value.events.as_ref().map(TryInto::try_into).transpose()?, + checkpoint: value.checkpoint, + timestamp_ms: value.timestamp_ms, + }) + } +} + +// +// CheckpointTransaction +// + +impl TryFrom for CheckpointTransaction { + type Error = bcs::Error; + + fn try_from( + transaction: sui_types::full_checkpoint_content::CheckpointTransaction, + ) -> Result { + Ok(Self { + transaction: Some(Transaction::try_from( + &transaction.transaction.intent_message().value, + )?), + signatures: transaction + .transaction + .tx_signatures() + .iter() + .map(UserSignature::try_from) + .collect::>()?, + effects: Some(TransactionEffects::try_from(&transaction.effects)?), + events: transaction + .events + .as_ref() + .map(TransactionEvents::try_from) + .transpose()?, + input_objects: transaction + .input_objects + .iter() + .map(Object::try_from) + .collect::>()?, + output_objects: transaction + .output_objects + .iter() + .map(Object::try_from) + .collect::>()?, + }) + } +} + +impl TryFrom for sui_types::full_checkpoint_content::CheckpointTransaction { + type Error = bcs::Error; + + fn try_from(transaction: CheckpointTransaction) -> Result { + let transaction_data = transaction + .transaction + .ok_or_else(|| bcs::Error::Custom("missing transaction".into()))? + .pipe_ref(TryInto::try_into)?; + let user_signatures = transaction + .signatures + .iter() + .map(TryInto::try_into) + .collect::>()?; + + Ok(Self { + transaction: sui_types::transaction::Transaction::new( + sui_types::transaction::SenderSignedData::new(transaction_data, user_signatures), + ), + effects: transaction + .effects + .ok_or_else(|| bcs::Error::Custom("missing Effects".into()))? 
+ .pipe_ref(TryInto::try_into)?, + events: transaction + .events + .as_ref() + .map(TryInto::try_into) + .transpose()?, + input_objects: transaction + .input_objects + .iter() + .map(TryInto::try_into) + .collect::>()?, + output_objects: transaction + .output_objects + .iter() + .map(TryInto::try_into) + .collect::>()?, + }) + } +} + +// +// FullCheckpoint +// + +impl TryFrom for FullCheckpoint { + type Error = bcs::Error; + + fn try_from( + c: sui_types::full_checkpoint_content::CheckpointData, + ) -> Result { + Ok(Self { + summary: Some(CheckpointSummary::try_from(c.checkpoint_summary.data())?), + signature: Some(ValidatorAggregatedSignature::try_from( + c.checkpoint_summary.auth_sig(), + )?), + contents: Some(CheckpointContents::try_from(&c.checkpoint_contents)?), + transactions: c + .transactions + .into_iter() + .map(TryInto::try_into) + .collect::>()?, + }) + } +} + +impl TryFrom for sui_types::full_checkpoint_content::CheckpointData { + type Error = bcs::Error; + + fn try_from(checkpoint: FullCheckpoint) -> Result { + let summary = checkpoint + .summary + .ok_or_else(|| bcs::Error::Custom("missing summary".into()))? + .pipe_ref(TryInto::try_into)?; + let signature = checkpoint + .signature + .ok_or_else(|| bcs::Error::Custom("missing signature".into()))? + .pipe_ref(TryInto::try_into)?; + let checkpoint_summary = + sui_types::messages_checkpoint::CertifiedCheckpointSummary::new_from_data_and_sig( + summary, signature, + ); + + let contents = checkpoint + .contents + .ok_or_else(|| bcs::Error::Custom("missing checkpoint contents".into()))? + .pipe_ref(TryInto::try_into)?; + + let transactions = checkpoint + .transactions + .into_iter() + .map(TryInto::try_into) + .collect::>()?; + + Ok(Self { + checkpoint_summary, + checkpoint_contents: contents, + transactions, + }) + } +} + +// +// Address +// + +impl From<&sui_sdk_types::types::Address> for Address { + fn from(value: &sui_sdk_types::types::Address) -> Self { + Self { + address: value.as_bytes().to_vec().into(), + } + } +} + +impl TryFrom<&Address> for sui_sdk_types::types::Address { + type Error = bcs::Error; + + fn try_from(value: &Address) -> Result { + Self::from_bytes(&value.address).map_err(|e| bcs::Error::Custom(e.to_string())) + } +} + +impl TryFrom<&Address> for sui_types::base_types::SuiAddress { + type Error = bcs::Error; + + fn try_from(value: &Address) -> Result { + Self::from_bytes(&value.address).map_err(|e| bcs::Error::Custom(e.to_string())) + } +} + +// +// TypeTag +// + +impl From<&sui_sdk_types::types::TypeTag> for TypeTag { + fn from(value: &sui_sdk_types::types::TypeTag) -> Self { + Self { + type_tag: value.to_string(), + } + } +} + +impl TryFrom<&TypeTag> for sui_sdk_types::types::TypeTag { + type Error = sui_sdk_types::types::TypeParseError; + + fn try_from(value: &TypeTag) -> Result { + value.type_tag.parse() + } +} + +impl TryFrom<&TypeTag> for sui_types::TypeTag { + type Error = bcs::Error; + + fn try_from(value: &TypeTag) -> Result { + value + .type_tag + .parse::() + .map_err(|e| bcs::Error::Custom(e.to_string())) + } +} + +// +// I128 +// + +impl From for I128 { + fn from(value: i128) -> Self { + Self { + little_endian_bytes: value.to_le_bytes().to_vec().into(), + } + } +} + +impl TryFrom<&I128> for i128 { + type Error = std::array::TryFromSliceError; + + fn try_from(value: &I128) -> Result { + Ok(i128::from_le_bytes( + value.little_endian_bytes.as_ref().try_into()?, + )) + } +} + +// +// BalanceChange +// + +impl From<&sui_sdk_types::types::BalanceChange> for BalanceChange { + fn from(value: 
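Balance-change amounts cross the protobuf boundary as raw little-endian bytes because protobuf has no 128-bit integer scalar; the I128 conversions simply go through to_le_bytes and from_le_bytes. A tiny dependency-free sketch of that round trip:

fn main() {
    let amount: i128 = -123_456_789_012_345_678_901_234_567;

    // Encode the way the I128 message does: 16 little-endian bytes.
    let little_endian_bytes: Vec<u8> = amount.to_le_bytes().to_vec();
    assert_eq!(little_endian_bytes.len(), 16);

    // Decode side: the slice must be exactly 16 bytes or the conversion fails.
    let decoded = i128::from_le_bytes(
        little_endian_bytes
            .as_slice()
            .try_into()
            .expect("expected exactly 16 bytes"),
    );
    assert_eq!(amount, decoded);
}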
&sui_sdk_types::types::BalanceChange) -> Self { + Self { + address: Some(Address::from(&value.address)), + coin_type: Some(TypeTag::from(&value.coin_type)), + amount: Some(I128::from(value.amount)), + } + } +} + +impl TryFrom<&BalanceChange> for sui_sdk_types::types::BalanceChange { + type Error = bcs::Error; + + fn try_from(value: &BalanceChange) -> Result { + let address = value + .address + .as_ref() + .ok_or_else(|| bcs::Error::Custom("missing address".into()))? + .try_into()?; + + let coin_type = value + .coin_type + .as_ref() + .ok_or_else(|| bcs::Error::Custom("missing coin_type".into()))? + .pipe(sui_sdk_types::types::TypeTag::try_from) + .map_err(|e| bcs::Error::Custom(e.to_string()))?; + + let amount = value + .amount + .as_ref() + .ok_or_else(|| bcs::Error::Custom("missing amount".into()))? + .pipe(i128::try_from) + .map_err(|e| bcs::Error::Custom(e.to_string()))?; + + Ok(Self { + address, + coin_type, + amount, + }) + } +} + +impl TryFrom<&BalanceChange> for crate::client::BalanceChange { + type Error = bcs::Error; + + fn try_from(value: &BalanceChange) -> Result { + let address = value + .address + .as_ref() + .ok_or_else(|| bcs::Error::Custom("missing address".into()))? + .try_into()?; + + let coin_type = value + .coin_type + .as_ref() + .ok_or_else(|| bcs::Error::Custom("missing coin_type".into()))? + .pipe(sui_types::TypeTag::try_from) + .map_err(|e| bcs::Error::Custom(e.to_string()))?; + + let amount = value + .amount + .as_ref() + .ok_or_else(|| bcs::Error::Custom("missing amount".into()))? + .pipe(i128::try_from) + .map_err(|e| bcs::Error::Custom(e.to_string()))?; + + Ok(Self { + address, + coin_type, + amount, + }) + } +} +// +// EffectsFinality +// + +impl TryFrom<&crate::transactions::EffectsFinality> for EffectsFinality { + type Error = bcs::Error; + + fn try_from(value: &crate::transactions::EffectsFinality) -> Result { + let (signature, checkpoint) = match value { + crate::transactions::EffectsFinality::Certified { signature } => { + (Some(signature.try_into()?), None) + } + crate::transactions::EffectsFinality::Checkpointed { checkpoint } => { + (None, Some(*checkpoint)) + } + }; + + Ok(Self { + signature, + checkpoint, + }) + } +} + +impl TryFrom<&EffectsFinality> for crate::transactions::EffectsFinality { + type Error = bcs::Error; + + fn try_from(value: &EffectsFinality) -> Result { + let signature = value + .signature + .as_ref() + .map(sui_sdk_types::types::ValidatorAggregatedSignature::try_from) + .transpose()?; + match (signature, value.checkpoint) { + (Some(signature), _) => crate::transactions::EffectsFinality::Certified { signature }, + (None, Some(checkpoint)) => { + crate::transactions::EffectsFinality::Checkpointed { checkpoint } + } + (None, None) => { + return Err(bcs::Error::Custom( + "missing signature or checkpoint field".into(), + )) + } + } + .pipe(Ok) + } +} + +impl TryFrom<&EffectsFinality> for crate::client::EffectsFinality { + type Error = bcs::Error; + + fn try_from(value: &EffectsFinality) -> Result { + let signature = value + .signature + .as_ref() + .map(sui_types::crypto::AuthorityStrongQuorumSignInfo::try_from) + .transpose()?; + match (signature, value.checkpoint) { + (Some(signature), _) => crate::client::EffectsFinality::Certified { signature }, + (None, Some(checkpoint)) => crate::client::EffectsFinality::Checkpointed { checkpoint }, + (None, None) => { + return Err(bcs::Error::Custom( + "missing signature or checkpoint field".into(), + )) + } + } + .pipe(Ok) + } +} + +// +// TransactionExecutionResponse +// + +impl TryFrom for 
TransactionExecutionResponse { + type Error = bcs::Error; + + fn try_from( + value: crate::transactions::TransactionExecutionResponse, + ) -> Result { + Ok(Self { + effects: Some(TransactionEffects::try_from(&value.effects)?), + finality: Some(EffectsFinality::try_from(&value.finality)?), + events: value + .events + .as_ref() + .map(TransactionEvents::try_from) + .transpose()?, + balance_changes: value + .balance_changes + .iter() + .flat_map(|balance_changes| balance_changes.iter()) + .map(BalanceChange::from) + .collect(), + input_objects: value + .input_objects + .iter() + .flat_map(|objects| objects.iter()) + .map(Object::try_from) + .collect::>()?, + output_objects: value + .output_objects + .iter() + .flat_map(|objects| objects.iter()) + .map(Object::try_from) + .collect::>()?, + }) + } +} + +impl TryFrom for crate::transactions::TransactionExecutionResponse { + type Error = bcs::Error; + + fn try_from(value: TransactionExecutionResponse) -> Result { + Ok(Self { + effects: value + .effects + .ok_or_else(|| bcs::Error::Custom("missing Effects".into()))? + .pipe_ref(TryInto::try_into)?, + events: value.events.as_ref().map(TryInto::try_into).transpose()?, + input_objects: Some( + value + .input_objects + .iter() + .map(TryInto::try_into) + .collect::>()?, + ), + output_objects: Some( + value + .output_objects + .iter() + .map(TryInto::try_into) + .collect::>()?, + ), + finality: value + .finality + .ok_or_else(|| bcs::Error::Custom("missing finality".into()))? + .pipe_ref(TryInto::try_into)?, + balance_changes: Some( + value + .balance_changes + .iter() + .map(TryInto::try_into) + .collect::>()?, + ), + }) + } +} + +impl TryFrom for crate::client::TransactionExecutionResponse { + type Error = bcs::Error; + + fn try_from(value: TransactionExecutionResponse) -> Result { + Ok(Self { + effects: value + .effects + .ok_or_else(|| bcs::Error::Custom("missing Effects".into()))? + .pipe_ref(TryInto::try_into)?, + events: value.events.as_ref().map(TryInto::try_into).transpose()?, + input_objects: Some( + value + .input_objects + .iter() + .map(TryInto::try_into) + .collect::>()?, + ), + output_objects: Some( + value + .output_objects + .iter() + .map(TryInto::try_into) + .collect::>()?, + ), + finality: value + .finality + .ok_or_else(|| bcs::Error::Custom("missing finality".into()))? + .pipe_ref(TryInto::try_into)?, + balance_changes: Some( + value + .balance_changes + .iter() + .map(TryInto::try_into) + .collect::>()?, + ), + }) + } +} + +// +// TransactionSimulationResponse +// + +impl TryFrom for TransactionSimulationResponse { + type Error = bcs::Error; + + fn try_from( + value: crate::transactions::TransactionSimulationResponse, + ) -> Result { + Ok(Self { + effects: Some(TransactionEffects::try_from(&value.effects)?), + events: value + .events + .as_ref() + .map(TransactionEvents::try_from) + .transpose()?, + balance_changes: value + .balance_changes + .iter() + .flat_map(|balance_changes| balance_changes.iter()) + .map(BalanceChange::from) + .collect(), + input_objects: value + .input_objects + .iter() + .flat_map(|objects| objects.iter()) + .map(Object::try_from) + .collect::>()?, + output_objects: value + .output_objects + .iter() + .flat_map(|objects| objects.iter()) + .map(Object::try_from) + .collect::>()?, + }) + } +} + +impl TryFrom for crate::transactions::TransactionSimulationResponse { + type Error = bcs::Error; + + fn try_from(value: TransactionSimulationResponse) -> Result { + Ok(Self { + effects: value + .effects + .ok_or_else(|| bcs::Error::Custom("missing Effects".into()))? 
+ .pipe_ref(TryInto::try_into)?, + events: value.events.as_ref().map(TryInto::try_into).transpose()?, + input_objects: Some( + value + .input_objects + .iter() + .map(TryInto::try_into) + .collect::>()?, + ), + output_objects: Some( + value + .output_objects + .iter() + .map(TryInto::try_into) + .collect::>()?, + ), + balance_changes: Some( + value + .balance_changes + .iter() + .map(TryInto::try_into) + .collect::>()?, + ), + }) + } +} + +// +// ResolveTransactionResponse +// + +impl TryFrom for ResolveTransactionResponse { + type Error = bcs::Error; + + fn try_from( + value: crate::transactions::ResolveTransactionResponse, + ) -> Result { + Ok(Self { + transaction: Some(Transaction::try_from(&value.transaction)?), + simulation: value + .simulation + .map(TransactionSimulationResponse::try_from) + .transpose()?, + }) + } +} + +impl TryFrom for crate::transactions::ResolveTransactionResponse { + type Error = bcs::Error; + + fn try_from(value: ResolveTransactionResponse) -> Result { + Ok(Self { + transaction: value + .transaction + .ok_or_else(|| bcs::Error::Custom("missing transaction".into()))? + .pipe_ref(TryInto::try_into)?, + simulation: value.simulation.map(TryInto::try_into).transpose()?, + }) + } +} + +// +// ExecuteTransactionRequest +// + +impl TryFrom for ExecuteTransactionRequest { + type Error = bcs::Error; + + fn try_from(value: sui_sdk_types::types::SignedTransaction) -> Result { + Ok(Self { + transaction: Some(Transaction::try_from(&value.transaction)?), + signatures: value + .signatures + .iter() + .map(UserSignature::try_from) + .collect::>()?, + }) + } +} + +impl TryFrom for sui_sdk_types::types::SignedTransaction { + type Error = bcs::Error; + + fn try_from(value: ExecuteTransactionRequest) -> Result { + Ok(Self { + transaction: value + .transaction + .ok_or_else(|| bcs::Error::Custom("missing transaction".into()))? + .pipe_ref(TryInto::try_into)?, + signatures: value + .signatures + .iter() + .map(TryInto::try_into) + .collect::>()?, + }) + } +} + +// +// SimulateTransactionRequest +// + +impl TryFrom for SimulateTransactionRequest { + type Error = bcs::Error; + + fn try_from(value: sui_sdk_types::types::Transaction) -> Result { + Ok(Self { + transaction: Some(Transaction::try_from(&value)?), + }) + } +} + +impl TryFrom for sui_sdk_types::types::Transaction { + type Error = bcs::Error; + + fn try_from(value: SimulateTransactionRequest) -> Result { + value + .transaction + .ok_or_else(|| bcs::Error::Custom("missing transaction".into()))? 
+ .pipe_ref(TryInto::try_into) + } +} + +// +// ValidatorCommitteeMember +// + +impl From<&sui_sdk_types::types::ValidatorCommitteeMember> for ValidatorCommitteeMember { + fn from(value: &sui_sdk_types::types::ValidatorCommitteeMember) -> Self { + Self { + public_key: value.public_key.as_bytes().to_vec().into(), + stake: value.stake, + } + } +} + +impl TryFrom for sui_sdk_types::types::ValidatorCommitteeMember { + type Error = bcs::Error; + + fn try_from(value: ValidatorCommitteeMember) -> Result { + Ok(Self { + public_key: sui_sdk_types::types::Bls12381PublicKey::from_bytes(&value.public_key) + .map_err(|e| bcs::Error::Custom(e.to_string()))?, + stake: value.stake, + }) + } +} + +// +// ValidatorCommittee +// + +impl From for ValidatorCommittee { + fn from(value: sui_sdk_types::types::ValidatorCommittee) -> Self { + Self { + epoch: value.epoch, + members: value + .members + .iter() + .map(ValidatorCommitteeMember::from) + .collect(), + } + } +} + +impl TryFrom for sui_sdk_types::types::ValidatorCommittee { + type Error = bcs::Error; + + fn try_from(value: ValidatorCommittee) -> Result { + Ok(Self { + epoch: value.epoch, + members: value + .members + .into_iter() + .map(TryInto::try_into) + .collect::>()?, + }) + } +} diff --git a/crates/sui-rest-api/src/response.rs b/crates/sui-rest-api/src/response.rs index 1863bc6c45d54..0a6df29b0b00a 100644 --- a/crates/sui-rest-api/src/response.rs +++ b/crates/sui-rest-api/src/response.rs @@ -6,8 +6,10 @@ use axum::{ http::{HeaderMap, StatusCode}, response::{IntoResponse, Response}, }; +use prost::bytes::BytesMut; use crate::{ + accept::APPLICATION_PROTOBUF, content_type::ContentType, types::{ X_SUI_CHAIN, X_SUI_CHAIN_ID, X_SUI_CHECKPOINT_HEIGHT, X_SUI_EPOCH, @@ -19,12 +21,6 @@ use crate::{ pub struct Bcs(pub T); -#[derive(Debug)] -pub enum ResponseContent { - Bcs(T), - Json(J), -} - impl axum::response::IntoResponse for Bcs where T: serde::Serialize, @@ -112,6 +108,12 @@ impl axum::response::IntoResponse for BcsRejection { } } +#[derive(Debug)] +pub enum ResponseContent { + Bcs(T), + Json(J), +} + impl axum::response::IntoResponse for ResponseContent where T: serde::Serialize, @@ -125,6 +127,47 @@ where } } +#[derive(Debug)] +pub enum JsonProtobufBcs { + Json(J), + Protobuf(P), + Bcs(T), +} + +impl axum::response::IntoResponse for JsonProtobufBcs +where + J: serde::Serialize, + P: prost::Message + std::default::Default, + T: serde::Serialize, +{ + fn into_response(self) -> axum::response::Response { + match self { + JsonProtobufBcs::Json(inner) => axum::Json(inner).into_response(), + JsonProtobufBcs::Protobuf(inner) => Protobuf(inner).into_response(), + JsonProtobufBcs::Bcs(inner) => Bcs(inner).into_response(), + } + } +} + +#[derive(Debug)] +pub enum ProtobufBcs { + Protobuf(P), + Bcs(T), +} + +impl axum::response::IntoResponse for ProtobufBcs +where + P: prost::Message + std::default::Default, + T: serde::Serialize, +{ + fn into_response(self) -> axum::response::Response { + match self { + Self::Protobuf(inner) => Protobuf(inner).into_response(), + Self::Bcs(inner) => Bcs(inner).into_response(), + } + } +} + pub async fn append_info_headers( State(state): State, response: Response, @@ -169,3 +212,90 @@ pub async fn append_info_headers( (headers, response) } + +pub struct Protobuf(pub T); + +impl axum::response::IntoResponse for Protobuf +where + T: prost::Message, +{ + fn into_response(self) -> axum::response::Response { + let mut buf = BytesMut::new(); + match self.0.encode(&mut buf) { + Ok(()) => ( + [( + axum::http::header::CONTENT_TYPE, + 
axum::http::HeaderValue::from_static(APPLICATION_PROTOBUF), + )], + buf, + ) + .into_response(), + Err(err) => ( + StatusCode::INTERNAL_SERVER_ERROR, + [( + axum::http::header::CONTENT_TYPE, + axum::http::HeaderValue::from_static(TEXT_PLAIN_UTF_8), + )], + err.to_string(), + ) + .into_response(), + } + } +} + +#[axum::async_trait] +impl axum::extract::FromRequest for Protobuf +where + T: prost::Message + std::default::Default, + S: Send + Sync, +{ + type Rejection = ProtobufRejection; + + async fn from_request( + req: axum::http::Request, + state: &S, + ) -> Result { + if protobuf_content_type(req.headers()) { + let bytes = axum::body::Bytes::from_request(req, state) + .await + .map_err(ProtobufRejection::BytesRejection)?; + T::decode(bytes) + .map(Self) + .map_err(ProtobufRejection::DeserializationError) + } else { + Err(ProtobufRejection::MissingProtobufContentType) + } + } +} + +fn protobuf_content_type(headers: &HeaderMap) -> bool { + let Some(ContentType(mime)) = ContentType::from_headers(headers) else { + return false; + }; + + mime.essence_str() == APPLICATION_PROTOBUF +} + +pub enum ProtobufRejection { + MissingProtobufContentType, + DeserializationError(prost::DecodeError), + BytesRejection(axum::extract::rejection::BytesRejection), +} + +impl axum::response::IntoResponse for ProtobufRejection { + fn into_response(self) -> axum::response::Response { + match self { + ProtobufRejection::MissingProtobufContentType => ( + StatusCode::UNSUPPORTED_MEDIA_TYPE, + "Expected request with `Content-Type: application/x-protobuf`", + ) + .into_response(), + ProtobufRejection::DeserializationError(e) => ( + StatusCode::UNPROCESSABLE_ENTITY, + format!("Failed to deserialize the protobuf body into the target type: {e}"), + ) + .into_response(), + ProtobufRejection::BytesRejection(bytes_rejection) => bytes_rejection.into_response(), + } + } +} diff --git a/crates/sui-rest-api/src/transactions/execution.rs b/crates/sui-rest-api/src/transactions/execution.rs index d1994b83e4d40..0e445f687cb6b 100644 --- a/crates/sui-rest-api/src/transactions/execution.rs +++ b/crates/sui-rest-api/src/transactions/execution.rs @@ -1,12 +1,12 @@ // Copyright (c) Mysten Labs, Inc. 
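// A rough usage sketch of the `Protobuf<T>` wrapper introduced in response.rs above: it is both
// an extractor (rejecting bodies without `Content-Type: application/x-protobuf`) and a response
// (prost-encoding the message and setting the same content type). The echo handler below is
// illustrative only; `proto::ListTransactionsResponse` is assumed to be one of this crate's
// prost-generated messages, and the route is hypothetical.
//
//     use axum::routing::post;
//     use axum::Router;
//
//     // Decodes a protobuf request body and re-encodes it as a protobuf response.
//     async fn echo(
//         Protobuf(msg): Protobuf<proto::ListTransactionsResponse>,
//     ) -> Protobuf<proto::ListTransactionsResponse> {
//         Protobuf(msg)
//     }
//
//     fn router() -> Router {
//         // Hypothetical route; not part of this crate's actual router.
//         Router::new().route("/echo", post(echo))
//     }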
// SPDX-License-Identifier: Apache-2.0 +use crate::accept::AcceptJsonProtobufBcs; use crate::openapi::{ ApiEndpoint, OperationBuilder, RequestBodyBuilder, ResponseBuilder, RouteHandler, }; -use crate::response::Bcs; -use crate::{accept::AcceptFormat, response::ResponseContent}; -use crate::{RestError, RestService, Result}; +use crate::response::{Bcs, JsonProtobufBcs}; +use crate::{proto, RestError, RestService, Result}; use axum::extract::{Query, State}; use schemars::JsonSchema; use std::net::SocketAddr; @@ -43,6 +43,7 @@ impl ApiEndpoint for ExecuteTransaction { 200, ResponseBuilder::new() .json_content::(generator) + .protobuf_content() .bcs_content() .build(), ) @@ -65,9 +66,15 @@ async fn execute_transaction( State(state): State>>, Query(parameters): Query, client_address: Option>, - accept: AcceptFormat, + accept: AcceptJsonProtobufBcs, Bcs(transaction): Bcs, -) -> Result> { +) -> Result< + JsonProtobufBcs< + TransactionExecutionResponse, + proto::TransactionExecutionResponse, + TransactionExecutionResponse, + >, +> { let executor = state.ok_or_else(|| anyhow::anyhow!("No Transaction Executor"))?; let request = sui_types::quorum_driver_types::ExecuteTransactionRequestV3 { transaction: transaction.try_into()?, @@ -161,8 +168,9 @@ async fn execute_transaction( }; match accept { - AcceptFormat::Json => ResponseContent::Json(response), - AcceptFormat::Bcs => ResponseContent::Bcs(response), + AcceptJsonProtobufBcs::Json => JsonProtobufBcs::Json(response), + AcceptJsonProtobufBcs::Protobuf => JsonProtobufBcs::Protobuf(response.try_into()?), + AcceptJsonProtobufBcs::Bcs => JsonProtobufBcs::Bcs(response), } .pipe(Ok) } @@ -191,13 +199,13 @@ pub struct ExecuteTransactionQueryParameters { /// Response type for the execute transaction endpoint #[derive(Debug, serde::Serialize, serde::Deserialize, JsonSchema)] pub struct TransactionExecutionResponse { - effects: TransactionEffects, + pub effects: TransactionEffects, - finality: EffectsFinality, - events: Option, - balance_changes: Option>, - input_objects: Option>, - output_objects: Option>, + pub finality: EffectsFinality, + pub events: Option, + pub balance_changes: Option>, + pub input_objects: Option>, + pub output_objects: Option>, } #[derive(Clone, Debug)] @@ -377,6 +385,7 @@ impl ApiEndpoint for SimulateTransaction { 200, ResponseBuilder::new() .json_content::(generator) + .protobuf_content() .bcs_content() .build(), ) @@ -391,16 +400,26 @@ impl ApiEndpoint for SimulateTransaction { async fn simulate_transaction( State(state): State>>, Query(parameters): Query, - accept: AcceptFormat, + accept: AcceptJsonProtobufBcs, //TODO allow accepting JSON as well as BCS Bcs(transaction): Bcs, -) -> Result> { +) -> Result< + JsonProtobufBcs< + TransactionSimulationResponse, + proto::TransactionSimulationResponse, + TransactionSimulationResponse, + >, +> { let executor = state.ok_or_else(|| anyhow::anyhow!("No Transaction Executor"))?; - simulate_transaction_impl(&executor, ¶meters, transaction).map(|response| match accept { - AcceptFormat::Json => ResponseContent::Json(response), - AcceptFormat::Bcs => ResponseContent::Bcs(response), - }) + let response = simulate_transaction_impl(&executor, ¶meters, transaction)?; + + match accept { + AcceptJsonProtobufBcs::Json => JsonProtobufBcs::Json(response), + AcceptJsonProtobufBcs::Protobuf => JsonProtobufBcs::Protobuf(response.try_into()?), + AcceptJsonProtobufBcs::Bcs => JsonProtobufBcs::Bcs(response), + } + .pipe(Ok) } pub(super) fn simulate_transaction_impl( diff --git 
a/crates/sui-rest-api/src/transactions/mod.rs b/crates/sui-rest-api/src/transactions/mod.rs index 362aa4c40ed2c..261790855353e 100644 --- a/crates/sui-rest-api/src/transactions/mod.rs +++ b/crates/sui-rest-api/src/transactions/mod.rs @@ -24,17 +24,20 @@ use sui_sdk_types::types::{ }; use tap::Pipe; +use crate::accept::AcceptJsonProtobufBcs; use crate::openapi::ApiEndpoint; use crate::openapi::OperationBuilder; use crate::openapi::ResponseBuilder; use crate::openapi::RouteHandler; +use crate::proto; +use crate::proto::ListTransactionsResponse; use crate::reader::StateReader; +use crate::response::JsonProtobufBcs; use crate::Direction; -use crate::Page; +use crate::PageCursor; use crate::RestError; use crate::RestService; use crate::Result; -use crate::{accept::AcceptFormat, response::ResponseContent}; pub struct GetTransaction; @@ -59,6 +62,7 @@ impl ApiEndpoint for GetTransaction { 200, ResponseBuilder::new() .json_content::(generator) + .protobuf_content() .bcs_content() .build(), ) @@ -73,14 +77,16 @@ impl ApiEndpoint for GetTransaction { async fn get_transaction( Path(transaction_digest): Path, - accept: AcceptFormat, + accept: AcceptJsonProtobufBcs, State(state): State, -) -> Result> { +) -> Result> +{ let response = state.get_transaction_response(transaction_digest)?; match accept { - AcceptFormat::Json => ResponseContent::Json(response), - AcceptFormat::Bcs => ResponseContent::Bcs(response), + AcceptJsonProtobufBcs::Json => JsonProtobufBcs::Json(response), + AcceptJsonProtobufBcs::Protobuf => JsonProtobufBcs::Protobuf(response.try_into()?), + AcceptJsonProtobufBcs::Bcs => JsonProtobufBcs::Bcs(response), } .pipe(Ok) } @@ -145,6 +151,7 @@ impl ApiEndpoint for ListTransactions { 200, ResponseBuilder::new() .json_content::>(generator) + .protobuf_content() .bcs_content() .header::(crate::types::X_SUI_CURSOR, generator) .build(), @@ -160,9 +167,12 @@ impl ApiEndpoint for ListTransactions { async fn list_transactions( Query(parameters): Query, - accept: AcceptFormat, + accept: AcceptJsonProtobufBcs, State(state): State, -) -> Result> { +) -> Result<( + PageCursor, + JsonProtobufBcs, ListTransactionsResponse, Vec>, +)> { let latest_checkpoint = state.inner().get_latest_checkpoint()?.sequence_number; let oldest_checkpoint = state.inner().get_lowest_available_checkpoint()?; let limit = parameters.limit(); @@ -195,12 +205,7 @@ async fn list_transactions( timestamp_ms: Some(cursor_info.timestamp_ms), }) }) - .collect::>()?; - - let entries = match accept { - AcceptFormat::Json => ResponseContent::Json(transactions), - AcceptFormat::Bcs => ResponseContent::Bcs(transactions), - }; + .collect::, _>>()?; let cursor = next_cursor.and_then(|(checkpoint, index)| { if checkpoint < oldest_checkpoint { @@ -210,7 +215,21 @@ async fn list_transactions( } }); - Ok(Page { entries, cursor }) + match accept { + AcceptJsonProtobufBcs::Json => JsonProtobufBcs::Json(transactions), + AcceptJsonProtobufBcs::Protobuf => { + let proto = ListTransactionsResponse { + transactions: transactions + .into_iter() + .map(TryInto::try_into) + .collect::>()?, + }; + JsonProtobufBcs::Protobuf(proto) + } + AcceptJsonProtobufBcs::Bcs => JsonProtobufBcs::Bcs(transactions), + } + .pipe(|entries| (PageCursor(cursor), entries)) + .pipe(Ok) } /// A Cursor that points at a specific transaction in history. 
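// A rough client-side sketch of the content negotiation the handlers above implement: send
// `Accept: application/x-protobuf` and decode the body with prost. The base URL, the
// `/transactions` path, and the use of `reqwest` are assumptions for illustration, not taken
// from this crate.
//
//     use prost::Message;
//
//     async fn fetch_transactions_proto(
//         base: &str,
//     ) -> anyhow::Result<proto::ListTransactionsResponse> {
//         let bytes = reqwest::Client::new()
//             .get(format!("{base}/transactions")) // placeholder path
//             .header("accept", "application/x-protobuf")
//             .send()
//             .await?
//             .bytes()
//             .await?;
//         Ok(proto::ListTransactionsResponse::decode(bytes)?)
//     }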
@@ -278,7 +297,7 @@ impl serde::Serialize for TransactionCursor { } } -#[derive(Debug, serde::Serialize, serde::Deserialize, schemars::JsonSchema)] +#[derive(Debug, Default, serde::Serialize, serde::Deserialize, schemars::JsonSchema)] pub struct ListTransactionsQueryParameters { pub limit: Option, #[schemars(with = "Option")] diff --git a/crates/sui-rest-api/src/transactions/resolve/mod.rs b/crates/sui-rest-api/src/transactions/resolve/mod.rs index dcb905d8f9104..a8373a226875e 100644 --- a/crates/sui-rest-api/src/transactions/resolve/mod.rs +++ b/crates/sui-rest-api/src/transactions/resolve/mod.rs @@ -6,15 +6,16 @@ use std::collections::HashMap; use super::execution::SimulateTransactionQueryParameters; use super::TransactionSimulationResponse; -use crate::accept::AcceptFormat; +use crate::accept::AcceptJsonProtobufBcs; use crate::objects::ObjectNotFoundError; use crate::openapi::ApiEndpoint; use crate::openapi::OperationBuilder; use crate::openapi::RequestBodyBuilder; use crate::openapi::ResponseBuilder; use crate::openapi::RouteHandler; +use crate::proto; use crate::reader::StateReader; -use crate::response::ResponseContent; +use crate::response::JsonProtobufBcs; use crate::RestError; use crate::RestService; use crate::Result; @@ -79,6 +80,7 @@ impl ApiEndpoint for ResolveTransaction { 200, ResponseBuilder::new() .json_content::(generator) + .protobuf_content() .bcs_content() .build(), ) @@ -93,9 +95,15 @@ impl ApiEndpoint for ResolveTransaction { async fn resolve_transaction( State(state): State, Query(parameters): Query, - accept: AcceptFormat, + accept: AcceptJsonProtobufBcs, Json(unresolved_transaction): Json, -) -> Result> { +) -> Result< + JsonProtobufBcs< + ResolveTransactionResponse, + proto::ResolveTransactionResponse, + ResolveTransactionResponse, + >, +> { let executor = state .executor .as_ref() @@ -183,14 +191,16 @@ async fn resolve_transaction( None }; - ResolveTransactionResponse { + let response = ResolveTransactionResponse { transaction: resolved_transaction.try_into()?, simulation, + }; + + match accept { + AcceptJsonProtobufBcs::Json => JsonProtobufBcs::Json(response), + AcceptJsonProtobufBcs::Protobuf => JsonProtobufBcs::Protobuf(response.try_into()?), + AcceptJsonProtobufBcs::Bcs => JsonProtobufBcs::Bcs(response), } - .pipe(|response| match accept { - AcceptFormat::Json => ResponseContent::Json(response), - AcceptFormat::Bcs => ResponseContent::Bcs(response), - }) .pipe(Ok) } diff --git a/crates/sui-rest-api/tests/bootstrap.rs b/crates/sui-rest-api/tests/bootstrap.rs new file mode 100644 index 0000000000000..d5e606dc0a446 --- /dev/null +++ b/crates/sui-rest-api/tests/bootstrap.rs @@ -0,0 +1,56 @@ +// Copyright (c) Mysten Labs, Inc. 
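// The `bootstrap` test below regenerates the prost bindings into `src/proto/generated` and
// fails via `git diff --exit-code` when the checked-in files are stale. For comparison, the
// same prost_build invocation in a conventional `build.rs` (generating into `OUT_DIR` at build
// time instead of committing the output) could look roughly like this; the proto file name is
// a placeholder.
//
//     // build.rs (hypothetical)
//     fn main() -> Result<(), Box<dyn std::error::Error>> {
//         prost_build::Config::new()
//             .bytes(["."])
//             .compile_protos(&["proto/rest.proto"], &["proto"])?; // placeholder file
//         Ok(())
//     }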
+// SPDX-License-Identifier: Apache-2.0 + +use std::{fs, path::PathBuf, process::Command}; + +#[test] +fn bootstrap() { + let root_dir = PathBuf::from(std::env!("CARGO_MANIFEST_DIR")); + let proto_dir = root_dir.join("proto"); + let proto_ext = std::ffi::OsStr::new("proto"); + let proto_files = fs::read_dir(&proto_dir).and_then(|dir| { + dir.filter_map(|entry| { + (|| { + let entry = entry?; + if entry.file_type()?.is_dir() { + return Ok(None); + } + + let path = entry.path(); + if path.extension() != Some(proto_ext) { + return Ok(None); + } + + Ok(Some(path)) + })() + .transpose() + }) + .collect::, _>>() + }); + let proto_files = match proto_files { + Ok(files) => files, + Err(error) => panic!("failed to list proto files: {}", error), + }; + + let out_dir = root_dir.join("src").join("proto").join("generated"); + + if let Err(error) = prost_build::Config::new() + .bytes(["."]) + .out_dir(&out_dir) + .compile_protos(&proto_files[..], &[proto_dir]) + { + panic!("failed to compile `rest` protobuf: {}", error); + } + + let status = Command::new("git") + .arg("diff") + .arg("--exit-code") + .arg("--") + .arg(out_dir) + .status(); + match status { + Ok(status) if !status.success() => panic!("You should commit the protobuf files"), + Err(error) => panic!("failed to run `git diff`: {}", error), + Ok(_) => {} + } +} diff --git a/crates/sui-single-node-benchmark/src/mock_storage.rs b/crates/sui-single-node-benchmark/src/mock_storage.rs index c6f03fe1bb8bd..78d9eedb0ec82 100644 --- a/crates/sui-single-node-benchmark/src/mock_storage.rs +++ b/crates/sui-single-node-benchmark/src/mock_storage.rs @@ -48,7 +48,7 @@ impl InMemoryObjectStore { tx_key: &TransactionKey, input_object_kinds: &[InputObjectKind], ) -> SuiResult { - let shared_locks_cell: OnceCell> = OnceCell::new(); + let shared_locks_cell: OnceCell>> = OnceCell::new(); let mut input_objects = Vec::new(); for kind in input_object_kinds { let obj: Option = match kind { @@ -58,11 +58,17 @@ impl InMemoryObjectStore { } InputObjectKind::SharedMoveObject { id, .. } => { - let shared_locks = shared_locks_cell.get_or_try_init(|| { - Ok::, SuiError>( - shared_locks.get_shared_locks(tx_key)?.into_iter().collect(), - ) - })?; + let shared_locks = shared_locks_cell + .get_or_init(|| { + shared_locks + .get_shared_locks(tx_key) + .expect("get_shared_locks should not fail") + .map(|l| l.into_iter().collect()) + }) + .as_ref() + .ok_or_else(|| SuiError::GenericAuthorityError { + error: "Shared object locks should have been set.".to_string(), + })?; let version = shared_locks.get(id).unwrap_or_else(|| { panic!("Shared object locks should have been set. 
key: {tx_key:?}, obj id: {id:?}") }); @@ -174,7 +180,7 @@ impl GetSharedLocks for InMemoryObjectStore { fn get_shared_locks( &self, _key: &TransactionKey, - ) -> Result, SuiError> { + ) -> SuiResult>> { unreachable!() } } diff --git a/crates/sui-storage/Cargo.toml b/crates/sui-storage/Cargo.toml index c56fd99370f45..4c388f65c428c 100644 --- a/crates/sui-storage/Cargo.toml +++ b/crates/sui-storage/Cargo.toml @@ -66,5 +66,5 @@ pretty_assertions.workspace = true once_cell.workspace = true sui-test-transaction-builder.workspace = true sui-types = { workspace = true, features = ["test-utils"] } -sui-macros = { workspace = true } -sui-simulator = { workspace = true } +sui-macros.workspace = true +sui-simulator.workspace = true diff --git a/crates/sui-swarm-config/tests/snapshots/snapshot_tests__genesis_config_snapshot_matches.snap b/crates/sui-swarm-config/tests/snapshots/snapshot_tests__genesis_config_snapshot_matches.snap index 74ac933740527..2fcba9da75085 100644 --- a/crates/sui-swarm-config/tests/snapshots/snapshot_tests__genesis_config_snapshot_matches.snap +++ b/crates/sui-swarm-config/tests/snapshots/snapshot_tests__genesis_config_snapshot_matches.snap @@ -6,7 +6,7 @@ ssfn_config_info: ~ validator_config_info: ~ parameters: chain_start_timestamp_ms: 0 - protocol_version: 65 + protocol_version: 68 allow_insertion_of_extra_objects: true epoch_duration_ms: 86400000 stake_subsidy_start_epoch: 0 @@ -49,3 +49,4 @@ accounts: - 30000000000000000 - 30000000000000000 - 30000000000000000 + diff --git a/crates/sui-swarm-config/tests/snapshots/snapshot_tests__populated_genesis_snapshot_matches-2.snap b/crates/sui-swarm-config/tests/snapshots/snapshot_tests__populated_genesis_snapshot_matches-2.snap index d36c1fad5d370..7e39140036125 100644 --- a/crates/sui-swarm-config/tests/snapshots/snapshot_tests__populated_genesis_snapshot_matches-2.snap +++ b/crates/sui-swarm-config/tests/snapshots/snapshot_tests__populated_genesis_snapshot_matches-2.snap @@ -3,7 +3,7 @@ source: crates/sui-swarm-config/tests/snapshot_tests.rs expression: genesis.sui_system_object().into_genesis_version_for_tooling() --- epoch: 0 -protocol_version: 65 +protocol_version: 68 system_state_version: 1 validators: total_stake: 20000000000000000 @@ -240,13 +240,13 @@ validators: next_epoch_worker_address: ~ extra_fields: id: - id: "0x834dab6f0617450d0dcfc6dd58e2a918d439fe50d2f914ac6e60dbbc38328ad3" + id: "0x2f1c192f30b36b0d0a47520ff814ace58ce8a73580c9bf86c0fc729781351bcc" size: 0 voting_power: 10000 - operation_cap_id: "0x72b130e2d852f3468d46e67179268cf2b1a000855a549b0dcab002426836a768" + operation_cap_id: "0x0e83ac0a1c9938e12a692c734f8f38dfe5858076b17611402d46afcd5887ba8e" gas_price: 1000 staking_pool: - id: "0xdc1b1962050243cbe5efd781c560b88d1c4d43da28ddc1b3f1b558210ca24009" + id: "0x222477b804c11404854c3c14cf29a2840472651c91d8870e07ae852a98c0a2e3" activation_epoch: 0 deactivation_epoch: ~ sui_balance: 20000000000000000 @@ -254,14 +254,14 @@ validators: value: 0 pool_token_balance: 20000000000000000 exchange_rates: - id: "0xb972b09a2f5914997289ba4ebbff641d7f0a3faae622ee29997c1f6713fe7e78" + id: "0xf532945be4e9eb7ef597867c6dee34dc1d89f55f711d084bc6aa01c7c99ea179" size: 1 pending_stake: 0 pending_total_sui_withdraw: 0 pending_pool_token_withdraw: 0 extra_fields: id: - id: "0xc6dec0733287765e9f8600169f776566db59a0f6cb1a854de1865db22cda913d" + id: "0x4b5abcdcefc7404834889f2890b2b626ab85c15a20b19130b56cbee9bbe2b0af" size: 0 commission_rate: 200 next_epoch_stake: 20000000000000000 @@ -269,27 +269,27 @@ validators: next_epoch_commission_rate: 
200 extra_fields: id: - id: "0xe6c77a880c82d4f3e1b8a5d503f3a8e88881de8c7b722e66569ff35f8f505d29" + id: "0xeb9ab0c31391cb533e672f2aa1a919f474a988620c5eac625dab9e28e15a7661" size: 0 pending_active_validators: contents: - id: "0xb84831d86c7697202c857ede215fb5739e4c68e1aee6051efb99496833578d22" + id: "0x1e0beb565adb7f908bce1bb65d14b5da4c6e4e0ff281e91e4c79fd7a20947d35" size: 0 pending_removals: [] staking_pool_mappings: - id: "0xb892dd544e8efe2b3c9c89be5689a297ca4ef59804308a81b11c1d89b90f6e18" + id: "0xabce5d04c1673e4e317e5c0f78bc346c4960d25473e095c9fb668ac32f5e216d" size: 1 inactive_validators: - id: "0xe285cf22b5d3c56a32961398e8f64a9f4282eb94782aef9080d9a6954e85c7d5" + id: "0x9069998be467d392b0a8ce1f598a353c415729af75bb2ebafbe66d26114ad52f" size: 0 validator_candidates: - id: "0x207f4b15b8cd26b0af90e308b677c2589bd914280198b2e8e8528a37f7240c35" + id: "0x68667de51bea6086d3fd60059841df6da32a6fd475ad52ad10597797ec6a3ca9" size: 0 at_risk_validators: contents: [] extra_fields: id: - id: "0x41921a36773858d7ea5e092810acf3e1ecbd5927a34ec4f460a2988390a57969" + id: "0xfc98b9ca99540332ff24894fd810f83a85e726542c2119bc1325d350b0399434" size: 0 storage_fund: total_object_storage_rebates: @@ -306,7 +306,7 @@ parameters: validator_low_stake_grace_period: 7 extra_fields: id: - id: "0xe96139872f584b831f86b074cf24c6158f23dd472df821c8b75a5777463d1c3d" + id: "0x16212fe3db87d96453a048041166f3f491c06f00c45a4efe181bf7708c292d3f" size: 0 reference_gas_price: 1000 validator_report_records: @@ -320,7 +320,7 @@ stake_subsidy: stake_subsidy_decrease_rate: 1000 extra_fields: id: - id: "0xe1172cf766a6e4d4fb8d0a228d794e097462e114626bdedce942087b1c029965" + id: "0x3110ada5ccc4394928c0116629587c1ad110099430f19ea183d799689eb5a8df" size: 0 safe_mode: false safe_mode_storage_rewards: @@ -332,5 +332,6 @@ safe_mode_non_refundable_storage_fee: 0 epoch_start_timestamp_ms: 10 extra_fields: id: - id: "0x531d74b5c7080de67c235dd165095164784ab991a92932bc878c60eaf4fa2a3d" + id: "0x34587a89960874da16d01bb778a02f7603278b0da8ec9258668982948f9b9535" size: 0 + diff --git a/crates/sui-swarm/Cargo.toml b/crates/sui-swarm/Cargo.toml index 51caeadf3b797..6823246c0cd0a 100644 --- a/crates/sui-swarm/Cargo.toml +++ b/crates/sui-swarm/Cargo.toml @@ -25,6 +25,7 @@ sui-swarm-config.workspace = true sui-macros.workspace = true sui-node.workspace = true sui-protocol-config.workspace = true +sui-tls.workspace = true sui-types.workspace = true mysten-metrics.workspace = true mysten-network.workspace = true diff --git a/crates/sui-swarm/src/memory/node.rs b/crates/sui-swarm/src/memory/node.rs index 5cc10a6b1f7b5..541c2bc962850 100644 --- a/crates/sui-swarm/src/memory/node.rs +++ b/crates/sui-swarm/src/memory/node.rs @@ -9,6 +9,7 @@ use sui_config::NodeConfig; use sui_node::SuiNodeHandle; use sui_types::base_types::AuthorityName; use sui_types::base_types::ConciseableName; +use sui_types::crypto::KeypairTraits; use tap::TapFallible; use tracing::{error, info}; @@ -106,7 +107,12 @@ impl Node { if is_validator { let network_address = self.config().network_address().clone(); - let channel = mysten_network::client::connect(&network_address) + let tls_config = sui_tls::create_rustls_client_config( + self.config().network_key_pair().public().to_owned(), + sui_tls::SUI_VALIDATOR_SERVER_NAME.to_string(), + None, + ); + let channel = mysten_network::client::connect(&network_address, Some(tls_config)) .await .map_err(|err| anyhow!(err.to_string())) .map_err(HealthCheckError::Failure) diff --git a/crates/sui-synthetic-ingestion/Cargo.toml 
b/crates/sui-synthetic-ingestion/Cargo.toml new file mode 100644 index 0000000000000..07391de552efe --- /dev/null +++ b/crates/sui-synthetic-ingestion/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "sui-synthetic-ingestion" +version = "0.0.0" +authors = ["Mysten Labs "] +license = "Apache-2.0" +publish = false +edition = "2021" + +[dependencies] +async-trait.workspace = true +simulacrum.workspace = true +sui-test-transaction-builder.workspace = true +sui-types = { workspace = true, features = ["test-utils"] } +tokio.workspace = true +tracing.workspace = true + +[dev-dependencies] +tempfile.workspace = true diff --git a/crates/sui-synthetic-ingestion/src/benchmark.rs b/crates/sui-synthetic-ingestion/src/benchmark.rs new file mode 100644 index 0000000000000..5eafe1348de94 --- /dev/null +++ b/crates/sui-synthetic-ingestion/src/benchmark.rs @@ -0,0 +1,160 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::synthetic_ingestion::generate_ingestion; +use crate::tps_tracker::TpsTracker; +use crate::{IndexerProgress, SyntheticIngestionConfig}; +use std::time::Duration; +use tokio::sync::watch; +use tracing::{error, info}; + +/// A trait that can be implemented on top of any indexer to benchmark its throughput. +/// It will generate synthetic transactions and checkpoints as ingestion source. +#[async_trait::async_trait] +pub trait BenchmarkableIndexer { + /// Allows the benchmark to subscribe and monitor the committed checkpoints progress. + /// This is needed both in order to log periodic throughput, but also + /// to know when the benchmark can stop. + fn subscribe_to_committed_checkpoints(&self) -> watch::Receiver>; + /// Start the indexer. Note that we only start a timer before calling this function. + /// So the implementation should only start the indexer when this function is called. + async fn start(&mut self); + /// Stop the indexer. This would allow the benchmark to exit. 
+ async fn stop(mut self); +} + +pub async fn run_benchmark( + config: SyntheticIngestionConfig, + mut indexer: I, +) -> u64 { + assert!( + config.starting_checkpoint > 0, + "Checkpoint 0 is reserved for genesis checkpoint" + ); + let expected_last_checkpoint = config.starting_checkpoint + config.num_checkpoints - 1; + if dir_not_empty(&config.ingestion_dir) { + info!("Starting from an existing ingestion workload directory"); + } else { + generate_ingestion(config.clone()); + } + + let mut rx = indexer.subscribe_to_committed_checkpoints(); + let mut tps_tracker = TpsTracker::new(Duration::from_secs(1)); + info!("Starting benchmark..."); + indexer.start().await; + + loop { + if let Err(err) = rx.changed().await { + error!("Error polling from watch channel, exiting early: {:?}", err); + break; + } + let committed_checkpoint = rx.borrow_and_update().clone(); + if let Some(checkpoint) = committed_checkpoint { + tps_tracker.update(checkpoint.clone()); + if checkpoint.checkpoint == expected_last_checkpoint { + break; + } + } + } + let seq = tps_tracker.finish(); + indexer.stop().await; + seq +} + +fn dir_not_empty(dir: &std::path::Path) -> bool { + dir.read_dir() + .map(|mut it| it.next().is_some()) + .unwrap_or(false) +} + +#[cfg(test)] +mod test { + use crate::benchmark::{run_benchmark, BenchmarkableIndexer}; + use crate::{IndexerProgress, SyntheticIngestionConfig}; + use std::path::PathBuf; + use std::time::Duration; + use sui_types::messages_checkpoint::CheckpointSequenceNumber; + use tokio::sync::watch; + + struct MockIndexer { + starting_checkpoint: CheckpointSequenceNumber, + ingestion_dir: PathBuf, + committed_checkpoint_tx: Option>>, + committed_checkpoint_rx: watch::Receiver>, + } + + impl MockIndexer { + fn new(starting_checkpoint: CheckpointSequenceNumber, ingestion_dir: PathBuf) -> Self { + let (committed_checkpoint_tx, committed_checkpoint_rx) = watch::channel(None); + Self { + starting_checkpoint, + ingestion_dir, + committed_checkpoint_tx: Some(committed_checkpoint_tx), + committed_checkpoint_rx, + } + } + } + + #[async_trait::async_trait] + impl BenchmarkableIndexer for MockIndexer { + fn subscribe_to_committed_checkpoints(&self) -> watch::Receiver> { + self.committed_checkpoint_rx.clone() + } + + async fn start(&mut self) { + let tx = self.committed_checkpoint_tx.take().unwrap(); + let mut checkpoint = self.starting_checkpoint; + let dir = self.ingestion_dir.clone(); + tokio::task::spawn(async move { + loop { + tokio::time::sleep(Duration::from_millis(100)).await; + let path = dir.join(format!("{}.chk", checkpoint)); + if std::fs::metadata(&path).is_err() { + break; + } + tx.send(Some(IndexerProgress { + checkpoint, + network_total_transactions: 0, + })) + .unwrap(); + checkpoint += 1; + } + }); + } + + async fn stop(mut self) {} + } + + #[tokio::test] + async fn test_run_ingestion_benchmark() { + let tmp_dir = tempfile::tempdir().unwrap(); + let config = SyntheticIngestionConfig { + ingestion_dir: tmp_dir.path().to_path_buf(), + checkpoint_size: 10, + num_checkpoints: 10, + starting_checkpoint: 1, + }; + let indexer = MockIndexer::new(config.starting_checkpoint, tmp_dir.path().to_path_buf()); + let last_checkpoint = + tokio::time::timeout(Duration::from_secs(10), run_benchmark(config, indexer)) + .await + .unwrap(); + assert_eq!(last_checkpoint, 10); + } + #[tokio::test] + async fn test_run_ingestion_benchmark_custom_starting_checkpoint() { + let tmp_dir = tempfile::tempdir().unwrap(); + let config = SyntheticIngestionConfig { + ingestion_dir: tmp_dir.path().to_path_buf(), + 
checkpoint_size: 10, + num_checkpoints: 10, + starting_checkpoint: 1000, + }; + let indexer = MockIndexer::new(config.starting_checkpoint, tmp_dir.path().to_path_buf()); + let last_checkpoint = + tokio::time::timeout(Duration::from_secs(10), run_benchmark(config, indexer)) + .await + .unwrap(); + assert_eq!(last_checkpoint, 1009); + } +} diff --git a/crates/sui-synthetic-ingestion/src/lib.rs b/crates/sui-synthetic-ingestion/src/lib.rs new file mode 100644 index 0000000000000..d7f52b8d34b3a --- /dev/null +++ b/crates/sui-synthetic-ingestion/src/lib.rs @@ -0,0 +1,29 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use std::path::PathBuf; +use sui_types::messages_checkpoint::CheckpointSequenceNumber; + +pub mod benchmark; +mod synthetic_ingestion; +mod tps_tracker; + +#[derive(Clone, Debug)] +pub struct SyntheticIngestionConfig { + /// Directory to write the ingestion data to. + pub ingestion_dir: PathBuf, + /// Number of transactions in a checkpoint. + pub checkpoint_size: u64, + /// Total number of synthetic checkpoints to generate. + pub num_checkpoints: u64, + /// Customize the first checkpoint sequence number to be committed. + /// This is useful if we want to benchmark on a non-empty database. + /// Note that this must be > 0, because the genesis checkpoint is always 0. + pub starting_checkpoint: CheckpointSequenceNumber, +} + +#[derive(Clone, Debug)] +pub struct IndexerProgress { + pub checkpoint: CheckpointSequenceNumber, + pub network_total_transactions: u64, +} diff --git a/crates/sui-synthetic-ingestion/src/synthetic_ingestion.rs b/crates/sui-synthetic-ingestion/src/synthetic_ingestion.rs new file mode 100644 index 0000000000000..1ce4848506a84 --- /dev/null +++ b/crates/sui-synthetic-ingestion/src/synthetic_ingestion.rs @@ -0,0 +1,56 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::SyntheticIngestionConfig; +use simulacrum::Simulacrum; +use sui_test_transaction_builder::TestTransactionBuilder; +use sui_types::crypto::get_account_key_pair; +use sui_types::effects::TransactionEffectsAPI; +use sui_types::gas_coin::MIST_PER_SUI; +use sui_types::utils::to_sender_signed_transaction; +use tracing::info; + +// TODO: Simulacrum does serial execution which could be slow if +// we need to generate a large number of transactions. +// We may want to make Simulacrum support parallel execution. + +pub(crate) fn generate_ingestion(config: SyntheticIngestionConfig) { + info!("Generating synthetic ingestion data. 
config: {:?}", config); + let timer = std::time::Instant::now(); + let mut sim = Simulacrum::new(); + let SyntheticIngestionConfig { + ingestion_dir, + checkpoint_size, + num_checkpoints, + starting_checkpoint, + } = config; + sim.set_data_ingestion_path(ingestion_dir); + sim.override_last_checkpoint_number(starting_checkpoint - 1); + + let gas_price = sim.reference_gas_price(); + let (sender, keypair) = get_account_key_pair(); + let effects = sim.request_gas(sender, MIST_PER_SUI * 1000000).unwrap(); + let mut gas_object = effects.created()[0].0; + let mut tx_count = 0; + for i in 0..num_checkpoints { + for _ in 0..checkpoint_size { + let tx_data = TestTransactionBuilder::new(sender, gas_object, gas_price) + .transfer_sui(Some(1), sender) + .build(); + let tx = to_sender_signed_transaction(tx_data, &keypair); + let (effects, _) = sim.execute_transaction(tx).unwrap(); + gas_object = effects.gas_object().0; + tx_count += 1; + } + sim.create_checkpoint(); + if (i + 1) % 100 == 0 { + info!("Generated {} checkpoints, {} transactions", i + 1, tx_count); + } + } + info!( + "Generated {} transactions in {} checkpoints. Total time: {:?}", + tx_count, + num_checkpoints, + timer.elapsed() + ); +} diff --git a/crates/sui-synthetic-ingestion/src/tps_tracker.rs b/crates/sui-synthetic-ingestion/src/tps_tracker.rs new file mode 100644 index 0000000000000..481e92e6fc8b9 --- /dev/null +++ b/crates/sui-synthetic-ingestion/src/tps_tracker.rs @@ -0,0 +1,80 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::IndexerProgress; +use std::time::{Duration, Instant}; +use sui_types::messages_checkpoint::CheckpointSequenceNumber; +use tracing::info; + +pub(crate) struct TpsTracker { + start_time: Instant, + starting_state: Option, + + prev_time: Instant, + prev_timed_state: Option, + + cur_state: Option, + + peak_tps: f64, + + /// Log time elapsed and TPS every log_frequency duration. 
+ log_frequency: Duration, +} + +impl TpsTracker { + pub fn new(log_frequency: Duration) -> Self { + let start_time = Instant::now(); + Self { + start_time, + starting_state: None, + prev_time: start_time, + prev_timed_state: None, + cur_state: None, + peak_tps: 0.0, + log_frequency, + } + } + + pub fn update(&mut self, cur_state: IndexerProgress) { + self.cur_state = Some(cur_state.clone()); + let cur_time = Instant::now(); + let Some(prev_timed_state) = self.prev_timed_state.clone() else { + self.prev_time = cur_time; + self.prev_timed_state = Some(cur_state.clone()); + self.start_time = cur_time; + self.starting_state = Some(cur_state); + return; + }; + let elapsed = cur_time - self.prev_time; + if elapsed < self.log_frequency { + return; + } + let tps = (cur_state.network_total_transactions + - prev_timed_state.network_total_transactions) as f64 + / elapsed.as_secs_f64(); + let cps = + (cur_state.checkpoint - prev_timed_state.checkpoint) as f64 / elapsed.as_secs_f64(); + info!( + "Last processed checkpoint: {}, Current TPS: {:.2}, CPS: {:.2}", + cur_state.checkpoint, tps, cps + ); + self.peak_tps = self.peak_tps.max(tps); + self.prev_time = cur_time; + self.prev_timed_state = Some(cur_state); + } + + pub fn finish(&mut self) -> CheckpointSequenceNumber { + let elapsed = Instant::now() - self.start_time; + let cur_state = self.cur_state.clone().unwrap(); + let starting_state = self.starting_state.clone().unwrap(); + let tps = (cur_state.network_total_transactions - starting_state.network_total_transactions) + as f64 + / elapsed.as_secs_f64(); + let cps = (cur_state.checkpoint - starting_state.checkpoint) as f64 / elapsed.as_secs_f64(); + info!( + "Benchmark completed. Total time: {:?}, Average TPS: {:.2}, CPS: {:.2}. Peak TPS: {:.2}", + elapsed, tps, cps, self.peak_tps, + ); + cur_state.checkpoint + } +} diff --git a/crates/sui-tls/src/lib.rs b/crates/sui-tls/src/lib.rs index 7b3b9c23f5796..7f40317d43303 100644 --- a/crates/sui-tls/src/lib.rs +++ b/crates/sui-tls/src/lib.rs @@ -5,10 +5,9 @@ mod acceptor; mod certgen; mod verifier; -pub const SUI_VALIDATOR_SERVER_NAME: &str = "sui"; - pub use acceptor::{TlsAcceptor, TlsConnectionInfo}; pub use certgen::SelfSignedCertificate; +use rustls::ClientConfig; pub use verifier::{ public_key_from_certificate, AllowAll, AllowPublicKeys, Allower, ClientCertVerifier, ServerCertVerifier, @@ -16,6 +15,46 @@ pub use verifier::{ pub use rustls; +use fastcrypto::ed25519::{Ed25519PrivateKey, Ed25519PublicKey}; +use tokio_rustls::rustls::ServerConfig; + +pub const SUI_VALIDATOR_SERVER_NAME: &str = "sui"; + +pub fn create_rustls_server_config( + private_key: Ed25519PrivateKey, + server_name: String, + allower: A, +) -> ServerConfig { + let verifier = ClientCertVerifier::new(allower, server_name.clone()); + // TODO: refactor to use key bytes + let self_signed_cert = SelfSignedCertificate::new(private_key, server_name.as_str()); + let tls_cert = self_signed_cert.rustls_certificate(); + let tls_private_key = self_signed_cert.rustls_private_key(); + let mut tls_config = verifier + .rustls_server_config(vec![tls_cert], tls_private_key) + .unwrap_or_else(|e| panic!("Failed to create TLS server config: {:?}", e)); + tls_config.alpn_protocols = vec![b"h2".to_vec()]; + tls_config +} + +pub fn create_rustls_client_config( + target_public_key: Ed25519PublicKey, + server_name: String, + client_key: Option, // optional self-signed cert for client verification +) -> ClientConfig { + let tls_config = ServerCertVerifier::new(target_public_key, server_name.clone()); + let 
tls_config = if let Some(private_key) = client_key { + let self_signed_cert = SelfSignedCertificate::new(private_key, server_name.as_str()); + let tls_cert = self_signed_cert.rustls_certificate(); + let tls_private_key = self_signed_cert.rustls_private_key(); + tls_config.rustls_client_config_with_client_auth(vec![tls_cert], tls_private_key) + } else { + tls_config.rustls_client_config_with_no_client_auth() + } + .unwrap_or_else(|e| panic!("Failed to create TLS client config: {e:?}")); + tls_config +} + #[cfg(test)] mod tests { use std::collections::BTreeSet; diff --git a/crates/sui-tls/src/verifier.rs b/crates/sui-tls/src/verifier.rs index 562cc34d48973..b1e87fbf88823 100644 --- a/crates/sui-tls/src/verifier.rs +++ b/crates/sui-tls/src/verifier.rs @@ -178,20 +178,30 @@ impl ServerCertVerifier { Self { public_key, name } } - pub fn rustls_client_config( + pub fn rustls_client_config_with_client_auth( self, certificates: Vec>, private_key: PrivateKeyDer<'static>, ) -> Result { - let mut config = rustls::ClientConfig::builder_with_provider(Arc::new( + rustls::ClientConfig::builder_with_provider(Arc::new( rustls::crypto::ring::default_provider(), )) .with_safe_default_protocol_versions()? .dangerous() .with_custom_certificate_verifier(std::sync::Arc::new(self)) - .with_client_auth_cert(certificates, private_key)?; - config.alpn_protocols = vec![b"h2".to_vec()]; - Ok(config) + .with_client_auth_cert(certificates, private_key) + } + + pub fn rustls_client_config_with_no_client_auth( + self, + ) -> Result { + Ok(rustls::ClientConfig::builder_with_provider(Arc::new( + rustls::crypto::ring::default_provider(), + )) + .with_safe_default_protocol_versions()? + .dangerous() + .with_custom_certificate_verifier(std::sync::Arc::new(self)) + .with_no_client_auth()) } } diff --git a/crates/sui-tool/Cargo.toml b/crates/sui-tool/Cargo.toml index cb9bff954df3e..e2d0ce18338df 100644 --- a/crates/sui-tool/Cargo.toml +++ b/crates/sui-tool/Cargo.toml @@ -47,4 +47,5 @@ sui-storage.workspace = true sui-types.workspace = true sui-archival.workspace = true sui-package-dump.workspace = true +sui-tls.workspace = true bin-version.workspace = true diff --git a/crates/sui-tool/src/lib.rs b/crates/sui-tool/src/lib.rs index 853445cf9f921..f73e73a9eef5a 100644 --- a/crates/sui-tool/src/lib.rs +++ b/crates/sui-tool/src/lib.rs @@ -106,8 +106,14 @@ async fn make_clients( for validator in active_validators { let net_addr = Multiaddr::try_from(validator.net_address).unwrap(); + // TODO: Enable TLS on this interface with below config, once support is rolled out to validators. 
+ // let tls_config = sui_tls::create_rustls_client_config( + // sui_types::crypto::NetworkPublicKey::from_bytes(&validator.network_pubkey_bytes)?, + // sui_tls::SUI_VALIDATOR_SERVER_NAME.to_string(), + // None, + // ); let channel = net_config - .connect_lazy(&net_addr) + .connect_lazy(&net_addr, None) .map_err(|err| anyhow!(err.to_string()))?; let client = NetworkAuthorityClient::new(channel); let public_key_bytes = @@ -498,8 +504,8 @@ pub(crate) fn make_anemo_config() -> anemo_cli::Config { .add_service( "Discovery", anemo_cli::ServiceInfo::new().add_method( - "GetKnownPeers", - anemo_cli::ron_method!(DiscoveryClient, get_known_peers, ()), + "GetKnownPeersV2", + anemo_cli::ron_method!(DiscoveryClient, get_known_peers_v2, ()), ), ) // Sui state sync diff --git a/crates/sui-transactional-test-runner/src/args.rs b/crates/sui-transactional-test-runner/src/args.rs index 2d73a54ea99d9..5e0e12299942d 100644 --- a/crates/sui-transactional-test-runner/src/args.rs +++ b/crates/sui-transactional-test-runner/src/args.rs @@ -5,10 +5,13 @@ use crate::test_adapter::{FakeID, SuiTestAdapter}; use anyhow::{bail, ensure}; use clap; use clap::{Args, Parser}; -use move_command_line_common::parser::{parse_u256, parse_u64}; -use move_command_line_common::values::{ParsableValue, ParsedValue}; -use move_command_line_common::{parser::Parser as MoveCLParser, values::ValueToken}; use move_compiler::editions::Flavor; +use move_core_types::parsing::{ + parser::Parser as MoveCLParser, + parser::{parse_u256, parse_u64}, + values::ValueToken, + values::{ParsableValue, ParsedValue}, +}; use move_core_types::runtime_value::{MoveStruct, MoveValue}; use move_core_types::u256::U256; use move_symbol_pool::Symbol; diff --git a/crates/sui-transactional-test-runner/src/lib.rs b/crates/sui-transactional-test-runner/src/lib.rs index 854ab9d096d81..abf62845c2cb6 100644 --- a/crates/sui-transactional-test-runner/src/lib.rs +++ b/crates/sui-transactional-test-runner/src/lib.rs @@ -15,6 +15,7 @@ use simulacrum::SimulatorStore; use simulator_persisted_store::PersistedStore; use std::path::Path; use std::sync::Arc; +use sui_core::authority::authority_per_epoch_store::CertLockGuard; use sui_core::authority::authority_test_utils::send_and_confirm_transaction_with_execution_error; use sui_core::authority::AuthorityState; use sui_json_rpc::authority_state::StateRead; @@ -142,7 +143,11 @@ impl TransactionalAdapter for ValidatorWithFullnode { ); let epoch_store = self.validator.load_epoch_store_one_call_per_task().clone(); - self.validator.read_objects_for_execution(&tx, &epoch_store) + self.validator.read_objects_for_execution( + &CertLockGuard::dummy_for_tests(), + &tx, + &epoch_store, + ) } fn prepare_txn( diff --git a/crates/sui-transactional-test-runner/src/programmable_transaction_test_parser/parser.rs b/crates/sui-transactional-test-runner/src/programmable_transaction_test_parser/parser.rs index 3dfd6ec45ce6e..eb9397e40e5ea 100644 --- a/crates/sui-transactional-test-runner/src/programmable_transaction_test_parser/parser.rs +++ b/crates/sui-transactional-test-runner/src/programmable_transaction_test_parser/parser.rs @@ -3,7 +3,7 @@ use std::{borrow::BorrowMut, marker::PhantomData, str::FromStr}; -use move_command_line_common::{ +use move_core_types::parsing::{ parser::{Parser, Token}, types::{ParsedType, TypeToken}, }; diff --git a/crates/sui-transactional-test-runner/src/programmable_transaction_test_parser/token.rs b/crates/sui-transactional-test-runner/src/programmable_transaction_test_parser/token.rs index 
145bc5347c499..d4d03475881ad 100644 --- a/crates/sui-transactional-test-runner/src/programmable_transaction_test_parser/token.rs +++ b/crates/sui-transactional-test-runner/src/programmable_transaction_test_parser/token.rs @@ -4,8 +4,8 @@ use std::fmt::{self, Display}; use anyhow::bail; -use move_command_line_common::parser::Token; use move_core_types::identifier; +use move_core_types::parsing::parser::Token; #[derive(Eq, PartialEq, Debug, Clone, Copy)] pub enum CommandToken { diff --git a/crates/sui-transactional-test-runner/src/test_adapter.rs b/crates/sui-transactional-test-runner/src/test_adapter.rs index 6ee27e331e740..f9113bb41b8da 100644 --- a/crates/sui-transactional-test-runner/src/test_adapter.rs +++ b/crates/sui-transactional-test-runner/src/test_adapter.rs @@ -15,15 +15,14 @@ use fastcrypto::encoding::{Base64, Encoding}; use fastcrypto::traits::ToFromBytes; use move_binary_format::CompiledModule; use move_bytecode_utils::module_cache::GetModule; -use move_command_line_common::{ - address::ParsedAddress, files::verify_and_create_named_address_mapping, -}; +use move_command_line_common::files::verify_and_create_named_address_mapping; use move_compiler::{ editions::{Edition, Flavor}, shared::{NumberFormat, NumericalAddress, PackageConfig, PackagePaths}, Flags, FullyCompiledProgram, }; use move_core_types::ident_str; +use move_core_types::parsing::address::ParsedAddress; use move_core_types::{ account_address::AccountAddress, identifier::IdentStr, diff --git a/crates/sui-types/Cargo.toml b/crates/sui-types/Cargo.toml index 3150c7c4238b7..fcb060ce2bd7c 100644 --- a/crates/sui-types/Cargo.toml +++ b/crates/sui-types/Cargo.toml @@ -101,8 +101,8 @@ harness = false [features] default = [] test-utils = [] -gas-profiler = [ - "move-vm-profiler/gas-profiler", - "move-vm-test-utils/gas-profiler", +tracing = [ + "move-vm-profiler/tracing", + "move-vm-test-utils/tracing", ] fuzzing = ["move-core-types/fuzzing"] diff --git a/crates/sui-types/src/coin.rs b/crates/sui-types/src/coin.rs index d1f19defc6cc3..6c2fc3abd6ece 100644 --- a/crates/sui-types/src/coin.rs +++ b/crates/sui-types/src/coin.rs @@ -67,18 +67,17 @@ impl Coin { /// If the given object is a Coin, deserialize its contents and extract the balance Ok(Some(u64)). /// If it's not a Coin, return Ok(None). /// The cost is 2 comparisons if not a coin, and deserialization if its a Coin. 
- pub fn extract_balance_if_coin(object: &Object) -> Result, bcs::Error> { - match &object.data { - Data::Move(move_obj) => { - if !move_obj.is_coin() { - return Ok(None); - } + pub fn extract_balance_if_coin(object: &Object) -> Result, bcs::Error> { + let Data::Move(obj) = &object.data else { + return Ok(None); + }; - let coin = Self::from_bcs_bytes(move_obj.contents())?; - Ok(Some(coin.value())) - } - _ => Ok(None), // package - } + let Some(type_) = obj.type_().coin_type_maybe() else { + return Ok(None); + }; + + let coin = Self::from_bcs_bytes(obj.contents())?; + Ok(Some((type_, coin.value()))) } pub fn id(&self) -> &ObjectID { diff --git a/crates/sui-types/src/committee.rs b/crates/sui-types/src/committee.rs index 906fd6ba1c94c..fea794ffd259c 100644 --- a/crates/sui-types/src/committee.rs +++ b/crates/sui-types/src/committee.rs @@ -3,7 +3,9 @@ // SPDX-License-Identifier: Apache-2.0 use super::base_types::*; -use crate::crypto::{random_committee_key_pairs_of_size, AuthorityKeyPair, AuthorityPublicKey}; +use crate::crypto::{ + random_committee_key_pairs_of_size, AuthorityKeyPair, AuthorityPublicKey, NetworkPublicKey, +}; use crate::error::{SuiError, SuiResult}; use crate::multiaddr::Multiaddr; use fastcrypto::traits::KeyPair; @@ -353,18 +355,17 @@ pub trait CommitteeTrait { fn weight(&self, author: &K) -> StakeUnit; } -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug)] pub struct NetworkMetadata { pub network_address: Multiaddr, pub narwhal_primary_address: Multiaddr, + pub network_public_key: Option, } -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug)] pub struct CommitteeWithNetworkMetadata { epoch_id: EpochId, validators: BTreeMap, - - #[serde(skip)] committee: OnceCell, } diff --git a/crates/sui-types/src/error.rs b/crates/sui-types/src/error.rs index 7d97c8ffa7509..f89e948aef12a 100644 --- a/crates/sui-types/src/error.rs +++ b/crates/sui-types/src/error.rs @@ -412,8 +412,9 @@ pub enum SuiError { }, #[error("Signatures in a certificate must form a quorum")] CertificateRequiresQuorum, - #[error("Transaction certificate processing failed: {err}")] - ErrorWhileProcessingCertificate { err: String }, + #[allow(non_camel_case_types)] + #[error("DEPRECATED")] + DEPRECATED_ErrorWhileProcessingCertificate, #[error( "Failed to get a quorum of signed effects when processing transaction: {effects_map:?}" )] diff --git a/crates/sui-types/src/executable_transaction.rs b/crates/sui-types/src/executable_transaction.rs index 964bf9b235947..493204a8e918f 100644 --- a/crates/sui-types/src/executable_transaction.rs +++ b/crates/sui-types/src/executable_transaction.rs @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 use crate::messages_checkpoint::CheckpointSequenceNumber; -use crate::messages_consensus::{AuthorityIndex, Round, TransactionIndex}; use crate::{committee::EpochId, crypto::AuthorityStrongQuorumSignInfo}; use crate::message_envelope::{Envelope, TrustedEnvelope, VerifiedEnvelope}; @@ -24,9 +23,8 @@ pub enum CertificateProof { QuorumExecuted(EpochId), /// Transaction generated by the system, for example Clock update transaction SystemTransaction(EpochId), - /// Validity was proven through consensus. Round, authority and transaction index indicate - /// the position of the transaction in the consensus DAG for debugging. - Consensus(EpochId, Round, AuthorityIndex, TransactionIndex), + /// Validity was proven through voting in consensus. 
+ Consensus(EpochId), } impl CertificateProof { @@ -42,13 +40,8 @@ impl CertificateProof { Self::SystemTransaction(epoch) } - pub fn new_from_consensus( - epoch: EpochId, - round: Round, - authority: AuthorityIndex, - transaction_index: TransactionIndex, - ) -> Self { - Self::Consensus(epoch, round, authority, transaction_index) + pub fn new_from_consensus(epoch: EpochId) -> Self { + Self::Consensus(epoch) } pub fn epoch(&self) -> EpochId { @@ -56,7 +49,7 @@ impl CertificateProof { Self::Checkpoint(epoch, _) | Self::QuorumExecuted(epoch) | Self::SystemTransaction(epoch) - | Self::Consensus(epoch, _, _, _) => *epoch, + | Self::Consensus(epoch) => *epoch, Self::Certified(sig) => sig.epoch, } } diff --git a/crates/sui-types/src/lib.rs b/crates/sui-types/src/lib.rs index 4823253ab985f..0a3c4dc45fe90 100644 --- a/crates/sui-types/src/lib.rs +++ b/crates/sui-types/src/lib.rs @@ -153,7 +153,7 @@ pub fn sui_framework_address_concat_string(suffix: &str) -> String { /// Parsing succeeds if and only if `s` matches one of these formats exactly, with no remaining /// suffix. This function is intended for use within the authority codebases. pub fn parse_sui_address(s: &str) -> anyhow::Result { - use move_command_line_common::address::ParsedAddress; + use move_core_types::parsing::address::ParsedAddress; Ok(ParsedAddress::parse(s)? .into_account_address(&resolve_address)? .into()) @@ -163,7 +163,7 @@ pub fn parse_sui_address(s: &str) -> anyhow::Result { /// module name (an identifier). Parsing succeeds if and only if `s` matches this format exactly, /// with no remaining input. This function is intended for use within the authority codebases. pub fn parse_sui_module_id(s: &str) -> anyhow::Result { - use move_command_line_common::types::ParsedModuleId; + use move_core_types::parsing::types::ParsedModuleId; ParsedModuleId::parse(s)?.into_module_id(&resolve_address) } @@ -172,7 +172,7 @@ pub fn parse_sui_module_id(s: &str) -> anyhow::Result { /// format exactly, with no remaining input. This function is intended for use within the authority /// codebases. pub fn parse_sui_fq_name(s: &str) -> anyhow::Result<(ModuleId, String)> { - use move_command_line_common::types::ParsedFqName; + use move_core_types::parsing::types::ParsedFqName; ParsedFqName::parse(s)?.into_fq_name(&resolve_address) } @@ -181,7 +181,7 @@ pub fn parse_sui_fq_name(s: &str) -> anyhow::Result<(ModuleId, String)> { /// brackets). Parsing succeeds if and only if `s` matches this format exactly, with no remaining /// input. This function is intended for use within the authority codebase. pub fn parse_sui_struct_tag(s: &str) -> anyhow::Result { - use move_command_line_common::types::ParsedStructType; + use move_core_types::parsing::types::ParsedStructType; ParsedStructType::parse(s)?.into_struct_tag(&resolve_address) } @@ -189,7 +189,7 @@ pub fn parse_sui_struct_tag(s: &str) -> anyhow::Result { /// vector with a type parameter. Parsing succeeds if and only if `s` matches this format exactly, /// with no remaining input. This function is intended for use within the authority codebase. 
pub fn parse_sui_type_tag(s: &str) -> anyhow::Result { - use move_command_line_common::types::ParsedType; + use move_core_types::parsing::types::ParsedType; ParsedType::parse(s)?.into_type_tag(&resolve_address) } @@ -384,7 +384,7 @@ mod tests { #[test] fn test_parse_sui_struct_tag_long_account_addr() { let result = parse_sui_struct_tag( - "0x00000000000000000000000000000000000000000000000000000000000000002::sui::SUI", + "0x0000000000000000000000000000000000000000000000000000000000000002::sui::SUI", ) .expect("should not error"); diff --git a/crates/sui-types/src/message_envelope.rs b/crates/sui-types/src/message_envelope.rs index 4a15a39ccef97..37abb210c344f 100644 --- a/crates/sui-types/src/message_envelope.rs +++ b/crates/sui-types/src/message_envelope.rs @@ -10,7 +10,6 @@ use crate::crypto::{ use crate::error::SuiResult; use crate::executable_transaction::CertificateProof; use crate::messages_checkpoint::CheckpointSequenceNumber; -use crate::messages_consensus::{AuthorityIndex, Round, TransactionIndex}; use crate::transaction::SenderSignedData; use fastcrypto::traits::KeyPair; use once_cell::sync::OnceCell; @@ -456,9 +455,6 @@ impl VerifiedEnvelope { pub fn new_from_consensus( transaction: VerifiedEnvelope, epoch: EpochId, - round: Round, - authority: AuthorityIndex, - transaction_index: TransactionIndex, ) -> Self { let inner = transaction.into_inner(); let Envelope { @@ -469,12 +465,7 @@ impl VerifiedEnvelope { VerifiedEnvelope::new_unchecked(Envelope { digest, data, - auth_signature: CertificateProof::new_from_consensus( - epoch, - round, - authority, - transaction_index, - ), + auth_signature: CertificateProof::new_from_consensus(epoch), }) } diff --git a/crates/sui-types/src/mock_checkpoint_builder.rs b/crates/sui-types/src/mock_checkpoint_builder.rs index d97a186c46ce9..da086f770b53b 100644 --- a/crates/sui-types/src/mock_checkpoint_builder.rs +++ b/crates/sui-types/src/mock_checkpoint_builder.rs @@ -63,6 +63,17 @@ impl MockCheckpointBuilder { .push(VerifiedExecutionData::new(transaction, effects)) } + pub fn override_last_checkpoint_number( + &mut self, + checkpoint_number: u64, + validator_keys: &impl ValidatorKeypairProvider, + ) { + let mut summary = self.previous_checkpoint.data().clone(); + summary.sequence_number = checkpoint_number; + let checkpoint = Self::create_certified_checkpoint(validator_keys, summary); + self.previous_checkpoint = checkpoint; + } + /// Builds a checkpoint using internally buffered transactions. pub fn build( &mut self, diff --git a/crates/sui-types/src/move_package.rs b/crates/sui-types/src/move_package.rs index 787c83adc3da6..8b12643e4ec16 100644 --- a/crates/sui-types/src/move_package.rs +++ b/crates/sui-types/src/move_package.rs @@ -483,6 +483,10 @@ impl MovePackage { /// The ObjectID that this package's modules believe they are from, at runtime (can differ from /// `MovePackage::id()` in the case of package upgrades). 
pub fn original_package_id(&self) -> ObjectID { + if self.version == OBJECT_START_VERSION { + // for a non-upgraded package, original ID is just the package ID + return self.id; + } let bytes = self.module_map.values().next().expect("Empty module map"); let module = CompiledModule::deserialize_with_defaults(bytes) .expect("A Move package contains a module that cannot be deserialized"); diff --git a/crates/sui-types/src/passkey_authenticator.rs b/crates/sui-types/src/passkey_authenticator.rs index d67ac5a057741..1c7a5dcd5f38f 100644 --- a/crates/sui-types/src/passkey_authenticator.rs +++ b/crates/sui-types/src/passkey_authenticator.rs @@ -20,8 +20,7 @@ use once_cell::sync::OnceCell; use passkey_types::webauthn::{ClientDataType, CollectedClientData}; use schemars::JsonSchema; use serde::{Deserialize, Deserializer, Serialize}; -use shared_crypto::intent::Intent; -use shared_crypto::intent::{IntentMessage, INTENT_PREFIX_LENGTH}; +use shared_crypto::intent::IntentMessage; use std::hash::Hash; use std::hash::Hasher; use std::sync::Arc; @@ -55,13 +54,10 @@ pub struct PasskeyAuthenticator { #[serde(skip)] pk: Secp256r1PublicKey, - /// Valid intent parsed from the first 3 bytes of `client_data_json.challenge`. + /// Decoded `client_data_json.challenge` which is expected to be the signing message + /// `hash(Intent | bcs_message)` #[serde(skip)] - intent: Intent, - - /// Valid digest parsed from the last 32 bytes of `client_data_json.challenge`. - #[serde(skip)] - digest: [u8; DefaultHash::OUTPUT_SIZE], + challenge: [u8; DefaultHash::OUTPUT_SIZE], /// Initialization of bytes for passkey in serialized form. #[serde(skip)] @@ -94,22 +90,13 @@ impl TryFrom for PasskeyAuthenticator { }); }; - let parsed_challenge = Base64UrlUnpadded::decode_vec(&client_data_json_parsed.challenge) + let challenge = Base64UrlUnpadded::decode_vec(&client_data_json_parsed.challenge) .map_err(|_| SuiError::InvalidSignature { error: "Invalid encoded challenge".to_string(), - })?; - - let intent = - Intent::from_bytes(&parsed_challenge[..INTENT_PREFIX_LENGTH]).map_err(|_| { - SuiError::InvalidSignature { - error: "Invalid intent from challenge".to_string(), - } - })?; - - let digest = parsed_challenge[INTENT_PREFIX_LENGTH..] + })? .try_into() .map_err(|_| SuiError::InvalidSignature { - error: "Invalid digest from challenge".to_string(), + error: "Invalid encoded challenge".to_string(), })?; if raw.user_signature.scheme() != SignatureScheme::Secp256r1 { @@ -134,8 +121,7 @@ impl TryFrom for PasskeyAuthenticator { client_data_json: raw.client_data_json, signature, pk, - intent, - digest, + challenge, bytes: OnceCell::new(), }) } @@ -235,7 +221,7 @@ impl AuthenticatorTrait for PasskeyAuthenticator { T: Serialize, { // Check the intent and signing is consisted from what's parsed from client_data_json.challenge - if intent_msg.intent != self.intent || to_signing_digest(intent_msg) != self.digest { + if self.challenge != to_signing_message(intent_msg) { return Err(SuiError::InvalidSignature { error: "Invalid challenge".to_string(), }); @@ -289,26 +275,12 @@ impl AsRef<[u8]> for PasskeyAuthenticator { .expect("OnceCell invariant violated") } } -/// Compute the digest that the signature committed over as `intent || hash(tx_data)`, total -/// of 3 + 32 = 35 bytes. 
-pub fn to_signing_message( - intent_msg: &IntentMessage, -) -> [u8; INTENT_PREFIX_LENGTH + DefaultHash::OUTPUT_SIZE] { - let mut extended = [0; INTENT_PREFIX_LENGTH + DefaultHash::OUTPUT_SIZE]; - extended[..INTENT_PREFIX_LENGTH].copy_from_slice(&intent_msg.intent.to_bytes()); - extended[INTENT_PREFIX_LENGTH..].copy_from_slice(&to_signing_digest(intent_msg)); - extended -} -/// Compute the BCS hash of the value in intent message. In the case of transaction data, -/// this is the BCS hash of `struct TransactionData`, different from the transaction digest -/// itself that computes the BCS hash of the Rust type prefix and `struct TransactionData`. -/// (See `fn digest` in `impl Message for SenderSignedData`). -pub fn to_signing_digest( +/// Compute the signing digest that the signature committed over as `hash(intent || tx_data)` +pub fn to_signing_message( intent_msg: &IntentMessage, ) -> [u8; DefaultHash::OUTPUT_SIZE] { let mut hasher = DefaultHash::default(); - bcs::serialize_into(&mut hasher, &intent_msg.value) - .expect("Message serialization should not fail"); + bcs::serialize_into(&mut hasher, intent_msg).expect("Message serialization should not fail"); hasher.finalize().digest } diff --git a/crates/sui-types/src/storage/mod.rs b/crates/sui-types/src/storage/mod.rs index 93cb31330eb24..bdb9462b0fa3e 100644 --- a/crates/sui-types/src/storage/mod.rs +++ b/crates/sui-types/src/storage/mod.rs @@ -609,5 +609,5 @@ pub trait GetSharedLocks: Send + Sync { fn get_shared_locks( &self, key: &TransactionKey, - ) -> Result, SuiError>; + ) -> SuiResult>>; } diff --git a/crates/sui-types/src/sui_system_state/epoch_start_sui_system_state.rs b/crates/sui-types/src/sui_system_state/epoch_start_sui_system_state.rs index 9c7736ec470b9..118f15c89b420 100644 --- a/crates/sui-types/src/sui_system_state/epoch_start_sui_system_state.rs +++ b/crates/sui-types/src/sui_system_state/epoch_start_sui_system_state.rs @@ -159,6 +159,7 @@ impl EpochStartSystemStateTrait for EpochStartSystemStateV1 { NetworkMetadata { network_address: validator.sui_net_address.clone(), narwhal_primary_address: validator.narwhal_primary_address.clone(), + network_public_key: Some(validator.narwhal_network_pubkey.clone()), }, ), ) diff --git a/crates/sui-types/src/sui_system_state/simtest_sui_system_state_inner.rs b/crates/sui-types/src/sui_system_state/simtest_sui_system_state_inner.rs index 0a365fec9c972..380ce1708bda5 100644 --- a/crates/sui-types/src/sui_system_state/simtest_sui_system_state_inner.rs +++ b/crates/sui-types/src/sui_system_state/simtest_sui_system_state_inner.rs @@ -175,6 +175,7 @@ impl SuiSystemStateTrait for SimTestSuiSystemStateInnerV1 { NetworkMetadata { network_address: verified_metadata.net_address.clone(), narwhal_primary_address: verified_metadata.primary_address.clone(), + network_public_key: Some(verified_metadata.network_pubkey.clone()), }, ), ) @@ -291,6 +292,7 @@ impl SuiSystemStateTrait for SimTestSuiSystemStateInnerShallowV2 { NetworkMetadata { network_address: verified_metadata.net_address.clone(), narwhal_primary_address: verified_metadata.primary_address.clone(), + network_public_key: Some(verified_metadata.network_pubkey.clone()), }, ), ) @@ -436,6 +438,7 @@ impl SuiSystemStateTrait for SimTestSuiSystemStateInnerDeepV2 { NetworkMetadata { network_address: verified_metadata.net_address.clone(), narwhal_primary_address: verified_metadata.primary_address.clone(), + network_public_key: Some(verified_metadata.network_pubkey.clone()), }, ), ) diff --git 
a/crates/sui-types/src/sui_system_state/sui_system_state_inner_v1.rs b/crates/sui-types/src/sui_system_state/sui_system_state_inner_v1.rs index 9eeeb145f9828..c759b9254490e 100644 --- a/crates/sui-types/src/sui_system_state/sui_system_state_inner_v1.rs +++ b/crates/sui-types/src/sui_system_state/sui_system_state_inner_v1.rs @@ -553,6 +553,7 @@ impl SuiSystemStateTrait for SuiSystemStateInnerV1 { NetworkMetadata { network_address: verified_metadata.net_address.clone(), narwhal_primary_address: verified_metadata.primary_address.clone(), + network_public_key: Some(verified_metadata.network_pubkey.clone()), }, ), ) diff --git a/crates/sui-types/src/sui_system_state/sui_system_state_inner_v2.rs b/crates/sui-types/src/sui_system_state/sui_system_state_inner_v2.rs index f0863c2119466..1b8ce5f75d6b9 100644 --- a/crates/sui-types/src/sui_system_state/sui_system_state_inner_v2.rs +++ b/crates/sui-types/src/sui_system_state/sui_system_state_inner_v2.rs @@ -132,6 +132,7 @@ impl SuiSystemStateTrait for SuiSystemStateInnerV2 { NetworkMetadata { network_address: verified_metadata.net_address.clone(), narwhal_primary_address: verified_metadata.primary_address.clone(), + network_public_key: Some(verified_metadata.network_pubkey.clone()), }, ), ) diff --git a/crates/sui-types/src/sui_system_state/sui_system_state_summary.rs b/crates/sui-types/src/sui_system_state/sui_system_state_summary.rs index 0b76e4c344b02..525650a730157 100644 --- a/crates/sui-types/src/sui_system_state/sui_system_state_summary.rs +++ b/crates/sui-types/src/sui_system_state/sui_system_state_summary.rs @@ -4,6 +4,7 @@ use super::{SuiSystemState, SuiSystemStateTrait}; use crate::base_types::{AuthorityName, ObjectID, SuiAddress}; use crate::committee::{CommitteeWithNetworkMetadata, NetworkMetadata}; +use crate::crypto::NetworkPublicKey; use crate::dynamic_field::get_dynamic_field_from_store; use crate::error::SuiError; use crate::id::ID; @@ -202,6 +203,10 @@ impl SuiSystemStateSummary { validator.primary_address.clone(), ) .unwrap(), + network_public_key: NetworkPublicKey::from_bytes( + &validator.network_pubkey_bytes, + ) + .ok(), }, ), ) diff --git a/crates/sui-types/src/unit_tests/passkey_authenticator_test.rs b/crates/sui-types/src/unit_tests/passkey_authenticator_test.rs index a42d73f7f9eda..20e8b0f7fad63 100644 --- a/crates/sui-types/src/unit_tests/passkey_authenticator_test.rs +++ b/crates/sui-types/src/unit_tests/passkey_authenticator_test.rs @@ -1,7 +1,7 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use std::{str::FromStr, sync::Arc}; +use std::sync::Arc; use super::to_signing_message; use crate::crypto::DefaultHash; @@ -15,10 +15,9 @@ use crate::{ signature_verification::VerifiedDigestCache, transaction::{TransactionData, TEST_ONLY_GAS_UNIT_FOR_TRANSFER}, }; -use fastcrypto::encoding::{Encoding, Hex}; use fastcrypto::hash::HashFunction; use fastcrypto::rsa::{Base64UrlUnpadded, Encoding as _}; -use fastcrypto::{encoding::Base64, traits::ToFromBytes}; +use fastcrypto::traits::ToFromBytes; use p256::pkcs8::DecodePublicKey; use passkey_authenticator::{Authenticator, UserValidationMethod}; use passkey_client::Client; @@ -33,7 +32,7 @@ use passkey_types::{ }, Bytes, Passkey, }; -use shared_crypto::intent::{Intent, IntentMessage, INTENT_PREFIX_LENGTH}; +use shared_crypto::intent::{Intent, IntentMessage}; use url::Url; /// Helper struct to initialize passkey client. 
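
Note on the passkey change above: the WebAuthn `challenge` is now expected to be the full 32-byte signing digest (the hash of the BCS-serialized `IntentMessage`), rather than the old 35-byte `intent || hash(tx_data)` value. A minimal client-side sketch of deriving that challenge, assuming the crates this patch already uses (`shared-crypto`, `fastcrypto`, `bcs`), with `Blake2b256` standing in for `DefaultHash` and `passkey_challenge` being a hypothetical helper name, not part of the patch:

```rust
use fastcrypto::hash::{Blake2b256, HashFunction};
use fastcrypto::rsa::{Base64UrlUnpadded, Encoding as _};
use shared_crypto::intent::{Intent, IntentMessage};
use sui_types::transaction::TransactionData;

/// Sketch: compute the base64url-unpadded challenge to embed in client_data_json,
/// mirroring the new `to_signing_message`: the hash of the BCS-serialized
/// IntentMessage, i.e. hash(intent || bcs(tx_data)).
fn passkey_challenge(tx_data: TransactionData) -> String {
    let intent_msg = IntentMessage::new(Intent::sui_transaction(), tx_data);
    let mut hasher = Blake2b256::default();
    bcs::serialize_into(&mut hasher, &intent_msg).expect("serialization should not fail");
    let digest = hasher.finalize().digest; // 32 bytes
    Base64UrlUnpadded::encode_string(&digest)
}
```

The verifier then recomputes the same digest via `to_signing_message(intent_msg)` and compares it against the decoded `challenge` field, as shown in the hunk above.
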
@@ -261,7 +260,7 @@ async fn test_passkey_fails_invalid_json() { error: "Invalid client data json".to_string() } ); - const CORRECT_LEN: usize = INTENT_PREFIX_LENGTH + DefaultHash::OUTPUT_SIZE; + const CORRECT_LEN: usize = DefaultHash::OUTPUT_SIZE; let client_data_json_too_short = format!( r#"{{"type":"webauthn.get", "challenge":"{}","origin":"http://localhost:5173","crossOrigin":false, "unknown": "unknown"}}"#, Base64UrlUnpadded::encode_string(&[0; CORRECT_LEN - 1]) @@ -341,56 +340,56 @@ async fn test_passkey_fails_wrong_client_data_type() { ); } -#[tokio::test] -async fn test_passkey_fails_not_normalized_signature() { - // crafts a particular not normalized signature, fails to verify. this is produced from typescript client https://github.com/joyqvq/sui-webauthn-poc/tree/joy/tx-example - let tx_data: TransactionData = bcs::from_bytes(&Base64::decode("AAAAAHaTZLc0GGZ6RNYAqPC8LWZV7xHO+54zf71arV1MwFUtAcDum6pkbPZZN/iYq0zJpOxiV2wrZAnVU0bnNpOjombGAgAAAAAAAAAgAIiQFrz1abd2rNdo76dQS026yMAS1noA7FiGsggyt9V2k2S3NBhmekTWAKjwvC1mVe8RzvueM3+9Wq1dTMBVLegDAAAAAAAAgIQeAAAAAAAA").unwrap()).unwrap(); - let response = PasskeyResponse:: { - user_sig_bytes: Hex::decode("02bbd02ace0bad3b32eb3a891dc5c85e56274f52695d24db41b247ec694d1531d6fe1a5bec11a8063d1eb0512e7971bfd23395c2cb8862f73049d0f78fd204c6d602276d5f3a22f3e698cdd2272a63da8bfdd9344de73312c7f7f9eca21bfc304f2e").unwrap(), - authenticator_data: Hex::decode("49960de5880e8c687434170f6476605b8fe4aeb9a28632c7995cf3ba831d97631d00000000").unwrap(), - client_data_json: r#"{"type":"webauthn.get","challenge":"AAAAZgUD1inhS1l9qUfZePaivu6IbIo_SxCGmYcfTwrmcFU","origin":"http://localhost:5173","crossOrigin":false}"#.to_string(), - intent_msg: IntentMessage::new(Intent::sui_transaction(), tx_data), - sender: SuiAddress::from_str("0x769364b73418667a44d600a8f0bc2d6655ef11cefb9e337fbd5aad5d4cc0552d").unwrap() - }; - let sig = GenericSignature::PasskeyAuthenticator( - PasskeyAuthenticator::new_for_testing( - response.authenticator_data, - response.client_data_json, - Signature::from_bytes(&response.user_sig_bytes).unwrap(), - ) - .unwrap(), - ); - - let res = sig.verify_authenticator( - &response.intent_msg, - response.sender, - 0, - &Default::default(), - Arc::new(VerifiedDigestCache::new_empty()), - ); - let err = res.unwrap_err(); - assert_eq!( - err, - SuiError::InvalidSignature { - error: "Fails to verify".to_string() - } - ); -} - -#[tokio::test] -async fn test_real_passkey_output() { - // response from a real passkey authenticator created in iCloud, from typescript client: https://github.com/joyqvq/sui-webauthn-poc/tree/joy/tx-example - let address = - SuiAddress::from_str("0xac8564f638fbf673fc92eb85b5abe5f7c29bdaa60a4a10329868fbe6c551dda2") - .unwrap(); - let sig = GenericSignature::from_bytes(&Base64::decode("BiVJlg3liA6MaHQ0Fw9kdmBbj+SuuaKGMseZXPO6gx2XYx0AAAAAigF7InR5cGUiOiJ3ZWJhdXRobi5nZXQiLCJjaGFsbGVuZ2UiOiJBQUFBdF9taklCMXZiVnBZTTZXVjZZX29peDZKOGFOXzlzYjhTS0ZidWtCZmlRdyIsIm9yaWdpbiI6Imh0dHA6Ly9sb2NhbGhvc3Q6NTE3MyIsImNyb3NzT3JpZ2luIjpmYWxzZX1iApjskL9Xyfopyg9Av7MSrcchSpfWqAYoJ+qfSId4gNmoQ1YNgj2alDpRIbq9kthmyGY25+k24FrW114PEoy5C+8DPRcOCTtACi3ZywtZ4UILhwV+Suh79rWtbKqDqhBQwxM=").unwrap()).unwrap(); - let tx_data: TransactionData = bcs::from_bytes(&Base64::decode("AAAAAKyFZPY4+/Zz/JLrhbWr5ffCm9qmCkoQMpho++bFUd2iAUwOMmeNHuxq2hS4PvO1uivs9exQGefW2wNQAt7tRkkdAgAAAAAAAAAgCsJHAaWbb8oUlZsGdsyW3Atf3d51wBEr9HLkrBF0/UushWT2OPv2c/yS64W1q+X3wpvapgpKEDKYaPvmxVHdougDAAAAAAAAgIQeAAAAAAAA").unwrap()).unwrap(); - let res = sig.verify_authenticator( - 
&IntentMessage::new(Intent::sui_transaction(), tx_data), - address, - 0, - &Default::default(), - Arc::new(VerifiedDigestCache::new_empty()), - ); - assert!(res.is_ok()); -} +// #[tokio::test] +// async fn test_passkey_fails_not_normalized_signature() { +// // crafts a particular not normalized signature, fails to verify. this is produced from typescript client https://github.com/joyqvq/sui-webauthn-poc/tree/joy/tx-example +// let tx_data: TransactionData = bcs::from_bytes(&Base64::decode("AAAAAHaTZLc0GGZ6RNYAqPC8LWZV7xHO+54zf71arV1MwFUtAcDum6pkbPZZN/iYq0zJpOxiV2wrZAnVU0bnNpOjombGAgAAAAAAAAAgAIiQFrz1abd2rNdo76dQS026yMAS1noA7FiGsggyt9V2k2S3NBhmekTWAKjwvC1mVe8RzvueM3+9Wq1dTMBVLegDAAAAAAAAgIQeAAAAAAAA").unwrap()).unwrap(); +// let response = PasskeyResponse:: { +// user_sig_bytes: Hex::decode("02bbd02ace0bad3b32eb3a891dc5c85e56274f52695d24db41b247ec694d1531d6fe1a5bec11a8063d1eb0512e7971bfd23395c2cb8862f73049d0f78fd204c6d602276d5f3a22f3e698cdd2272a63da8bfdd9344de73312c7f7f9eca21bfc304f2e").unwrap(), +// authenticator_data: Hex::decode("49960de5880e8c687434170f6476605b8fe4aeb9a28632c7995cf3ba831d97631d00000000").unwrap(), +// client_data_json: r#"{"type":"webauthn.get","challenge":"AAAAZgUD1inhS1l9qUfZePaivu6IbIo_SxCGmYcfTwrmcFU","origin":"http://localhost:5173","crossOrigin":false}"#.to_string(), +// intent_msg: IntentMessage::new(Intent::sui_transaction(), tx_data), +// sender: SuiAddress::from_str("0x769364b73418667a44d600a8f0bc2d6655ef11cefb9e337fbd5aad5d4cc0552d").unwrap() +// }; +// let sig = GenericSignature::PasskeyAuthenticator( +// PasskeyAuthenticator::new_for_testing( +// response.authenticator_data, +// response.client_data_json, +// Signature::from_bytes(&response.user_sig_bytes).unwrap(), +// ) +// .unwrap(), +// ); + +// let res = sig.verify_authenticator( +// &response.intent_msg, +// response.sender, +// 0, +// &Default::default(), +// Arc::new(VerifiedDigestCache::new_empty()), +// ); +// let err = res.unwrap_err(); +// assert_eq!( +// err, +// SuiError::InvalidSignature { +// error: "Fails to verify".to_string() +// } +// ); +// } + +// #[tokio::test] +// async fn test_real_passkey_output() { +// // response from a real passkey authenticator created in iCloud, from typescript client: https://github.com/joyqvq/sui-webauthn-poc/tree/joy/tx-example +// let address = +// SuiAddress::from_str("0xac8564f638fbf673fc92eb85b5abe5f7c29bdaa60a4a10329868fbe6c551dda2") +// .unwrap(); +// let sig = GenericSignature::from_bytes(&Base64::decode("BiVJlg3liA6MaHQ0Fw9kdmBbj+SuuaKGMseZXPO6gx2XYx0AAAAAigF7InR5cGUiOiJ3ZWJhdXRobi5nZXQiLCJjaGFsbGVuZ2UiOiJBQUFBdF9taklCMXZiVnBZTTZXVjZZX29peDZKOGFOXzlzYjhTS0ZidWtCZmlRdyIsIm9yaWdpbiI6Imh0dHA6Ly9sb2NhbGhvc3Q6NTE3MyIsImNyb3NzT3JpZ2luIjpmYWxzZX1iApjskL9Xyfopyg9Av7MSrcchSpfWqAYoJ+qfSId4gNmoQ1YNgj2alDpRIbq9kthmyGY25+k24FrW114PEoy5C+8DPRcOCTtACi3ZywtZ4UILhwV+Suh79rWtbKqDqhBQwxM=").unwrap()).unwrap(); +// let tx_data: TransactionData = bcs::from_bytes(&Base64::decode("AAAAAKyFZPY4+/Zz/JLrhbWr5ffCm9qmCkoQMpho++bFUd2iAUwOMmeNHuxq2hS4PvO1uivs9exQGefW2wNQAt7tRkkdAgAAAAAAAAAgCsJHAaWbb8oUlZsGdsyW3Atf3d51wBEr9HLkrBF0/UushWT2OPv2c/yS64W1q+X3wpvapgpKEDKYaPvmxVHdougDAAAAAAAAgIQeAAAAAAAA").unwrap()).unwrap(); +// let res = sig.verify_authenticator( +// &IntentMessage::new(Intent::sui_transaction(), tx_data), +// address, +// 0, +// &Default::default(), +// Arc::new(VerifiedDigestCache::new_empty()), +// ); +// assert!(res.is_ok()); +// } diff --git a/crates/sui/Cargo.toml b/crates/sui/Cargo.toml index 5d19ba411cc3c..9078ceac6b6dd 100644 --- a/crates/sui/Cargo.toml +++ 
b/crates/sui/Cargo.toml @@ -20,6 +20,7 @@ bin-version.workspace = true bip32.workspace = true camino.workspace = true clap.workspace = true +codespan-reporting.workspace = true datatest-stable.workspace = true futures.workspace = true http.workspace = true @@ -56,8 +57,8 @@ sui-cluster-test.workspace = true sui-execution = { path = "../../sui-execution" } sui-faucet.workspace = true sui-swarm-config.workspace = true -sui-graphql-rpc = {workspace = true } -sui-indexer = { workspace = true } +sui-graphql-rpc.workspace = true +sui-indexer.workspace = true sui-genesis-builder.workspace = true sui-types.workspace = true sui-json.workspace = true @@ -74,6 +75,7 @@ shared-crypto.workspace = true sui-replay.workspace = true sui-transaction-builder.workspace = true move-binary-format.workspace = true +move-bytecode-source-map.workspace = true test-cluster.workspace = true fastcrypto.workspace = true @@ -92,9 +94,11 @@ move-analyzer.workspace = true move-bytecode-verifier-meter.workspace = true move-core-types.workspace = true move-package.workspace = true +move-compiler.workspace = true csv.workspace = true move-vm-profiler.workspace = true move-vm-config.workspace = true +move-ir-types.workspace = true move-command-line-common.workspace = true [target.'cfg(not(target_env = "msvc"))'.dependencies] @@ -129,7 +133,7 @@ name = "ptb_files_tests" harness = false [features] -gas-profiler = [ - "sui-types/gas-profiler", - "sui-execution/gas-profiler", +tracing = [ + "sui-types/tracing", + "sui-execution/tracing", ] diff --git a/crates/sui/src/client_commands.rs b/crates/sui/src/client_commands.rs index 9f97b95c7b0f0..5d77f53690c8e 100644 --- a/crates/sui/src/client_commands.rs +++ b/crates/sui/src/client_commands.rs @@ -42,9 +42,9 @@ use sui_source_validation::{BytecodeSourceVerifier, ValidationMode}; use shared_crypto::intent::Intent; use sui_json::SuiJsonValue; use sui_json_rpc_types::{ - Coin, DryRunTransactionBlockResponse, DynamicFieldPage, SuiCoinMetadata, SuiData, - SuiExecutionStatus, SuiObjectData, SuiObjectDataOptions, SuiObjectResponse, - SuiObjectResponseQuery, SuiParsedData, SuiProtocolConfigValue, SuiRawData, + Coin, DevInspectArgs, DevInspectResults, DryRunTransactionBlockResponse, DynamicFieldPage, + SuiCoinMetadata, SuiData, SuiExecutionStatus, SuiObjectData, SuiObjectDataOptions, + SuiObjectResponse, SuiObjectResponseQuery, SuiParsedData, SuiProtocolConfigValue, SuiRawData, SuiTransactionBlockEffects, SuiTransactionBlockEffectsAPI, SuiTransactionBlockResponse, SuiTransactionBlockResponseOptions, }; @@ -76,6 +76,7 @@ use sui_types::{ object::Owner, parse_sui_type_tag, signature::GenericSignature, + sui_serde, transaction::{ SenderSignedData, Transaction, TransactionData, TransactionDataAPI, TransactionKind, }, @@ -593,6 +594,9 @@ pub struct Opts { /// Perform a dry run of the transaction, without executing it. #[arg(long)] pub dry_run: bool, + /// Perform a dev inspect + #[arg(long)] + pub dev_inspect: bool, /// Instead of executing the transaction, serialize the bcs bytes of the unsigned transaction data /// (TransactionData) using base64 encoding, and print out the string . The string can /// be used to execute transaction with `sui client execute-signed-tx --tx-bytes `. 
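
Since `Opts` now carries both `dry_run` and `dev_inspect`, a caller building commands programmatically sets at most one of them; on the command line this corresponds to the new `--dev-inspect` flag. A minimal sketch, assuming `Opts` is reachable as `sui::client_commands::Opts` and with `dev_inspect_opts` as an illustrative helper (the field set matches the struct literals used in this patch's tests):

```rust
use sui::client_commands::Opts;

/// Sketch: options for a dev-inspect run. With `dev_inspect` set,
/// dry_run_or_execute_or_serialize returns early via execute_dev_inspect
/// instead of signing and executing the transaction.
fn dev_inspect_opts(gas_budget: Option<u64>) -> Opts {
    Opts {
        gas_budget, // optional; forwarded as DevInspectArgs::gas_budget
        dry_run: false,
        dev_inspect: true,
        serialize_unsigned_transaction: false,
        serialize_signed_transaction: false,
    }
}
```
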
@@ -623,6 +627,7 @@ impl Opts { Self { gas_budget: Some(gas_budget), dry_run: false, + dev_inspect: false, serialize_unsigned_transaction: false, serialize_signed_transaction: false, } @@ -633,6 +638,7 @@ impl Opts { Self { gas_budget: Some(gas_budget), dry_run: true, + dev_inspect: false, serialize_unsigned_transaction: false, serialize_signed_transaction: false, } @@ -673,10 +679,10 @@ impl SuiClientCommands { tx_digest, profile_output, } => { - move_vm_profiler::gas_profiler_feature_disabled! { + move_vm_profiler::tracing_feature_disabled! { bail!( - "gas-profiler feature is not enabled, rebuild or reinstall with \ - --features gas-profiler" + "tracing feature is not enabled, rebuild or reinstall with \ + --features tracing" ); }; @@ -908,7 +914,7 @@ impl SuiClientCommands { previous_id, )?; } - let (package_id, compiled_modules, dependencies, package_digest, upgrade_policy) = + let (package_id, compiled_modules, dependencies, package_digest, upgrade_policy, _) = upgrade_result?; let tx_kind = client @@ -1685,7 +1691,17 @@ pub(crate) async fn upgrade_package( with_unpublished_dependencies: bool, skip_dependency_verification: bool, env_alias: Option, -) -> Result<(ObjectID, Vec>, PackageDependencies, [u8; 32], u8), anyhow::Error> { +) -> Result< + ( + ObjectID, + Vec>, + PackageDependencies, + [u8; 32], + u8, + CompiledPackage, + ), + anyhow::Error, +> { let (dependencies, compiled_modules, compiled_package, package_id) = compile_package( read_api, build_config, @@ -1752,6 +1768,7 @@ pub(crate) async fn upgrade_package( dependencies, package_digest, upgrade_policy, + compiled_package, )) } @@ -2215,6 +2232,9 @@ impl Display for SuiClientCommandResult { SuiClientCommandResult::DryRun(response) => { writeln!(f, "{}", Pretty(response))?; } + SuiClientCommandResult::DevInspect(response) => { + writeln!(f, "{}", Pretty(response))?; + } } write!(f, "{}", writer.trim_end_matches('\n')) } @@ -2317,6 +2337,7 @@ impl SuiClientCommandResult { | SuiClientCommandResult::Balance(_, _) | SuiClientCommandResult::ChainIdentifier(_) | SuiClientCommandResult::DynamicFieldQuery(_) + | SuiClientCommandResult::DevInspect(_) | SuiClientCommandResult::Envs(_, _) | SuiClientCommandResult::Gas(_) | SuiClientCommandResult::NewAddress(_) @@ -2466,6 +2487,7 @@ pub enum SuiClientCommandResult { ChainIdentifier(String), DynamicFieldQuery(DynamicFieldPage), DryRun(DryRunTransactionBlockResponse), + DevInspect(DevInspectResults), Envs(Vec, Option), Gas(Vec), NewAddress(NewAddressOutput), @@ -2788,8 +2810,15 @@ pub(crate) async fn dry_run_or_execute_or_serialize( gas: Option, opts: Opts, ) -> Result { - let (dry_run, gas_budget, serialize_unsigned_transaction, serialize_signed_transaction) = ( + let ( + dry_run, + dev_inspect, + gas_budget, + serialize_unsigned_transaction, + serialize_signed_transaction, + ) = ( opts.dry_run, + opts.dev_inspect, opts.gas_budget, opts.serialize_unsigned_transaction, opts.serialize_signed_transaction, @@ -2804,12 +2833,27 @@ pub(crate) async fn dry_run_or_execute_or_serialize( context.get_reference_gas_price().await? 
}; + let client = context.get_client().await?; + + if dev_inspect { + return execute_dev_inspect( + context, + signer, + tx_kind, + gas_budget, + gas_price, + gas_payment, + None, + None, + ) + .await; + } + let gas = match gas_payment { Some(obj_ids) => Some(obj_ids), None => gas.map(|x| vec![x]), }; - let client = context.get_client().await?; if dry_run { return execute_dry_run( context, @@ -2894,6 +2938,49 @@ pub(crate) async fn dry_run_or_execute_or_serialize( } } +async fn execute_dev_inspect( + context: &mut WalletContext, + signer: SuiAddress, + tx_kind: TransactionKind, + gas_budget: Option, + gas_price: u64, + gas_payment: Option>, + gas_sponsor: Option, + skip_checks: Option, +) -> Result { + let client = context.get_client().await?; + let gas_budget = gas_budget.map(sui_serde::BigInt::from); + let mut gas_objs = vec![]; + let gas_objects = if let Some(gas_payment) = gas_payment { + for o in gas_payment.iter() { + let obj_ref = context.get_object_ref(*o).await?; + gas_objs.push(obj_ref); + } + Some(gas_objs) + } else { + None + }; + + let dev_inspect_args = DevInspectArgs { + gas_sponsor, + gas_budget, + gas_objects, + skip_checks, + show_raw_txn_data_and_effects: None, + }; + let dev_inspect_result = client + .read_api() + .dev_inspect_transaction_block( + signer, + tx_kind, + Some(sui_serde::BigInt::from(gas_price)), + None, + Some(dev_inspect_args), + ) + .await?; + Ok(SuiClientCommandResult::DevInspect(dev_inspect_result)) +} + pub(crate) async fn prerender_clever_errors( effects: &mut SuiTransactionBlockEffects, read_api: &ReadApi, diff --git a/crates/sui/src/client_ptb/ast.rs b/crates/sui/src/client_ptb/ast.rs index effa26e0a9603..94eb9c23a5dc9 100644 --- a/crates/sui/src/client_ptb/ast.rs +++ b/crates/sui/src/client_ptb/ast.rs @@ -3,7 +3,7 @@ use std::fmt; -use move_command_line_common::{ +use move_core_types::parsing::{ address::{NumericalAddress, ParsedAddress}, types::{ParsedFqName, ParsedModuleId, ParsedStructType, ParsedType}, }; @@ -35,6 +35,7 @@ pub const SUMMARY: &str = "summary"; pub const GAS_COIN: &str = "gas-coin"; pub const JSON: &str = "json"; pub const DRY_RUN: &str = "dry-run"; +pub const DEV_INSPECT: &str = "dev-inspect"; pub const SERIALIZE_UNSIGNED: &str = "serialize-unsigned-transaction"; pub const SERIALIZE_SIGNED: &str = "serialize-signed-transaction"; @@ -74,6 +75,7 @@ pub const COMMANDS: &[&str] = &[ GAS_COIN, JSON, DRY_RUN, + DEV_INSPECT, SERIALIZE_UNSIGNED, SERIALIZE_SIGNED, ]; @@ -111,6 +113,7 @@ pub struct ProgramMetadata { pub gas_object_id: Option>, pub json_set: bool, pub dry_run_set: bool, + pub dev_inspect_set: bool, pub gas_budget: Option>, } diff --git a/crates/sui/src/client_ptb/builder.rs b/crates/sui/src/client_ptb/builder.rs index d0bdf2efbb0fd..8df2dbf90dd7e 100644 --- a/crates/sui/src/client_ptb/builder.rs +++ b/crates/sui/src/client_ptb/builder.rs @@ -16,7 +16,7 @@ use miette::Severity; use move_binary_format::{ binary_config::BinaryConfig, file_format::SignatureToken, CompiledModule, }; -use move_command_line_common::{ +use move_core_types::parsing::{ address::{NumericalAddress, ParsedAddress}, parser::NumberFormat, }; @@ -1029,7 +1029,7 @@ impl<'a> PTBBuilder<'a> { ) .map_err(|e| err!(path_loc, "{e}"))?; } - let (package_id, compiled_modules, dependencies, package_digest, upgrade_policy) = + let (package_id, compiled_modules, dependencies, package_digest, upgrade_policy, _) = upgrade_result.map_err(|e| err!(path_loc, "{e}"))?; let upgrade_arg = self diff --git a/crates/sui/src/client_ptb/parser.rs 
b/crates/sui/src/client_ptb/parser.rs index 58766866848d7..e9e769a39cfa3 100644 --- a/crates/sui/src/client_ptb/parser.rs +++ b/crates/sui/src/client_ptb/parser.rs @@ -3,7 +3,7 @@ use std::iter::Peekable; -use move_command_line_common::{ +use move_core_types::parsing::{ address::{NumericalAddress, ParsedAddress}, parser::{parse_u128, parse_u16, parse_u256, parse_u32, parse_u64, parse_u8}, types::{ParsedFqName, ParsedModuleId, ParsedStructType, ParsedType}, @@ -41,6 +41,7 @@ struct ProgramParsingState { serialize_signed_set: bool, json_set: bool, dry_run_set: bool, + dev_inspect_set: bool, gas_object_id: Option>, gas_budget: Option>, } @@ -63,6 +64,7 @@ impl<'a, I: Iterator> ProgramParser<'a, I> { serialize_signed_set: false, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_object_id: None, gas_budget: None, }, @@ -110,6 +112,7 @@ impl<'a, I: Iterator> ProgramParser<'a, I> { L(T::Command, A::SUMMARY) => flag!(summary_set), L(T::Command, A::JSON) => flag!(json_set), L(T::Command, A::DRY_RUN) => flag!(dry_run_set), + L(T::Command, A::DEV_INSPECT) => flag!(dev_inspect_set), L(T::Command, A::PREVIEW) => flag!(preview_set), L(T::Command, A::WARN_SHADOWS) => flag!(warn_shadows_set), L(T::Command, A::GAS_COIN) => { @@ -207,6 +210,7 @@ impl<'a, I: Iterator> ProgramParser<'a, I> { gas_object_id: self.state.gas_object_id, json_set: self.state.json_set, dry_run_set: self.state.dry_run_set, + dev_inspect_set: self.state.dev_inspect_set, gas_budget: self.state.gas_budget, }, )) diff --git a/crates/sui/src/client_ptb/ptb.rs b/crates/sui/src/client_ptb/ptb.rs index 2796529de4e69..4a05195a11e97 100644 --- a/crates/sui/src/client_ptb/ptb.rs +++ b/crates/sui/src/client_ptb/ptb.rs @@ -150,6 +150,7 @@ impl PTB { gas: program_metadata.gas_object_id.map(|x| x.value), rest: Opts { dry_run: program_metadata.dry_run_set, + dev_inspect: program_metadata.dev_inspect_set, gas_budget: program_metadata.gas_budget.map(|x| x.value), serialize_unsigned_transaction: program_metadata.serialize_unsigned_set, serialize_signed_transaction: program_metadata.serialize_signed_set, @@ -295,6 +296,10 @@ pub fn ptb_description() -> clap::Command { --"dry-run" "Perform a dry run of the PTB instead of executing it." )) + .arg(arg!( + --"dev-inspect" + "Perform a dev-inspect of the PTB instead of executing it." + )) .arg(arg!( --"gas-coin" ... "The object ID of the gas coin to use. 
If not specified, it will try to use the first \ diff --git a/crates/sui/src/client_ptb/snapshots/sui__client_ptb__parser__tests__parse_commands.snap b/crates/sui/src/client_ptb/snapshots/sui__client_ptb__parser__tests__parse_commands.snap index d90e362c4bae7..89e72eb85b680 100644 --- a/crates/sui/src/client_ptb/snapshots/sui__client_ptb__parser__tests__parse_commands.snap +++ b/crates/sui/src/client_ptb/snapshots/sui__client_ptb__parser__tests__parse_commands.snap @@ -32,6 +32,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -72,6 +73,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -121,6 +123,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -189,6 +192,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -248,6 +252,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -322,6 +327,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -390,6 +396,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -449,6 +456,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -517,6 +525,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -576,6 +585,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -623,6 +633,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -689,6 +700,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -802,6 +814,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -892,6 +905,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -982,6 +996,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -1023,6 +1038,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -1089,6 +1105,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -1140,6 +1157,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -1191,6 +1209,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, 
gas_budget: Some( Spanned { span: Span { @@ -1276,6 +1295,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -1333,6 +1353,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -1396,6 +1417,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -1428,6 +1450,7 @@ expression: parsed ), json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -1452,6 +1475,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -1476,6 +1500,7 @@ expression: parsed gas_object_id: None, json_set: true, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -1500,6 +1525,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -1524,6 +1550,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { diff --git a/crates/sui/src/client_ptb/snapshots/sui__client_ptb__parser__tests__parse_publish.snap b/crates/sui/src/client_ptb/snapshots/sui__client_ptb__parser__tests__parse_publish.snap index fef41e7a46396..fe9c30cd71280 100644 --- a/crates/sui/src/client_ptb/snapshots/sui__client_ptb__parser__tests__parse_publish.snap +++ b/crates/sui/src/client_ptb/snapshots/sui__client_ptb__parser__tests__parse_publish.snap @@ -32,6 +32,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { @@ -72,6 +73,7 @@ expression: parsed gas_object_id: None, json_set: false, dry_run_set: false, + dev_inspect_set: false, gas_budget: Some( Spanned { span: Span { diff --git a/crates/sui/src/displays/dev_inspect.rs b/crates/sui/src/displays/dev_inspect.rs new file mode 100644 index 0000000000000..a0a83fb4508e0 --- /dev/null +++ b/crates/sui/src/displays/dev_inspect.rs @@ -0,0 +1,46 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use crate::displays::Pretty; +use std::fmt::{Display, Formatter}; +use sui_json_rpc_types::{DevInspectResults, SuiTransactionBlockEffectsAPI}; + +impl<'a> Display for Pretty<'a, DevInspectResults> { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + let Pretty(response) = self; + + if let Some(error) = &response.error { + writeln!(f, "Dev inspect failed: {}", error)?; + return Ok(()); + } + + writeln!( + f, + "Dev inspect completed, execution status: {}", + response.effects.status() + )?; + + writeln!(f, "{}", response.effects)?; + write!(f, "{}", response.events)?; + + if let Some(results) = &response.results { + for result in results { + writeln!(f, "Execution Result")?; + writeln!(f, " Mutable Reference Outputs")?; + for m in result.mutable_reference_outputs.iter() { + writeln!(f, " Sui Argument: {}", m.0)?; + writeln!(f, " Sui TypeTag: {:?}", m.2)?; + writeln!(f, " Bytes: {:?}", m.1)?; + } + + writeln!(f, " Return values")?; + for val in result.return_values.iter() { + writeln!(f, " Sui TypeTag: {:?}", val.1)?; + writeln!(f, " Bytes: {:?}", val.0)?; + } + } + } + + Ok(()) + } +} diff --git a/crates/sui/src/displays/mod.rs b/crates/sui/src/displays/mod.rs index ef12a0ed23e77..a70ba885b9ebe 100644 --- a/crates/sui/src/displays/mod.rs +++ b/crates/sui/src/displays/mod.rs @@ -1,6 +1,7 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 +mod dev_inspect; mod dry_run_tx_block; mod gas_cost_summary; mod ptb_preview; diff --git a/crates/sui/src/keytool.rs b/crates/sui/src/keytool.rs index 08e4aabefee40..df1fc5b3087e0 100644 --- a/crates/sui/src/keytool.rs +++ b/crates/sui/src/keytool.rs @@ -629,8 +629,15 @@ impl KeyToolCommand { match SuiKeyPair::decode(&input_string) { Ok(skp) => { info!("Importing Bech32 encoded private key to keystore"); - let key = Key::from(&skp); - keystore.add_key(alias, skp)?; + let mut key = Key::from(&skp); + keystore.add_key(alias.clone(), skp)?; + + let alias = match alias { + Some(x) => x, + None => keystore.get_alias_by_address(&key.sui_address)?, + }; + + key.alias = Some(alias); CommandOutput::Import(key) } Err(_) => { @@ -639,10 +646,17 @@ impl KeyToolCommand { &input_string, key_scheme, derivation_path, - alias, + alias.clone(), )?; let skp = keystore.get_key(&sui_address)?; - let key = Key::from(skp); + let mut key = Key::from(skp); + + let alias = match alias { + Some(x) => x, + None => keystore.get_alias_by_address(&key.sui_address)?, + }; + + key.alias = Some(alias); CommandOutput::Import(key) } } diff --git a/crates/sui/src/sui_commands.rs b/crates/sui/src/sui_commands.rs index 392ba66fa7549..ea23510d24c2e 100644 --- a/crates/sui/src/sui_commands.rs +++ b/crates/sui/src/sui_commands.rs @@ -720,6 +720,8 @@ async fn start( None, Some(data_ingestion_path.clone()), None, + None, /* start_checkpoint */ + None, /* end_checkpoint */ ) .await; info!("Indexer started in writer mode"); diff --git a/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v1/Move.toml b/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v1/Move.toml new file mode 100644 index 0000000000000..39cc2752ab46d --- /dev/null +++ b/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v1/Move.toml @@ -0,0 +1,6 @@ +[package] +name = "upgrades" +edition = "2024.beta" # edition = "legacy" to use legacy (pre-2024) Move + +[addresses] +upgrades = "0x0" diff --git a/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v1/sources/enum.move 
b/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v1/sources/enum.move new file mode 100644 index 0000000000000..d0c55d3ba99a4 --- /dev/null +++ b/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v1/sources/enum.move @@ -0,0 +1,10 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +module upgrades::enum_ { + public enum EnumToBeRemoved { + A, + B + } +} + diff --git a/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v1/sources/func.move b/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v1/sources/func.move new file mode 100644 index 0000000000000..bb83aedef2f57 --- /dev/null +++ b/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v1/sources/func.move @@ -0,0 +1,9 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +module upgrades::func_ { + public fun fun_to_be_removed(): u64 { + 0 + } +} + diff --git a/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v1/sources/struct.move b/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v1/sources/struct.move new file mode 100644 index 0000000000000..f75e460159e18 --- /dev/null +++ b/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v1/sources/struct.move @@ -0,0 +1,10 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +#[allow(unused_field)] +module upgrades::struct_ { + public struct StructToBeRemoved { + b: u64 + } +} + diff --git a/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v2/Move.toml b/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v2/Move.toml new file mode 100644 index 0000000000000..70f14c105d35c --- /dev/null +++ b/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v2/Move.toml @@ -0,0 +1,6 @@ +[package] +name = "upgrades" +edition = "2024.beta" # edition = "legacy" to use legacy (pre-2024) Move + +[addresses] +upgrades = "0x0" \ No newline at end of file diff --git a/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v2/sources/enum.move b/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v2/sources/enum.move new file mode 100644 index 0000000000000..8ba135d6bb144 --- /dev/null +++ b/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v2/sources/enum.move @@ -0,0 +1,7 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +module upgrades::enum_ { + //public enum EnumToBeRemoved {} +} + diff --git a/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v2/sources/func.move b/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v2/sources/func.move new file mode 100644 index 0000000000000..ab40afde72c86 --- /dev/null +++ b/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v2/sources/func.move @@ -0,0 +1,7 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +module upgrades::func_ { + // public fun fun_to_be_removed(): u64 {} +} + diff --git a/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v2/sources/struct.move b/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v2/sources/struct.move new file mode 100644 index 0000000000000..6d69ddc71b227 --- /dev/null +++ b/crates/sui/src/unit_tests/fixtures/upgrade_errors/declarations_missing_v2/sources/struct.move @@ -0,0 +1,7 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +module upgrades::struct_ { + // public struct StructToBeRemoved {} +} + diff --git a/crates/sui/src/unit_tests/profiler_tests.rs b/crates/sui/src/unit_tests/profiler_tests.rs index c4fcbaa094d94..5d2596e8c5453 100644 --- a/crates/sui/src/unit_tests/profiler_tests.rs +++ b/crates/sui/src/unit_tests/profiler_tests.rs @@ -1,14 +1,14 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -/// This test exists to make sure that the feature gating for all the code under `gas-profiler` -/// remains fully connected such that if and only if we enable the feature here, the `gas-profiler` +/// This test exists to make sure that the feature gating for all the code under `tracing` +/// remains fully connected such that if and only if we enable the feature here, the `tracing` /// feature gets enabled anywhere. /// /// If this test fails, check for the following. /// -/// Any crate that has code decorated with #[cfg(feature = "gas-profiler")] needs to have -/// a feature declared in its Cargo.toml named `gas-profiler`. If moving / refactoring code with +/// Any crate that has code decorated with #[cfg(feature = "tracing")] needs to have +/// a feature declared in its Cargo.toml named `tracing`. If moving / refactoring code with /// this decorator from a crate to a different crate, it is likely needed to copy over some of the /// feature declaration defined in the original crate. Also ensure we do not include the feature in /// any dependency of the dependencies section so that the feature won't get partially enabled as @@ -21,18 +21,18 @@ /// defined in all the other crates that the decorated code in the current crate depends on. /// /// Note this crate will always have the feature enabled in testing due to the addition of -/// `sui = { path = ".", features = ["gas-profiler"] }` to our dev-dependencies. +/// `sui = { path = ".", features = ["tracing"] }` to our dev-dependencies. -#[cfg(feature = "gas-profiler")] +#[cfg(feature = "tracing")] #[test] fn test_macro_shows_feature_enabled() { - move_vm_profiler::gas_profiler_feature_disabled! { + move_vm_profiler::tracing_feature_disabled! 
{ panic!("gas profile feature graph became disconnected"); } } #[ignore] -#[cfg(feature = "gas-profiler")] +#[cfg(feature = "tracing")] #[tokio::test(flavor = "multi_thread")] async fn test_profiler() { use std::fs; diff --git a/crates/sui/src/unit_tests/snapshots/sui__upgrade_compatibility__upgrade_compatibility_tests__declarations_missing.snap b/crates/sui/src/unit_tests/snapshots/sui__upgrade_compatibility__upgrade_compatibility_tests__declarations_missing.snap new file mode 100644 index 0000000000000..e47d7459c9250 --- /dev/null +++ b/crates/sui/src/unit_tests/snapshots/sui__upgrade_compatibility__upgrade_compatibility_tests__declarations_missing.snap @@ -0,0 +1,36 @@ +--- +source: crates/sui/src/unit_tests/upgrade_compatibility_tests.rs +expression: normalize_path(err.to_string()) +--- +error[Compatibility E01001]: missing public declaration + ┌─ /fixtures/upgrade_errors/declarations_missing_v2/sources/enum.move:4:18 + │ +4 │ module upgrades::enum_ { + │ ^^^^^ enum 'EnumToBeRemoved' is missing + │ + = enum is missing expected enum 'EnumToBeRemoved', but found none + = enums are part of a module's public interface and cannot be removed or changed during an upgrade + = add missing enum 'EnumToBeRemoved' back to the module 'enum_'. + +error[Compatibility E01001]: missing public declaration + ┌─ /fixtures/upgrade_errors/declarations_missing_v2/sources/func.move:4:18 + │ +4 │ module upgrades::func_ { + │ ^^^^^ public function 'fun_to_be_removed' is missing + │ + = public function is missing expected public function 'fun_to_be_removed', but found none + = public functions are part of a module's public interface and cannot be removed or changed during an upgrade + = add missing public function 'fun_to_be_removed' back to the module 'func_'. + +error[Compatibility E01001]: missing public declaration + ┌─ /fixtures/upgrade_errors/declarations_missing_v2/sources/struct.move:4:18 + │ +4 │ module upgrades::struct_ { + │ ^^^^^^^ struct 'StructToBeRemoved' is missing + │ + = struct is missing expected struct 'StructToBeRemoved', but found none + = structs are part of a module's public interface and cannot be removed or changed during an upgrade + = add missing struct 'StructToBeRemoved' back to the module 'struct_'. + + +Upgrade failed, this package requires changes to be compatible with the existing package. Its upgrade policy is set to 'Compatible'. 
diff --git a/crates/sui/src/unit_tests/upgrade_compatibility_tests.rs b/crates/sui/src/unit_tests/upgrade_compatibility_tests.rs index 1f0e38eae9995..8e6d002c712e0 100644 --- a/crates/sui/src/unit_tests/upgrade_compatibility_tests.rs +++ b/crates/sui/src/unit_tests/upgrade_compatibility_tests.rs @@ -6,26 +6,25 @@ use insta::assert_snapshot; use move_binary_format::CompiledModule; use std::path::PathBuf; use sui_move_build::BuildConfig; +use sui_move_build::CompiledPackage; #[test] +#[should_panic] fn test_all_fail() { - let (pkg_v1, pkg_v2) = get_packages("all"); + let (mods_v1, pkg_v2) = get_packages("all"); - let result = compare_packages(pkg_v1, pkg_v2); - assert!(result.is_err()); - let err = result.unwrap_err(); - - assert_snapshot!(err.to_string()); + // panics: Not all errors are implemented yet + compare_packages(mods_v1, pkg_v2).unwrap(); } #[test] -fn test_struct_missing() { - let (pkg_v1, pkg_v2) = get_packages("struct_missing"); +fn test_declarations_missing() { + let (pkg_v1, pkg_v2) = get_packages("declarations_missing"); let result = compare_packages(pkg_v1, pkg_v2); assert!(result.is_err()); let err = result.unwrap_err(); - assert_snapshot!(err.to_string()); + assert_snapshot!(normalize_path(err.to_string())); } #[test] @@ -42,12 +41,12 @@ fn test_entry_linking_ok() { assert!(compare_packages(pkg_v1, pkg_v2).is_ok()); } -fn get_packages(name: &str) -> (Vec, Vec) { +fn get_packages(name: &str) -> (Vec, CompiledPackage) { let mut path: PathBuf = PathBuf::from(env!("CARGO_MANIFEST_DIR")); path.push("src/unit_tests/fixtures/upgrade_errors/"); path.push(format!("{}_v1", name)); - let pkg_v1 = BuildConfig::new_for_testing() + let mods_v1 = BuildConfig::new_for_testing() .build(&path) .unwrap() .into_modules(); @@ -56,10 +55,18 @@ fn get_packages(name: &str) -> (Vec, Vec) { path.push("src/unit_tests/fixtures/upgrade_errors/"); path.push(format!("{}_v2", name)); - let pkg_v2 = BuildConfig::new_for_testing() - .build(&path) - .unwrap() - .into_modules(); + let pkg_v2 = BuildConfig::new_for_testing().build(&path).unwrap(); + + (mods_v1, pkg_v2) +} - (pkg_v1, pkg_v2) +/// Snapshots will differ on each machine, normalize to prevent test failures +fn normalize_path(err_string: String) -> String { + //test + let re = regex::Regex::new(r"^ ┌─ .*(\/fixtures\/.*\.move:\d+:\d+)$").unwrap(); + err_string + .lines() + .map(|line| re.replace(line, " ┌─ $1").into_owned()) + .collect::>() + .join("\n") } diff --git a/crates/sui/src/upgrade_compatibility.rs b/crates/sui/src/upgrade_compatibility.rs index bad3fca2c1a5d..96d5a64d01947 100644 --- a/crates/sui/src/upgrade_compatibility.rs +++ b/crates/sui/src/upgrade_compatibility.rs @@ -5,8 +5,12 @@ #[cfg(test)] mod upgrade_compatibility_tests; +use std::collections::{HashMap, HashSet}; +use std::fs; +use std::io::{stdout, IsTerminal}; +use std::sync::Arc; + use anyhow::{anyhow, Context, Error}; -use std::collections::HashMap; use move_binary_format::{ compatibility::Compatibility, @@ -15,19 +19,35 @@ use move_binary_format::{ normalized::{Enum, Function, Module, Struct}, CompiledModule, }; +use move_command_line_common::files::FileHash; +use move_compiler::diagnostics::codes::DiagnosticInfo; +use move_compiler::{ + diag, + diagnostics::{ + codes::{custom, Severity}, + report_diagnostics_to_buffer, Diagnostic, Diagnostics, + }, + shared::files::{FileName, FilesSourceText}, +}; use move_core_types::{ account_address::AccountAddress, identifier::{IdentStr, Identifier}, }; +use move_ir_types::location::Loc; +use 
move_package::compilation::compiled_package::CompiledUnitWithSource; use sui_json_rpc_types::{SuiObjectDataOptions, SuiRawData}; +use sui_move_build::CompiledPackage; use sui_protocol_config::ProtocolConfig; use sui_sdk::SuiClient; use sui_types::{base_types::ObjectID, execution_config_utils::to_binary_config}; /// Errors that can occur during upgrade compatibility checks. /// one-to-one related to the underlying trait functions see: [`CompatibilityMode`] -#[derive(Debug)] +#[derive(Debug, Clone)] pub(crate) enum UpgradeCompatibilityModeError { + ModuleMissing { + name: Identifier, + }, StructMissing { name: Identifier, old_struct: Struct, @@ -102,8 +122,11 @@ pub(crate) enum UpgradeCompatibilityModeError { } impl UpgradeCompatibilityModeError { + /// check if the error breaks compatibility for a given [`Compatibility`] fn breaks_compatibility(&self, compatability: &Compatibility) -> bool { match self { + UpgradeCompatibilityModeError::ModuleMissing { .. } => true, + UpgradeCompatibilityModeError::StructAbilityMismatch { .. } | UpgradeCompatibilityModeError::StructTypeParamMismatch { .. } | UpgradeCompatibilityModeError::EnumAbilityMismatch { .. } @@ -149,7 +172,7 @@ pub(crate) struct CliCompatibilityMode { } impl CompatibilityMode for CliCompatibilityMode { - type Error = anyhow::Error; + type Error = Vec; // ignored, address is not populated pre-tx fn module_id_mismatch( &mut self, @@ -323,37 +346,78 @@ impl CompatibilityMode for CliCompatibilityMode { }); } - fn finish(&self, compatability: &Compatibility) -> Result<(), Self::Error> { - let errors: Vec = self + fn finish(self, compatability: &Compatibility) -> Result<(), Self::Error> { + let errors: Vec = self .errors - .iter() + .into_iter() .filter(|e| e.breaks_compatibility(compatability)) - .map(|e| format!("- {:?}", e)) .collect(); if !errors.is_empty() { - return Err(anyhow!( - "Upgrade compatibility check failed with the following errors:\n{}", - errors.join("\n") - )); + return Err(errors); } Ok(()) } } +const COMPATIBILITY_PREFIX: &str = "Compatibility "; +/// Generates an enum Category along with individual enum for each individual category +/// and impls into diagnostic info for each category. +macro_rules! upgrade_codes { + ($($cat:ident: [ + $($code:ident: { msg: $code_msg:literal }),* $(,)? + ]),* $(,)?) => { + #[derive(PartialEq, Eq, Clone, Copy, Debug, Hash, PartialOrd, Ord)] + #[repr(u8)] + pub enum Category { + #[allow(dead_code)] + ZeroPlaceholder, + $($cat,)* + } + + $( + #[derive(PartialEq, Eq, Clone, Copy, Debug, Hash)] + #[repr(u8)] + pub enum $cat { + #[allow(dead_code)] + ZeroPlaceholder, + $($code,)* + } + + // impl into diagnostic info + impl Into for $cat { + fn into(self) -> DiagnosticInfo { + match self { + Self::ZeroPlaceholder => + panic!("do not use placeholder error code"), + $(Self::$code => custom( + COMPATIBILITY_PREFIX, + Severity::NonblockingError, + Category::$cat as u8, + self as u8, + $code_msg, + ),)* + } + } + } + )* + }; +} + +// Used to generate diagnostics primary labels for upgrade compatibility errors. +upgrade_codes!( + Declarations: [ + PublicMissing: { msg: "missing public declaration" }, + ], +); + /// Check the upgrade compatibility of a new package with an existing on-chain package. 
pub(crate) async fn check_compatibility( client: &SuiClient, package_id: ObjectID, - compiled_modules: &[Vec], + new_package: CompiledPackage, protocol_config: ProtocolConfig, ) -> Result<(), Error> { - let new_modules = compiled_modules - .iter() - .map(|b| CompiledModule::deserialize_with_config(b, &to_binary_config(&protocol_config))) - .collect::, _>>() - .context("Unable to to deserialize compiled module")?; - let existing_obj_read = client .read_api() .get_object_with_options(package_id, SuiObjectDataOptions::new().with_bcs()) @@ -378,36 +442,156 @@ pub(crate) async fn check_compatibility( .collect::, _>>() .context("Unable to get existing package")?; - compare_packages(existing_modules, new_modules) + compare_packages(existing_modules, new_package) } +/// Collect all the errors into a single error message. fn compare_packages( existing_modules: Vec, - new_modules: Vec, + new_package: CompiledPackage, ) -> Result<(), Error> { // create a map from the new modules - let new_modules_map: HashMap = new_modules - .iter() + let new_modules_map: HashMap = new_package + .get_modules() .map(|m| (m.self_id().name().to_owned(), m.clone())) .collect(); - // for each existing find the new one run compatibility check - for existing_module in existing_modules { - let name = existing_module.self_id().name().to_owned(); - - // find the new module with the same name - match new_modules_map.get(&name) { - Some(new_module) => { - Compatibility::upgrade_check().check_with_mode::( - &Module::new(&existing_module), - &Module::new(new_module), - )?; - } - None => { - Err(anyhow!("Module {} is missing from the package", name))?; + let errors: Vec<(Identifier, UpgradeCompatibilityModeError)> = existing_modules + .iter() + .flat_map(|existing_module| { + let name = existing_module.self_id().name().to_owned(); + + // find the new module with the same name + match new_modules_map.get(&name) { + Some(new_module) => { + let compatible = Compatibility::upgrade_check() + .check_with_mode::( + &Module::new(existing_module), + &Module::new(new_module), + ); + if let Err(errors) = compatible { + errors.into_iter().map(|e| (name.to_owned(), e)).collect() + } else { + vec![] + } + } + None => vec![( + name.clone(), + UpgradeCompatibilityModeError::ModuleMissing { name }, + )], } + }) + .collect(); + + if errors.is_empty() { + return Ok(()); + } + + let mut files: FilesSourceText = HashMap::new(); + let mut file_set = HashSet::new(); + + let mut diags = Diagnostics::new(); + + for (name, err) in errors { + let compiled_unit_with_source = new_package + .package + .get_module_by_name_from_root(name.as_str()) + .context("Unable to get module")?; + + if !file_set.contains(&compiled_unit_with_source.source_path) { + let file_contents: Arc = + fs::read_to_string(&compiled_unit_with_source.source_path) + .context("Unable to read source file")? + .into(); + let file_hash = FileHash::new(&file_contents); + + files.insert( + file_hash, + ( + FileName::from(compiled_unit_with_source.source_path.to_string_lossy()), + file_contents, + ), + ); + + file_set.insert(compiled_unit_with_source.source_path.clone()); } + + diags.add(diag_from_error(&err, compiled_unit_with_source)) + } + + Err(anyhow!( + "{}\nUpgrade failed, this package requires changes to be compatible with the existing package. Its upgrade policy is set to 'Compatible'.", + String::from_utf8(report_diagnostics_to_buffer(&files.into(), diags, stdout().is_terminal())).context("Unable to convert buffer to string")? 
+ )) +} + +/// Convert an error to a diagnostic using the specific error type's function. +fn diag_from_error( + error: &UpgradeCompatibilityModeError, + compiled_unit_with_source: &CompiledUnitWithSource, +) -> Diagnostic { + match error { + UpgradeCompatibilityModeError::StructMissing { name, .. } => missing_definition_diag( + Declarations::PublicMissing, + "struct", + &name, + compiled_unit_with_source, + ), + UpgradeCompatibilityModeError::EnumMissing { name, .. } => missing_definition_diag( + Declarations::PublicMissing, + "enum", + &name, + compiled_unit_with_source, + ), + UpgradeCompatibilityModeError::FunctionMissingPublic { name, .. } => { + missing_definition_diag( + Declarations::PublicMissing, + "public function", + &name, + compiled_unit_with_source, + ) + } + UpgradeCompatibilityModeError::FunctionMissingEntry { name, .. } => { + missing_definition_diag( + Declarations::PublicMissing, + "entry function", + &name, + compiled_unit_with_source, + ) + } + _ => todo!("Implement diag_from_error for {:?}", error), } +} - Ok(()) +/// Return a diagnostic for a missing definition. +fn missing_definition_diag( + error: impl Into, + declaration_kind: &str, + identifier_name: &Identifier, + compiled_unit_with_source: &CompiledUnitWithSource, +) -> Diagnostic { + let module_name = compiled_unit_with_source.unit.name; + let loc = compiled_unit_with_source + .unit + .source_map + .definition_location; + + Diagnostic::new( + error, + (loc, format!( + "{declaration_kind} '{identifier_name}' is missing", + declaration_kind = declaration_kind, + identifier_name = identifier_name, + )), + std::iter::empty::<(Loc, String)>(), + vec![format!( + "{declaration_kind} is missing expected {declaration_kind} '{identifier_name}', but found none", + ), + format!( + "{declaration_kind}s are part of a module's public interface and cannot be removed or changed during an upgrade", + ), + format!( + "add missing {declaration_kind} '{identifier_name}' back to the module '{module_name}'.", + )] + ) } diff --git a/crates/sui/tests/cli_tests.rs b/crates/sui/tests/cli_tests.rs index 7073cd7d76ed2..0d822a93c856c 100644 --- a/crates/sui/tests/cli_tests.rs +++ b/crates/sui/tests/cli_tests.rs @@ -2897,6 +2897,7 @@ async fn test_serialize_tx() -> Result<(), anyhow::Error> { opts: Opts { gas_budget: Some(rgp * TEST_ONLY_GAS_UNIT_FOR_TRANSFER), dry_run: false, + dev_inspect: false, serialize_unsigned_transaction: true, serialize_signed_transaction: false, }, @@ -2911,6 +2912,7 @@ async fn test_serialize_tx() -> Result<(), anyhow::Error> { opts: Opts { gas_budget: Some(rgp * TEST_ONLY_GAS_UNIT_FOR_TRANSFER), dry_run: false, + dev_inspect: false, serialize_unsigned_transaction: false, serialize_signed_transaction: true, }, @@ -2926,6 +2928,7 @@ async fn test_serialize_tx() -> Result<(), anyhow::Error> { opts: Opts { gas_budget: Some(rgp * TEST_ONLY_GAS_UNIT_FOR_TRANSFER), dry_run: false, + dev_inspect: false, serialize_unsigned_transaction: false, serialize_signed_transaction: true, }, @@ -3772,6 +3775,7 @@ async fn test_gas_estimation() -> Result<(), anyhow::Error> { opts: Opts { gas_budget: None, dry_run: false, + dev_inspect: false, serialize_unsigned_transaction: false, serialize_signed_transaction: false, }, diff --git a/crates/suiop-cli/Cargo.toml b/crates/suiop-cli/Cargo.toml index 0c39aa9b6e3a3..2f2ba91dea176 100644 --- a/crates/suiop-cli/Cargo.toml +++ b/crates/suiop-cli/Cargo.toml @@ -56,6 +56,7 @@ futures.workspace = true thiserror.workspace = true strsim = "0.11.1" futures-timer = "3.0.3" +tempfile.workspace = 
true [dev-dependencies] diff --git a/crates/suiop-cli/src/cli/env/mod.rs b/crates/suiop-cli/src/cli/env/mod.rs new file mode 100644 index 0000000000000..51d5b8c92ede8 --- /dev/null +++ b/crates/suiop-cli/src/cli/env/mod.rs @@ -0,0 +1,72 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::run_cmd; +use anyhow::Result; +use clap::Parser; +use inquire::Select; +use std::io::Write; +use tracing::{debug, info}; + +/// Load an environment from pulumi +/// +/// if no environment name is provided, the user will be prompted to select one from the list +#[derive(Parser, Debug)] +pub struct LoadEnvironmentArgs { + /// the optional name of the environment to load + environment_name: Option, +} + +pub fn load_environment_cmd(args: &LoadEnvironmentArgs) -> Result<()> { + setup_pulumi_environment(&args.environment_name.clone().unwrap_or_else(|| { + let output = run_cmd(vec!["pulumi", "env", "ls"], None).expect("Running pulumi env ls"); + let output_str = String::from_utf8_lossy(&output.stdout); + let options: Vec<&str> = output_str.lines().collect(); + + if options.is_empty() { + panic!("No environments found. Make sure you are logged into the correct pulumi org."); + } + + Select::new("Select an environment:", options) + .prompt() + .expect("Failed to select environment") + .to_owned() + })) +} + +pub fn setup_pulumi_environment(environment_name: &str) -> Result<()> { + let output = run_cmd(vec!["pulumi", "env", "open", environment_name], None)?; + let output_str = String::from_utf8_lossy(&output.stdout); + let output_json: serde_json::Value = serde_json::from_str(&output_str)?; + let env_vars = &output_json["environmentVariables"]; + // Open a file to write environment variables + let home_dir = std::env::var("HOME").expect("HOME environment variable not set"); + let suiop_dir = format!("{}/.suiop", home_dir); + std::fs::create_dir_all(&suiop_dir).expect("Failed to create .suiop directory"); + let env_file_path = format!("{}/env_vars", suiop_dir); + let mut env_file = + std::fs::File::create(&env_file_path).expect("Failed to create env_vars file"); + + if let serde_json::Value::Object(env_vars) = env_vars { + for (key, value) in env_vars { + if let Some(value_str) = value.as_str() { + writeln!(env_file, "{}={}", key, value_str)?; + info!("writing environment variable {}", key); + debug!("={}", value_str); + } else { + info!( + "Failed to set environment variable: {}. Value is not a string.", + key + ); + } + } + } else { + info!("Environment variables are not in the expected format."); + debug!("env: {:?}", output_json); + } + info!( + "finished loading environment. use `source {}` to load them into your shell", + env_file_path + ); + Ok(()) +} diff --git a/crates/suiop-cli/src/cli/incidents/mod.rs b/crates/suiop-cli/src/cli/incidents/mod.rs index b1d29afc2730a..c659d14ef5811 100644 --- a/crates/suiop-cli/src/cli/incidents/mod.rs +++ b/crates/suiop-cli/src/cli/incidents/mod.rs @@ -17,7 +17,7 @@ use jira::generate_follow_up_tasks; use pd::print_recent_incidents; use selection::review_recent_incidents; use std::path::PathBuf; -use tracing::debug; +use tracing::{debug, info}; #[derive(Parser, Debug, Clone)] pub struct IncidentsArgs { @@ -59,6 +59,7 @@ pub enum IncidentsAction { /// - Return the combined incident list. 
async fn get_incidents(limit: &usize, days: &usize) -> Result> { let current_time = Local::now(); + info!("going back {} days", days); let start_time = current_time - Duration::days(*days as i64); let slack = Slack::new().await; Ok(pd::fetch_incidents(*limit, start_time, current_time) diff --git a/crates/suiop-cli/src/cli/incidents/notion.rs b/crates/suiop-cli/src/cli/incidents/notion.rs index 96721bbe00250..0d2a83beb62fb 100644 --- a/crates/suiop-cli/src/cli/incidents/notion.rs +++ b/crates/suiop-cli/src/cli/incidents/notion.rs @@ -116,6 +116,7 @@ impl Notion { pub fn new() -> Self { let token = env::var("NOTION_API_TOKEN") .expect("Please set the NOTION_API_TOKEN environment variable"); + debug!("using notion token {}", token); let client = NotionApi::new(token.clone()).expect("Failed to create Notion API client"); Self { client, token } } @@ -145,8 +146,12 @@ impl Notion { if !response.status().is_success() { return Err(anyhow::anyhow!( - "Request failed with status: {}", - response.status() + "Request failed with status: {}, response: {}", + response.status(), + response + .text() + .await + .unwrap_or("no response text".to_owned()) )); } diff --git a/crates/suiop-cli/src/cli/incidents/pd/mod.rs b/crates/suiop-cli/src/cli/incidents/pd/mod.rs index ff96fcd4ece71..b1039f3e533ef 100644 --- a/crates/suiop-cli/src/cli/incidents/pd/mod.rs +++ b/crates/suiop-cli/src/cli/incidents/pd/mod.rs @@ -11,6 +11,7 @@ use reqwest::header::AUTHORIZATION; use serde::{Deserialize, Serialize}; use serde_json::Value as JsonValue; use std::env; +use tracing::debug; use super::incident::Incident; @@ -49,15 +50,18 @@ pub async fn fetch_incidents( ) -> Result> { let url = "https://api.pagerduty.com/incidents"; + let api_key = env::var("PD_API_KEY").expect("please set the PD_API_KEY env var"); + if api_key.is_empty() { + panic!("PD_API_KEY is not set"); + } + + debug!("fetching incidents from pagerduty with {}", api_key); let mut headers = HeaderMap::new(); headers.insert( AUTHORIZATION, - format!( - "Token token={}", - env::var("PD_API_KEY").expect("please set the PD_API_KEY env var") - ) - .parse() - .expect("header parsing"), + format!("Token token={}", api_key) + .parse() + .expect("header parsing"), ); headers.insert( ACCEPT, diff --git a/crates/suiop-cli/src/cli/mod.rs b/crates/suiop-cli/src/cli/mod.rs index 2111eb17153c3..fffcbf3b08387 100644 --- a/crates/suiop-cli/src/cli/mod.rs +++ b/crates/suiop-cli/src/cli/mod.rs @@ -3,6 +3,7 @@ mod ci; pub mod docker; +mod env; mod iam; mod incidents; pub mod lib; @@ -13,6 +14,7 @@ mod slack; pub use ci::{ci_cmd, CIArgs}; pub use docker::{docker_cmd, DockerArgs}; +pub use env::{load_environment_cmd, LoadEnvironmentArgs}; pub use iam::{iam_cmd, IAMArgs}; pub use incidents::{incidents_cmd, IncidentsArgs}; pub use pulumi::{pulumi_cmd, PulumiArgs}; diff --git a/crates/suiop-cli/src/cli/pulumi/init.rs b/crates/suiop-cli/src/cli/pulumi/init.rs index 7fa9551722fc7..90967a0af4e8c 100644 --- a/crates/suiop-cli/src/cli/pulumi/init.rs +++ b/crates/suiop-cli/src/cli/pulumi/init.rs @@ -12,6 +12,8 @@ use std::fs; use std::path::{Path, PathBuf}; use tracing::{debug, error, info, warn}; +use super::PulumiProjectRuntime; + #[derive(clap::Subcommand, Clone, Debug)] pub enum ProjectType { App, @@ -23,7 +25,12 @@ pub enum ProjectType { const KEYRING: &str = "pulumi-kms-automation-f22939d"; impl ProjectType { - pub fn create_project(&self, use_kms: &bool, project_name: Option) -> Result<()> { + pub fn create_project( + &self, + use_kms: &bool, + project_name: Option, + runtime: 
&PulumiProjectRuntime, + ) -> Result<()> { // make sure we're in suiops let suiops_path = ensure_in_suiops_repo()?; info!("suipop path: {}", suiops_path); @@ -87,11 +94,12 @@ impl ProjectType { &project_dir, Self::App, &project_opts, + runtime, )?; } Self::Basic => { info!("creating basic pulumi project"); - create_basic_project(&project_name, &project_dir, &project_opts)?; + create_basic_project(&project_name, &project_dir, &project_opts, runtime)?; } Self::CronJob => { info!("creating k8s cronjob project"); @@ -100,6 +108,7 @@ impl ProjectType { &project_dir, Self::CronJob, &project_opts, + runtime, )?; } } @@ -145,6 +154,7 @@ fn run_pulumi_new( project_name: &str, project_dir_str: &str, project_opts: &[String], + runtime: &PulumiProjectRuntime, ) -> Result<()> { info!( "creating new pulumi project in {}", @@ -152,12 +162,16 @@ fn run_pulumi_new( ); let opts = project_opts.join(" "); info!("extra pulumi options added: {}", &opts.bright_purple()); + let runtime_arg = match runtime { + PulumiProjectRuntime::Go => "go", + PulumiProjectRuntime::Typescript => "ts", + }; run_cmd( vec![ "bash", "-c", &format!( - r#"pulumi new go --dir {0} -d "pulumi project for {1}" --name "{1}" --stack mysten/dev --yes {2}"#, + r#"pulumi new {runtime_arg} --dir {0} -d "pulumi project for {1}" --name "{1}" --stack mysten/dev --yes {2}"#, project_dir_str, project_name, opts ), ], @@ -171,15 +185,17 @@ fn run_pulumi_new_from_template( project_dir_str: &str, project_type: ProjectType, project_opts: &[String], + runtime: &PulumiProjectRuntime, ) -> Result<()> { info!( "creating new pulumi project in {}", project_dir_str.bright_purple() ); - let template_dir = match project_type { - ProjectType::App | ProjectType::Service => "app-go", - ProjectType::CronJob => "cronjob-go", - _ => "app-go", + let template_dir = match (project_type, runtime) { + (ProjectType::App | ProjectType::Service, PulumiProjectRuntime::Go) => "app-go", + (ProjectType::CronJob, PulumiProjectRuntime::Go) => "cronjob-go", + (ProjectType::App | ProjectType::Service, PulumiProjectRuntime::Typescript) => "app-ts", + _ => panic!("unsupported runtime for this project type"), }; let opts = project_opts.join(" "); info!("extra pulumi options added: {}", &opts.bright_purple()); @@ -288,6 +304,7 @@ fn create_basic_project( project_name: &str, project_dir: &PathBuf, project_opts: &[String], + runtime: &PulumiProjectRuntime, ) -> Result<()> { let project_dir_str = project_dir.to_str().expect("project dir to str"); info!( @@ -296,13 +313,16 @@ fn create_basic_project( ); fs::create_dir_all(project_dir).context("failed to create project directory")?; // initialize pulumi project - run_pulumi_new(project_name, project_dir_str, project_opts).inspect_err(|_| { + run_pulumi_new(project_name, project_dir_str, project_opts, runtime).inspect_err(|_| { remove_project_dir(project_dir).unwrap(); let backend = get_current_backend().unwrap(); remove_stack(&backend, project_name, "mysten/dev").unwrap(); })?; // run go mod tidy to make sure all dependencies are installed - run_go_mod_tidy(project_dir_str)?; + if runtime == &PulumiProjectRuntime::Go { + debug!("running go mod tidy"); + run_go_mod_tidy(project_dir_str)?; + } // set pulumi env set_pulumi_env(project_dir_str)?; // try a pulumi preview to make sure it's good @@ -314,6 +334,7 @@ fn create_mysten_k8s_project( project_dir: &PathBuf, project_type: ProjectType, project_opts: &[String], + runtime: &PulumiProjectRuntime, ) -> Result<()> { let project_dir_str = project_dir.to_str().expect("project dir to str"); info!( 
@@ -322,14 +343,23 @@ fn create_mysten_k8s_project( ); fs::create_dir_all(project_dir).context("failed to create project directory")?; // initialize pulumi project - run_pulumi_new_from_template(project_name, project_dir_str, project_type, project_opts) - .inspect_err(|_| { - remove_project_dir(project_dir).unwrap(); - let backend = get_current_backend().unwrap(); - remove_stack(&backend, project_name, "mysten/dev").unwrap(); - })?; + run_pulumi_new_from_template( + project_name, + project_dir_str, + project_type, + project_opts, + runtime, + ) + .inspect_err(|_| { + remove_project_dir(project_dir).unwrap(); + let backend = get_current_backend().unwrap(); + remove_stack(&backend, project_name, "mysten/dev").unwrap(); + })?; // run go mod tidy to make sure all dependencies are installed - run_go_mod_tidy(project_dir_str)?; + if runtime == &PulumiProjectRuntime::Go { + debug!("running go mod tidy"); + run_go_mod_tidy(project_dir_str)?; + } // we don't run preview for templated apps because the user // has to give the repo dir (improvements to this coming soon) diff --git a/crates/suiop-cli/src/cli/pulumi/mod.rs b/crates/suiop-cli/src/cli/pulumi/mod.rs index 68b0d6145e999..2d9df6d16f078 100644 --- a/crates/suiop-cli/src/cli/pulumi/mod.rs +++ b/crates/suiop-cli/src/cli/pulumi/mod.rs @@ -5,11 +5,21 @@ mod init; mod setup; use anyhow::Result; +use clap::arg; use clap::Parser; +use clap::ValueEnum; use init::ProjectType; use setup::ensure_gcloud; use setup::ensure_pulumi_setup; +#[derive(ValueEnum, PartialEq, Clone, Debug)] +pub enum PulumiProjectRuntime { + #[clap(alias = "golang")] + Go, + #[clap(alias = "ts")] + Typescript, +} + #[derive(Parser, Debug, Clone)] pub struct PulumiArgs { #[command(subcommand)] @@ -32,6 +42,10 @@ pub enum PulumiAction { /// the name of the project to be created #[arg(long, aliases = ["name"])] project_name: Option, + + /// the runtime to use for the project + #[arg(long, default_value = "go")] + runtime: PulumiProjectRuntime, }, } @@ -42,11 +56,12 @@ pub fn pulumi_cmd(args: &PulumiArgs) -> Result<()> { project_type, kms, project_name, + runtime, } => { if *kms { ensure_gcloud()?; } - project_type.create_project(kms, project_name.clone()) + project_type.create_project(kms, project_name.clone(), runtime) } } } diff --git a/crates/suiop-cli/src/cli/slack/mod.rs b/crates/suiop-cli/src/cli/slack/mod.rs index 95d77d601ee53..b2ab0ae5d2f6d 100644 --- a/crates/suiop-cli/src/cli/slack/mod.rs +++ b/crates/suiop-cli/src/cli/slack/mod.rs @@ -32,7 +32,12 @@ fn get_serialize_filepath(subname: &str) -> PathBuf { /// Serialize the obj into ~/.suiop/{subname} so we can cache it across /// executions pub fn serialize_to_file(subname: &str, obj: &Vec) -> Result<()> { - let file = File::create(get_serialize_filepath(subname).as_path())?; + let filepath = get_serialize_filepath(subname); + // Ensure the parent directory exists + if let Some(parent) = filepath.parent() { + std::fs::create_dir_all(parent)?; + } + let file = File::create(filepath.as_path())?; serde_json::to_writer(file, obj)?; Ok(()) } @@ -70,6 +75,7 @@ impl Slack { let token = std::env::var("SLACK_BOT_TOKEN").expect( "Please set SLACK_BOT_TOKEN env var ('slack bot token (incidentbot)' in 1password)", ); + debug!("using slack token {}", token); let mut headers = header::HeaderMap::new(); headers.insert( header::AUTHORIZATION, diff --git a/crates/suiop-cli/src/main.rs b/crates/suiop-cli/src/main.rs index 3d3c7c99c83ea..f80de84e677a7 100644 --- a/crates/suiop-cli/src/main.rs +++ b/crates/suiop-cli/src/main.rs @@ -5,12 +5,12 
@@ use anyhow::Result; use clap::Parser; use suioplib::{ cli::{ - ci_cmd, docker_cmd, iam_cmd, incidents_cmd, pulumi_cmd, service_cmd, CIArgs, DockerArgs, - IAMArgs, IncidentsArgs, PulumiArgs, ServiceArgs, + ci_cmd, docker_cmd, iam_cmd, incidents_cmd, load_environment_cmd, pulumi_cmd, service_cmd, + CIArgs, DockerArgs, IAMArgs, IncidentsArgs, LoadEnvironmentArgs, PulumiArgs, ServiceArgs, }, DEBUG_MODE, }; -use tracing::info; +use tracing::{debug, info, warn}; use tracing_subscriber::{ filter::{EnvFilter, LevelFilter}, FmtSubscriber, @@ -38,6 +38,8 @@ pub(crate) enum Resource { Service(ServiceArgs), #[clap()] CI(CIArgs), + #[clap(name="load-env", aliases = ["e", "env"])] + LoadEnvironment(LoadEnvironmentArgs), } #[tokio::main(flavor = "current_thread")] @@ -52,6 +54,24 @@ async fn main() -> Result<()> { tracing::subscriber::set_global_default(subscriber).expect("setting default subscriber failed"); + // Load environment variables from ~/.suiop/env_vars + debug!("loading environment variables"); + let home_dir = std::env::var("HOME").expect("HOME environment variable not set"); + let env_file_path = std::path::Path::new(&home_dir) + .join(".suiop") + .join("env_vars"); + + if let Ok(env_contents) = std::fs::read_to_string(env_file_path) { + for line in env_contents.lines() { + if let Some((key, value)) = line.split_once('=') { + debug!("setting environment variable {}={}", key, value); + std::env::set_var(key.trim(), value.trim()); + } + } + } else { + warn!("Warning: Could not read ~/.suiop/env_vars file. Environment variables not loaded."); + } + if *DEBUG_MODE { info!("Debug mode enabled"); } @@ -76,6 +96,9 @@ async fn main() -> Result<()> { Resource::CI(args) => { ci_cmd(&args).await?; } + Resource::LoadEnvironment(args) => { + load_environment_cmd(&args)?; + } } Ok(()) diff --git a/crates/test-cluster/Cargo.toml b/crates/test-cluster/Cargo.toml index 58ca4d37edd93..70bb3d0be5023 100644 --- a/crates/test-cluster/Cargo.toml +++ b/crates/test-cluster/Cargo.toml @@ -17,11 +17,14 @@ futures.workspace = true tracing.workspace = true jsonrpsee.workspace = true tokio = { workspace = true, features = ["full", "tracing", "test-util"] } +tokio-util.workspace = true rand.workspace = true +tempfile.workspace = true sui-config.workspace = true sui-core = { workspace = true, features = ["test-utils"] } sui-framework.workspace = true sui-swarm-config.workspace = true +sui-indexer.workspace = true sui-json-rpc.workspace = true sui-json-rpc-types.workspace = true sui-json-rpc-api.workspace = true diff --git a/crates/test-cluster/src/indexer_util.rs b/crates/test-cluster/src/indexer_util.rs new file mode 100644 index 0000000000000..5d3b8b5605dd7 --- /dev/null +++ b/crates/test-cluster/src/indexer_util.rs @@ -0,0 +1,84 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use jsonrpsee::http_client::{HttpClient, HttpClientBuilder}; +use std::path::PathBuf; +use std::time::Duration; +use sui_config::local_ip_utils::get_available_port; +use sui_indexer::tempdb::TempDb; +use sui_indexer::test_utils::{ + start_indexer_jsonrpc_for_testing, start_indexer_writer_for_testing, +}; +use sui_json_rpc_api::ReadApiClient; +use sui_sdk::{SuiClient, SuiClientBuilder}; +use tempfile::TempDir; +use tokio::time::sleep; + +pub(crate) struct IndexerHandle { + pub(crate) rpc_client: HttpClient, + pub(crate) sui_client: SuiClient, + pub(crate) rpc_url: String, + #[allow(unused)] + cancellation_tokens: Vec, + #[allow(unused)] + data_ingestion_dir: Option, + #[allow(unused)] + database: TempDb, +} + +// TODO: this only starts indexer writer and reader (jsonrpc server) today. +// Consider adding graphql server here as well. +pub(crate) async fn setup_indexer_backed_rpc( + fullnode_rpc_url: String, + temp_data_ingestion_dir: Option, + data_ingestion_path: PathBuf, +) -> IndexerHandle { + let mut cancellation_tokens = vec![]; + let database = TempDb::new().unwrap(); + let pg_address = database.database().url().as_str().to_owned(); + let indexer_jsonrpc_address = format!("127.0.0.1:{}", get_available_port("127.0.0.1")); + + // Start indexer writer + let (_, _, writer_token) = start_indexer_writer_for_testing( + pg_address.clone(), + None, + None, + Some(data_ingestion_path.clone()), + None, + ) + .await; + cancellation_tokens.push(writer_token.drop_guard()); + + // Start indexer jsonrpc service + let (_, reader_token) = start_indexer_jsonrpc_for_testing( + pg_address.clone(), + fullnode_rpc_url, + indexer_jsonrpc_address.clone(), + None, + ) + .await; + cancellation_tokens.push(reader_token.drop_guard()); + + let rpc_address = format!("http://{}", indexer_jsonrpc_address); + + let rpc_client = HttpClientBuilder::default().build(&rpc_address).unwrap(); + + // Wait for the rpc client to be ready + while rpc_client.get_chain_identifier().await.is_err() { + sleep(Duration::from_millis(100)).await; + } + + let sui_client = SuiClientBuilder::default() + .build(&rpc_address) + .await + .unwrap(); + + IndexerHandle { + rpc_client, + sui_client, + rpc_url: rpc_address.clone(), + database, + data_ingestion_dir: temp_data_ingestion_dir, + cancellation_tokens, + } +} diff --git a/crates/test-cluster/src/lib.rs b/crates/test-cluster/src/lib.rs index 05643d8716b01..5cfaf7d2b2ad8 100644 --- a/crates/test-cluster/src/lib.rs +++ b/crates/test-cluster/src/lib.rs @@ -61,6 +61,8 @@ use tokio::time::{timeout, Instant}; use tokio::{task::JoinHandle, time::sleep}; use tracing::{error, info}; +mod test_indexer_handle; + const NUM_VALIDATOR: usize = 4; pub struct FullNodeHandle { @@ -90,23 +92,33 @@ pub struct TestCluster { pub swarm: Swarm, pub wallet: WalletContext, pub fullnode_handle: FullNodeHandle, + indexer_handle: Option, } impl TestCluster { pub fn rpc_client(&self) -> &HttpClient { - &self.fullnode_handle.rpc_client + self.indexer_handle + .as_ref() + .map(|h| &h.rpc_client) + .unwrap_or(&self.fullnode_handle.rpc_client) } pub fn sui_client(&self) -> &SuiClient { - &self.fullnode_handle.sui_client + self.indexer_handle + .as_ref() + .map(|h| &h.sui_client) + .unwrap_or(&self.fullnode_handle.sui_client) } - pub fn quorum_driver_api(&self) -> &QuorumDriverApi { - self.sui_client().quorum_driver_api() + pub fn rpc_url(&self) -> &str { + self.indexer_handle + .as_ref() + .map(|h| h.rpc_url.as_str()) + .unwrap_or(&self.fullnode_handle.rpc_url) } - pub fn 
rpc_url(&self) -> &str { - &self.fullnode_handle.rpc_url + pub fn quorum_driver_api(&self) -> &QuorumDriverApi { + self.sui_client().quorum_driver_api() } pub fn wallet(&mut self) -> &WalletContext { @@ -829,6 +841,8 @@ pub struct TestClusterBuilder { max_submit_position: Option, submit_delay_step_override_millis: Option, validator_state_accumulator_v2_enabled_config: StateAccumulatorV2EnabledConfig, + + indexer_backed_rpc: bool, } impl TestClusterBuilder { @@ -859,6 +873,7 @@ impl TestClusterBuilder { validator_state_accumulator_v2_enabled_config: StateAccumulatorV2EnabledConfig::Global( true, ), + indexer_backed_rpc: false, } } @@ -1057,6 +1072,11 @@ impl TestClusterBuilder { self } + pub fn with_indexer_backed_rpc(mut self) -> Self { + self.indexer_backed_rpc = true; + self + } + pub async fn build(mut self) -> TestCluster { // All test clusters receive a continuous stream of random JWKs. // If we later use zklogin authenticated transactions in tests we will need to supply @@ -1087,20 +1107,50 @@ impl TestClusterBuilder { })); } + let mut temp_data_ingestion_dir = None; + let mut data_ingestion_path = None; + + if self.indexer_backed_rpc { + if self.data_ingestion_dir.is_none() { + temp_data_ingestion_dir = Some(tempfile::tempdir().unwrap()); + self.data_ingestion_dir = Some( + temp_data_ingestion_dir + .as_ref() + .unwrap() + .path() + .to_path_buf(), + ); + assert!(self.data_ingestion_dir.is_some()); + } + assert!(self.data_ingestion_dir.is_some()); + data_ingestion_path = Some(self.data_ingestion_dir.as_ref().unwrap().to_path_buf()); + } + let swarm = self.start_swarm().await.unwrap(); let working_dir = swarm.dir(); - let mut wallet_conf: SuiClientConfig = - PersistedConfig::read(&working_dir.join(SUI_CLIENT_CONFIG)).unwrap(); - let fullnode = swarm.fullnodes().next().unwrap(); let json_rpc_address = fullnode.config().json_rpc_address; let fullnode_handle = FullNodeHandle::new(fullnode.get_node_handle().unwrap(), json_rpc_address).await; + let (rpc_url, indexer_handle) = if self.indexer_backed_rpc { + let handle = test_indexer_handle::IndexerHandle::new( + fullnode_handle.rpc_url.clone(), + temp_data_ingestion_dir, + data_ingestion_path.unwrap(), + ) + .await; + (handle.rpc_url.clone(), Some(handle)) + } else { + (fullnode_handle.rpc_url.clone(), None) + }; + + let mut wallet_conf: SuiClientConfig = + PersistedConfig::read(&working_dir.join(SUI_CLIENT_CONFIG)).unwrap(); wallet_conf.envs.push(SuiEnv { alias: "localnet".to_string(), - rpc: fullnode_handle.rpc_url.clone(), + rpc: rpc_url, ws: None, basic_auth: None, }); @@ -1118,6 +1168,7 @@ impl TestClusterBuilder { swarm, wallet, fullnode_handle, + indexer_handle, } } diff --git a/crates/test-cluster/src/test_indexer_handle.rs b/crates/test-cluster/src/test_indexer_handle.rs new file mode 100644 index 0000000000000..ec399de40f0d3 --- /dev/null +++ b/crates/test-cluster/src/test_indexer_handle.rs @@ -0,0 +1,88 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +use jsonrpsee::http_client::{HttpClient, HttpClientBuilder}; +use std::path::PathBuf; +use std::time::Duration; +use sui_config::local_ip_utils::new_local_tcp_socket_for_testing_string; +use sui_indexer::tempdb::TempDb; +use sui_indexer::test_utils::{ + start_indexer_jsonrpc_for_testing, start_indexer_writer_for_testing, +}; +use sui_json_rpc_api::ReadApiClient; +use sui_sdk::{SuiClient, SuiClientBuilder}; +use tempfile::TempDir; +use tokio::time::sleep; + +pub(crate) struct IndexerHandle { + pub(crate) rpc_client: HttpClient, + pub(crate) sui_client: SuiClient, + pub(crate) rpc_url: String, + #[allow(unused)] + cancellation_tokens: Vec, + #[allow(unused)] + data_ingestion_dir: Option, + #[allow(unused)] + database: TempDb, +} + +impl IndexerHandle { + // TODO: this only starts indexer writer and reader (jsonrpc server) today. + // Consider adding graphql server here as well. + pub async fn new( + fullnode_rpc_url: String, + data_ingestion_dir: Option, + data_ingestion_path: PathBuf, + ) -> IndexerHandle { + let mut cancellation_tokens = vec![]; + let database = TempDb::new().unwrap(); + let pg_address = database.database().url().as_str().to_owned(); + let indexer_jsonrpc_address = new_local_tcp_socket_for_testing_string(); + + // Start indexer writer + let (_, _, writer_token) = start_indexer_writer_for_testing( + pg_address.clone(), + None, + None, + Some(data_ingestion_path.clone()), + None, + None, + None, + ) + .await; + cancellation_tokens.push(writer_token.drop_guard()); + + // Start indexer jsonrpc service + let (_, reader_token) = start_indexer_jsonrpc_for_testing( + pg_address.clone(), + fullnode_rpc_url, + indexer_jsonrpc_address.clone(), + None, + ) + .await; + cancellation_tokens.push(reader_token.drop_guard()); + + let rpc_address = format!("http://{}", indexer_jsonrpc_address); + + let rpc_client = HttpClientBuilder::default().build(&rpc_address).unwrap(); + + // Wait for the rpc client to be ready + while rpc_client.get_chain_identifier().await.is_err() { + sleep(Duration::from_millis(100)).await; + } + + let sui_client = SuiClientBuilder::default() + .build(&rpc_address) + .await + .unwrap(); + + IndexerHandle { + rpc_client, + sui_client, + rpc_url: rpc_address.clone(), + database, + data_ingestion_dir, + cancellation_tokens, + } + } +} diff --git a/crates/x/src/lint.rs b/crates/x/src/lint.rs index 55116070aa5b4..d84c9c3ca581f 100644 --- a/crates/x/src/lint.rs +++ b/crates/x/src/lint.rs @@ -143,7 +143,8 @@ pub fn handle_lint_results_exclude_external_crate_checks( |source: &LintSource, path: &Utf8Path| -> bool { (path.starts_with(EXTERNAL_CRATE_DIR) || path.starts_with(CREATE_DAPP_TEMPLATE_DIR) - || path.to_string().contains("/generated/")) + || path.to_string().contains("/generated/") + || path.to_string().contains("/proto/")) && source.name() == "license-header" }, // ignore check to skip buck related code paths, meta (fb) derived starlark, etc. 
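The `test-cluster` changes above introduce an indexer-backed RPC mode: `TestClusterBuilder::with_indexer_backed_rpc()` starts a temporary Postgres database, an indexer writer, and an indexer JSON-RPC reader, then routes the cluster's `rpc_client()`, `sui_client()`, and `rpc_url()` through the indexer instead of the fullnode. The sketch below shows how a test might opt into the new mode; the test name and assertions are illustrative only and are not part of this change.

```rust
// Sketch only: exercises the new `with_indexer_backed_rpc()` builder flag added
// in the test-cluster changes above. The test name and assertions are
// hypothetical; only the builder call and the accessors come from this diff.
use test_cluster::TestClusterBuilder;

#[tokio::test]
async fn indexer_backed_rpc_smoke_test() {
    // When no data ingestion dir is configured, the builder creates a tempdir
    // and a TempDb-backed indexer automatically (see the build() changes above).
    let cluster = TestClusterBuilder::new()
        .with_indexer_backed_rpc()
        .build()
        .await;

    // rpc_url() and the "localnet" wallet env now point at the indexer JSON-RPC
    // address rather than the fullnode.
    assert!(cluster.rpc_url().starts_with("http://"));

    // sui_client() is likewise routed through the indexer handle when present.
    assert!(cluster
        .sui_client()
        .read_api()
        .get_chain_identifier()
        .await
        .is_ok());
}
```

Because the builder falls back to a temporary data-ingestion directory when none is supplied, a test needs no extra setup beyond the builder call itself.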
diff --git a/dapps/multisig-toolkit/package.json b/dapps/multisig-toolkit/package.json index 33363e105e1f3..2bd735f3411c1 100644 --- a/dapps/multisig-toolkit/package.json +++ b/dapps/multisig-toolkit/package.json @@ -21,6 +21,7 @@ "@hookform/resolvers": "^3.9.0", "@mysten/dapp-kit": "workspace:*", "@mysten/sui": "workspace:*", + "@noble/hashes": "^1.4.0", "@radix-ui/react-dialog": "^1.1.1", "@radix-ui/react-label": "^2.1.0", "@radix-ui/react-navigation-menu": "^1.2.0", @@ -43,6 +44,7 @@ "devDependencies": { "@tailwindcss/forms": "^0.5.7", "@tsconfig/docusaurus": "^2.0.3", + "@types/node": "^20.14.10", "@types/react": "^18.3.3", "@types/react-dom": "^18.3.0", "@vitejs/plugin-react": "^4.3.1", diff --git a/dapps/multisig-toolkit/src/routes/offline-signer.tsx b/dapps/multisig-toolkit/src/routes/offline-signer.tsx index 9136e630ab4f7..5430b1b17c297 100644 --- a/dapps/multisig-toolkit/src/routes/offline-signer.tsx +++ b/dapps/multisig-toolkit/src/routes/offline-signer.tsx @@ -3,10 +3,13 @@ import { useCurrentAccount, useSignTransaction, useSuiClientContext } from '@mysten/dapp-kit'; import { getFullnodeUrl, SuiClient } from '@mysten/sui/client'; +import { messageWithIntent } from '@mysten/sui/cryptography'; import { Transaction } from '@mysten/sui/transactions'; +import { fromBase64, toHex } from '@mysten/sui/utils'; +import { blake2b } from '@noble/hashes/blake2b'; import { useMutation } from '@tanstack/react-query'; import { AlertCircle, Terminal } from 'lucide-react'; -import { useEffect, useState } from 'react'; +import { useEffect, useMemo, useState } from 'react'; import { ConnectWallet } from '@/components/connect'; import { DryRunProvider, type Network } from '@/components/preview-effects/DryRunContext'; @@ -70,6 +73,21 @@ export default function OfflineSigner() { }, }); + // Step 3: compute the blake2b hash + const ledgerTransactionHash = useMemo(() => { + if (!bytes) return null; + try { + // Decode the base64-encoded transaction bytes + const decodedBytes = fromBase64(bytes); + const intentMessage = messageWithIntent('TransactionData', decodedBytes); + const intentMessageDigest = blake2b(intentMessage, { dkLen: 32 }); + const intentMessageDigestHex = toHex(intentMessageDigest); + return `0x${intentMessageDigestHex}`; + } catch (error) { + return 'Error computing hash'; + } + }, [bytes]); + return (

@@ -145,6 +163,15 @@ export default function OfflineSigner() { )} + + {ledgerTransactionHash && ( +
+

Ledger Transaction Hash

+
+ {ledgerTransactionHash} +
+
+ )}

diff --git a/docker/sui-graphql-rpc-staging/build.sh b/docker/sui-graphql-rpc-staging/build.sh new file mode 100755 index 0000000000000..84038b6538c15 --- /dev/null +++ b/docker/sui-graphql-rpc-staging/build.sh @@ -0,0 +1,36 @@ +#!/bin/sh +# Copyright (c) Mysten Labs, Inc. +# SPDX-License-Identifier: Apache-2.0 + +# fast fail. +set -e + +DIR="$( cd "$( dirname "$0" )" && pwd )" +REPO_ROOT="$(git rev-parse --show-toplevel)" +DOCKERFILE="$DIR/Dockerfile" +GIT_REVISION="$(git describe --always --abbrev=12 --dirty --exclude '*')" +BUILD_DATE="$(date -u +'%Y-%m-%d')" + +# option to build using debug symbols +if [ "$1" = "--debug-symbols" ]; then + PROFILE="bench" + echo "Building with full debug info enabled ... WARNING: binary size might significantly increase" + shift +else + PROFILE="release" +fi + +echo +echo "Building sui-graphql-rpc docker image" +echo "Dockerfile: \t$DOCKERFILE" +echo "docker context: $REPO_ROOT" +echo "build date: \t$BUILD_DATE" +echo "git revision: \t$GIT_REVISION" +echo + +docker build -f "$DOCKERFILE" "$REPO_ROOT" \ + --build-arg GIT_REVISION="$GIT_REVISION" \ + --build-arg BUILD_DATE="$BUILD_DATE" \ + --build-arg PROFILE="$PROFILE" \ + --features staging \ + "$@" diff --git a/docker/sui-indexer-alt/Dockerfile b/docker/sui-indexer-alt/Dockerfile new file mode 100644 index 0000000000000..088295fa97bc5 --- /dev/null +++ b/docker/sui-indexer-alt/Dockerfile @@ -0,0 +1,37 @@ +# Build application +# +# Copy in all crates, Cargo.toml and Cargo.lock unmodified, +# and build the application. +FROM rust:1.81-bullseye AS builder +ARG PROFILE=release +ARG GIT_REVISION +ENV GIT_REVISION=$GIT_REVISION +WORKDIR "$WORKDIR/sui" + +# sui-indexer need ca-certificates +RUN apt update && apt install -y ca-certificates postgresql + +RUN apt-get update && apt-get install -y cmake clang + +COPY Cargo.toml Cargo.lock ./ +COPY consensus consensus +COPY crates crates +COPY sui-execution sui-execution +COPY narwhal narwhal +COPY external-crates external-crates + +RUN cargo build --profile ${PROFILE} --bin sui-indexer-alt + +# Production Image +FROM debian:bullseye-slim AS runtime +# Use jemalloc as memory allocator +RUN apt-get update && apt-get install -y libjemalloc-dev ca-certificates curl +ENV LD_PRELOAD /usr/lib/x86_64-linux-gnu/libjemalloc.so +WORKDIR "$WORKDIR/sui" +COPY --from=builder /sui/target/release/sui-indexer-alt /usr/local/bin +RUN apt update && apt install -y ca-certificates postgresql + +ARG BUILD_DATE +ARG GIT_REVISION +LABEL build-date=$BUILD_DATE +LABEL git-revision=$GIT_REVISION diff --git a/docker/sui-indexer-alt/build.sh b/docker/sui-indexer-alt/build.sh new file mode 100644 index 0000000000000..f11ac4fa88ef7 --- /dev/null +++ b/docker/sui-indexer-alt/build.sh @@ -0,0 +1,25 @@ +#!/bin/sh +# Copyright (c) Mysten Labs, Inc. +# SPDX-License-Identifier: Apache-2.0 + +# fast fail. 
+set -e + +DIR="$( cd "$( dirname "$0" )" && pwd )" +REPO_ROOT="$(git rev-parse --show-toplevel)" +DOCKERFILE="$DIR/Dockerfile" +GIT_REVISION="$(git describe --always --abbrev=12 --dirty --exclude '*')" +BUILD_DATE="$(date -u +'%Y-%m-%d')" + +echo +echo "Building sui-indexer-alt docker image" +echo "Dockerfile: \t$DOCKERFILE" +echo "docker context: $REPO_ROOT" +echo "build date: \t$BUILD_DATE" +echo "git revision: \t$GIT_REVISION" +echo + +docker build -f "$DOCKERFILE" "$REPO_ROOT" \ + --build-arg GIT_REVISION="$GIT_REVISION" \ + --build-arg BUILD_DATE="$BUILD_DATE" \ + "$@" \ No newline at end of file diff --git a/docker/sui-mvr-indexer/Dockerfile b/docker/sui-mvr-indexer/Dockerfile new file mode 100644 index 0000000000000..bf0f2be29e2e7 --- /dev/null +++ b/docker/sui-mvr-indexer/Dockerfile @@ -0,0 +1,37 @@ +# Build application +# +# Copy in all crates, Cargo.toml and Cargo.lock unmodified, +# and build the application. +FROM rust:1.81-bullseye AS builder +ARG PROFILE=release +ARG GIT_REVISION +ENV GIT_REVISION=$GIT_REVISION +WORKDIR "$WORKDIR/sui" + +# sui-mvr-indexer needs postgres libpq5 and ca-certificates +RUN apt update && apt install -y libpq5 ca-certificates libpq-dev postgresql + +RUN apt-get update && apt-get install -y cmake clang + +COPY Cargo.toml Cargo.lock ./ +COPY consensus consensus +COPY crates crates +COPY sui-execution sui-execution +COPY narwhal narwhal +COPY external-crates external-crates + +RUN cargo build --profile ${PROFILE} --bin sui-mvr-indexer + +# Production Image +FROM debian:bullseye-slim AS runtime +# Use jemalloc as memory allocator +RUN apt-get update && apt-get install -y libjemalloc-dev ca-certificates curl +ENV LD_PRELOAD /usr/lib/x86_64-linux-gnu/libjemalloc.so +WORKDIR "$WORKDIR/sui" +COPY --from=builder /sui/target/release/sui-mvr-indexer /usr/local/bin +RUN apt update && apt install -y libpq5 ca-certificates libpq-dev postgresql + +ARG BUILD_DATE +ARG GIT_REVISION +LABEL build-date=$BUILD_DATE +LABEL git-revision=$GIT_REVISION diff --git a/docker/sui-mvr-indexer/build.sh b/docker/sui-mvr-indexer/build.sh new file mode 100755 index 0000000000000..5e1c8c1623fe7 --- /dev/null +++ b/docker/sui-mvr-indexer/build.sh @@ -0,0 +1,25 @@ +#!/bin/sh +# Copyright (c) Mysten Labs, Inc. +# SPDX-License-Identifier: Apache-2.0 + +# fast fail. +set -e + +DIR="$( cd "$( dirname "$0" )" && pwd )" +REPO_ROOT="$(git rev-parse --show-toplevel)" +DOCKERFILE="$DIR/Dockerfile" +GIT_REVISION="$(git describe --always --abbrev=12 --dirty --exclude '*')" +BUILD_DATE="$(date -u +'%Y-%m-%d')" + +echo +echo "Building sui-mvr-indexer docker image" +echo "Dockerfile: \t$DOCKERFILE" +echo "docker context: $REPO_ROOT" +echo "build date: \t$BUILD_DATE" +echo "git revision: \t$GIT_REVISION" +echo + +docker build -f "$DOCKERFILE" "$REPO_ROOT" \ + --build-arg GIT_REVISION="$GIT_REVISION" \ + --build-arg BUILD_DATE="$BUILD_DATE" \ + "$@" diff --git a/docs/content/concepts/sui-bridge.mdx b/docs/content/concepts/sui-bridge.mdx new file mode 100644 index 0000000000000..508f716c15dc8 --- /dev/null +++ b/docs/content/concepts/sui-bridge.mdx @@ -0,0 +1,87 @@ +--- +title: Sui Bridge +draft: true +--- + +[Sui Bridge](https://bridge.sui.io/) is the native bridge for the Sui network. Unlike third-party solutions, a native bridge is typically built into the core architecture of a blockchain, ensuring optimal integration and operation. The bridge allows users to move digital assets from one chain to another, preserving security and maintaining interoperability between diverse ecosystems. 
Sui Bridge is the native bridge on Sui, designed to enhance interoperability between Sui and Ethereum. Sui Bridge enables users to securely and efficiently transfer native and wrapped ETH to and from Sui. The bridge leverages the unique capabilities of the Sui network to offer fast transaction speeds, lower transaction costs, and a decentralized architecture. + +You can bridge tokens on the official Sui Bridge website: https://bridge.sui.io/. + +## Operation and governance + +Sui Bridge is operated and governed by Sui network validators, the same set of validators that secure the Sui network. Bridge transfers and other actions require validator signatures with a minimum threshold of voting power. + +Similar to the Sui network, all governance related to the Sui Bridge is done via validator voting. + +## Architecture + +WIP + +## Supported chains and tokens + +Sui Bridge supports token bridging between Sui and Ethereum Mainnet with the following supported assets: + +| Asset | Description | +| --- | --- | +| Ethereum (ETH) | The native cryptocurrency of the Ethereum network, widely used for transactions and smart contract interactions. | +| Wrapped Ethereum (WETH) | Tokenized representation of native ETH. | + +You can transfer these assets both to and from the Sui network, utilizing the bridge’s capabilities to provide fast, secure, and efficient cross-chain transactions. + +## Package IDs and contract addresses + +The following package IDs and addresses are reserved for the Sui Bridge. + +| Asset | Address/ID | +| --- | --- | +| Sui Bridge Package on Sui | [`0xb`](https://suiscan.xyz/mainnet/object/0x000000000000000000000000000000000000000000000000000000000000000b/txs) | +| Sui Bridge Object on Sui | [`0x9`](https://suiscan.xyz/mainnet/object/0x0000000000000000000000000000000000000000000000000000000000000009) | +| Sui Bridge Proxy on Ethereum Mainnet | [`0xda3bD1fE1973470312db04551B65f401Bc8a92fD`](https://etherscan.io/address/0xda3bd1fe1973470312db04551b65f401bc8a92fd) | +| ETH on Sui | [`0xd0e89b2af5e4910726fbcd8b8dd37bb79b29e5f83f7491bca830e94f7f226d29::eth::ETH`](https://suiscan.xyz/mainnet/coin/0xd0e89b2af5e4910726fbcd8b8dd37bb79b29e5f83f7491bca830e94f7f226d29::eth::ETH/txs) | +| ETH on Ethereum | Native Ether | +| WETH on Ethereum | [`0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2`](https://etherscan.io/address/0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2) | + +## Source code + +The source code for Sui Bridge is open-source and found in the following locations: + +- Move: https://github.com/MystenLabs/sui/tree/main/crates/sui-framework/packages/bridge +- Solidity: https://github.com/MystenLabs/sui/tree/main/bridge/evm +- Bridged ETH (Move): https://github.com/MystenLabs/sui/tree/main/bridge/move/tokens/eth +- Bridge Node: https://github.com/MystenLabs/sui/tree/main/crates/sui-bridge +- Bridge Indexer: https://github.com/MystenLabs/sui/tree/main/crates/sui-bridge-indexer + +## Audits + +There are two audit reports available for Sui Bridge: +- [OtterSec](https://github.com/sui-foundation/security-audits/blob/main/docs/Sui_bridge_v1_OtterSec.pdf) +- [Zellic](https://github.com/sui-foundation/security-audits/blob/main/docs/Sui_Bridge_v1_Zellic.pdf) + +## Global limiter {#global-limiter} + +A limiter protects the user's funds by constraining the total value of assets leaving Sui Bridge in any 24-hour window. It tracks total value hourly and aggregates values from the previous 24 hours. Therefore, when the limiter cools down, it refreshes every hour.
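To make the rolling-window behaviour described above concrete, the following sketch models a limiter that tracks hourly buckets over a 24-hour window. This is purely illustrative and is not the bridge's actual implementation; the struct, its field names, and the dollar amounts (the static $2,600 ETH price and the $5 million Sui-to-Ethereum limit quoted below) appear here only to show how the window refreshes each hour.

```rust
// Illustrative only: a toy 24-hour rolling limiter tracked in hourly buckets.
// The type, its fields, and the numbers are hypothetical and do not reflect
// the actual Sui Bridge implementation.
use std::collections::VecDeque;

struct RollingLimiter {
    /// USD value recorded in each of the last 24 hourly buckets.
    buckets: VecDeque<u64>,
    /// Maximum USD value allowed to leave the bridge per 24-hour window.
    limit_usd: u64,
}

impl RollingLimiter {
    fn new(limit_usd: u64) -> Self {
        Self {
            buckets: VecDeque::from(vec![0; 24]),
            limit_usd,
        }
    }

    /// Called once per hour: the oldest bucket drops out of the window, which
    /// is why the available limit "refreshes every hour" as the limiter cools down.
    fn roll_hour(&mut self) {
        self.buckets.pop_front();
        self.buckets.push_back(0);
    }

    fn window_total(&self) -> u64 {
        self.buckets.iter().sum()
    }

    /// A transfer is admitted only if it keeps the 24-hour total within the limit.
    fn try_transfer(&mut self, amount_usd: u64) -> bool {
        if self.window_total() + amount_usd > self.limit_usd {
            return false;
        }
        *self.buckets.back_mut().expect("24 buckets") += amount_usd;
        true
    }
}

fn main() {
    // Assume the Sui -> Ethereum direction: a $5 million window with ETH priced
    // statically at $2,600, the figures quoted later on this page.
    let mut limiter = RollingLimiter::new(5_000_000);
    assert!(limiter.try_transfer(1_000 * 2_600)); // 1,000 ETH = $2.6M, allowed
    assert!(!limiter.try_transfer(1_000 * 2_600)); // another $2.6M would exceed $5M
    limiter.roll_hour(); // one hour later the $2.6M is still inside the window
    assert!(!limiter.try_transfer(1_000 * 2_600)); // so this is still rejected
}
```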
+ The limit applies globally and varies per direction. For example, the limit for Ethereum to Sui might differ from the limit for Sui to Ethereum. + +The limit also caps the maximum amount of a single transfer. Simply put, in one bridge transfer, you cannot move assets worth more than the limit. The bridge frontend might apply stricter restrictions to protect user assets. + +The limit per route is governed by the validator committee through voting. + +The global limit is currently $8 million from Ethereum to Sui and $5 million from Sui to Ethereum every 24 hours. + +## Asset price + +Sui Bridge v1 uses static pricing to calculate limits. The price for ETH is configured at $2,600.00. Namely, bridging one ETH consumes $2,600 USD in the limit calculation. + +The validator committee governs the pricing through voting. It works together with the global limiter to protect user funds. + +## Transfer limit + +There is no minimum limit for transfers, but a tiny fraction might be rounded down. In particular, because native Ethereum (ETH) and wrapped Ethereum (WETH) have a reduced precision of eight decimal places, the value of 10.0000000000000001 (W)ETH is rounded down to 10 (W)ETH. + +| Token | Minimum value | +| --- | --- | +| ETH | 0.00000001 ETH (eight decimal places of precision) | +| WETH | 0.00000001 ETH (eight decimal places of precision) | + +The maximum limit per transfer is the global limit in USD value. Namely, a user cannot claim assets on the destination chain if the USD value is higher than the global limit. See the [Global limiter section](#global-limiter) for details. \ No newline at end of file diff --git a/docs/content/guides/developer/getting-started/local-network.mdx b/docs/content/guides/developer/getting-started/local-network.mdx index afbf5f6cacaf5..1b2beadf903db 100644 --- a/docs/content/guides/developer/getting-started/local-network.mdx +++ b/docs/content/guides/developer/getting-started/local-network.mdx @@ -21,7 +21,7 @@ This command: * Instructs Rust to set specific logging through the `RUST_LOG`=`off,sui_node=info` flags, which turns off logging for all components except `sui-node`. If you want to see more detailed logs, you can remove `RUST_LOG` from the command. :::info -Each time you start the network by passing `--force-regenesis`, the local network starts from a random genesis with no previous data, and the local network is not persisted. If you'd like to persist data, skip passing the `--force-regenesis` flag. For more details, see the [Persist local network state](#persist-local-network) section. +Each time you start the network by passing `--force-regenesis`, the local network starts from a random genesis with no previous data, and the local network is not persisted. If you'd like to persist data, skip passing the `--force-regenesis` flag. For more details, see the [Persist local network state](#persist-local-network-state) section. ::: To customize your local Sui network, such as starting other services or changing default ports and hosts, include additional flags or options in the `sui start` command. diff --git a/docs/content/references/cli.mdx b/docs/content/references/cli.mdx index 48fe196107026..e55cbeb6abc75 100644 --- a/docs/content/references/cli.mdx +++ b/docs/content/references/cli.mdx @@ -12,12 +12,12 @@ Sui provides a command line interface (CLI) tool to interact with the Sui networ To get the latest version of the CLI, you can run the following command from a terminal or console.
Be sure to replace `` with `main`, `devnet`, `testnet`, or `mainnet` to get the desired version. For more information on the branches available, see [Sui Environment Setup](./contribute/sui-environment.mdx). ```shell -cargo install --locked --git https://github.com/MystenLabs/sui.git --branch --features gas-profiler sui +cargo install --locked --git https://github.com/MystenLabs/sui.git --branch --features tracing sui ``` :::info -The `--features gas-profiler` flag is necessary only if you want to run gas profiles for transactions. +The `--features tracing` flag is necessary only if you want to run gas profiles for transactions. ::: diff --git a/docs/content/references/cli/client.mdx b/docs/content/references/cli/client.mdx index 8bedd5c602943..3057f535e8bf2 100644 --- a/docs/content/references/cli/client.mdx +++ b/docs/content/references/cli/client.mdx @@ -473,10 +473,10 @@ and produce a gas profile. Similar to the `replay` command, this command fetches Full node specified in the client environment that are needed to execute the transaction. During the local execution of the transaction, this command records all the Move function invocations and the gas cost breakdown for each invocation. -To enable the profiler, you must either install or build the Sui Client binary locally with the `--features gas-profiler` flag. +To enable the profiler, you must either install or build the Sui Client binary locally with the `--features tracing` flag. ```shell -cargo install --locked --git https://github.com/MystenLabs/sui.git --branch --features gas-profiler sui +cargo install --locked --git https://github.com/MystenLabs/sui.git --branch --features tracing sui ``` The command outputs a profile to the current working directory in the format `gas_profile_{tx_digest}_{unix_timestamp}.json`. diff --git a/docs/content/sidebars/concepts.js b/docs/content/sidebars/concepts.js index 490d4978e25cb..ffe5f546f0e9c 100644 --- a/docs/content/sidebars/concepts.js +++ b/docs/content/sidebars/concepts.js @@ -178,6 +178,7 @@ const concepts = [ 'concepts/tokenomics/gas-in-sui', ], }, + 'concepts/sui-bridge', 'concepts/research-papers', ]; module.exports = concepts; diff --git a/docs/content/standards/deepbook.mdx b/docs/content/standards/deepbook.mdx index e2ae5555ce796..e58df3b5e0100 100644 --- a/docs/content/standards/deepbook.mdx +++ b/docs/content/standards/deepbook.mdx @@ -9,9 +9,7 @@ DeepBook is a next-generation decentralized central limit order book (CLOB) buil ## Versioning -The latest development iteration of DeepBook is version 3 (DeepBookV3). [DeebBookV3](./deepbookv3.mdx) is currently available in both Devnet and Testnet. [DeepBookV2](./deepbookv2.mdx) is the current Mainnet iteration of DeepBook. - -DeepBookV3 is scheduled to replace DeepBookV2 on Mainnet in the second half of 2024. +The latest development iteration of DeepBook is version 3 (DeepBookV3). [DeepBookV3](./deepbookv3.mdx) is currently available across all Sui networks. [DeepBookV2](./deepbookv2.mdx) is currently still available to provide time for legacy contracts to migrate to DeepBookV3. @@ -19,10 +17,8 @@ DeepBookV3 is scheduled to replace DeepBookV2 on Mainnet in the second half of 2 DeepBook is open for community development. You can use the [Sui Improvement Proposals](https://github.com/sui-foundation/sips?ref=blog.sui.io) (SIPs) process to suggest changes to make DeepBook better. 
-- [DeepBookV2 packages](https://github.com/MystenLabs/sui/tree/main/crates/sui-framework/packages/deepbook) (part of Sui framework) -- [DeepBookV3 repository on GitHub](https://github.com/MystenLabs/deepbookv3) - - ## Related links +- [DeepBookV3 repository on GitHub](https://github.com/MystenLabs/deepbookv3) - DeepBook framework docs: Autogenerated framework documentation for DeepBookV2. + diff --git a/docs/content/standards/deepbookv3/query-the-pool.mdx b/docs/content/standards/deepbookv3/query-the-pool.mdx index 24ad2495c0591..94a831c9a9483 100644 --- a/docs/content/standards/deepbookv3/query-the-pool.mdx +++ b/docs/content/standards/deepbookv3/query-the-pool.mdx @@ -6,7 +6,7 @@ title: Query the Pool The `Pool` shared object represents a market, such as a SUI/USDC market. That `Pool` is the only one representing that unique pairing (SUI/USDC) and the pairing is the only member of that particular `Pool`. See [DeepBook Design](./design.mdx#pool) to learn more about the structure of pools. -To perform trades, you pass a `BalanceManager` and `TradeProof` into the relvant `Pool`. Unlike `Pool`s, `BalanceManager` shared objects can contain any type of token, such that the same `BalanceManager` can access multiple `Pool`s to interact with many different trade pairings. See [BalanceManager](./balance-manager.mdx) to learn more. +To perform trades, you pass a `BalanceManager` and `TradeProof` into the relevant `Pool`. Unlike `Pool`s, `BalanceManager` shared objects can contain any type of token, such that the same `BalanceManager` can access multiple `Pool`s to interact with many different trade pairings. See [BalanceManager](./balance-manager.mdx) to learn more. ## API @@ -32,7 +32,7 @@ public fun get_quote_quantity_out( ): (u64, u64, u64) ``` -### Check quote quantity against quote +### Check base quantity against quote Dry run to determine the base quantity out for a given quote quantity. @@ -195,4 +195,4 @@ Returns the `OrderDeepPrice` struct for the pool, which determines the conversio public fun get_order_deep_price( self: &Pool, ): OrderDeepPrice -``` \ No newline at end of file +``` diff --git a/docs/site/src/components/API/api-ref/compnav.js b/docs/site/src/components/API/api-ref/compnav.js new file mode 100644 index 0000000000000..03009d56a5984 --- /dev/null +++ b/docs/site/src/components/API/api-ref/compnav.js @@ -0,0 +1,29 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +import React from "react"; +import Link from "@docusaurus/Link"; + +const CompNav = (props) => { + const { json, apis } = props; + + return ( +
+
+

Component schemas

+ {Object.keys(json["components"]["schemas"]).map((component) => { + return ( +
+ + {component} + +
+ )})} +
+
+ ); +}; + +export default CompNav; diff --git a/docs/site/src/components/API/api-ref/components.js b/docs/site/src/components/API/api-ref/components.js new file mode 100644 index 0000000000000..c95234a23d486 --- /dev/null +++ b/docs/site/src/components/API/api-ref/components.js @@ -0,0 +1,326 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +import React, { useRef } from "react"; +import Link from "@docusaurus/Link"; +import Markdown from "markdown-to-jsx"; +import { Light as SyntaxHighlighter } from "react-syntax-highlighter"; +import js from "react-syntax-highlighter/dist/esm/languages/hljs/json"; +import docco from "react-syntax-highlighter/dist/esm/styles/hljs/docco"; +import dark from "react-syntax-highlighter/dist/esm/styles/hljs/dracula"; +import ScrollSpy from "react-ui-scrollspy"; + +SyntaxHighlighter.registerLanguage("json", js); + +const pillStyle = + "p-2 border border-solid border-sui-blue-dark rounded-lg max-w-max bg-sui-ghost-white dark:bg-sui-gray-90"; + +const RefLink = (props) => { + const { refer } = props; + const link = refer.substring(refer.lastIndexOf("/") + 1); + return {link}; +}; + +const Of = (props) => { + const { of, type } = props; + return ( + <> + {of.map((o) => { + if (o["$ref"]) { + return ( +
+

+ +

+ {o.description && ( +

+ {o.description} +

+ )} +
+ ); + } else if (o.type && o.type === "object") { + return ( +
+

+ Object +

+ {o.description && ( +

+ {o.description} +

+ )} + {o.properties && ( + + )} +
+ ); + } else if (o.type && o.type === "string") { + return ( +
+

+ String{" "} + {o.enum && o.enum.length > 0 && ( + + enum: [ {o.enum.map((e) => `"${e}"`).join(" | ")} ] + + )} +

+ {o.description && ( +

+ {o.description} +

+ )} +
+ ); + } else if (o.type && o.type === "integer") { + return ( +
+

+ {o.type[0].toUpperCase()} + {o.type.substring(1)}<{o.format}> Minimum: {o.minimum} +

+ {o.description && {o.description}} +
+ ); + } else if (o.type && o.type === "boolean") { + return ( +
+

+ Boolean +

+ {o.description && {o.description}} +
+ ); + } else if (o.type && o.type === "array") { + return ( +
+

+ [ + {o.items && + Object.keys(o.items).map((k) => { + if (k === "$ref") { + return ; + } + })} + ] +

+ {o.description && ( +

+ {o.description} +

+ )} +
+ ); + } else if (o.anyOf) { + return ; + } else if (o.type) { + return

{o.type}

; + } + })} + + ); +}; + +const AllOf = (props) => { + const { allof } = props; + return ( +
+ +
+ ); +}; + +const AnyOf = (props) => { + const { anyof } = props; + return ( +
+

+ Any of +

+
+ +
+
+ ); +}; + +const AnyOfInline = (props) => { + const { anyof, pill } = props; + return ( +
+ {anyof.map((a, i) => { + if (a["$ref"]) { + return ( + <> + + {i % 2 === 0 ? " | " : ""} + + ); + } + if (a.type) { + return ( + <> + {a.type} + {i % 2 === 0 ? " | " : ""} + + ); + } + })} +
+ ); +}; + +const OneOf = (props) => { + const { oneof } = props; + return ( +
+

+ One of +

+
+ +
+
+ ); +}; + +const PropertiesTable = (props) => { + const { properties, schema } = props; + if (!properties) { + return; + } + return ( +
+ + + + + + + + + + {properties.map(([k, v]) => ( + <> + + + + + + + {v.type === "object" ? ( + + + + + ) : ( + "" + )} + + ))} + +
PropertyTypeReq?Description
{k} + {Array.isArray(v.type) ? v.type.join(" | ") : v.type} + {v.enum && + ` enum [ ${v.enum.map((e) => `"${e}"`).join(" | ")} ]`} + {v["$ref"] && } + {v.anyOf && } + {v.allOf && } + {v.oneOf && "ONEOFCELL"} + {v === true && "true"} + + {schema.required && schema.required.includes(k) ? "Yes" : "No"} + {v.description && v.description}
+ {v.additionalProperties && "Additional properties"} + + {v.additionalProperties && v.additionalProperties["$ref"] && ( + + )} + {!v.additionalProperties && v.properties && ( + + )} + {v.additionalProperties && + v.additionalProperties.type && + v.additionalProperties.type} + {v.additionalProperties && v.additionalProperties.anyOf && ( + + )} + {v.additionalProperties && + v.additionalProperties === true && + "true"} +
+ ); +}; + +const Components = (props) => { + const { schemas } = props; + const names = Object.keys(schemas); + const parentScrollContainerRef = () => { + (useRef < React.HTMLDivElement) | (null > null); + }; + return ( +

+

Component schemas

+ + {names && + names.map((name) => { + return ( +
+

{name}

+ + {schemas[name].description && ( +

+ {schemas[name].description} +

+ )} + {schemas[name].type && ( +

+ {schemas[name].type[0].toUpperCase()} + {schemas[name].type.substring(1)} + {schemas[name].enum && + ` enum [ ${schemas[name].enum.map((e) => `"${e}"`).join(" | ")} ]`} +

+ )} + + {schemas[name].properties && ( + + )} + {schemas[name].allOf && } + {schemas[name].oneOf && } + {schemas[name].anyOf && } + {schemas[name]["$ref"] && ( + + )} +
+ + Toggle raw JSON + +
+                    {`"${name}":  ${JSON.stringify(schemas[name], null, 2)}`}
+                  
+
+
+ ); + })} +
+
+ ); +}; + +export default Components; diff --git a/docs/site/src/components/API/api-ref/refnav.js b/docs/site/src/components/API/api-ref/refnav.js index d7398e6b35903..003903cf8023f 100644 --- a/docs/site/src/components/API/api-ref/refnav.js +++ b/docs/site/src/components/API/api-ref/refnav.js @@ -9,7 +9,7 @@ const RefNav = (props) => { const { json, apis } = props; return ( -
+
@@ -40,6 +40,7 @@ const RefNav = (props) => { ); })} +
); })} diff --git a/docs/site/src/components/API/api-ref/result.js b/docs/site/src/components/API/api-ref/result.js index 542536775dd5c..45eb1f021d34b 100644 --- a/docs/site/src/components/API/api-ref/result.js +++ b/docs/site/src/components/API/api-ref/result.js @@ -41,7 +41,6 @@ const Property = (props) => { const Result = (props) => { const { json, result } = props; - //console.log(result) const hasRef = typeof result.schema["$ref"] !== "undefined"; let refObj = {}; diff --git a/docs/site/src/components/API/index.js b/docs/site/src/components/API/index.js index ccc5fbd053362..f2fe2f1aa21b7 100644 --- a/docs/site/src/components/API/index.js +++ b/docs/site/src/components/API/index.js @@ -4,7 +4,9 @@ import React, { useState, useEffect } from "react"; import ExecutionEnvironment from "@docusaurus/ExecutionEnvironment"; import RefNav from "./api-ref/refnav"; +import CompNav from "./api-ref/compnav"; import Methods from "./api-ref/method"; +import Components from "./api-ref/components"; import ScrollSpy from "react-ui-scrollspy"; @@ -81,6 +83,7 @@ const Rpc = () => {
+
@@ -89,12 +92,15 @@ const Rpc = () => {

Sui JSON-RPC Reference - Version: {openrpc.info.version}

- +

{openrpc.info.description}

+ + +
-
+
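Further down, the custom-indexer examples change `Worker::process_checkpoint` to borrow the checkpoint (`&CheckpointData`) instead of taking it by value. As a rough sketch of an updated worker, assuming the import paths used by those example crates (`sui_data_ingestion_core::Worker`, `sui_types::full_checkpoint_content::CheckpointData`, `anyhow::Result`):

    use anyhow::Result;
    use async_trait::async_trait;
    use sui_data_ingestion_core::Worker;
    use sui_types::full_checkpoint_content::CheckpointData;

    struct CustomWorker;

    #[async_trait]
    impl Worker for CustomWorker {
        type Result = ();

        // The framework now keeps ownership of the checkpoint; the worker only
        // borrows it for the duration of processing.
        async fn process_checkpoint(&self, checkpoint: &CheckpointData) -> Result<()> {
            println!(
                "Processing checkpoint: {}",
                checkpoint.checkpoint_summary.to_string()
            );
            Ok(())
        }
    }

Existing workers only need the signature updated; field access through the reference works as before.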
diff --git a/docs/site/src/pages/index.js b/docs/site/src/pages/index.js index 35e4c66203ce0..081e10cfc486c 100644 --- a/docs/site/src/pages/index.js +++ b/docs/site/src/pages/index.js @@ -88,17 +88,6 @@ export default function Home() { Standards - - - Tokenomics - - - Cryptography - - - Standards - - Result<()> { + async fn process_checkpoint(&self, checkpoint: &CheckpointData) -> Result<()> { // custom processing logic println!("Processing Local checkpoint: {}", checkpoint.checkpoint_summary.to_string()); Ok(()) diff --git a/examples/custom-indexer/rust/remote_reader.rs b/examples/custom-indexer/rust/remote_reader.rs index ed91f1523a5de..65cd99f5c32ff 100644 --- a/examples/custom-indexer/rust/remote_reader.rs +++ b/examples/custom-indexer/rust/remote_reader.rs @@ -11,7 +11,7 @@ struct CustomWorker; #[async_trait] impl Worker for CustomWorker { type Result = (); - async fn process_checkpoint(&self, checkpoint: CheckpointData) -> Result<()> { + async fn process_checkpoint(&self, checkpoint: &CheckpointData) -> Result<()> { // custom processing logic // print out the checkpoint number println!("Processing checkpoint: {}", checkpoint.checkpoint_summary.to_string()); diff --git a/external-crates/move/Cargo.lock b/external-crates/move/Cargo.lock index cdb0abbe485ff..107c1c64e07b9 100644 --- a/external-crates/move/Cargo.lock +++ b/external-crates/move/Cargo.lock @@ -1804,7 +1804,6 @@ dependencies = [ "hex", "move-binary-format", "move-core-types", - "num-bigint", "once_cell", "proptest", "serde", diff --git a/external-crates/move/Cargo.toml b/external-crates/move/Cargo.toml index 3231118dec233..d7f9e31040a67 100644 --- a/external-crates/move/Cargo.toml +++ b/external-crates/move/Cargo.toml @@ -76,7 +76,6 @@ memory-stats = "1.0.0" mirai-annotations = "1.10.1" named-lock = "0.2.0" num = "0.4.0" -num-bigint = "0.4.0" num_cpus = "1.13.0" once_cell = "1.7.2" ouroboros = "0.17.2" diff --git a/external-crates/move/crates/move-analyzer/src/analysis/parsing_analysis.rs b/external-crates/move/crates/move-analyzer/src/analysis/parsing_analysis.rs index ff8c342ccf196..c8d68aeb97743 100644 --- a/external-crates/move/crates/move-analyzer/src/analysis/parsing_analysis.rs +++ b/external-crates/move/crates/move-analyzer/src/analysis/parsing_analysis.rs @@ -424,7 +424,11 @@ impl<'a> ParsingAnalysisContext<'a> { self.exp_symbols(e1); self.exp_symbols(e2); } - E::Abort(e) => self.exp_symbols(e), + E::Abort(oe) => { + if let Some(e) = oe.as_ref() { + self.exp_symbols(e) + } + } E::Return(_, oe) => { if let Some(e) = oe.as_ref() { self.exp_symbols(e) diff --git a/external-crates/move/crates/move-analyzer/src/analysis/typing_analysis.rs b/external-crates/move/crates/move-analyzer/src/analysis/typing_analysis.rs index e22321ddf20ac..1e0a02d11266d 100644 --- a/external-crates/move/crates/move-analyzer/src/analysis/typing_analysis.rs +++ b/external-crates/move/crates/move-analyzer/src/analysis/typing_analysis.rs @@ -11,7 +11,7 @@ use crate::{ }; use move_compiler::{ - diagnostics as diag, + diagnostics::warning_filters::WarningFilters, expansion::ast::{self as E, ModuleIdent}, naming::ast as N, parser::ast::{self as P, ConstantName}, @@ -661,7 +661,7 @@ impl TypingAnalysisContext<'_> { impl<'a> TypingVisitorContext for TypingAnalysisContext<'a> { // Nothing to do -- we're not producing errors. - fn add_warning_filter_scope(&mut self, _filter: diag::WarningFilters) {} + fn push_warning_filter_scope(&mut self, _filter: WarningFilters) {} // Nothing to do -- we're not producing errors. 
fn pop_warning_filter_scope(&mut self) {} @@ -907,7 +907,7 @@ impl<'a> TypingVisitorContext for TypingAnalysisContext<'a> { match &fdef.body.value { T::FunctionBody_::Defined(seq) => { - self.visit_seq(seq); + self.visit_seq(fdef.body.loc, seq); } T::FunctionBody_::Macro | T::FunctionBody_::Native => (), } @@ -985,7 +985,7 @@ impl<'a> TypingVisitorContext for TypingAnalysisContext<'a> { } } - fn visit_seq(&mut self, (use_funs, seq): &T::Sequence) { + fn visit_seq(&mut self, _loc: Loc, (use_funs, seq): &T::Sequence) { let old_traverse_mode = self.traverse_only; // start adding new use-defs etc. when processing arguments if use_funs.color == 0 { diff --git a/external-crates/move/crates/move-analyzer/src/analyzer.rs b/external-crates/move/crates/move-analyzer/src/analyzer.rs index bb8c067e97b6c..62f406e9215c5 100644 --- a/external-crates/move/crates/move-analyzer/src/analyzer.rs +++ b/external-crates/move/crates/move-analyzer/src/analyzer.rs @@ -45,9 +45,10 @@ pub fn run() { let (connection, io_threads) = Connection::stdio(); let symbols_map = Arc::new(Mutex::new(BTreeMap::new())); - let pkg_deps = Arc::new(Mutex::new( - BTreeMap::::new(), - )); + let pkg_deps = Arc::new(Mutex::new(BTreeMap::< + PathBuf, + symbols::PrecomputedPkgDepsInfo, + >::new())); let ide_files_root: VfsPath = MemoryFS::new().into(); let (id, client_response) = connection @@ -147,7 +148,8 @@ pub fn run() { // main reason for this is to enable unit tests that rely on the symbolication information // to be available right after the client is initialized. if let Some(uri) = initialize_params.root_uri { - if let Some(p) = symbols::SymbolicatorRunner::root_dir(&uri.to_file_path().unwrap()) { + let build_path = uri.to_file_path().unwrap(); + if let Some(p) = symbols::SymbolicatorRunner::root_dir(&build_path) { if let Ok((Some(new_symbols), _)) = symbols::get_symbols( Arc::new(Mutex::new(BTreeMap::new())), ide_files_root.clone(), @@ -277,7 +279,7 @@ fn on_request( context: &Context, request: &Request, ide_files_root: VfsPath, - pkg_dependencies: Arc>>, + pkg_dependencies: Arc>>, shutdown_request_received: bool, ) -> bool { if shutdown_request_received { diff --git a/external-crates/move/crates/move-analyzer/src/completions/mod.rs b/external-crates/move/crates/move-analyzer/src/completions/mod.rs index 362fca386d22b..470c8e0c6923b 100644 --- a/external-crates/move/crates/move-analyzer/src/completions/mod.rs +++ b/external-crates/move/crates/move-analyzer/src/completions/mod.rs @@ -10,7 +10,7 @@ use crate::{ utils::{completion_item, PRIMITIVE_TYPE_COMPLETIONS}, }, context::Context, - symbols::{self, CursorContext, PrecompiledPkgDeps, SymbolicatorRunner, Symbols}, + symbols::{self, CursorContext, PrecomputedPkgDepsInfo, SymbolicatorRunner, Symbols}, }; use lsp_server::Request; use lsp_types::{CompletionItem, CompletionItemKind, CompletionParams, Position}; @@ -78,7 +78,7 @@ pub fn on_completion_request( context: &Context, request: &Request, ide_files_root: VfsPath, - pkg_dependencies: Arc>>, + pkg_dependencies: Arc>>, ) { eprintln!("handling completion request"); let parameters = serde_json::from_value::(request.params.clone()) @@ -119,7 +119,7 @@ pub fn on_completion_request( fn completions( context: &Context, ide_files_root: VfsPath, - pkg_dependencies: Arc>>, + pkg_dependencies: Arc>>, path: &Path, pos: Position, ) -> Option> { @@ -143,7 +143,7 @@ fn completions( pub fn compute_completions( current_symbols: &Symbols, ide_files_root: VfsPath, - pkg_dependencies: Arc>>, + pkg_dependencies: Arc>>, path: &Path, pos: Position, ) -> 
Vec { @@ -156,7 +156,7 @@ pub fn compute_completions( /// view of the code (returns `None` if the symbols could not be re-computed). fn compute_completions_new_symbols( ide_files_root: VfsPath, - pkg_dependencies: Arc>>, + pkg_dependencies: Arc>>, path: &Path, cursor_position: Position, ) -> Option> { diff --git a/external-crates/move/crates/move-analyzer/src/symbols.rs b/external-crates/move/crates/move-analyzer/src/symbols.rs index 4bec62fce555b..fd9fa72c781dd 100644 --- a/external-crates/move/crates/move-analyzer/src/symbols.rs +++ b/external-crates/move/crates/move-analyzer/src/symbols.rs @@ -78,6 +78,7 @@ use std::{ path::{Path, PathBuf}, sync::{Arc, Condvar, Mutex}, thread, + time::Instant, }; use tempfile::tempdir; use url::Url; @@ -127,27 +128,63 @@ const MANIFEST_FILE_NAME: &str = "Move.toml"; const STD_LIB_PKG_ADDRESS: &str = "0x1"; type SourceFiles = BTreeMap; +/// Information about compiled program (ASTs at different levels) +#[derive(Clone)] +struct CompiledProgram { + parsed: P::Program, + typed: T::Program, +} + +/// Information about cached dependencies used during compilation and analysis +#[derive(Clone)] +struct CachedDeps { + /// Cached fully compiled program representing dependencies + compiled_program: Arc, + /// Cached symbols computation data for dependencies + symbols_data: Option>, +} + /// Information about the compiled package and data structures -/// computed during compilation +/// computed during compilation and analysis #[derive(Clone)] pub struct CompiledPkgInfo { - parsed_program: P::Program, - typed_program: T::Program, - libs: Option>, + /// Package path + path: PathBuf, + /// Manifest hash + manifest_hash: Option, + /// A combined hash for manifest files of the dependencies + deps_hash: String, + /// Information about cached dependencies + cached_deps: Option, + /// Compiled user program + program: CompiledProgram, + /// Source files source_files: SourceFiles, + /// Maped files mapped_files: MappedFiles, + /// Edition of the compiler edition: Option, + /// Compiler info compiler_info: Option, + /// Comments for both user code and the dependencies all_comments: CommentMap, } /// Data used during symbols computation #[derive(Clone)] pub struct SymbolsComputationData { + /// Outermost definitions in a module (structs, consts, functions), keyed on a ModuleIdent + /// string mod_outer_defs: BTreeMap, + /// A UseDefMap for a given module (needs to be appropriately set before the module + /// processing starts) keyed on a ModuleIdent string mod_use_defs: BTreeMap, + /// Uses (references) for a definition at a given location references: BTreeMap>, + /// Additional information about a definitions at a given location def_info: BTreeMap, + /// Module name lengths in access paths for a given module (needs to be appropriately + /// set before the module processing starts) keyed on a ModuleIdent string mod_to_alias_lengths: BTreeMap>, } @@ -163,15 +200,17 @@ impl SymbolsComputationData { } } -/// Information about precompiled package dependencies +/// Precomputed information about package dependencies. 
#[derive(Clone)] -pub struct PrecompiledPkgDeps { +pub struct PrecomputedPkgDepsInfo { /// Hash of the manifest file for a given package manifest_hash: Option, /// Hash of dependency source files deps_hash: String, /// Precompiled deps deps: Arc, + /// Symbols computation data + deps_symbols_data: Arc, } /// Location of a use's identifier @@ -404,7 +443,8 @@ pub type StructFieldOrderInfo = BTreeMap>; /// Map from enum name to variant name to field order information pub type VariantFieldOrderInfo = BTreeMap>>; -/// Information about field order in structs and enums +/// Information about field order in structs and enums needed for auto-completion +/// to be consistent with field order in the source code #[derive(Debug, Clone, Ord, PartialOrd, PartialEq, Eq)] pub struct FieldOrderInfo { structs: BTreeMap, @@ -1305,7 +1345,7 @@ impl SymbolicatorRunner { pub fn new( ide_files_root: VfsPath, symbols_map: Arc>>, - pkg_deps: Arc>>, + pkg_deps: Arc>>, sender: Sender>>>, lint: LintLevel, ) -> Self { @@ -1643,17 +1683,11 @@ impl UseDefMap { self.0.len() } - pub fn extend_inner(&mut self, use_defs: BTreeMap>) { + pub fn extend(&mut self, use_defs: BTreeMap>) { for (k, v) in use_defs { self.0.entry(k).or_default().extend(v); } } - - pub fn extend(&mut self, use_defs: Self) { - for (k, v) in use_defs.0 { - self.0.entry(k).or_default().extend(v); - } - } } impl Symbols { @@ -1688,7 +1722,7 @@ impl Symbols { fn has_precompiled_deps( pkg_path: &Path, - pkg_dependencies: Arc>>, + pkg_dependencies: Arc>>, ) -> bool { let pkg_deps = pkg_dependencies.lock().unwrap(); pkg_deps.contains_key(pkg_path) @@ -1697,7 +1731,7 @@ fn has_precompiled_deps( /// Builds a package at a given path and, if successful, returns parsed AST /// and typed AST as well as (regardless of success) diagnostics. 
pub fn get_compiled_pkg( - pkg_dependencies: Arc>>, + pkg_dependencies: Arc>>, ide_files_root: VfsPath, pkg_path: &Path, lint: LintLevel, @@ -1758,7 +1792,7 @@ pub fn get_compiled_pkg( let mut diagnostics = None; let mut dependencies = build_plan.compute_dependencies(); - let compiled_libs = if let Ok(deps_package_paths) = dependencies.make_deps_for_compiler() { + let cached_deps = if let Ok(deps_package_paths) = dependencies.make_deps_for_compiler() { // Partition deps_package according whether src is available let src_deps = deps_package_paths .iter() @@ -1776,16 +1810,19 @@ pub fn get_compiled_pkg( .filter_map(|p| p.name.as_ref().map(|(n, _)| *n)) .collect::>(); - let mut pkg_deps = pkg_dependencies.lock().unwrap(); - let compiled_deps = match pkg_deps.get(pkg_path) { + let pkg_deps = pkg_dependencies.lock().unwrap(); + let pkg_cached_deps = match pkg_deps.get(pkg_path) { Some(d) if manifest_hash.is_some() && manifest_hash == d.manifest_hash && deps_hash == d.deps_hash => { - eprintln!("found pre-compiled libs for {:?}", pkg_path); + eprintln!("found cached deps for {:?}", pkg_path); mapped_files.extend_with_duplicates(d.deps.files.clone()); - Some(d.deps.clone()) + Some(CachedDeps { + compiled_program: d.deps.clone(), + symbols_data: Some(d.deps_symbols_data.clone()), + }) } _ => construct_pre_compiled_lib( src_deps, @@ -1798,30 +1835,27 @@ pub fn get_compiled_pkg( .map(|libs| { eprintln!("created pre-compiled libs for {:?}", pkg_path); mapped_files.extend_with_duplicates(libs.files.clone()); - let deps = Arc::new(libs); - pkg_deps.insert( - pkg_path.to_path_buf(), - PrecompiledPkgDeps { - manifest_hash, - deps_hash, - deps: deps.clone(), - }, - ); - deps + CachedDeps { + compiled_program: Arc::new(libs), + symbols_data: None, + } }), }; - if compiled_deps.is_some() { + if pkg_cached_deps.is_some() { // if successful, remove only source deps but keep bytecode deps as they // were not used to construct pre-compiled lib in the first place dependencies.remove_deps(src_names); } - compiled_deps + pkg_cached_deps } else { None }; let mut edition = None; let mut comments = None; + let compiled_libs = cached_deps + .clone() + .map(|deps| deps.compiled_program.clone()); build_plan.compile_with_driver_and_deps(dependencies, &mut std::io::sink(), |compiler| { let compiler = compiler.set_ide_mode(); // extract expansion AST @@ -1841,7 +1875,7 @@ pub fn get_compiled_pkg( eprintln!("compiled to parsed AST"); let (compiler, parsed_program) = compiler.into_ast(); parsed_ast = Some(parsed_program.clone()); - mapped_files.extend_with_duplicates(compiler.compilation_env_ref().mapped_files().clone()); + mapped_files.extend_with_duplicates(compiler.compilation_env().mapped_files().clone()); // extract typed AST let compilation_result = compiler.at_parser(parsed_program).run::(); @@ -1856,17 +1890,17 @@ pub fn get_compiled_pkg( } }; eprintln!("compiled to typed AST"); - let (mut compiler, typed_program) = compiler.into_ast(); + let (compiler, typed_program) = compiler.into_ast(); typed_ast = Some(typed_program.clone()); compiler_info = Some(CompilerInfo::from( - compiler.compilation_env().ide_information.clone(), + compiler.compilation_env().ide_information().clone(), )); edition = Some(compiler.compilation_env().edition(Some(root_pkg_name))); // compile to CFGIR for accurate diags eprintln!("compiling to CFGIR"); let compilation_result = compiler.at_typing(typed_program).run::(); - let mut compiler = match compilation_result { + let compiler = match compilation_result { Ok(v) => v, Err((_pass, diags)) => 
{ let failure = false; @@ -1906,9 +1940,14 @@ pub fn get_compiled_pkg( all_comments.extend(libs.comments.clone()); } let compiled_pkg_info = CompiledPkgInfo { - parsed_program, - typed_program, - libs: compiled_libs, + path: pkg_path.into(), + manifest_hash, + deps_hash, + cached_deps, + program: CompiledProgram { + parsed: parsed_program, + typed: typed_program, + }, source_files, mapped_files, edition, @@ -1921,17 +1960,18 @@ pub fn get_compiled_pkg( /// Preprocess parsed and typed programs prior to actual symbols computation. pub fn compute_symbols_pre_process( computation_data: &mut SymbolsComputationData, - compiled_pkg_info: &CompiledPkgInfo, + computation_data_deps: &mut SymbolsComputationData, + compiled_pkg_info: &mut CompiledPkgInfo, cursor_info: Option<(&PathBuf, Position)>, ) -> Option { let mut fields_order_info = FieldOrderInfo::new(); - - pre_process_parsed_program(&compiled_pkg_info.parsed_program, &mut fields_order_info); + let parsed_program = &compiled_pkg_info.program.parsed; + let typed_program = &compiled_pkg_info.program.typed; + pre_process_parsed_program(parsed_program, &mut fields_order_info); let mut cursor_context = compute_cursor_context(&compiled_pkg_info.mapped_files, cursor_info); - pre_process_typed_modules( - &compiled_pkg_info.typed_program.modules, + &typed_program.modules, &fields_order_info, &compiled_pkg_info.mapped_files, &mut computation_data.mod_outer_defs, @@ -1943,29 +1983,51 @@ pub fn compute_symbols_pre_process( &compiled_pkg_info.all_comments, ); - if let Some(libs) = compiled_pkg_info.libs.clone() { - pre_process_typed_modules( - &libs.typing.modules, - &fields_order_info, - &compiled_pkg_info.mapped_files, - &mut computation_data.mod_outer_defs, - &mut computation_data.mod_use_defs, - &mut computation_data.references, - &mut computation_data.def_info, - &compiled_pkg_info.edition, - None, // Cursor can never be in a compiled library(?) - &compiled_pkg_info.all_comments, - ); + if let Some(cached_deps) = compiled_pkg_info.cached_deps.clone() { + // we have at least compiled program available + let (deps_mod_outer_defs, deps_def_info) = + if let Some(cached_symbols_data) = cached_deps.symbols_data { + // We have cached results of the dependency symbols computation from the previous run. + ( + cached_symbols_data.mod_outer_defs.clone(), + cached_symbols_data.def_info.clone(), + ) + } else { + // No cached dependency symbols data but we still have cached compilation results. + // Fill out dependency symbols from compiled package info to cache them at the end of analysis + pre_process_typed_modules( + &cached_deps.compiled_program.typing.modules, + &FieldOrderInfo::new(), + &compiled_pkg_info.mapped_files, + &mut computation_data_deps.mod_outer_defs, + &mut computation_data_deps.mod_use_defs, + &mut computation_data_deps.references, + &mut computation_data_deps.def_info, + &compiled_pkg_info.edition, + None, // Cursor can never be in a compiled library(?) + &compiled_pkg_info.all_comments, + ); + ( + computation_data_deps.mod_outer_defs.clone(), + computation_data_deps.def_info.clone(), + ) + }; + // We need to update definitions for the code being currently processed + // so that these definitions are available when ASTs for this code are visited + computation_data.mod_outer_defs.extend(deps_mod_outer_defs); + computation_data.def_info.extend(deps_def_info); } + cursor_context } -/// Process parsed program for symbols computation. 
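The reworked symbols pipeline around this point reuses previously computed dependency information whenever the package manifest hash and the combined dependency hash are unchanged, and only re-runs the parsing/typing analyses for dependencies on a cache miss. A simplified, self-contained sketch of that cache-hit test, using hypothetical stand-in types (the real cache keys `PrecomputedPkgDepsInfo` by package path and stores `manifest_hash`, `deps_hash`, the precompiled program, and the dependency symbols data):

    use std::collections::BTreeMap;
    use std::path::{Path, PathBuf};

    // Hypothetical, simplified stand-in for the cached per-package entry.
    struct CachedEntry {
        manifest_hash: Option<u64>, // hash of the package manifest
        deps_hash: String,          // combined hash of dependency manifests
    }

    // A cached entry is reused only if the manifest hash is known and both the
    // manifest hash and the dependency hash match the freshly computed values.
    fn cache_hit(
        cache: &BTreeMap<PathBuf, CachedEntry>,
        pkg_path: &Path,
        manifest_hash: Option<u64>,
        deps_hash: &str,
    ) -> bool {
        match cache.get(pkg_path) {
            Some(e) => {
                manifest_hash.is_some()
                    && manifest_hash == e.manifest_hash
                    && deps_hash == e.deps_hash
            }
            None => false,
        }
    }

    fn main() {
        let mut cache = BTreeMap::new();
        cache.insert(
            PathBuf::from("my_pkg"),
            CachedEntry { manifest_hash: Some(1), deps_hash: "abc".to_string() },
        );
        assert!(cache_hit(&cache, Path::new("my_pkg"), Some(1), "abc"));
        assert!(!cache_hit(&cache, Path::new("my_pkg"), Some(2), "abc"));
    }

On a miss, the analysis recomputes the dependency symbols and, as the later hunks show, stores them back into the cache together with the precompiled program so the next run can skip both steps.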
-pub fn compute_symbols_parsed_program( +/// Run parsing analysis for either main program or dependencies +fn run_parsing_analysis( computation_data: &mut SymbolsComputationData, compiled_pkg_info: &CompiledPkgInfo, - mut cursor_context: Option, -) -> Option { + cursor_context: Option<&mut CursorContext>, + parsed_program: &P::Program, +) { let mut parsing_symbolicator = parsing_analysis::ParsingAnalysisContext { mod_outer_defs: &mut computation_data.mod_outer_defs, files: &compiled_pkg_info.mapped_files, @@ -1975,66 +2037,156 @@ pub fn compute_symbols_parsed_program( current_mod_ident_str: None, alias_lengths: BTreeMap::new(), pkg_addresses: &NamedAddressMap::new(), - cursor: cursor_context.as_mut(), + cursor: cursor_context, }; parsing_symbolicator.prog_symbols( - &compiled_pkg_info.parsed_program, + parsed_program, &mut computation_data.mod_use_defs, &mut computation_data.mod_to_alias_lengths, ); - if let Some(libs) = compiled_pkg_info.libs.clone() { - parsing_symbolicator.cursor = None; - parsing_symbolicator.prog_symbols( - &libs.parser, - &mut computation_data.mod_use_defs, - &mut computation_data.mod_to_alias_lengths, - ); +} + +/// Process parsed program for symbols computation. +pub fn compute_symbols_parsed_program( + computation_data: &mut SymbolsComputationData, + computation_data_deps: &mut SymbolsComputationData, + compiled_pkg_info: &CompiledPkgInfo, + mut cursor_context: Option, +) -> Option { + run_parsing_analysis( + computation_data, + compiled_pkg_info, + cursor_context.as_mut(), + &compiled_pkg_info.program.parsed, + ); + if let Some(cached_deps) = &compiled_pkg_info.cached_deps { + // run parsing analysis only if cached symbols computation data + // is not available to fill out dependency symbols from compiled package info + // to cache them at the end of analysis + if cached_deps.symbols_data.is_none() { + run_parsing_analysis( + computation_data_deps, + compiled_pkg_info, + None, + &cached_deps.compiled_program.parser, + ); + } } cursor_context } -/// Process typed program for symbols computation. 
-pub fn compute_symbols_typed_program( +/// Run typing analysis for either main program or dependencies +fn run_typing_analysis( mut computation_data: SymbolsComputationData, - mut compiled_pkg_info: CompiledPkgInfo, - cursor_context: Option, -) -> Symbols { - let mut file_use_defs = BTreeMap::new(); - let mut compiler_info = compiled_pkg_info.compiler_info.unwrap(); + mapped_files: &MappedFiles, + compiler_info: &mut CompilerInfo, + typed_program: &T::Program, +) -> SymbolsComputationData { let mut typing_symbolicator = typing_analysis::TypingAnalysisContext { mod_outer_defs: &mut computation_data.mod_outer_defs, - files: &compiled_pkg_info.mapped_files, + files: mapped_files, references: &mut computation_data.references, def_info: &mut computation_data.def_info, use_defs: UseDefMap::new(), current_mod_ident_str: None, alias_lengths: &BTreeMap::new(), traverse_only: false, - compiler_info: &mut compiler_info, + compiler_info, type_params: BTreeMap::new(), expression_scope: OrdMap::new(), }; process_typed_modules( - &mut compiled_pkg_info.typed_program.modules, - &compiled_pkg_info.source_files, + &typed_program.modules, &computation_data.mod_to_alias_lengths, &mut typing_symbolicator, - &mut file_use_defs, &mut computation_data.mod_use_defs, ); + computation_data +} - if let Some(libs) = compiled_pkg_info.libs { - process_typed_modules( - &mut libs.typing.modules.clone(), - &compiled_pkg_info.source_files, - &computation_data.mod_to_alias_lengths, - &mut typing_symbolicator, - &mut file_use_defs, - &mut computation_data.mod_use_defs, - ); +// Given use-defs for a the main program or dependencies, update the per-file +// use-def map +fn update_file_use_defs( + computation_data: &SymbolsComputationData, + source_files: &SourceFiles, + file_use_defs: &mut FileUseDefs, +) { + for (module_ident_str, use_defs) in &computation_data.mod_use_defs { + // unwrap here is safe as all modules in a given program have the module_defs entry + // in the map + let module_defs = computation_data + .mod_outer_defs + .get(module_ident_str) + .unwrap(); + let fpath = match source_files.get(&module_defs.fhash) { + Some((p, _, _)) => p, + None => return, + }; + + let fpath_buffer = + dunce::canonicalize(fpath.as_str()).unwrap_or_else(|_| PathBuf::from(fpath.as_str())); + + file_use_defs + .entry(fpath_buffer) + .or_default() + .extend(use_defs.clone().elements()); } +} + +/// Process typed program for symbols computation. +pub fn compute_symbols_typed_program( + computation_data: SymbolsComputationData, + computation_data_deps: SymbolsComputationData, + mut compiled_pkg_info: CompiledPkgInfo, + cursor_context: Option, +) -> (Symbols, Option>) { + // run typing analysis for the main user program + let compiler_info = &mut compiled_pkg_info.compiler_info.as_mut().unwrap(); + let mapped_files = &compiled_pkg_info.mapped_files; + let source_files = &compiled_pkg_info.source_files; + let mut computation_data = run_typing_analysis( + computation_data, + mapped_files, + compiler_info, + &compiled_pkg_info.program.typed, + ); + let mut file_use_defs = BTreeMap::new(); + update_file_use_defs(&computation_data, source_files, &mut file_use_defs); + + let cacheable_symbols_data_opt = + if let Some(cached_deps) = compiled_pkg_info.cached_deps.clone() { + // we have at least compiled program available + let deps_symbols_data = if let Some(cached_symbols_data) = cached_deps.symbols_data { + // We have cached results of the dependency symbols computation from the previous run. 
+ cached_symbols_data + } else { + // No cached dependency symbols data but we still have cached compilation results. + // Fill out dependency symbols from compiled package info to cache them at the end of analysis + let computation_data_deps = run_typing_analysis( + computation_data_deps, + mapped_files, + compiler_info, + &cached_deps.compiled_program.typing, + ); + Arc::new(computation_data_deps) + }; + // create `file_use_defs` map and merge references to produce complete symbols data + // (mod_outer_defs and def_info have already been merged to facilitate user program + // analysis) + update_file_use_defs(&deps_symbols_data, source_files, &mut file_use_defs); + for (def_loc, uses) in &deps_symbols_data.references { + computation_data + .references + .entry(*def_loc) + .or_default() + .extend(uses); + } + Some(deps_symbols_data) + } else { + None + }; let mut file_mods: FileModules = BTreeMap::new(); for d in computation_data.mod_outer_defs.into_values() { @@ -2042,36 +2194,77 @@ pub fn compute_symbols_typed_program( file_mods.entry(path.to_path_buf()).or_default().insert(d); } - Symbols { - references: computation_data.references, - file_use_defs, - file_mods, - def_info: computation_data.def_info, - files: compiled_pkg_info.mapped_files, - compiler_info, - cursor_context, - } + ( + Symbols { + references: computation_data.references, + file_use_defs, + file_mods, + def_info: computation_data.def_info, + files: compiled_pkg_info.mapped_files, + compiler_info: compiled_pkg_info.compiler_info.unwrap(), + cursor_context, + }, + cacheable_symbols_data_opt, + ) } /// Compute symbols for a given package from the parsed and typed ASTs, /// as well as other auxiliary data provided in `compiled_pkg_info`. pub fn compute_symbols( - compiled_pkg_info: CompiledPkgInfo, + pkg_dependencies: Arc>>, + mut compiled_pkg_info: CompiledPkgInfo, cursor_info: Option<(&PathBuf, Position)>, ) -> Symbols { + let pkg_path = compiled_pkg_info.path.clone(); + let manifest_hash = compiled_pkg_info.manifest_hash; + let cached_dep_opt = compiled_pkg_info.cached_deps.clone(); + let deps_hash = compiled_pkg_info.deps_hash.clone(); let mut symbols_computation_data = SymbolsComputationData::new(); + let mut symbols_computation_data_deps = SymbolsComputationData::new(); let cursor_context = compute_symbols_pre_process( &mut symbols_computation_data, - &compiled_pkg_info, + &mut symbols_computation_data_deps, + &mut compiled_pkg_info, cursor_info, ); let cursor_context = compute_symbols_parsed_program( &mut symbols_computation_data, + &mut symbols_computation_data_deps, &compiled_pkg_info, cursor_context, ); - compute_symbols_typed_program(symbols_computation_data, compiled_pkg_info, cursor_context) + let (symbols, cacheable_symbols_data_opt) = compute_symbols_typed_program( + symbols_computation_data, + symbols_computation_data_deps, + compiled_pkg_info, + cursor_context, + ); + + let mut pkg_deps = pkg_dependencies.lock().unwrap(); + + if let Some(cached_deps) = cached_dep_opt { + // we have at least compiled program available, either already cached + // or created for the purpose of this analysis + if cached_deps.symbols_data.is_none() { + // if no symbols computation data was cached, it means that + // compiled program was created for the purpose of this analysis + // and we need to cache both + if let Some(deps_symbols_data) = cacheable_symbols_data_opt { + eprintln!("caching pre-compiled program and pre-computed symbols"); + pkg_deps.insert( + pkg_path, + PrecomputedPkgDepsInfo { + manifest_hash, + deps_hash, + 
deps: cached_deps.compiled_program.clone(), + deps_symbols_data, + }, + ); + } + } + } + symbols } /// Main driver to get symbols for the whole package. Returned symbols is an option as only the @@ -2079,19 +2272,22 @@ pub fn compute_symbols( /// actually (re)computed and the diagnostics are returned, the old symbolic information should /// be retained even if it's getting out-of-date. pub fn get_symbols( - pkg_dependencies: Arc>>, + pkg_dependencies: Arc>>, ide_files_root: VfsPath, pkg_path: &Path, lint: LintLevel, cursor_info: Option<(&PathBuf, Position)>, ) -> Result<(Option, BTreeMap>)> { + let compilation_start = Instant::now(); let (compiled_pkg_info_opt, ide_diagnostics) = - get_compiled_pkg(pkg_dependencies, ide_files_root, pkg_path, lint)?; + get_compiled_pkg(pkg_dependencies.clone(), ide_files_root, pkg_path, lint)?; + eprintln!("compilation complete in: {:?}", compilation_start.elapsed()); let Some(compiled_pkg_info) = compiled_pkg_info_opt else { return Ok((None, ide_diagnostics)); }; - let symbols = compute_symbols(compiled_pkg_info, cursor_info); - + let analysis_start = Instant::now(); + let symbols = compute_symbols(pkg_dependencies, compiled_pkg_info, cursor_info); + eprintln!("analysis complete in {:?}", analysis_start.elapsed()); eprintln!("get_symbols load complete"); Ok((Some(symbols), ide_diagnostics)) @@ -2209,32 +2405,19 @@ fn pre_process_typed_modules( } fn process_typed_modules<'a>( - typed_modules: &mut UniqueMap, - source_files: &SourceFiles, + typed_modules: &UniqueMap, mod_to_alias_lengths: &'a BTreeMap>, typing_symbolicator: &mut typing_analysis::TypingAnalysisContext<'a>, - file_use_defs: &mut FileUseDefs, mod_use_defs: &mut BTreeMap, ) { - for (module_ident, module_def) in typed_modules.key_cloned_iter_mut() { + for (module_ident, module_def) in typed_modules.key_cloned_iter() { let mod_ident_str = expansion_mod_ident_to_map_key(&module_ident.value); typing_symbolicator.use_defs = mod_use_defs.remove(&mod_ident_str).unwrap(); typing_symbolicator.alias_lengths = mod_to_alias_lengths.get(&mod_ident_str).unwrap(); typing_symbolicator.visit_module(module_ident, module_def); - let fpath = match source_files.get(&module_ident.loc.file_hash()) { - Some((p, _, _)) => p, - None => continue, - }; - - let fpath_buffer = - dunce::canonicalize(fpath.as_str()).unwrap_or_else(|_| PathBuf::from(fpath.as_str())); - let use_defs = std::mem::replace(&mut typing_symbolicator.use_defs, UseDefMap::new()); - file_use_defs - .entry(fpath_buffer) - .or_default() - .extend_inner(use_defs.elements()); + mod_use_defs.insert(mod_ident_str, use_defs); } } diff --git a/external-crates/move/crates/move-analyzer/tests/ide_testsuite.rs b/external-crates/move/crates/move-analyzer/tests/ide_testsuite.rs index bfb9b0ae2c9c1..1111d7e20f3c6 100644 --- a/external-crates/move/crates/move-analyzer/tests/ide_testsuite.rs +++ b/external-crates/move/crates/move-analyzer/tests/ide_testsuite.rs @@ -178,7 +178,7 @@ impl CompletionTest { fn test( &self, test_idx: usize, - compiled_pkg_info: CompiledPkgInfo, + mut compiled_pkg_info: CompiledPkgInfo, symbols: &mut Symbols, output: &mut dyn std::io::Write, use_file_path: &Path, @@ -195,15 +195,18 @@ impl CompletionTest { let cursor_path = use_file_path.to_path_buf(); let cursor_info = Some((&cursor_path, use_pos)); let mut symbols_computation_data = SymbolsComputationData::new(); + let mut symbols_computation_data_deps = SymbolsComputationData::new(); // we only compute cursor context and tag it on the existing symbols to avoid spending time // recomputing 
all symbols (saves quite a bit of time when running the test suite) let mut cursor_context = compute_symbols_pre_process( &mut symbols_computation_data, - &compiled_pkg_info, + &mut symbols_computation_data_deps, + &mut compiled_pkg_info, cursor_info, ); cursor_context = compute_symbols_parsed_program( &mut symbols_computation_data, + &mut symbols_computation_data_deps, &compiled_pkg_info, cursor_context, ); @@ -239,7 +242,7 @@ impl CursorTest { fn test( &self, test_ndx: usize, - compiled_pkg_info: CompiledPkgInfo, + mut compiled_pkg_info: CompiledPkgInfo, symbols: &mut Symbols, output: &mut dyn std::io::Write, path: &Path, @@ -257,13 +260,16 @@ impl CursorTest { let cursor_path = path.to_path_buf(); let cursor_info = Some((&cursor_path, Position { line, character })); let mut symbols_computation_data = SymbolsComputationData::new(); + let mut symbols_computation_data_deps = SymbolsComputationData::new(); let mut cursor_context = compute_symbols_pre_process( &mut symbols_computation_data, - &compiled_pkg_info, + &mut symbols_computation_data_deps, + &mut compiled_pkg_info, cursor_info, ); cursor_context = compute_symbols_parsed_program( &mut symbols_computation_data, + &mut symbols_computation_data_deps, &compiled_pkg_info, cursor_context, ); @@ -379,7 +385,11 @@ fn initial_symbols( )?; let compiled_pkg_info = compiled_pkg_info_opt.ok_or("PACKAGE COMPILATION FAILED")?; - let symbols = compute_symbols(compiled_pkg_info.clone(), None); + let symbols = compute_symbols( + Arc::new(Mutex::new(BTreeMap::new())), + compiled_pkg_info.clone(), + None, + ); Ok((project_path, compiled_pkg_info, symbols)) } diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/.mocharc.yaml b/external-crates/move/crates/move-analyzer/trace-adapter/.mocharc.yaml new file mode 100644 index 0000000000000..106233efe8557 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/.mocharc.yaml @@ -0,0 +1,3 @@ +require: './tests/run_spec.js' +spec: + - 'tests/**/*.spec.js' diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/package-lock.json b/external-crates/move/crates/move-analyzer/trace-adapter/package-lock.json index 4f6a65b91fa49..d5d8cc6862162 100644 --- a/external-crates/move/crates/move-analyzer/trace-adapter/package-lock.json +++ b/external-crates/move/crates/move-analyzer/trace-adapter/package-lock.json @@ -15,6 +15,8 @@ "@vscode/debugadapter-testsupport": "^1.56.0", "@vscode/debugprotocol": "1.66.0", "eslint": "^8.57.0", + "line-diff": "^2.1.1", + "mocha": "10.2.0", "toml": "^3.0.0", "typescript": "^5.4.5" } @@ -456,6 +458,15 @@ "url": "https://github.com/sponsors/epoberezkin" } }, + "node_modules/ansi-colors": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.1.tgz", + "integrity": "sha512-JoX0apGbHaUJBNl6yF+p6JAFYZ666/hhCGKN5t9QFjbJQKUU/g8MNbFDbvfrgKXvI1QpZplPOnwIo99lX/AAmA==", + "dev": true, + "engines": { + "node": ">=6" + } + }, "node_modules/ansi-regex": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", @@ -480,6 +491,19 @@ "url": "https://github.com/chalk/ansi-styles?sponsor=1" } }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dev": true, + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, 
"node_modules/argparse": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", @@ -501,6 +525,18 @@ "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", "dev": true }, + "node_modules/binary-extensions": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "dev": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/brace-expansion": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", @@ -522,6 +558,12 @@ "node": ">=8" } }, + "node_modules/browser-stdout": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/browser-stdout/-/browser-stdout-1.3.1.tgz", + "integrity": "sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw==", + "dev": true + }, "node_modules/callsites": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", @@ -531,6 +573,18 @@ "node": ">=6" } }, + "node_modules/camelcase": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", + "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/chalk": { "version": "4.1.2", "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", @@ -559,6 +613,44 @@ "node": ">=8" } }, + "node_modules/chokidar": { + "version": "3.5.3", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz", + "integrity": "sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==", + "dev": true, + "funding": [ + { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + ], + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/cliui": { + "version": "7.0.4", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", + "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", + "dev": true, + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.0", + "wrap-ansi": "^7.0.0" + } + }, "node_modules/color-convert": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", @@ -614,12 +706,33 @@ } } }, + "node_modules/decamelize": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-4.0.0.tgz", + "integrity": "sha512-9iE1PgSik9HeIIw2JO94IidnE3eBoQrFJ3w7sFuzSX4DpmZ3v5sZpUiV5Swcf6mQEF+Y0ru8Neo+p+nyh2J+hQ==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/deep-is": { "version": "0.1.4", "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", "integrity": 
"sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", "dev": true }, + "node_modules/diff": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/diff/-/diff-5.0.0.tgz", + "integrity": "sha512-/VTCrvm5Z0JGty/BWHljh+BAiw3IK+2j87NGMu8Nwc/f48WoDAC395uomO9ZD117ZOBaHmkX1oyLvkVM/aIT3w==", + "dev": true, + "engines": { + "node": ">=0.3.1" + } + }, "node_modules/dir-glob": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", @@ -644,6 +757,21 @@ "node": ">=6.0.0" } }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "dev": true, + "engines": { + "node": ">=6" + } + }, "node_modules/escape-string-regexp": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", @@ -915,6 +1043,15 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/flat": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz", + "integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==", + "dev": true, + "bin": { + "flat": "cli.js" + } + }, "node_modules/flat-cache": { "version": "3.2.0", "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.2.0.tgz", @@ -941,6 +1078,50 @@ "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", "dev": true }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "dev": true, + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/glob": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.0.tgz", + "integrity": "sha512-lmLf6gtyrPq8tTjSmrO94wBeQbFR3HbLHbuyD69wuyQkImp2hWqMGB47OX65FBkPffO641IP9jWa1z4ivqG26Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/glob-parent": { "version": "5.1.2", "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", @@ -953,6 +1134,28 @@ "node": ">= 6" } }, + "node_modules/glob/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": 
"https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/glob/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, "node_modules/globals": { "version": "13.24.0", "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", @@ -1003,6 +1206,15 @@ "node": ">=8" } }, + "node_modules/he": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", + "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", + "dev": true, + "bin": { + "he": "bin/he" + } + }, "node_modules/ignore": { "version": "5.3.2", "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", @@ -1054,6 +1266,18 @@ "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", "dev": true }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dev": true, + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/is-extglob": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", @@ -1063,6 +1287,15 @@ "node": ">=0.10.0" } }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "engines": { + "node": ">=8" + } + }, "node_modules/is-glob": { "version": "4.0.3", "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", @@ -1093,6 +1326,27 @@ "node": ">=8" } }, + "node_modules/is-plain-obj": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz", + "integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-unicode-supported": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz", + "integrity": "sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/isexe": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", @@ -1138,6 +1392,12 @@ "json-buffer": "3.0.1" } }, + "node_modules/levdist": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/levdist/-/levdist-1.0.0.tgz", + "integrity": "sha512-YguwC2spb0pqpJM3a5OsBhih/GG2ZHoaSHnmBqhEI7997a36buhqcRTegEjozHxyxByIwLpZHZTVYMThq+Zd3g==", + "dev": true + }, "node_modules/levn": { "version": 
"0.4.1", "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", @@ -1151,6 +1411,15 @@ "node": ">= 0.8.0" } }, + "node_modules/line-diff": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/line-diff/-/line-diff-2.1.1.tgz", + "integrity": "sha512-vswdynAI5AMPJacOo2o+JJ4caDJbnY2NEqms4MhMW0NJbjh3skP/brpVTAgBxrg55NRZ2Vtw88ef18hnagIpYQ==", + "dev": true, + "dependencies": { + "levdist": "^1.0.0" + } + }, "node_modules/locate-path": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", @@ -1172,6 +1441,22 @@ "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", "dev": true }, + "node_modules/log-symbols": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.1.0.tgz", + "integrity": "sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==", + "dev": true, + "dependencies": { + "chalk": "^4.1.0", + "is-unicode-supported": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/merge2": { "version": "1.4.1", "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", @@ -1209,18 +1494,120 @@ "url": "https://github.com/sponsors/isaacs" } }, + "node_modules/mocha": { + "version": "10.2.0", + "resolved": "https://registry.npmjs.org/mocha/-/mocha-10.2.0.tgz", + "integrity": "sha512-IDY7fl/BecMwFHzoqF2sg/SHHANeBoMMXFlS9r0OXKDssYE1M5O43wUY/9BVPeIvfH2zmEbBfseqN9gBQZzXkg==", + "dev": true, + "dependencies": { + "ansi-colors": "4.1.1", + "browser-stdout": "1.3.1", + "chokidar": "3.5.3", + "debug": "4.3.4", + "diff": "5.0.0", + "escape-string-regexp": "4.0.0", + "find-up": "5.0.0", + "glob": "7.2.0", + "he": "1.2.0", + "js-yaml": "4.1.0", + "log-symbols": "4.1.0", + "minimatch": "5.0.1", + "ms": "2.1.3", + "nanoid": "3.3.3", + "serialize-javascript": "6.0.0", + "strip-json-comments": "3.1.1", + "supports-color": "8.1.1", + "workerpool": "6.2.1", + "yargs": "16.2.0", + "yargs-parser": "20.2.4", + "yargs-unparser": "2.0.0" + }, + "bin": { + "_mocha": "bin/_mocha", + "mocha": "bin/mocha.js" + }, + "engines": { + "node": ">= 14.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/mochajs" + } + }, + "node_modules/mocha/node_modules/debug": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dev": true, + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/mocha/node_modules/debug/node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + }, + "node_modules/mocha/node_modules/minimatch": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.0.1.tgz", + "integrity": "sha512-nLDxIFRyhDblz3qMuq+SoRZED4+miJ/G+tdDrjkkkRnjAsBexeGpgjLEQ0blJy7rHhR2b93rhQY4SvyWu9v03g==", + "dev": true, + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/mocha/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + 
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true + }, "node_modules/ms": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", "dev": true }, + "node_modules/nanoid": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.3.tgz", + "integrity": "sha512-p1sjXuopFs0xg+fPASzQ28agW1oHD7xDsd9Xkf3T15H3c/cifrFHVwrh74PdoklAPi+i7MdRsE47vm2r6JoB+w==", + "dev": true, + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, "node_modules/natural-compare": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", "dev": true }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/once": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", @@ -1375,6 +1762,36 @@ } ] }, + "node_modules/randombytes": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "dev": true, + "dependencies": { + "safe-buffer": "^5.1.0" + } + }, + "node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dev": true, + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/resolve-from": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", @@ -1476,6 +1893,26 @@ "queue-microtask": "^1.2.2" } }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, "node_modules/semver": { "version": "7.6.3", "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.3.tgz", @@ -1488,6 +1925,15 @@ "node": ">=10" } }, + "node_modules/serialize-javascript": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.0.tgz", + "integrity": 
"sha512-Qr3TosvguFt8ePWqsvRfrKyQXIiW+nGbYpy8XK24NQHE83caxWt+mIymTT19DGFbNWNLfEwsrkSmN64lVWB9ag==", + "dev": true, + "dependencies": { + "randombytes": "^2.1.0" + } + }, "node_modules/shebang-command": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", @@ -1518,6 +1964,20 @@ "node": ">=8" } }, + "node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/strip-ansi": { "version": "6.0.1", "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", @@ -1542,6 +2002,21 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, "node_modules/text-table": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", @@ -1654,12 +2129,86 @@ "node": ">=0.10.0" } }, + "node_modules/workerpool": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/workerpool/-/workerpool-6.2.1.tgz", + "integrity": "sha512-ILEIE97kDZvF9Wb9f6h5aXK4swSlKGUcOEGiIYb2OOu/IrDU9iwj0fD//SsA6E5ibwJxpEvhullJY4Sl4GcpAw==", + "dev": true + }, + "node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, "node_modules/wrappy": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", "dev": true }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs": { + "version": "16.2.0", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", + "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", + "dev": true, + "dependencies": { + "cliui": "^7.0.2", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.0", + "y18n": "^5.0.5", + "yargs-parser": "^20.2.2" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs-parser": { + "version": "20.2.4", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.4.tgz", + "integrity": 
"sha512-WOkpgNhPTlE73h4VFAFsOnomJVaovO8VqLDzy5saChRBFQFBoMYirowyW+Q9HB4HFF4Z7VZTiG3iSzJJA29yRA==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs-unparser": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/yargs-unparser/-/yargs-unparser-2.0.0.tgz", + "integrity": "sha512-7pRTIA9Qc1caZ0bZ6RYRGbHJthJWuakf+WmHK0rVeLkNrrGhfoabBNdue6kdINI6r4if7ocq9aD/n7xwKOdzOA==", + "dev": true, + "dependencies": { + "camelcase": "^6.0.0", + "decamelize": "^4.0.0", + "flat": "^5.0.2", + "is-plain-obj": "^2.1.0" + }, + "engines": { + "node": ">=10" + } + }, "node_modules/yocto-queue": { "version": "0.1.0", "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/package.json b/external-crates/move/crates/move-analyzer/trace-adapter/package.json index d61e8914330fc..f80b31d4e6793 100644 --- a/external-crates/move/crates/move-analyzer/trace-adapter/package.json +++ b/external-crates/move/crates/move-analyzer/trace-adapter/package.json @@ -6,17 +6,20 @@ "main": "./out/server.js", "scripts": { "compile": "tsc -p ./", - "lint": "eslint src --ext ts" + "lint": "eslint src --ext ts", + "test": "npm run compile && mocha --config ./.mocharc.yaml" }, "devDependencies": { "@types/node": "20.x", "@typescript-eslint/eslint-plugin": "^7.14.1", "@typescript-eslint/parser": "^7.11.0", "eslint": "^8.57.0", + "line-diff": "^2.1.1", + "mocha": "10.2.0", "typescript": "^5.4.5", "@vscode/debugadapter": "^1.56.0", "@vscode/debugadapter-testsupport": "^1.56.0", "@vscode/debugprotocol": "1.66.0", "toml": "^3.0.0" } -} \ No newline at end of file +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/src/adapter.ts b/external-crates/move/crates/move-analyzer/trace-adapter/src/adapter.ts index 311155f5e2874..38f5550f1875d 100644 --- a/external-crates/move/crates/move-analyzer/trace-adapter/src/adapter.ts +++ b/external-crates/move/crates/move-analyzer/trace-adapter/src/adapter.ts @@ -1,4 +1,5 @@ import { + Breakpoint, Handles, Logger, logger, @@ -18,9 +19,10 @@ import { RuntimeEvents, RuntimeValueType, IRuntimeVariableScope, - CompoundType + CompoundType, + IRuntimeRefValue } from './runtime'; -import { run } from 'node:test'; + const enum LogLevel { Log = 'log', @@ -103,6 +105,9 @@ export class MoveDebugSession extends LoggingDebugSession { this.runtime.on(RuntimeEvents.stopOnStep, () => { this.sendEvent(new StoppedEvent('step', MoveDebugSession.THREAD_ID)); }); + this.runtime.on(RuntimeEvents.stopOnLineBreakpoint, () => { + this.sendEvent(new StoppedEvent('breakpoint', MoveDebugSession.THREAD_ID)); + }); this.runtime.on(RuntimeEvents.end, () => { this.sendEvent(new TerminatedEvent()); }); @@ -117,6 +122,12 @@ export class MoveDebugSession extends LoggingDebugSession { // the adapter implements the configurationDone request response.body.supportsConfigurationDoneRequest = false; + // the adapter supports conditional breakpoints + response.body.supportsConditionalBreakpoints = false; + + // the adapter supports breakpoints that break execution after a specified number of hits + response.body.supportsHitConditionalBreakpoints = false; + // make VS Code use 'evaluate' when hovering over source response.body.supportsEvaluateForHovers = false; @@ -176,6 +187,7 @@ export class MoveDebugSession extends LoggingDebugSession { ): Promise { logger.setup(convertLoggerLogLevel(args.logLevel ?? 
LogLevel.None), false); logger.log(`Launching trace viewer for file: ${args.source} and trace: ${args.traceInfo}`); + try { await this.runtime.start(args.source, args.traceInfo, args.stopOnEntry || false); } catch (err) { @@ -186,13 +198,6 @@ export class MoveDebugSession extends LoggingDebugSession { this.sendEvent(new StoppedEvent('entry', MoveDebugSession.THREAD_ID)); } - protected configurationDoneRequest( - response: DebugProtocol.ConfigurationDoneResponse, - _args: DebugProtocol.ConfigurationDoneArguments - ): void { - this.sendResponse(response); - } - protected threadsRequest(response: DebugProtocol.ThreadsResponse): void { response.body = { threads: [ @@ -216,7 +221,7 @@ export class MoveDebugSession extends LoggingDebugSession { }).reverse(), totalFrames: stack_height, optimized_lines: stack_height > 0 - ? runtimeStack.frames[stack_height - 1].sourceMap.optimizedLines + ? runtimeStack.frames[stack_height - 1].optimizedLines : [] }; } catch (err) { @@ -229,14 +234,15 @@ export class MoveDebugSession extends LoggingDebugSession { /** * Gets the scopes for a given frame. * - * @param frameId identifier of the frame scopes are requested for. + * @param frameID identifier of the frame scopes are requested for. * @returns an array of scopes. + * @throws Error with a descriptive error message if scopes cannot be retrieved. */ - private getScopes(frameId: number): DebugProtocol.Scope[] { + private getScopes(frameID: number): DebugProtocol.Scope[] { const runtimeStack = this.runtime.stack(); - const frame = runtimeStack.frames.find(frame => frame.id === frameId); + const frame = runtimeStack.frames.find(frame => frame.id === frameID); if (!frame) { - throw new Error(`No frame found for id: ${frameId}`); + throw new Error(`No frame found for id: ${frameID} when getting scopes`); } const scopes: DebugProtocol.Scope[] = []; if (frame.locals.length > 0) { @@ -272,6 +278,48 @@ export class MoveDebugSession extends LoggingDebugSession { this.sendResponse(response); } + /** + * Converts a runtime reference value to a DAP variable. + * + * @param value reference value. + * @param name name of variable containing the reference value. + * @param type optional type of the variable containing the reference value. + * @returns a DAP variable. + * @throws Error with a descriptive error message if conversion fails. + */ + private convertRefValue( + value: IRuntimeRefValue, + name: string, + type?: string + ): DebugProtocol.Variable { + const frameID = value.loc.frameID; + const localIndex = value.loc.localIndex; + const runtimeStack = this.runtime.stack(); + const frame = runtimeStack.frames.find(frame => frame.id === frameID); + if (!frame) { + throw new Error('No frame found for id ' + + frameID + + ' when converting ref value for local index ' + + localIndex); + } + // a local will be in one of the scopes at a position corresponding to its local index + let local = undefined; + for (const scope of frame.locals) { + local = scope[localIndex]; + if (local) { + break; + } + } + if (!local) { + throw new Error('No local found for index ' + + localIndex + + ' when converting ref value for frame id ' + + frameID); + } + + return this.convertRuntimeValue(local.value, name, type); + } + /** * Converts a runtime value to a DAP variable. 
* @@ -300,21 +348,28 @@ export class MoveDebugSession extends LoggingDebugSession { value: '(' + value.length + ')[...]', variablesReference: compoundValueReference }; - } else { + } else if ('fields' in value) { const compoundValueReference = this.variableHandles.create(value); - const accessChainParts = value.type.split('::'); + // use type if available as it will have information about whether + // it's a reference or not (e.g., `&mut 0x42::mod::SomeStruct`), + // as opposed to the type that come with the value + // (e.g., `0x42::mod::SomeStruct`) + const actualType = type ? type : value.type; + const accessChainParts = actualType.split('::'); const datatypeName = accessChainParts[accessChainParts.length - 1]; return { name, type: value.variantName - ? value.type + '::' + value.variantName - : value.type, + ? actualType + '::' + value.variantName + : actualType, value: (value.variantName ? datatypeName + '::' + value.variantName : datatypeName ) + '{...}', variablesReference: compoundValueReference }; + } else { + return this.convertRefValue(value, name, type); } } @@ -440,6 +495,27 @@ export class MoveDebugSession extends LoggingDebugSession { this.sendResponse(response); } + protected setBreakPointsRequest(response: DebugProtocol.SetBreakpointsResponse, args: DebugProtocol.SetBreakpointsArguments): void { + try { + const finalBreakpoints = []; + if (args.breakpoints && args.source.path) { + const breakpointLines = args.breakpoints.map(bp => bp.line); + const validatedBreakpoints = this.runtime.setLineBreakpoints(args.source.path, breakpointLines); + for (let i = 0; i < breakpointLines.length; i++) { + finalBreakpoints.push(new Breakpoint(validatedBreakpoints[i], breakpointLines[i])); + } + } + response.body = { + breakpoints: finalBreakpoints + }; + } catch (err) { + response.success = false; + response.message = err instanceof Error ? err.message : String(err); + } + this.sendResponse(response); + } + + protected disconnectRequest( response: DebugProtocol.DisconnectResponse, _args: DebugProtocol.DisconnectArguments diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/src/runtime.ts b/external-crates/move/crates/move-analyzer/trace-adapter/src/runtime.ts index 233e561c3fa41..32fbbac44a6a0 100644 --- a/external-crates/move/crates/move-analyzer/trace-adapter/src/runtime.ts +++ b/external-crates/move/crates/move-analyzer/trace-adapter/src/runtime.ts @@ -6,9 +6,14 @@ import * as crypto from 'crypto'; import * as fs from 'fs'; import * as path from 'path'; import toml from 'toml'; -import { ISourceMap, IFileInfo, readAllSourceMaps } from './source_map_utils'; -import { TraceEffectKind, TraceEvent, TraceEventKind, TraceInstructionKind, TraceLocKind, TraceValKind, TraceValue, readTrace } from './trace_utils'; -import { ModuleInfo } from './utils'; +import { IFileInfo, readAllSourceMaps } from './source_map_utils'; +import { + TraceEffectKind, + TraceEvent, + TraceEventKind, + TraceInstructionKind, + readTrace +} from './trace_utils'; /** * Describes the runtime variable scope (e.g., local variables @@ -30,8 +35,27 @@ export type CompoundType = RuntimeValueType[] | IRuntimeCompundValue; * - boolean, number, string (converted to string) * - compound type (vector, struct, enum) */ -export type RuntimeValueType = string | CompoundType; +export type RuntimeValueType = string | CompoundType | IRuntimeRefValue; +/** + * Locaction of a local variable in the runtime. 
+ */ +export interface IRuntimeVariableLoc { + frameID: number; + localIndex: number; +} + +/** + * Value of a reference in the runtime. + */ +export interface IRuntimeRefValue { + mutable: boolean; + loc: IRuntimeVariableLoc +} + +/** + * Information about a runtime compound value (struct/enum). + */ export interface IRuntimeCompundValue { fields: [string, RuntimeValueType][]; type: string; @@ -53,26 +77,44 @@ interface IRuntimeVariable { * during trace viewing session. */ interface IRuntimeStackFrame { - // Source map for the frame. - sourceMap: ISourceMap; - // Frame identifier. + /** + * Frame identifier. + */ id: number; - // Name of the function in this frame. + /** + * Name of the function in this frame. + */ name: string; - // Path to the file containing the function. + /** + * Path to the file containing the function. + */ file: string; - // Current line in the file correponding to currently viewed instruction. + /** + * Current line in the file correponding to currently viewed instruction. + */ line: number; // 1-based - // Local variable types by variable frame index. + /** + * Local variable types by variable frame index. + */ localsTypes: string[]; - // Local variables per scope (local scope at 0 and then following block scopes), - // indexed by variable frame index. + /** + * Local variable names by variable frame index. + */ + localsNames: string[]; + /** + * Local variables per scope (local scope at 0 and then following block scopes), + * indexed by variable frame index. + */ locals: (IRuntimeVariable | undefined)[][]; /** * Line of the last call instruction that was processed in this frame. * It's needed to make sure that step/next into/over call works correctly. */ lastCallInstructionLine: number | undefined; + /** + * Lines that are not present in the source map. + */ + optimizedLines: number[] } /** @@ -87,9 +129,19 @@ export interface IRuntimeStack { * Events emitted by the runtime during trace viewing session. */ export enum RuntimeEvents { - // Stop after step/next action is performed. + /** + * Stop after step/next action is performed. + */ stopOnStep = 'stopOnStep', - // Finish trace viewing session. + + /** + * Stop after a line breakpoint is hit. + */ + stopOnLineBreakpoint = 'stopOnLineBreakpoint', + + /** + * Finish trace viewing session. + */ end = 'end', } @@ -101,7 +153,11 @@ export class Runtime extends EventEmitter { /** * Trace being viewed. */ - private trace = { events: [] as TraceEvent[], localLifetimeEnds: new Map() }; + private trace = { + events: [] as TraceEvent[], + localLifetimeEnds: new Map(), + tracedLines: new Map>() + }; /** * Index of the current trace event being processed. @@ -119,15 +175,16 @@ export class Runtime extends EventEmitter { private filesMap = new Map(); /** - * Map of stringified module info to source maps. + * Map of line breakpoints, keyed on a file path. */ - private sourceMapsMap = new Map(); + private lineBreakpoints = new Map>(); /** * Start a trace viewing session and set up the initial state of the runtime. * * @param source path to the Move source file whose traces are to be viewed. * @param traceInfo trace selected for viewing. + * @throws Error with a descriptive error message if starting runtime has failed. 
* */ public async start(source: string, traceInfo: string, stopOnEntry: boolean): Promise { @@ -151,11 +208,11 @@ export class Runtime extends EventEmitter { hashToFileMap(path.join(pkgRoot, 'sources'), this.filesMap); // create source maps for all modules in the `build` directory - this.sourceMapsMap = readAllSourceMaps(path.join(pkgRoot, 'build', pkg_name, 'source_maps'), this.filesMap); + const sourceMapsMap = readAllSourceMaps(path.join(pkgRoot, 'build', pkg_name, 'source_maps'), this.filesMap); // reconstruct trace file path from trace info const traceFilePath = path.join(pkgRoot, 'traces', traceInfo.replace(/:/g, '_') + '.json'); - this.trace = readTrace(traceFilePath); + this.trace = readTrace(traceFilePath, sourceMapsMap, this.filesMap); // start trace viewing session with the first trace event this.eventIndex = 0; @@ -169,8 +226,10 @@ export class Runtime extends EventEmitter { this.newStackFrame( currentEvent.id, currentEvent.name, - currentEvent.modInfo, - currentEvent.localsTypes + currentEvent.fileHash, + currentEvent.localsTypes, + currentEvent.localsNames, + currentEvent.optimizedLines ); this.frameStack = { frames: [newFrame] @@ -206,8 +265,10 @@ export class Runtime extends EventEmitter { if (currentEvent.type === TraceEventKind.Instruction) { const stackHeight = this.frameStack.frames.length; if (stackHeight <= 0) { - throw new Error('No frame on the stack when processing Instruction event at PC: ' - + currentEvent.pc); + throw new Error('No frame on the stack when processing Instruction event on line: ' + + currentEvent.loc.line + + ' in column: ' + + currentEvent.loc.column); } const currentFrame = this.frameStack.frames[stackHeight - 1]; // remember last call instruction line before it (potentially) changes @@ -287,13 +348,26 @@ export class Runtime extends EventEmitter { this.sendEvent(RuntimeEvents.stopOnStep); return false; } else if (currentEvent.type === TraceEventKind.OpenFrame) { + // if function is native then the next event will be CloseFrame + if (currentEvent.isNative) { + if (this.trace.events.length <= this.eventIndex + 1 || + this.trace.events[this.eventIndex + 1].type !== TraceEventKind.CloseFrame) { + throw new Error('Expected an CloseFrame event after native OpenFrame event'); + } + // skip over CloseFrame as there is no frame to pop + this.eventIndex++; + return this.step(next, stopAtCloseFrame); + } + // create a new frame and push it onto the stack const newFrame = this.newStackFrame( currentEvent.id, currentEvent.name, - currentEvent.modInfo, - currentEvent.localsTypes + currentEvent.fileHash, + currentEvent.localsTypes, + currentEvent.localsNames, + currentEvent.optimizedLines ); // set values of parameters in the new frame this.frameStack.frames.push(newFrame); @@ -325,16 +399,18 @@ export class Runtime extends EventEmitter { } else if (currentEvent.type === TraceEventKind.Effect) { const effect = currentEvent.effect; if (effect.type === TraceEffectKind.Write) { - const stackHeight = this.frameStack.frames.length; - if (stackHeight <= 0) { - throw new Error('No frame on the stack when processing a write'); - } - const currentFrame = this.frameStack.frames[stackHeight - 1]; - const traceLocation = effect.location; + const traceLocation = effect.loc; const traceValue = effect.value; - if (traceLocation.type === TraceLocKind.Local) { - localWrite(currentFrame, traceLocation.localIndex, traceValue); + const frame = this.frameStack.frames.find( + frame => frame.id === traceLocation.frameID + ); + if (!frame) { + throw new Error('Cannot find frame with 
ID: ' + + traceLocation.frameID + + ' when processing Write effect for local variable at index: ' + + traceLocation.localIndex); } + localWrite(frame, traceLocation.localIndex, traceValue); } return this.step(next, stopAtCloseFrame); } else { @@ -396,7 +472,54 @@ export class Runtime extends EventEmitter { if (this.step(/* next */ false, /* stopAtCloseFrame */ false)) { return true; } + let currentEvent = this.trace.events[this.eventIndex]; + if (currentEvent.type === TraceEventKind.Instruction) { + const stackHeight = this.frameStack.frames.length; + if (stackHeight <= 0) { + throw new Error('No frame on the stack when processing Instruction event on line: ' + + currentEvent.loc.line + + ' in column: ' + + currentEvent.loc.column); + } + const currentFrame = this.frameStack.frames[stackHeight - 1]; + const breakpoints = this.lineBreakpoints.get(currentFrame.file); + if (!breakpoints) { + continue; + } + if (breakpoints.has(currentEvent.loc.line)) { + this.sendEvent(RuntimeEvents.stopOnLineBreakpoint); + return false; + } + } + } + } + + /** + * Sets line breakpoints for a file (resetting any existing ones). + * + * @param path file path. + * @param lines breakpoints lines. + * @returns array of booleans indicating if a breakpoint was set on a line. + * @throws Error with a descriptive error message if breakpoints cannot be set. + */ + public setLineBreakpoints(path: string, lines: number[]): boolean[] { + const breakpoints = new Set(); + const tracedLines = this.trace.tracedLines.get(path); + // Set all breakpoints to invalid and validate the correct ones in the loop, + // otherwise let them all be invalid if there are no traced lines. + // Valid breakpoints are those that are on lines that have at least + // one instruction in the trace on them. + const validated = lines.map(() => false); + if (tracedLines) { + for (let i = 0; i < lines.length; i++) { + if (tracedLines.has(lines[i])) { + validated[i] = true; + breakpoints.add(lines[i]); + } + } } + this.lineBreakpoints.set(path, breakpoints); + return validated; } /** @@ -411,24 +534,6 @@ export class Runtime extends EventEmitter { currentFrame: IRuntimeStackFrame, instructionEvent: Extract ): [boolean, number] { - const currentFun = currentFrame.sourceMap.functions.get(currentFrame.name); - if (!currentFun) { - throw new Error(`Cannot find function: ${currentFrame.name} in source map`); - } - - // if map does not contain an entry for a PC that can be found in the trace file, - // it means that the position of the last PC in the source map should be used - let currentPCLoc = instructionEvent.pc >= currentFun.pcLocs.length - ? 
currentFun.pcLocs[currentFun.pcLocs.length - 1] - : currentFun.pcLocs[instructionEvent.pc]; - - if (!currentPCLoc) { - throw new Error('Cannot find location for PC: ' - + instructionEvent.pc - + ' in function: ' - + currentFrame.name); - } - // if current instruction ends lifetime of a local variable, mark this in the // local variable array const frameLocalLifetimeEnds = this.trace.localLifetimeEnds.get(currentFrame.id); @@ -451,18 +556,18 @@ export class Runtime extends EventEmitter { } } } - + const loc = instructionEvent.loc; if (instructionEvent.kind === TraceInstructionKind.CALL || instructionEvent.kind === TraceInstructionKind.CALL_GENERIC) { - currentFrame.lastCallInstructionLine = currentPCLoc.line; + currentFrame.lastCallInstructionLine = loc.line; } - if (currentPCLoc.line === currentFrame.line) { + if (loc.line === currentFrame.line) { // so that instructions on the same line can be bypassed - return [true, currentPCLoc.line]; + return [true, loc.line]; } else { - currentFrame.line = currentPCLoc.line; - return [false, currentPCLoc.line]; + currentFrame.line = loc.line; + return [false, loc.line]; } } @@ -474,41 +579,38 @@ export class Runtime extends EventEmitter { * @param funName function name. * @param modInfo information about module containing the function. * @param localsTypes types of local variables in the frame. + * @param localsNames names of local variables in the frame. + * @param optimizedLines lines that are not present in the source map. * @returns new frame. * @throws Error with a descriptive error message if frame cannot be constructed. */ private newStackFrame( frameID: number, funName: string, - modInfo: ModuleInfo, - localsTypes: string[] + fileHash: string, + localsTypes: string[], + localsNames: string[], + optimizedLines: number[] ): IRuntimeStackFrame { - const sourceMap = this.sourceMapsMap.get(JSON.stringify(modInfo)); - - if (!sourceMap) { - throw new Error('Cannot find source map for module: ' - + modInfo.name - + ' in package: ' - + modInfo.addr); - } - const currentFile = this.filesMap.get(sourceMap.fileHash); + const currentFile = this.filesMap.get(fileHash); if (!currentFile) { - throw new Error(`Cannot find file with hash: ${sourceMap.fileHash}`); + throw new Error(`Cannot find file with hash: ${fileHash}`); } let locals = []; // create first scope for local variables locals[0] = []; const stackFrame: IRuntimeStackFrame = { - sourceMap, id: frameID, name: funName, file: currentFile.path, line: 0, // line will be updated when next event (Instruction) is processed localsTypes, + localsNames, locals, lastCallInstructionLine: undefined, + optimizedLines }; if (this.trace.events.length <= this.eventIndex + 1 || @@ -529,53 +631,181 @@ export class Runtime extends EventEmitter { this.emit(event, ...args); }, 0); } + + // + // Utility functions for testing and debugging. + // + + /** + * Whitespace used for indentation in the string representation of the runtime. + */ + private singleTab = ' '; + + /** + * Returns a string representig the current state of the runtime. + * + * @returns string representation of the runtime. 
+ */ + public toString(): string { + let res = 'current frame stack:\n'; + for (const frame of this.frameStack.frames) { + res += this.singleTab + 'function: ' + frame.name + ' (line ' + frame.line + ')\n'; + for (let i = 0; i < frame.locals.length; i++) { + res += this.singleTab + this.singleTab + 'scope ' + i + ' :\n'; + for (let j = 0; j < frame.locals[i].length; j++) { + const local = frame.locals[i][j]; + if (local) { + res += this.varToString(this.singleTab + + this.singleTab + + this.singleTab, local) + '\n'; + } + } + } + } + if (this.lineBreakpoints && this.lineBreakpoints.size > 0) { + res += 'line breakpoints\n'; + for (const [file, breakpoints] of this.lineBreakpoints) { + res += this.singleTab + path.basename(file) + '\n'; + for (const line of breakpoints) { + res += this.singleTab + this.singleTab + line + '\n'; + } + } + } + return res; + } + /** + * Returns a string representation of a runtime variable. + * + * @param variable runtime variable. + * @returns string representation of the variable. + */ + private varToString(tabs: string, variable: IRuntimeVariable): string { + return this.valueToString(tabs, variable.value, variable.name, variable.type); + } + + /** + * Returns a string representation of a runtime compound value. + * + * @param compoundValue runtime compound value. + * @returns string representation of the compound value. + */ + private compoundValueToString(tabs: string, compoundValue: IRuntimeCompundValue): string { + const type = compoundValue.variantName + ? compoundValue.type + '::' + compoundValue.variantName + : compoundValue.type; + let res = '(' + type + ') {\n'; + for (const [name, value] of compoundValue.fields) { + res += this.valueToString(tabs + this.singleTab, value, name); + } + res += tabs + '}\n'; + return res; + } + + /** + * Returns a string representation of a runtime reference value. + * + * @param refValue runtime reference value. + * @param name name of the variable containing reference value. + * @param type optional type of the variable containing reference value. + * @returns string representation of the reference value. + */ + private refValueToString( + tabs: string, + refValue: IRuntimeRefValue, + name: string, + type?: string + ): string { + let res = ''; + const frame = this.frameStack.frames.find(frame => frame.id === refValue.loc.frameID); + let local = undefined; + if (!frame) { + return res; + } + for (const scope of frame.locals) { + local = scope[refValue.loc.localIndex]; + if (local) { + break; + } + } + if (!local) { + return res; + } + return this.valueToString(tabs, local.value, name, type); + } + + /** + * Returns a string representation of a runtime value. + * + * @param value runtime value. + * @param name name of the variable containing the value. + * @param type optional type of the variable containing the value. + * @returns string representation of the value. 
+ */ + private valueToString( + tabs: string, + value: RuntimeValueType, + name: string, + type?: string + ): string { + let res = ''; + if (typeof value === 'string') { + res += tabs + name + ' : ' + value + '\n'; + if (type) { + res += tabs + 'type: ' + type + '\n'; + } + } else if (Array.isArray(value)) { + res += tabs + name + ' : [\n'; + for (let i = 0; i < value.length; i++) { + res += this.valueToString(tabs + this.singleTab, value[i], String(i)); + } + res += tabs + ']\n'; + if (type) { + res += tabs + 'type: ' + type + '\n'; + } + return res; + } else if ('fields' in value) { + res += tabs + name + ' : ' + this.compoundValueToString(tabs, value); + if (type) { + res += tabs + 'type: ' + type + '\n'; + } + } else { + res += this.refValueToString(tabs, value, name, type); + } + return res; + } } /** - * Handles a write to a local variable in the current frame. + * Handles a write to a local variable in a stack frame. * - * @param currentFrame current frame. + * @param frame stack frame frame. * @param localIndex variable index in the frame. * @param runtimeValue variable value. */ function localWrite( - currentFrame: IRuntimeStackFrame, + frame: IRuntimeStackFrame, localIndex: number, - traceValue: TraceValue + value: RuntimeValueType ): void { - if (traceValue.type !== TraceValKind.Runtime) { - throw new Error('Expected a RuntimeValue when writing local variable at index: ' - + localIndex - + ' in function: ' - + currentFrame.name - + ' but got: ' - + traceValue.type); - } - const type = currentFrame.localsTypes[localIndex]; + const type = frame.localsTypes[localIndex]; if (!type) { throw new Error('Cannot find type for local variable at index: ' + localIndex + ' in function: ' - + currentFrame.name); - } - const value = traceValue.value; - const funEntry = currentFrame.sourceMap.functions.get(currentFrame.name); - if (!funEntry) { - throw new Error('Cannot find function entry in source map for function: ' - + currentFrame.name); + + frame.name); } - const name = funEntry.localsNames[localIndex]; + const name = frame.localsNames[localIndex]; if (!name) { throw new Error('Cannot find local variable at index: ' + localIndex + ' in function: ' - + currentFrame.name); + + frame.name); } - const scopesCount = currentFrame.locals.length; + const scopesCount = frame.locals.length; if (scopesCount <= 0) { throw new Error("There should be at least one variable scope in functon" - + currentFrame.name); + + frame.name); } // If a variable has the same name but a different index (it is shadowed) // it has to be put in a different scope (e.g., locals[1], locals[2], etc.). 
@@ -583,7 +813,7 @@ function localWrite( // the outermost one let existingVarScope = -1; for (let i = scopesCount - 1; i >= 0; i--) { - const existingVarIndex = currentFrame.locals[i].findIndex(runtimeVar => { + const existingVarIndex = frame.locals[i].findIndex(runtimeVar => { return runtimeVar && runtimeVar.name === name; }); if (existingVarIndex !== -1 && existingVarIndex !== localIndex) { @@ -592,14 +822,14 @@ function localWrite( } } if (existingVarScope >= 0) { - const shadowedScope = currentFrame.locals[existingVarScope + 1]; + const shadowedScope = frame.locals[existingVarScope + 1]; if (!shadowedScope) { - currentFrame.locals.push([]); + frame.locals.push([]); } - currentFrame.locals[existingVarScope + 1][localIndex] = { name, value, type }; + frame.locals[existingVarScope + 1][localIndex] = { name, value, type }; } else { // put variable in the "main" locals scope - currentFrame.locals[0][localIndex] = { name, value, type }; + frame.locals[0][localIndex] = { name, value, type }; } } @@ -680,3 +910,4 @@ function fileHash(fileContents: string): Uint8Array { const hash = crypto.createHash('sha256').update(fileContents).digest(); return new Uint8Array(hash); } + diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/src/source_map_utils.ts b/external-crates/move/crates/move-analyzer/trace-adapter/src/source_map_utils.ts index 23edbd8424175..9fdc0193224dc 100644 --- a/external-crates/move/crates/move-analyzer/trace-adapter/src/source_map_utils.ts +++ b/external-crates/move/crates/move-analyzer/trace-adapter/src/source_map_utils.ts @@ -49,7 +49,7 @@ interface JSONSrcRootObject { /** * Describes a location in the source file. */ -interface ILoc { +export interface ILoc { line: number; column: number; } @@ -88,7 +88,9 @@ export interface ISourceMap { fileHash: string modInfo: ModuleInfo, functions: Map, - // Lines that are not present in the source map. + /** + * Lines that are not present in the source map. + */ optimizedLines: number[] } diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/src/trace_utils.ts b/external-crates/move/crates/move-analyzer/trace-adapter/src/trace_utils.ts index b4f48e9d9b25a..76b7230b86539 100644 --- a/external-crates/move/crates/move-analyzer/trace-adapter/src/trace_utils.ts +++ b/external-crates/move/crates/move-analyzer/trace-adapter/src/trace_utils.ts @@ -3,7 +3,13 @@ import * as fs from 'fs'; import { FRAME_LIFETIME, ModuleInfo } from './utils'; -import { IRuntimeCompundValue, RuntimeValueType } from './runtime'; +import { + IRuntimeCompundValue, + RuntimeValueType, + IRuntimeVariableLoc, + IRuntimeRefValue +} from './runtime'; +import { ISourceMap, ILoc, IFileInfo } from './source_map_utils'; // Data types corresponding to trace file JSON schema. 
@@ -30,15 +36,20 @@ interface JSONVectorType { type JSONBaseType = string | JSONStructType | JSONVectorType; +enum JSONTraceRefType { + Mut = 'Mut', + Imm = 'Imm' +} + interface JSONTraceType { - ref_type: string | null; type_: JSONBaseType; + ref_type?: JSONTraceRefType } -type JSONTraceValueType = boolean | number | string | JSONTraceValueType[] | JSONTraceCompound; +type JSONTraceRuntimeValueType = boolean | number | string | JSONTraceRuntimeValueType[] | JSONTraceCompound; interface JSONTraceFields { - [key: string]: JSONTraceValueType; + [key: string]: JSONTraceRuntimeValueType; } interface JSONTraceCompound { @@ -48,14 +59,31 @@ interface JSONTraceCompound { variant_tag?: number; } -interface JSONTraceRuntimeValue { - value: JSONTraceValueType; +interface JSONTraceRefValueContent { + location: JSONTraceLocation; + snapshot: JSONTraceRuntimeValueType; } -interface JSONTraceValue { - RuntimeValue: JSONTraceRuntimeValue; +interface JSONTraceMutRefValue { + MutRef: JSONTraceRefValueContent; } +interface JSONTraceImmRefValue { + ImmRef: JSONTraceRefValueContent; +} + +interface JSONTraceRuntimeValueContent { + value: JSONTraceRuntimeValueType; +} + +interface JSONTraceRuntimeValue { + RuntimeValue: JSONTraceRuntimeValueContent; +} + +export type JSONTraceRefValue = JSONTraceMutRefValue | JSONTraceImmRefValue; + +export type JSONTraceValue = JSONTraceRuntimeValue | JSONTraceRefValue; + interface JSONTraceFrame { binary_member_index: number; frame_id: number; @@ -92,17 +120,17 @@ type JSONTraceLocation = JSONTraceLocalLocation | JSONTraceIndexedLocation; interface JSONTraceWriteEffect { location: JSONTraceLocation; - root_value_after_write: JSONTraceValue; + root_value_after_write: JSONTraceRuntimeValue; } interface JSONTraceReadEffect { location: JSONTraceLocation; moved: boolean; - root_value_read: JSONTraceValue; + root_value_read: JSONTraceRuntimeValue; } interface JSONTracePushEffect { - RuntimeValue?: JSONTraceRuntimeValue; + RuntimeValue?: JSONTraceRuntimeValueContent; MutRef?: { location: JSONTraceLocation; snapshot: any[]; @@ -110,7 +138,7 @@ interface JSONTracePushEffect { } interface JSONTracePopEffect { - RuntimeValue?: JSONTraceRuntimeValue; + RuntimeValue?: JSONTraceRuntimeValueContent; MutRef?: { location: JSONTraceLocation; snapshot: any[]; @@ -127,7 +155,7 @@ interface JSONTraceEffect { interface JSONTraceCloseFrame { frame_id: number; gas_left: number; - return_: JSONTraceRuntimeValue[]; + return_: JSONTraceRuntimeValueContent[]; } interface JSONTraceEvent { @@ -179,42 +207,17 @@ export type TraceEvent = type: TraceEventKind.OpenFrame, id: number, name: string, - modInfo: ModuleInfo, + fileHash: string + isNative: boolean, localsTypes: string[], - paramValues: TraceValue[] + localsNames: string[], + paramValues: RuntimeValueType[] + optimizedLines: number[] } | { type: TraceEventKind.CloseFrame, id: number } - | { type: TraceEventKind.Instruction, pc: number, kind: TraceInstructionKind } + | { type: TraceEventKind.Instruction, pc: number, loc: ILoc, kind: TraceInstructionKind } | { type: TraceEventKind.Effect, effect: EventEffect }; -/** - * Kind of a location in the trace. - */ -export enum TraceLocKind { - Local = 'Local' - // TODO: other location types -} - -/** - * Location in the trace. - */ -export type TraceLocation = - | { type: TraceLocKind.Local, frameId: number, localIndex: number }; - -/** - * Kind of a value in the trace. - */ -export enum TraceValKind { - Runtime = 'RuntimeValue' - // TODO: other value types -} - -/** - * Value in the trace. 
- */ -export type TraceValue = - | { type: TraceValKind.Runtime, value: RuntimeValueType }; - /** * Kind of an effect of an instruction. */ @@ -227,7 +230,7 @@ export enum TraceEffectKind { * Effect of an instruction. */ export type EventEffect = - | { type: TraceEffectKind.Write, location: TraceLocation, value: TraceValue }; + | { type: TraceEffectKind.Write, loc: IRuntimeVariableLoc, value: RuntimeValueType }; /** * Execution trace consisting of a sequence of trace events. @@ -241,6 +244,30 @@ interface ITrace { * the last variable access). */ localLifetimeEnds: Map; + + /** + * Maps file path to the lines of code present in the trace instructions + * in functions defined in the file. + */ + tracedLines: Map>; +} + +/** + * Information about the frame being currently processed, used during trace generation. + */ +interface ITraceGenFrameInfo { + /** + * Frame ID. + */ + ID: number; + /** + * PC locations traced in the frame + */ + pcLocs: ILoc[]; + /** + * Path to a file containing function represented by the frame. + */ + filePath: string; } /** @@ -248,8 +275,13 @@ interface ITrace { * * @param traceFilePath path to the trace JSON file. * @returns execution trace. + * @throws Error with a descriptive error message if reading trace has failed. */ -export function readTrace(traceFilePath: string): ITrace { +export function readTrace( + traceFilePath: string, + sourceMapsMap: Map, + filesMap: Map +): ITrace { const traceJSON: JSONTraceRootObject = JSON.parse(fs.readFileSync(traceFilePath, 'utf8')); const events: TraceEvent[] = []; // We compute the end of lifetime for a local variable as follows. @@ -272,13 +304,15 @@ export function readTrace(traceFilePath: string): ITrace { // the loop const localLifetimeEnds = new Map(); const locaLifetimeEndsMax = new Map(); - let frameIDs = []; + const tracedLines = new Map>(); + // stack of frame infos pushed on OpenFrame and popped on CloseFrame + const frameInfoStack: ITraceGenFrameInfo[] = []; for (const event of traceJSON.events) { if (event.OpenFrame) { const localsTypes = []; const frame = event.OpenFrame.frame; for (const type of frame.locals_types) { - localsTypes.push(JSONTraceTypeToString(type.type_)); + localsTypes.push(JSONTraceTypeToString(type.type_, type.ref_type)); } // process parameters - store their values in trace and set their // initial lifetimes @@ -287,39 +321,85 @@ for (let i = 0; i < frame.parameters.length; i++) { const value = frame.parameters[i]; if (value) { - const runtimeValue: TraceValue = - { - type: TraceValKind.Runtime, - value: traceValueFromJSON(value.RuntimeValue.value) - }; + const runtimeValue: RuntimeValueType = 'RuntimeValue' in value + ?
traceRuntimeValueFromJSON(value.RuntimeValue.value) + : traceRefValueFromJSON(value); + paramValues.push(runtimeValue); lifetimeEnds[i] = FRAME_LIFETIME; } } localLifetimeEnds.set(frame.frame_id, lifetimeEnds); + const modInfo = { + addr: frame.module.address, + name: frame.module.name + }; + const sourceMap = sourceMapsMap.get(JSON.stringify(modInfo)); + if (!sourceMap) { + throw new Error('Source map for module ' + + modInfo.name + + ' in package ' + + modInfo.addr + + ' not found'); + } + const funEntry = sourceMap.functions.get(frame.function_name); + if (!funEntry) { + throw new Error('Cannot find function entry in source map for function: ' + + frame.function_name); + } events.push({ type: TraceEventKind.OpenFrame, id: frame.frame_id, name: frame.function_name, - modInfo: { - addr: frame.module.address, - name: frame.module.name - }, + fileHash: sourceMap.fileHash, + isNative: frame.is_native, localsTypes, + localsNames: funEntry.localsNames, paramValues, + optimizedLines: sourceMap.optimizedLines + }); + const currentFile = filesMap.get(sourceMap.fileHash); + + if (!currentFile) { + throw new Error(`Cannot find file with hash: ${sourceMap.fileHash}`); + } + frameInfoStack.push({ + ID: frame.frame_id, + pcLocs: funEntry.pcLocs, + filePath: currentFile.path }); - frameIDs.push(frame.frame_id); } else if (event.CloseFrame) { events.push({ type: TraceEventKind.CloseFrame, id: event.CloseFrame.frame_id }); - frameIDs.pop(); + frameInfoStack.pop(); } else if (event.Instruction) { const name = event.Instruction.instruction; + const frameInfo = frameInfoStack[frameInfoStack.length - 1]; + const fid = frameInfo.ID; + const pcLocs = frameInfo.pcLocs; + // if map does not contain an entry for a PC that can be found in the trace file, + // it means that the position of the last PC in the source map should be used + let loc = event.Instruction.pc >= pcLocs.length + ? pcLocs[pcLocs.length - 1] + : pcLocs[event.Instruction.pc]; + + if (!loc) { + throw new Error('Cannot find location for PC: ' + + event.Instruction.pc + + ' in frame: ' + + fid); + } + + const filePath = frameInfo.filePath; + const lines = tracedLines.get(filePath) || new Set(); + lines.add(loc.line); + tracedLines.set(filePath, lines); events.push({ type: TraceEventKind.Instruction, pc: event.Instruction.pc, + loc, kind: name in TraceInstructionKind ? TraceInstructionKind[name as keyof typeof TraceInstructionKind] : TraceInstructionKind.UNKNOWN @@ -327,7 +407,7 @@ export function readTrace(traceFilePath: string): ITrace { // Set end of lifetime for all locals to the max instruction PC ever seen // for a given local (if they are live after this instructions, they will // be reset to INFINITE_LIFETIME when processing subsequent effects). - const currentFrameID = frameIDs[frameIDs.length - 1]; + const currentFrameID = frameInfoStack[frameInfoStack.length - 1].ID; const lifetimeEnds = localLifetimeEnds.get(currentFrameID) || []; const lifetimeEndsMax = locaLifetimeEndsMax.get(currentFrameID) || []; for (let i = 0; i < lifetimeEnds.length; i++) { @@ -349,53 +429,50 @@ export function readTrace(traceFilePath: string): ITrace { // if a local is read or written, set its end of lifetime // to infinite (end of frame) const location = effect.Write ? 
effect.Write.location : effect.Read!.location; - // there must be at least one frame on the stack when processing a write effect - // so we can safely access the last frame ID - const currentFrameID = frameIDs[frameIDs.length - 1]; - const localIndex = processJSONLocation(location, localLifetimeEnds, currentFrameID); - if (localIndex === undefined) { - continue; - } + const loc = processJSONLocalLocation(location, localLifetimeEnds); if (effect.Write) { - const value = traceValueFromJSON(effect.Write.root_value_after_write.RuntimeValue.value); - const traceValue: TraceValue = { - type: TraceValKind.Runtime, - value - }; - const traceLocation: TraceLocation = { - type: TraceLocKind.Local, - frameId: currentFrameID, - localIndex - }; + if (!loc) { + throw new Error('Unsupported location type in Write effect'); + } + // process a write only if the location is supported + const value = 'RuntimeValue' in effect.Write.root_value_after_write + ? traceRuntimeValueFromJSON(effect.Write.root_value_after_write.RuntimeValue.value) + : traceRefValueFromJSON(effect.Write.root_value_after_write); events.push({ type: TraceEventKind.Effect, effect: { type: TraceEffectKind.Write, - location: traceLocation, - value: traceValue + loc, + value } }); } } } } - return { events, localLifetimeEnds }; + return { events, localLifetimeEnds, tracedLines }; } /** * Converts a JSON trace type to a string representation. */ -function JSONTraceTypeToString(type: JSONBaseType): string { - if (typeof type === 'string') { - return type; - } else if ('vector' in type) { - return `vector<${JSONTraceTypeToString(type.vector)}>`; +function JSONTraceTypeToString(baseType: JSONBaseType, refType?: JSONTraceRefType): string { + const refPrefix = refType === JSONTraceRefType.Mut + ? '&mut ' + : (refType === JSONTraceRefType.Imm + ? '&' + : ''); + if (typeof baseType === 'string') { + return refPrefix + baseType; + } else if ('vector' in baseType) { + return refPrefix + `vector<${JSONTraceTypeToString(baseType.vector)}>`; } else { - return JSONTraceAddressToHexString(type.struct.address) + return refPrefix + + JSONTraceAddressToHexString(baseType.struct.address) + "::" - + type.struct.module + + baseType.struct.module + "::" - + type.struct.name; + + baseType.struct.name; } } @@ -415,45 +492,83 @@ function JSONTraceAddressToHexString(address: string): string { } } -/// Processes a location in a JSON trace (sets the end of lifetime for a local variable) -/// and returns the local index if the location is a local variable in the current frame. -function processJSONLocation( - location: JSONTraceLocation, - localLifetimeEnds: Map, - currentFrameID: number -): number | undefined { - // TODO: handle Global and Indexed for other frames - if ('Local' in location) { - const frameId = location.Local[0]; - const localIndex = location.Local[1]; - const lifetimeEnds = localLifetimeEnds.get(frameId) || []; - lifetimeEnds[localIndex] = FRAME_LIFETIME; - localLifetimeEnds.set(frameId, lifetimeEnds); - return localIndex; - } else if ('Indexed' in location) { - const frameId = location.Indexed[0].Local[0]; - if (frameId === currentFrameID) { - const localIndex = location.Indexed[0].Local[1]; - const lifetimeEnds = localLifetimeEnds.get(frameId) || []; +/** + * Processes a location of a local variable in a JSON trace: sets the end of its lifetime + * when requested and returns its location + * @param traceLocation location in the trace. + * @param localLifetimeEnds map of local variable lifetimes (defined if local variable + * lifetime should happen). 
+ * @returns variable location. + */ +function processJSONLocalLocation( + traceLocation: JSONTraceLocation, + localLifetimeEnds?: Map, +): IRuntimeVariableLoc | undefined { + if ('Local' in traceLocation) { + const frameID = traceLocation.Local[0]; + const localIndex = traceLocation.Local[1]; + if (localLifetimeEnds) { + const lifetimeEnds = localLifetimeEnds.get(frameID) || []; lifetimeEnds[localIndex] = FRAME_LIFETIME; - localLifetimeEnds.set(frameId, lifetimeEnds); - return localIndex; + localLifetimeEnds.set(frameID, lifetimeEnds); + } + return { frameID, localIndex }; + } else if ('Indexed' in traceLocation) { + return processJSONLocalLocation(traceLocation.Indexed[0], localLifetimeEnds); + } else { + // Currently, there is nothing that needs to be done for 'Global' locations, + // neither with respect to lifetime nor with respect to location itself. + // This is because `Global` locations currently only represent read-only + // reference values returned from native functions. If there ever was + // a native function that would return a mutable reference, we should + // consider how to handle value changes via such reference, but it's unlikely + // that such a function would ever be added to either Move stdlib or + // the Sui framework. + return undefined; + } +} + +/** + * Converts a JSON trace reference value to a runtime value. + * + * @param value JSON trace reference value. + * @returns runtime value. + * @throws Error with a descriptive error message if conversion has failed. + */ +function traceRefValueFromJSON(value: JSONTraceRefValue): RuntimeValueType { + if ('MutRef' in value) { + const loc = processJSONLocalLocation(value.MutRef.location); + if (!loc) { + throw new Error('Unsupported location type in MutRef'); + } + const ret: IRuntimeRefValue = { mutable: true, loc }; + return ret; + } else { + const loc = processJSONLocalLocation(value.ImmRef.location); + if (!loc) { + throw new Error('Unsupported location type in ImmRef'); } + const ret: IRuntimeRefValue = { mutable: false, loc }; + return ret; } - return undefined; } -/// Converts a JSON trace value to a runtime trace value. -function traceValueFromJSON(value: JSONTraceValueType): RuntimeValueType { +/** + * Converts a JSON trace runtime value to a runtime trace value. + * + * @param value JSON trace runtime value. + * @returns runtime trace value.
+ */ +function traceRuntimeValueFromJSON(value: JSONTraceRuntimeValueType): RuntimeValueType { if (typeof value === 'boolean' || typeof value === 'number' || typeof value === 'string') { return String(value); } else if (Array.isArray(value)) { - return value.map(item => traceValueFromJSON(item)); + return value.map(item => traceRuntimeValueFromJSON(item)); } else { const fields: [string, RuntimeValueType][] = - Object.entries(value.fields).map(([key, value]) => [key, traceValueFromJSON(value)]); + Object.entries(value.fields).map(([key, value]) => [key, traceRuntimeValueFromJSON(value)]); const compoundValue: IRuntimeCompundValue = { fields, type: value.type, diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/.gitignore b/external-crates/move/crates/move-analyzer/trace-adapter/tests/.gitignore new file mode 100644 index 0000000000000..4946de71472b6 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/.gitignore @@ -0,0 +1,6 @@ +**/dependency/* +**/dependencies/* +*.mvsm +*.yaml +*~ +!**/build/ diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/Move.toml b/external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/Move.toml new file mode 100644 index 0000000000000..2402f9f6a7a6a --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/Move.toml @@ -0,0 +1,10 @@ +[package] +name = "breakpoints_line" +edition = "2024.beta" + +[dependencies] +MoveStdlib = { local = "../../../../move-stdlib" } + +[addresses] +breakpoints_line = "0x0" +std = "0x1" diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/build/breakpoints_line/bytecode_modules/m.mv b/external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/build/breakpoints_line/bytecode_modules/m.mv new file mode 100644 index 0000000000000..85130144db6d7 Binary files /dev/null and b/external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/build/breakpoints_line/bytecode_modules/m.mv differ diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/build/breakpoints_line/source_maps/m.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/build/breakpoints_line/source_maps/m.json new file mode 100644 index 0000000000000..621b2955ba6dc --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/build/breakpoints_line/source_maps/m.json @@ -0,0 +1 @@ 
+{"definition_location":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":218,"end":219},"module_name":["0000000000000000000000000000000000000000000000000000000000000000","m"],"struct_map":{},"enum_map":{},"function_map":{"0":{"definition_location":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":226,"end":229},"type_parameters":[],"parameters":[["p#0#0",{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":230,"end":231}]],"returns":[{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":239,"end":242}],"locals":[["%#1",{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":263,"end":322}],["res#1#0",{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":257,"end":260}]],"nops":{},"code_map":{"0":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":267,"end":268},"1":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":271,"end":272},"2":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":269,"end":270},"3":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":263,"end":322},"4":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":284,"end":285},"5":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":288,"end":289},"6":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":286,"end":287},"7":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":263,"end":322},"9":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":311,"end":312},"10":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":315,"end":316},"11":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":313,"end":314},"12":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":263,"end":322},"14":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":253,"end":260},"15":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":336,"end":339},"16":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":342,"end":344},"17":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64
,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":340,"end":341},"18":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":329,"end":376},"20":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":362,"end":365},"21":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":368,"end":369},"22":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":366,"end":367},"23":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":356,"end":359},"24":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":329,"end":376},"25":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":388,"end":391},"26":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":394,"end":395},"27":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":392,"end":393},"28":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":382,"end":385},"29":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":408,"end":411},"30":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":414,"end":416},"31":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":412,"end":413},"32":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":401,"end":448},"34":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":434,"end":437},"35":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":440,"end":441},"36":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":438,"end":439},"37":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":428,"end":431},"38":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":401,"end":448},"39":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":454,"end":457}},"is_native":false},"1":{"definition_location":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":473,"end":477},"type_parameters":[],"parameters":[],"returns":[],"locals":[["_res#1#0",{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,
32,225],"start":494,"end":498}]],"nops":{},"code_map":{"0":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":505,"end":506},"1":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":501,"end":507},"2":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":490,"end":498},"3":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":520,"end":524},"4":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":531,"end":535},"5":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":527,"end":536},"6":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":525,"end":526},"7":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":513,"end":517},"8":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":549,"end":553},"9":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":560,"end":564},"10":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":556,"end":565},"11":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":554,"end":555},"12":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":542,"end":546},"13":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":565,"end":566}},"is_native":false},"2":{"definition_location":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":193,"end":627},"type_parameters":[],"parameters":[],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[91,218,138,99,176,49,253,88,125,5,135,72,97,64,129,189,114,229,240,149,48,50,5,204,208,67,12,41,142,185,32,225],"start":193,"end":627}},"is_native":false}},"constant_map":{}} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/build/breakpoints_line/sources/m.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/build/breakpoints_line/sources/m.move new file mode 100644 index 0000000000000..d7a70bc448f21 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/build/breakpoints_line/sources/m.move @@ -0,0 +1,30 @@ +// Test line breakpoints: +// - setting valid and invalid breakpoints +// - break at a breakpoint in the callee +// - break at a breakpoint after the loop +// - break at the breakpoint in the loop +module breakpoints_line::m; + +fun foo(p: u64): u64 { + let mut res = if (p < 1) { + p + p + } else { + p + 1 + }; + + while (res < 10) { + res = res + 1; + }; + res = res + p; + while (res < 
16) { + res = res + 1; + }; + res +} + +#[test] +fun test() { + let mut _res = foo(1); + _res = _res + foo(_res); + _res = _res + foo(_res); // to force another unoptimized read to keep `res` visible +}
diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/sources/m.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/sources/m.move
new file mode 100644
index 0000000000000..d7a70bc448f21
--- /dev/null
+++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/sources/m.move
@@ -0,0 +1,30 @@
+// Test line breakpoints:
+// - setting valid and invalid breakpoints
+// - break at a breakpoint in the callee
+// - break at a breakpoint after the loop
+// - break at the breakpoint in the loop
+module breakpoints_line::m;
+
+fun foo(p: u64): u64 {
+ let mut res = if (p < 1) {
+ p + p
+ } else {
+ p + 1
+ };
+
+ while (res < 10) {
+ res = res + 1;
+ };
+ res = res + p;
+ while (res < 16) {
+ res = res + 1;
+ };
+ res
+}
+
+#[test]
+fun test() {
+ let mut _res = foo(1);
+ _res = _res + foo(_res);
+ _res = _res + foo(_res); // to force another unoptimized read to keep `res` visible
+}
diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/test.exp b/external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/test.exp
new file mode 100644
index 0000000000000..b9ff142a58d84
--- /dev/null
+++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/test.exp
@@ -0,0 +1,63 @@
+current frame stack:
+ function: test (line 27)
+ scope 0 :
+line breakpoints
+ m.move
+ 12
+ 18
+ 20
+current frame stack:
+ function: test (line 27)
+ scope 0 :
+ function: foo (line 12)
+ scope 0 :
+ p : 1
+ type: u64
+
+line breakpoints
+ m.move
+ 12
+ 18
+ 20
+current frame stack:
+ function: test (line 27)
+ scope 0 :
+ function: foo (line 18)
+ scope 0 :
+ p : 1
+ type: u64
+
+ res : 10
+ type: u64
+
+line breakpoints
+ m.move
+ 12
+ 18
+ 20
+current frame stack:
+ function: test (line 27)
+ scope 0 :
+ function: foo (line 20)
+ scope 0 :
+ res : 11
+ type: u64
+
+line breakpoints
+ m.move
+ 12
+ 18
+ 20
+current frame stack:
+ function: test (line 27)
+ scope 0 :
+ function: foo (line 20)
+ scope 0 :
+ res : 12
+ type: u64
+
+line breakpoints
+ m.move
+ 12
+ 18
+ 20
diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/trace.spec.js b/external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/trace.spec.js
new file mode 100644
index 0000000000000..80b5d47fb64d3
--- /dev/null
+++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/trace.spec.js
@@ -0,0 +1,27 @@
+const path = require('path');
+let action = (runtime) => {
+ const filePath = path.join(__dirname, 'sources', `m.move`);
+ let res = '';
+ runtime.setLineBreakpoints(filePath, [
+ 10, // invalid (in if branch not traced)
+ 12, // valid (in traced if branch)
+ 14, // invalid (empty line)
+ 18, // valid (past loop)
+ 20 // valid (in loop)
+ ]);
+ res += runtime.toString();
+ // advance to the caller
+ runtime.continue();
+ res += runtime.toString();
+ // advance beyond the loop
+ runtime.continue();
+ res += runtime.toString();
+ // advance into the loop
+ runtime.continue();
+ res += runtime.toString();
+ // advance into the loop again
+ runtime.continue();
+ res += runtime.toString();
+ return res;
+};
+run_spec(__dirname, action);
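
The spec above, together with the pre-recorded trace under traces/ and the expected output in test.exp, shows how each trace-adapter test in this change is wired: the action callback drives the debugger runtime, accumulates runtime.toString() snapshots, and hands the resulting string to run_spec, which presumably compares it against test.exp. The sketch below is illustrative only and not part of the diff; it assumes nothing beyond the runtime surface already visible above (setLineBreakpoints, continue, toString), and the single-breakpoint scenario it exercises is hypothetical.

// Illustrative sketch (not part of this change): the same harness pattern with a
// single breakpoint, assuming only the runtime API used in trace.spec.js above.
const path = require('path');
let action = (runtime) => {
    const filePath = path.join(__dirname, 'sources', 'm.move');
    let res = '';
    runtime.setLineBreakpoints(filePath, [20]); // hypothetical: keep only the in-loop breakpoint
    res += runtime.toString();                  // initial state, stopped in `test`
    runtime.continue();                         // run to the breakpoint inside `foo`'s loop
    res += runtime.toString();
    return res;
};
run_spec(__dirname, action);

diff --git 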
a/external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/traces/breakpoints_line__m__test.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/traces/breakpoints_line__m__test.json new file mode 100644 index 0000000000000..7527c40759090 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/breakpoints_line/traces/breakpoints_line__m__test.json @@ -0,0 +1 @@ +{"version":1,"events":[{"OpenFrame":{"frame":{"frame_id":0,"function_name":"test","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":1,"type_instantiation":[],"parameters":[],"return_types":[],"locals_types":[{"type_":"u64","ref_type":null}],"is_native":false},"gas_left":1000000000}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999997,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999997,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":4,"function_name":"foo","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":1}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":"u64","ref_type":null},{"type_":"u64","ref_type":null},{"type_":"u64","ref_type":null}],"is_native":false},"gas_left":999999997}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999978,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,0]},"root_value_read":{"RuntimeValue":{"value":1}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999975,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999972,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Push":{"RuntimeValue":{"value":false}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999971,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":false}}}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999999953,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,0]},"root_value_read":{"RuntimeValue":{"value":1}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999999950,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":11,"gas_left":999999947,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Push":{"RuntimeValue":{"value":2}}}},{"Instruction":{"type_parameters":[],"pc":12,"gas_left":999999946,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":2}}}},{"Effect":{"Write":{"location":{"Local":[4,1]},"root_value_after_write":{"RuntimeValue":{"value":2}}}}},{"Instruction":{"type_parameters":[],"pc":13,"gas_left":999999928,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,1]},"root_value_read":{"RuntimeValue":{"value":2}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":2}}}},{"Instruction":{"type_parameters":[],"pc":14,"gas_left":999999927,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":2}}}},{"Effect":{"Write":{"location":{"Local":[4,2]},"
root_value_after_write":{"RuntimeValue":{"value":2}}}}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999999909,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":2}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":2}}}},{"Instruction":{"type_parameters":[],"pc":16,"gas_left":999999906,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":10}}}},{"Instruction":{"type_parameters":[],"pc":17,"gas_left":999999903,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":10}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":2}}}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":18,"gas_left":999999902,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":19,"gas_left":999999901,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":20,"gas_left":999999883,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":2}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":2}}}},{"Instruction":{"type_parameters":[],"pc":21,"gas_left":999999880,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":22,"gas_left":999999877,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":2}}}},{"Effect":{"Push":{"RuntimeValue":{"value":3}}}},{"Instruction":{"type_parameters":[],"pc":23,"gas_left":999999876,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":3}}}},{"Effect":{"Write":{"location":{"Local":[4,2]},"root_value_after_write":{"RuntimeValue":{"value":3}}}}},{"Instruction":{"type_parameters":[],"pc":24,"gas_left":999999875,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999999857,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":3}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":3}}}},{"Instruction":{"type_parameters":[],"pc":16,"gas_left":999999854,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":10}}}},{"Instruction":{"type_parameters":[],"pc":17,"gas_left":999999851,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":10}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":3}}}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":18,"gas_left":999999850,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":19,"gas_left":999999849,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":20,"gas_left":999999831,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":3}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":3}}}},{"Instruction":{"type_parameters":[],"pc":21,"gas_left":999999828,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":22,"gas_left":999999825,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":3}}}},{"Effect":{"Push":{"RuntimeValue":{"value":4}}}},{"Instruction":{"type_parameters":[],"pc":23,"gas_left":999999824,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":4}}}},{"Effect":{"Write":{"location":{"Local":[4,2]},"ro
ot_value_after_write":{"RuntimeValue":{"value":4}}}}},{"Instruction":{"type_parameters":[],"pc":24,"gas_left":999999823,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999999805,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":4}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":4}}}},{"Instruction":{"type_parameters":[],"pc":16,"gas_left":999999802,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":10}}}},{"Instruction":{"type_parameters":[],"pc":17,"gas_left":999999799,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":10}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":4}}}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":18,"gas_left":999999798,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":19,"gas_left":999999797,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":20,"gas_left":999999779,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":4}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":4}}}},{"Instruction":{"type_parameters":[],"pc":21,"gas_left":999999776,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":22,"gas_left":999999773,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":4}}}},{"Effect":{"Push":{"RuntimeValue":{"value":5}}}},{"Instruction":{"type_parameters":[],"pc":23,"gas_left":999999772,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":5}}}},{"Effect":{"Write":{"location":{"Local":[4,2]},"root_value_after_write":{"RuntimeValue":{"value":5}}}}},{"Instruction":{"type_parameters":[],"pc":24,"gas_left":999999771,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999999753,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":5}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":5}}}},{"Instruction":{"type_parameters":[],"pc":16,"gas_left":999999750,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":10}}}},{"Instruction":{"type_parameters":[],"pc":17,"gas_left":999999747,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":10}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":5}}}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":18,"gas_left":999999746,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":19,"gas_left":999999745,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":20,"gas_left":999999727,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":5}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":5}}}},{"Instruction":{"type_parameters":[],"pc":21,"gas_left":999999724,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":22,"gas_left":999999721,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":5}}}},{"Effect":{"Push":{"RuntimeValue":{"value":6}}}},{"Instruction":{"type_parameters":[],"pc":23,"gas_left":999999720,"instruction":"ST_LOC"}},{"Effect"
:{"Pop":{"RuntimeValue":{"value":6}}}},{"Effect":{"Write":{"location":{"Local":[4,2]},"root_value_after_write":{"RuntimeValue":{"value":6}}}}},{"Instruction":{"type_parameters":[],"pc":24,"gas_left":999999719,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999999701,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":6}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":6}}}},{"Instruction":{"type_parameters":[],"pc":16,"gas_left":999999698,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":10}}}},{"Instruction":{"type_parameters":[],"pc":17,"gas_left":999999695,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":10}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":6}}}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":18,"gas_left":999999694,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":19,"gas_left":999999693,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":20,"gas_left":999999675,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":6}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":6}}}},{"Instruction":{"type_parameters":[],"pc":21,"gas_left":999999672,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":22,"gas_left":999999669,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":6}}}},{"Effect":{"Push":{"RuntimeValue":{"value":7}}}},{"Instruction":{"type_parameters":[],"pc":23,"gas_left":999999668,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":7}}}},{"Effect":{"Write":{"location":{"Local":[4,2]},"root_value_after_write":{"RuntimeValue":{"value":7}}}}},{"Instruction":{"type_parameters":[],"pc":24,"gas_left":999999667,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999999649,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":7}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":7}}}},{"Instruction":{"type_parameters":[],"pc":16,"gas_left":999999646,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":10}}}},{"Instruction":{"type_parameters":[],"pc":17,"gas_left":999999643,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":10}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":7}}}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":18,"gas_left":999999642,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":19,"gas_left":999999641,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":20,"gas_left":999999623,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":7}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":7}}}},{"Instruction":{"type_parameters":[],"pc":21,"gas_left":999999620,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":22,"gas_left":999999617,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":7}}}},{"Effect":{"Push":{"RuntimeValue":{"value":8}}}},{"Instructi
on":{"type_parameters":[],"pc":23,"gas_left":999999616,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":8}}}},{"Effect":{"Write":{"location":{"Local":[4,2]},"root_value_after_write":{"RuntimeValue":{"value":8}}}}},{"Instruction":{"type_parameters":[],"pc":24,"gas_left":999999615,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999999597,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":8}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":8}}}},{"Instruction":{"type_parameters":[],"pc":16,"gas_left":999999594,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":10}}}},{"Instruction":{"type_parameters":[],"pc":17,"gas_left":999999591,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":10}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":8}}}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":18,"gas_left":999999590,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":19,"gas_left":999999589,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":20,"gas_left":999999571,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":8}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":8}}}},{"Instruction":{"type_parameters":[],"pc":21,"gas_left":999999568,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":22,"gas_left":999999565,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":8}}}},{"Effect":{"Push":{"RuntimeValue":{"value":9}}}},{"Instruction":{"type_parameters":[],"pc":23,"gas_left":999999564,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":9}}}},{"Effect":{"Write":{"location":{"Local":[4,2]},"root_value_after_write":{"RuntimeValue":{"value":9}}}}},{"Instruction":{"type_parameters":[],"pc":24,"gas_left":999999563,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999999545,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":9}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":9}}}},{"Instruction":{"type_parameters":[],"pc":16,"gas_left":999999542,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":10}}}},{"Instruction":{"type_parameters":[],"pc":17,"gas_left":999999539,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":10}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":9}}}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":18,"gas_left":999999538,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":19,"gas_left":999999537,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":20,"gas_left":999999519,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":9}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":9}}}},{"Instruction":{"type_parameters":[],"pc":21,"gas_left":999999516,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":22,"gas_left":999999513,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"
RuntimeValue":{"value":9}}}},{"Effect":{"Push":{"RuntimeValue":{"value":10}}}},{"Instruction":{"type_parameters":[],"pc":23,"gas_left":999999512,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":10}}}},{"Effect":{"Write":{"location":{"Local":[4,2]},"root_value_after_write":{"RuntimeValue":{"value":10}}}}},{"Instruction":{"type_parameters":[],"pc":24,"gas_left":999999511,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999999493,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":10}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":10}}}},{"Instruction":{"type_parameters":[],"pc":16,"gas_left":999999490,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":10}}}},{"Instruction":{"type_parameters":[],"pc":17,"gas_left":999999487,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":10}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":10}}}},{"Effect":{"Push":{"RuntimeValue":{"value":false}}}},{"Instruction":{"type_parameters":[],"pc":18,"gas_left":999999486,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":false}}}},{"Instruction":{"type_parameters":[],"pc":25,"gas_left":999999468,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":10}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":10}}}},{"Instruction":{"type_parameters":[],"pc":26,"gas_left":999999450,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,0]},"root_value_read":{"RuntimeValue":{"value":1}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":27,"gas_left":999999447,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":10}}}},{"Effect":{"Push":{"RuntimeValue":{"value":11}}}},{"Instruction":{"type_parameters":[],"pc":28,"gas_left":999999446,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":11}}}},{"Effect":{"Write":{"location":{"Local":[4,2]},"root_value_after_write":{"RuntimeValue":{"value":11}}}}},{"Instruction":{"type_parameters":[],"pc":29,"gas_left":999999428,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":11}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":11}}}},{"Instruction":{"type_parameters":[],"pc":30,"gas_left":999999425,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":16}}}},{"Instruction":{"type_parameters":[],"pc":31,"gas_left":999999422,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":16}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":11}}}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":32,"gas_left":999999421,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":33,"gas_left":999999420,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":34,"gas_left":999999402,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":11}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":11}}}},{"Instruction":{"type_parameters":[],"pc":35,"gas_left":999999399,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":36,"gas_left":999999396,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValu
e":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":11}}}},{"Effect":{"Push":{"RuntimeValue":{"value":12}}}},{"Instruction":{"type_parameters":[],"pc":37,"gas_left":999999395,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":12}}}},{"Effect":{"Write":{"location":{"Local":[4,2]},"root_value_after_write":{"RuntimeValue":{"value":12}}}}},{"Instruction":{"type_parameters":[],"pc":38,"gas_left":999999394,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":29,"gas_left":999999376,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":12}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":12}}}},{"Instruction":{"type_parameters":[],"pc":30,"gas_left":999999373,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":16}}}},{"Instruction":{"type_parameters":[],"pc":31,"gas_left":999999370,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":16}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":12}}}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":32,"gas_left":999999369,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":33,"gas_left":999999368,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":34,"gas_left":999999350,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":12}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":12}}}},{"Instruction":{"type_parameters":[],"pc":35,"gas_left":999999347,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":36,"gas_left":999999344,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":12}}}},{"Effect":{"Push":{"RuntimeValue":{"value":13}}}},{"Instruction":{"type_parameters":[],"pc":37,"gas_left":999999343,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":13}}}},{"Effect":{"Write":{"location":{"Local":[4,2]},"root_value_after_write":{"RuntimeValue":{"value":13}}}}},{"Instruction":{"type_parameters":[],"pc":38,"gas_left":999999342,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":29,"gas_left":999999324,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":13}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":13}}}},{"Instruction":{"type_parameters":[],"pc":30,"gas_left":999999321,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":16}}}},{"Instruction":{"type_parameters":[],"pc":31,"gas_left":999999318,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":16}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":13}}}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":32,"gas_left":999999317,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":33,"gas_left":999999316,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":34,"gas_left":999999298,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":13}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":13}}}},{"Instruction":{"type_parameters":[],"pc":35,"gas_left":999999295,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instructi
on":{"type_parameters":[],"pc":36,"gas_left":999999292,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":13}}}},{"Effect":{"Push":{"RuntimeValue":{"value":14}}}},{"Instruction":{"type_parameters":[],"pc":37,"gas_left":999999291,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":14}}}},{"Effect":{"Write":{"location":{"Local":[4,2]},"root_value_after_write":{"RuntimeValue":{"value":14}}}}},{"Instruction":{"type_parameters":[],"pc":38,"gas_left":999999290,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":29,"gas_left":999999272,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":14}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":14}}}},{"Instruction":{"type_parameters":[],"pc":30,"gas_left":999999269,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":16}}}},{"Instruction":{"type_parameters":[],"pc":31,"gas_left":999999266,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":16}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":14}}}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":32,"gas_left":999999265,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":33,"gas_left":999999264,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":34,"gas_left":999999246,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":14}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":14}}}},{"Instruction":{"type_parameters":[],"pc":35,"gas_left":999999243,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":36,"gas_left":999999240,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":14}}}},{"Effect":{"Push":{"RuntimeValue":{"value":15}}}},{"Instruction":{"type_parameters":[],"pc":37,"gas_left":999999239,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":15}}}},{"Effect":{"Write":{"location":{"Local":[4,2]},"root_value_after_write":{"RuntimeValue":{"value":15}}}}},{"Instruction":{"type_parameters":[],"pc":38,"gas_left":999999238,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":29,"gas_left":999999220,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":15}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":15}}}},{"Instruction":{"type_parameters":[],"pc":30,"gas_left":999999217,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":16}}}},{"Instruction":{"type_parameters":[],"pc":31,"gas_left":999999214,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":16}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":15}}}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":32,"gas_left":999999213,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":33,"gas_left":999999212,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":34,"gas_left":999999194,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":15}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":15}}}},{"Instruction":{"type_parameters":[],"pc":35
,"gas_left":999999191,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":36,"gas_left":999999188,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":15}}}},{"Effect":{"Push":{"RuntimeValue":{"value":16}}}},{"Instruction":{"type_parameters":[],"pc":37,"gas_left":999999187,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":16}}}},{"Effect":{"Write":{"location":{"Local":[4,2]},"root_value_after_write":{"RuntimeValue":{"value":16}}}}},{"Instruction":{"type_parameters":[],"pc":38,"gas_left":999999186,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":29,"gas_left":999999168,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":16}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":16}}}},{"Instruction":{"type_parameters":[],"pc":30,"gas_left":999999165,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":16}}}},{"Instruction":{"type_parameters":[],"pc":31,"gas_left":999999162,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":16}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":16}}}},{"Effect":{"Push":{"RuntimeValue":{"value":false}}}},{"Instruction":{"type_parameters":[],"pc":32,"gas_left":999999161,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":false}}}},{"Instruction":{"type_parameters":[],"pc":39,"gas_left":999999143,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":16}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":16}}}},{"Instruction":{"type_parameters":[],"pc":40,"gas_left":999999142,"instruction":"RET"}},{"CloseFrame":{"frame_id":4,"return_":[{"RuntimeValue":{"value":16}}],"gas_left":999999142}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999141,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":16}}}},{"Effect":{"Write":{"location":{"Local":[0,0]},"root_value_after_write":{"RuntimeValue":{"value":16}}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999123,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[0,0]},"root_value_read":{"RuntimeValue":{"value":16}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":16}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999105,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[0,0]},"root_value_read":{"RuntimeValue":{"value":16}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":16}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999105,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":409,"function_name":"foo","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":16}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":"u64","ref_type":null},{"type_":"u64","ref_type":null},{"type_":"u64","ref_type":null}],"is_native":false},"gas_left":999999105}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999086,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[409,0]},"root_value_read":{"RuntimeValue":{"value":16}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":16}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999083,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"va
lue":1}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999080,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":16}}}},{"Effect":{"Push":{"RuntimeValue":{"value":false}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999079,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":false}}}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999999061,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[409,0]},"root_value_read":{"RuntimeValue":{"value":16}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":16}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999999058,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":11,"gas_left":999999055,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":16}}}},{"Effect":{"Push":{"RuntimeValue":{"value":17}}}},{"Instruction":{"type_parameters":[],"pc":12,"gas_left":999999054,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":17}}}},{"Effect":{"Write":{"location":{"Local":[409,1]},"root_value_after_write":{"RuntimeValue":{"value":17}}}}},{"Instruction":{"type_parameters":[],"pc":13,"gas_left":999999036,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[409,1]},"root_value_read":{"RuntimeValue":{"value":17}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":17}}}},{"Instruction":{"type_parameters":[],"pc":14,"gas_left":999999035,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":17}}}},{"Effect":{"Write":{"location":{"Local":[409,2]},"root_value_after_write":{"RuntimeValue":{"value":17}}}}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999999017,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[409,2]},"root_value_read":{"RuntimeValue":{"value":17}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":17}}}},{"Instruction":{"type_parameters":[],"pc":16,"gas_left":999999014,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":10}}}},{"Instruction":{"type_parameters":[],"pc":17,"gas_left":999999011,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":10}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":17}}}},{"Effect":{"Push":{"RuntimeValue":{"value":false}}}},{"Instruction":{"type_parameters":[],"pc":18,"gas_left":999999010,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":false}}}},{"Instruction":{"type_parameters":[],"pc":25,"gas_left":999998992,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[409,2]},"root_value_read":{"RuntimeValue":{"value":17}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":17}}}},{"Instruction":{"type_parameters":[],"pc":26,"gas_left":999998974,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[409,0]},"root_value_read":{"RuntimeValue":{"value":16}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":16}}}},{"Instruction":{"type_parameters":[],"pc":27,"gas_left":999998971,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":16}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":17}}}},{"Effect":{"Push":{"RuntimeValue":{"value":33}}}},{"Instruction":{"type_parameters":[],"pc":28,"gas_left":999998970,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":33}}}},{"Effect":{"Write":{"location":{"Local":[409,2]},"root_value_after_write":{"RuntimeValue":{"value":33}}}}},{"Instructio
n":{"type_parameters":[],"pc":29,"gas_left":999998952,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[409,2]},"root_value_read":{"RuntimeValue":{"value":33}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":33}}}},{"Instruction":{"type_parameters":[],"pc":30,"gas_left":999998949,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":16}}}},{"Instruction":{"type_parameters":[],"pc":31,"gas_left":999998946,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":16}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":33}}}},{"Effect":{"Push":{"RuntimeValue":{"value":false}}}},{"Instruction":{"type_parameters":[],"pc":32,"gas_left":999998945,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":false}}}},{"Instruction":{"type_parameters":[],"pc":39,"gas_left":999998927,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[409,2]},"root_value_read":{"RuntimeValue":{"value":33}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":33}}}},{"Instruction":{"type_parameters":[],"pc":40,"gas_left":999998926,"instruction":"RET"}},{"CloseFrame":{"frame_id":409,"return_":[{"RuntimeValue":{"value":33}}],"gas_left":999998926}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999998923,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":33}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":16}}}},{"Effect":{"Push":{"RuntimeValue":{"value":49}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999998922,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":49}}}},{"Effect":{"Write":{"location":{"Local":[0,0]},"root_value_after_write":{"RuntimeValue":{"value":49}}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999998904,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[0,0]},"root_value_read":{"RuntimeValue":{"value":49}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":49}}}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999998886,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[0,0]},"root_value_read":{"RuntimeValue":{"value":49}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":49}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999998886,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":493,"function_name":"foo","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":49}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":"u64","ref_type":null},{"type_":"u64","ref_type":null},{"type_":"u64","ref_type":null}],"is_native":false},"gas_left":999998886}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999998867,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[493,0]},"root_value_read":{"RuntimeValue":{"value":49}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":49}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999998864,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999998861,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":49}}}},{"Effect":{"Push":{"RuntimeValue":{"value":false}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999998860,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":false}}}},{"Instruction":{"type_parameters":[],"pc":9,"g
as_left":999998842,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[493,0]},"root_value_read":{"RuntimeValue":{"value":49}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":49}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999998839,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":11,"gas_left":999998836,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":49}}}},{"Effect":{"Push":{"RuntimeValue":{"value":50}}}},{"Instruction":{"type_parameters":[],"pc":12,"gas_left":999998835,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":50}}}},{"Effect":{"Write":{"location":{"Local":[493,1]},"root_value_after_write":{"RuntimeValue":{"value":50}}}}},{"Instruction":{"type_parameters":[],"pc":13,"gas_left":999998817,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[493,1]},"root_value_read":{"RuntimeValue":{"value":50}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":50}}}},{"Instruction":{"type_parameters":[],"pc":14,"gas_left":999998816,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":50}}}},{"Effect":{"Write":{"location":{"Local":[493,2]},"root_value_after_write":{"RuntimeValue":{"value":50}}}}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999998798,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[493,2]},"root_value_read":{"RuntimeValue":{"value":50}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":50}}}},{"Instruction":{"type_parameters":[],"pc":16,"gas_left":999998795,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":10}}}},{"Instruction":{"type_parameters":[],"pc":17,"gas_left":999998792,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":10}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":50}}}},{"Effect":{"Push":{"RuntimeValue":{"value":false}}}},{"Instruction":{"type_parameters":[],"pc":18,"gas_left":999998791,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":false}}}},{"Instruction":{"type_parameters":[],"pc":25,"gas_left":999998773,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[493,2]},"root_value_read":{"RuntimeValue":{"value":50}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":50}}}},{"Instruction":{"type_parameters":[],"pc":26,"gas_left":999998755,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[493,0]},"root_value_read":{"RuntimeValue":{"value":49}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":49}}}},{"Instruction":{"type_parameters":[],"pc":27,"gas_left":999998752,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":49}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":50}}}},{"Effect":{"Push":{"RuntimeValue":{"value":99}}}},{"Instruction":{"type_parameters":[],"pc":28,"gas_left":999998751,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":99}}}},{"Effect":{"Write":{"location":{"Local":[493,2]},"root_value_after_write":{"RuntimeValue":{"value":99}}}}},{"Instruction":{"type_parameters":[],"pc":29,"gas_left":999998733,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[493,2]},"root_value_read":{"RuntimeValue":{"value":99}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":99}}}},{"Instruction":{"type_parameters":[],"pc":30,"gas_left":999998730,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":16}}}},{"Instruction":{"type_parameters":[],"pc":31,"gas_le
ft":999998727,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":16}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":99}}}},{"Effect":{"Push":{"RuntimeValue":{"value":false}}}},{"Instruction":{"type_parameters":[],"pc":32,"gas_left":999998726,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":false}}}},{"Instruction":{"type_parameters":[],"pc":39,"gas_left":999998708,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[493,2]},"root_value_read":{"RuntimeValue":{"value":99}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":99}}}},{"Instruction":{"type_parameters":[],"pc":40,"gas_left":999998707,"instruction":"RET"}},{"CloseFrame":{"frame_id":493,"return_":[{"RuntimeValue":{"value":99}}],"gas_left":999998707}},{"Instruction":{"type_parameters":[],"pc":11,"gas_left":999998704,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":99}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":49}}}},{"Effect":{"Push":{"RuntimeValue":{"value":148}}}},{"Instruction":{"type_parameters":[],"pc":12,"gas_left":999998703,"instruction":"POP"}},{"Effect":{"Pop":{"RuntimeValue":{"value":148}}}},{"Instruction":{"type_parameters":[],"pc":13,"gas_left":999998702,"instruction":"RET"}},{"CloseFrame":{"frame_id":0,"return_":[],"gas_left":999998702}}]} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/Move.toml b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/Move.toml new file mode 100644 index 0000000000000..3ae5f158e61ac --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/Move.toml @@ -0,0 +1,10 @@ +[package] +name = "compound" +edition = "2024.beta" + +[dependencies] +MoveStdlib = { local = "../../../../move-stdlib" } + +[addresses] +compound = "0x0" +std = "0x1" diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/bytecode_modules/dependencies/MoveStdlib/vector.mv b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/bytecode_modules/dependencies/MoveStdlib/vector.mv new file mode 100644 index 0000000000000..79c6d2eb99157 Binary files /dev/null and b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/bytecode_modules/dependencies/MoveStdlib/vector.mv differ diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/bytecode_modules/m.mv b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/bytecode_modules/m.mv new file mode 100644 index 0000000000000..ed59f3dde6851 Binary files /dev/null and b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/bytecode_modules/m.mv differ diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/source_maps/dependencies/MoveStdlib/vector.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/source_maps/dependencies/MoveStdlib/vector.json new file mode 100644 index 0000000000000..126416a07eb7e --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/source_maps/dependencies/MoveStdlib/vector.json @@ -0,0 +1 @@ 
+{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":261,"end":267},"module_name":["0000000000000000000000000000000000000000000000000000000000000001","vector"],"struct_map":{},"enum_map":{},"function_map":{"0":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1202,"end":1207},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1208,"end":1215}]],"parameters":[],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1220,"end":1235}],"locals":[],"nops":{},"code_map":{},"is_native":true},"1":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1329,"end":1335},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1336,"end":1343}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1345,"end":1346}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1367,"end":1370}],"locals":[],"nops":{},"code_map":{},"is_native":true},"2":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1563,"end":1569},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1570,"end":1577}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1579,"end":1580}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1600,"end":1601}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1609,"end":1617}],"locals":[],"nops":{},"code_map":{},"is_native":true},"3":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1724,"end":1733},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1734,"end":1741}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1743,"end":1744}],["e#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1768,"end":1769}]],"returns":[],"locals":[],"nops":{},"code_map":{},"is_native":true},"4":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,17
6,80,79],"start":1968,"end":1978},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1979,"end":1986}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1988,"end":1989}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2013,"end":2014}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2022,"end":2034}],"locals":[],"nops":{},"code_map":{},"is_native":true},"5":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2170,"end":2178},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2179,"end":2186}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2188,"end":2189}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2214,"end":2221}],"locals":[],"nops":{},"code_map":{},"is_native":true},"6":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2342,"end":2355},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2356,"end":2363}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2365,"end":2366}]],"returns":[],"locals":[],"nops":{},"code_map":{},"is_native":true},"7":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2561,"end":2565},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2566,"end":2573}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2575,"end":2576}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2600,"end":2601}],["j#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2608,"end":2609}]],"returns":[],"locals":[],"nops":{},"code_map":{},"is_native":true},"8":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2694,"end":2703},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2704,"end":2711}]],"parameters":[["e#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,
126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2713,"end":2714}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2726,"end":2741}],"locals":[["v#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2760,"end":2761}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2764,"end":2771},"1":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2756,"end":2761},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2781,"end":2782},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2793,"end":2794},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2781,"end":2795},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2805,"end":2806}},"is_native":false},"9":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2900,"end":2907},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2908,"end":2915}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2917,"end":2918}]],"returns":[],"locals":[["back_index#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3057,"end":3067}],["front_index#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3024,"end":3035}],["len#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2956,"end":2959}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2962,"end":2963},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2962,"end":2972},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2956,"end":2959},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2986,"end":2989},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2993,"end":2994},"6":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2990,"end":2992},"7":{"file_hash":[42,57,43,60,190,49,1
64,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2982,"end":3005},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2996,"end":3005},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3038,"end":3039},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3020,"end":3035},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3070,"end":3073},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3076,"end":3077},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3074,"end":3075},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3053,"end":3067},"17":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3094,"end":3105},"18":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3108,"end":3118},"19":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3106,"end":3107},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3087,"end":3260},"21":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3134,"end":3135},"22":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3141,"end":3152},"23":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3154,"end":3164},"24":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3134,"end":3165},"25":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3193,"end":3204},"26":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3207,"end":3208},"27":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3205,"end":3206},"28":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3179,"end":3190},"29":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3235,"end":3245},"30":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3248,"end":3249},"31":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,24
4,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3246,"end":3247},"32":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3222,"end":3232},"33":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3087,"end":3260}},"is_native":false},"10":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3363,"end":3369},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3370,"end":3377}]],"parameters":[["lhs#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3379,"end":3382}],["other#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3410,"end":3415}]],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3444,"end":3449},"1":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3444,"end":3459},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3477,"end":3482},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3477,"end":3493},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3476,"end":3477},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3469,"end":3526},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3495,"end":3498},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3509,"end":3514},"9":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3509,"end":3525},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3495,"end":3526},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3469,"end":3526},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3536,"end":3557},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3536,"end":3541},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3536,"end":3557},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,
79],"start":3557,"end":3558}},"is_native":false},"11":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3660,"end":3668},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3669,"end":3676}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3678,"end":3679}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3700,"end":3704}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3715,"end":3716},"1":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3715,"end":3725},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3729,"end":3730},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3726,"end":3728},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3715,"end":3730}},"is_native":false},"12":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3836,"end":3844},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3845,"end":3852}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3854,"end":3855}],["e#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3875,"end":3876}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3889,"end":3893}],"locals":[["i#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3912,"end":3913}],["len#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3931,"end":3934}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3916,"end":3917},"1":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3908,"end":3913},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3937,"end":3938},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3937,"end":3947},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,1
70,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3931,"end":3934},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3964,"end":3965},"6":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3968,"end":3971},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3966,"end":3967},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3957,"end":4048},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3992,"end":3993},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3994,"end":3995},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3991,"end":3996},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4000,"end":4001},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3997,"end":3999},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3987,"end":4014},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4003,"end":4014},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4010,"end":4014},"21":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4003,"end":4014},"22":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4032,"end":4033},"23":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4036,"end":4037},"24":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4034,"end":4035},"25":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4028,"end":4029},"26":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3957,"end":4048},"27":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4058,"end":4063}},"is_native":false},"13":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4196,"end":4204},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4205,"end":4212}]],"parameters":[["v#0#0",{"fi
le_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4214,"end":4215}],["e#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4235,"end":4236}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4250,"end":4254},{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4256,"end":4259}],"locals":[["i#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4279,"end":4280}],["len#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4298,"end":4301}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4283,"end":4284},"1":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4275,"end":4280},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4304,"end":4305},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4304,"end":4314},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4298,"end":4301},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4331,"end":4332},"6":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4335,"end":4338},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4333,"end":4334},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4324,"end":4420},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4359,"end":4360},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4361,"end":4362},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4358,"end":4363},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4367,"end":4368},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4364,"end":4366},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4354,"end":4386},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4370,"end":4386}
,"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4378,"end":4382},"21":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4384,"end":4385},"22":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4370,"end":4386},"23":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4404,"end":4405},"24":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4408,"end":4409},"25":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4406,"end":4407},"26":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4400,"end":4401},"27":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4324,"end":4420},"28":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4430,"end":4440},"32":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4431,"end":4436},"33":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4438,"end":4439},"34":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4430,"end":4440}},"is_native":false},"14":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4660,"end":4666},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4667,"end":4674}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4676,"end":4677}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4705,"end":4706}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4714,"end":4721}],"locals":[["%#1",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4897,"end":4898}],["%#2",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4890,"end":4891}],["len#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4740,"end":4743}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4746,"end":4747},"2":{"file_hash":[42,57,43,60,190,49,164,194,57
,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4746,"end":4756},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4736,"end":4743},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4804,"end":4805},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4809,"end":4812},"6":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4806,"end":4808},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4800,"end":4840},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4814,"end":4840},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4820,"end":4840},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4814,"end":4840},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4857,"end":4860},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4863,"end":4864},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4861,"end":4862},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4851,"end":4854},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4881,"end":4882},"17":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4885,"end":4888},"18":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4883,"end":4884},"19":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4874,"end":4917},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4890,"end":4891},"22":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4897,"end":4898},"24":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4906,"end":4907},"25":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4910,"end":4911},"26":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4908,"end":4909},"27":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,1
20,5,88,201,108,231,168,248,67,118,176,80,79],"start":4902,"end":4903},"28":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4890,"end":4891},"29":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4897,"end":4898},"30":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4913,"end":4914},"31":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4890,"end":4917},"32":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4874,"end":4917},"33":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4927,"end":4928},"34":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4927,"end":4939}},"is_native":false},"15":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5290,"end":5296},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5297,"end":5304}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5306,"end":5307}],["e#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5331,"end":5332}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5347,"end":5348}]],"returns":[],"locals":[["len#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5369,"end":5372}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5375,"end":5376},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5375,"end":5385},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5369,"end":5372},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5426,"end":5427},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5430,"end":5433},"6":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5428,"end":5429},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5422,"end":5461},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"sta
rt":5435,"end":5461},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5441,"end":5461},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5435,"end":5461},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5472,"end":5473},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5484,"end":5485},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5472,"end":5486},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5503,"end":5504},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5507,"end":5510},"17":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5505,"end":5506},"18":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5496,"end":5573},"19":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5526,"end":5527},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5533,"end":5534},"21":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5536,"end":5539},"22":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5526,"end":5540},"23":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5558,"end":5559},"24":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5562,"end":5563},"25":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5560,"end":5561},"26":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5554,"end":5555},"27":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5496,"end":5573}},"is_native":false},"16":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5812,"end":5823},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5824,"end":5831}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5833,"end":5834}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,1
44,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5858,"end":5859}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5867,"end":5874}],"locals":[["last_idx#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5943,"end":5951}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5894,"end":5895},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5894,"end":5906},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5893,"end":5894},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5885,"end":5929},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5908,"end":5928},"9":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5885,"end":5929},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5954,"end":5955},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5954,"end":5964},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5967,"end":5968},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5965,"end":5966},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5943,"end":5951},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5978,"end":5979},"17":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5985,"end":5986},"18":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5988,"end":5996},"19":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5978,"end":5997},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":6007,"end":6008},"21":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":6007,"end":6019}},"is_native":false},"17":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":249,"end":13981},"type_parameters":[],"parameters":[],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,1
08,231,168,248,67,118,176,80,79],"start":249,"end":13981}},"is_native":false}},"constant_map":{"EINDEX_OUT_OF_BOUNDS":0}} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/source_maps/m.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/source_maps/m.json new file mode 100644 index 0000000000000..2b19308a79b3b --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/source_maps/m.json @@ -0,0 +1 @@ +{"definition_location":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":98,"end":99},"module_name":["0000000000000000000000000000000000000000000000000000000000000000","m"],"struct_map":{"0":{"definition_location":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":231,"end":241},"type_parameters":[],"fields":[{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":257,"end":269},{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":280,"end":290},{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":306,"end":324},{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":340,"end":356},{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":375,"end":391}]},"1":{"definition_location":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":432,"end":444},"type_parameters":[],"fields":[{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":466,"end":471}]}},"enum_map":{"0":{"definition_location":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":114,"end":122},"type_parameters":[],"variants":[[["PositionalVariant",{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":138,"end":165}],[{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":156,"end":159},{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":161,"end":164}]],[["NamedVariant",{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":171,"end":212}],[{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":186,"end":192},{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":199,"end":205}]]]}},"function_map":{"0":{"definition_location":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":485,"end":488},"type_parameters":[],"parameters":[["some_struct#0#0",{"file_hash":[123,47,2,
242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":493,"end":504}],["p#0#0",{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":518,"end":519}]],"returns":[{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":527,"end":537}],"locals":[["named_variant#1#0",{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":605,"end":618}],["pos_variant#1#0",{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":548,"end":559}],["v#1#0",{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":699,"end":700}],["v_struct#1#0",{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":733,"end":741}]],"nops":{},"code_map":{"0":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":590,"end":591},"1":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":593,"end":594},"2":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":562,"end":595},"3":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":548,"end":559},"4":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":662,"end":663},"5":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":681,"end":682},"6":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":621,"end":689},"7":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":605,"end":618},"8":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":721,"end":722},"9":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":703,"end":723},"10":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":699,"end":700},"11":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":784,"end":785},"12":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":762,"end":787},"13":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":744,"end":788},"14":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":733,"end":741},"15":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":822,"end":823},"
16":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":795,"end":819},"18":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":795,"end":823},"19":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":854,"end":865},"20":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":829,"end":851},"22":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":829,"end":865},"23":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":904,"end":917},"24":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":871,"end":901},"26":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":871,"end":917},"27":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":954,"end":955},"28":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":923,"end":951},"30":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":923,"end":955},"31":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":992,"end":1000},"32":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":961,"end":989},"34":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":961,"end":1000},"35":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1007,"end":1018}},"is_native":false},"1":{"definition_location":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1026,"end":1037},"type_parameters":[],"parameters":[],"returns":[{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1041,"end":1051}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1093,"end":1094},"1":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1144,"end":1145},"2":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1147,"end":1148},"3":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1116,"end":1149},"4":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1207,"end":1208},"5":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,2
6,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1210,"end":1211},"6":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1179,"end":1212},"7":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1258,"end":1259},"8":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1240,"end":1260},"9":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1328,"end":1329},"10":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1306,"end":1331},"11":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1288,"end":1332},"12":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1058,"end":1339}},"is_native":false},"2":{"definition_location":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1355,"end":1359},"type_parameters":[],"parameters":[],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1386,"end":1399},"1":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1422,"end":1424},"2":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1405,"end":1425},"4":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":1425,"end":1426}},"is_native":false},"3":{"definition_location":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":81,"end":1428},"type_parameters":[],"parameters":[],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[123,47,2,242,89,17,168,119,38,143,101,6,62,112,40,165,19,85,26,58,91,184,149,65,204,94,41,244,153,66,177,132],"start":81,"end":1428}},"is_native":false}},"constant_map":{}} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/sources/dependencies/MoveStdlib/vector.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/sources/dependencies/MoveStdlib/vector.move new file mode 100644 index 0000000000000..55c1abac34b74 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/sources/dependencies/MoveStdlib/vector.move @@ -0,0 +1,364 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +#[defines_primitive(vector)] +/// A variable-sized container that can hold any type. Indexing is 0-based, and +/// vectors are growable. This module has many native functions. +module std::vector { + /// Allows calling `.to_string()` on a vector of `u8` to get a utf8 `String`. + public use fun std::string::utf8 as vector.to_string; + + /// Allows calling `.try_to_string()` on a vector of `u8` to get a utf8 `String`. 
+ /// This will return `None` if the vector is not valid utf8. + public use fun std::string::try_utf8 as vector.try_to_string; + + /// Allows calling `.to_ascii_string()` on a vector of `u8` to get an `ascii::String`. + public use fun std::ascii::string as vector.to_ascii_string; + + /// Allows calling `.try_to_ascii_string()` on a vector of `u8` to get an + /// `ascii::String`. This will return `None` if the vector is not valid ascii. + public use fun std::ascii::try_string as vector.try_to_ascii_string; + + /// The index into the vector is out of bounds + const EINDEX_OUT_OF_BOUNDS: u64 = 0x20000; + + #[bytecode_instruction] + /// Create an empty vector. + public native fun empty<Element>(): vector<Element>; + + #[bytecode_instruction] + /// Return the length of the vector. + public native fun length<Element>(v: &vector<Element>): u64; + + #[syntax(index)] + #[bytecode_instruction] + /// Acquire an immutable reference to the `i`th element of the vector `v`. + /// Aborts if `i` is out of bounds. + public native fun borrow<Element>(v: &vector<Element>, i: u64): &Element; + + #[bytecode_instruction] + /// Add element `e` to the end of the vector `v`. + public native fun push_back<Element>(v: &mut vector<Element>, e: Element); + + #[syntax(index)] + #[bytecode_instruction] + /// Return a mutable reference to the `i`th element in the vector `v`. + /// Aborts if `i` is out of bounds. + public native fun borrow_mut<Element>(v: &mut vector<Element>, i: u64): &mut Element; + + #[bytecode_instruction] + /// Pop an element from the end of vector `v`. + /// Aborts if `v` is empty. + public native fun pop_back<Element>(v: &mut vector<Element>): Element; + + #[bytecode_instruction] + /// Destroy the vector `v`. + /// Aborts if `v` is not empty. + public native fun destroy_empty<Element>(v: vector<Element>); + + #[bytecode_instruction] + /// Swaps the elements at the `i`th and `j`th indices in the vector `v`. + /// Aborts if `i` or `j` is out of bounds. + public native fun swap<Element>(v: &mut vector<Element>, i: u64, j: u64); + + /// Return a vector of size one containing element `e`. + public fun singleton<Element>(e: Element): vector<Element> { + let mut v = empty(); + v.push_back(e); + v + } + + /// Reverses the order of the elements in the vector `v` in place. + public fun reverse<Element>(v: &mut vector<Element>) { + let len = v.length(); + if (len == 0) return (); + + let mut front_index = 0; + let mut back_index = len - 1; + while (front_index < back_index) { + v.swap(front_index, back_index); + front_index = front_index + 1; + back_index = back_index - 1; + } + } + + /// Pushes all of the elements of the `other` vector into the `lhs` vector. + public fun append<Element>(lhs: &mut vector<Element>, mut other: vector<Element>) { + other.reverse(); + while (!other.is_empty()) lhs.push_back(other.pop_back()); + other.destroy_empty(); + } + + /// Return `true` if the vector `v` has no elements and `false` otherwise. + public fun is_empty<Element>(v: &vector<Element>): bool { + v.length() == 0 + } + + /// Return true if `e` is in the vector `v`. + /// Otherwise, returns false. + public fun contains<Element>(v: &vector<Element>, e: &Element): bool { + let mut i = 0; + let len = v.length(); + while (i < len) { + if (&v[i] == e) return true; + i = i + 1; + }; + false + } + + /// Return `(true, i)` if `e` is in the vector `v` at index `i`. + /// Otherwise, returns `(false, 0)`. + public fun index_of<Element>(v: &vector<Element>, e: &Element): (bool, u64) { + let mut i = 0; + let len = v.length(); + while (i < len) { + if (&v[i] == e) return (true, i); + i = i + 1; + }; + (false, 0) + } + + /// Remove the `i`th element of the vector `v`, shifting all subsequent elements. + /// This is O(n) and preserves ordering of elements in the vector.
+ /// Aborts if `i` is out of bounds. + public fun remove<Element>(v: &mut vector<Element>, mut i: u64): Element { + let mut len = v.length(); + // i out of bounds; abort + if (i >= len) abort EINDEX_OUT_OF_BOUNDS; + + len = len - 1; + while (i < len) v.swap(i, { i = i + 1; i }); + v.pop_back() + } + + /// Insert `e` at position `i` in the vector `v`. + /// If `i` is in bounds, this shifts the old `v[i]` and all subsequent elements to the right. + /// If `i == v.length()`, this adds `e` to the end of the vector. + /// This is O(n) and preserves ordering of elements in the vector. + /// Aborts if `i > v.length()` + public fun insert<Element>(v: &mut vector<Element>, e: Element, mut i: u64) { + let len = v.length(); + // i too big abort + if (i > len) abort EINDEX_OUT_OF_BOUNDS; + + v.push_back(e); + while (i < len) { + v.swap(i, len); + i = i + 1 + } + } + + /// Swap the `i`th element of the vector `v` with the last element and then pop the vector. + /// This is O(1), but does not preserve ordering of elements in the vector. + /// Aborts if `i` is out of bounds. + public fun swap_remove<Element>(v: &mut vector<Element>, i: u64): Element { + assert!(!v.is_empty(), EINDEX_OUT_OF_BOUNDS); + let last_idx = v.length() - 1; + v.swap(i, last_idx); + v.pop_back() + } + + // === Macros === + + /// Create a vector of length `n` by calling the function `f` on each index. + public macro fun tabulate<$T>($n: u64, $f: |u64| -> $T): vector<$T> { + let mut v = vector[]; + let n = $n; + n.do!(|i| v.push_back($f(i))); + v + } + + /// Destroy the vector `v` by calling `f` on each element and then destroying the vector. + /// Does not preserve the order of elements in the vector (starts from the end of the vector). + public macro fun destroy<$T>($v: vector<$T>, $f: |$T|) { + let mut v = $v; + while (!v.is_empty()) $f(v.pop_back()); + v.destroy_empty(); + } + + /// Destroy the vector `v` by calling `f` on each element and then destroying the vector. + /// Preserves the order of elements in the vector. + public macro fun do<$T>($v: vector<$T>, $f: |$T|) { + let mut v = $v; + v.reverse(); + while (!v.is_empty()) $f(v.pop_back()); + v.destroy_empty(); + } + + /// Perform an action `f` on each element of the vector `v`. The vector is not modified. + public macro fun do_ref<$T>($v: &vector<$T>, $f: |&$T|) { + let v = $v; + v.length().do!(|i| $f(&v[i])) + } + + /// Perform an action `f` on each element of the vector `v`. + /// The function `f` takes a mutable reference to the element. + public macro fun do_mut<$T>($v: &mut vector<$T>, $f: |&mut $T|) { + let v = $v; + v.length().do!(|i| $f(&mut v[i])) + } + + /// Map the vector `v` to a new vector by applying the function `f` to each element. + /// Preserves the order of elements in the vector, first is called first. + public macro fun map<$T, $U>($v: vector<$T>, $f: |$T| -> $U): vector<$U> { + let v = $v; + let mut r = vector[]; + v.do!(|e| r.push_back($f(e))); + r + } + + /// Map the vector `v` to a new vector by applying the function `f` to each element. + /// Preserves the order of elements in the vector, first is called first. + public macro fun map_ref<$T, $U>($v: &vector<$T>, $f: |&$T| -> $U): vector<$U> { + let v = $v; + let mut r = vector[]; + v.do_ref!(|e| r.push_back($f(e))); + r + } + + /// Filter the vector `v` by applying the function `f` to each element. + /// Return a new vector containing only the elements for which `f` returns `true`.
+ public macro fun filter<$T: drop>($v: vector<$T>, $f: |&$T| -> bool): vector<$T> { + let v = $v; + let mut r = vector[]; + v.do!(|e| if ($f(&e)) r.push_back(e)); + r + } + + /// Split the vector `v` into two vectors by applying the function `f` to each element. + /// Return a tuple containing two vectors: the first containing the elements for which `f` returns `true`, + /// and the second containing the elements for which `f` returns `false`. + public macro fun partition<$T>($v: vector<$T>, $f: |&$T| -> bool): (vector<$T>, vector<$T>) { + let v = $v; + let mut r1 = vector[]; + let mut r2 = vector[]; + v.do!(|e| if ($f(&e)) r1.push_back(e) else r2.push_back(e)); + (r1, r2) + } + + /// Finds the index of the first element in the vector `v` that satisfies the predicate `f`. + /// Returns `some(index)` if such an element is found, otherwise `none()`. + public macro fun find_index<$T>($v: &vector<$T>, $f: |&$T| -> bool): Option<u64> { + let v = $v; + 'find_index: { + v.length().do!(|i| if ($f(&v[i])) return 'find_index option::some(i)); + option::none() + } + } + + /// Count how many elements in the vector `v` satisfy the predicate `f`. + public macro fun count<$T>($v: &vector<$T>, $f: |&$T| -> bool): u64 { + let v = $v; + let mut count = 0; + v.do_ref!(|e| if ($f(e)) count = count + 1); + count + } + + /// Reduce the vector `v` to a single value by applying the function `f` to each element. + /// Similar to `fold_left` in Rust and `reduce` in Python and JavaScript. + public macro fun fold<$T, $Acc>($v: vector<$T>, $init: $Acc, $f: |$Acc, $T| -> $Acc): $Acc { + let v = $v; + let mut acc = $init; + v.do!(|e| acc = $f(acc, e)); + acc + } + + /// Whether any element in the vector `v` satisfies the predicate `f`. + /// If the vector is empty, returns `false`. + public macro fun any<$T>($v: &vector<$T>, $f: |&$T| -> bool): bool { + let v = $v; + 'any: { + v.do_ref!(|e| if ($f(e)) return 'any true); + false + } + } + + /// Whether all elements in the vector `v` satisfy the predicate `f`. + /// If the vector is empty, returns `true`. + public macro fun all<$T>($v: &vector<$T>, $f: |&$T| -> bool): bool { + let v = $v; + 'all: { + v.do_ref!(|e| if (!$f(e)) return 'all false); + true + } + } + + /// Destroys two vectors `v1` and `v2` by calling `f` on each pair of elements. + /// Aborts if the vectors are not of the same length. + /// The order of elements in the vectors is preserved. + public macro fun zip_do<$T1, $T2>($v1: vector<$T1>, $v2: vector<$T2>, $f: |$T1, $T2|) { + let v1 = $v1; + let mut v2 = $v2; + v2.reverse(); + let len = v1.length(); + assert!(len == v2.length()); + v1.do!(|el1| $f(el1, v2.pop_back())); + } + + /// Destroys two vectors `v1` and `v2` by calling `f` on each pair of elements. + /// Aborts if the vectors are not of the same length. + /// Starts from the end of the vectors. + public macro fun zip_do_reverse<$T1, $T2>($v1: vector<$T1>, $v2: vector<$T2>, $f: |$T1, $T2|) { + let v1 = $v1; + let mut v2 = $v2; + let len = v1.length(); + assert!(len == v2.length()); + v1.destroy!(|el1| $f(el1, v2.pop_back())); + } + + /// Iterate through `v1` and `v2` and apply the function `f` to references of each pair of + /// elements. The vectors are not modified. + /// Aborts if the vectors are not of the same length. + /// The order of elements in the vectors is preserved.
+ public macro fun zip_do_ref<$T1, $T2>($v1: &vector<$T1>, $v2: &vector<$T2>, $f: |&$T1, &$T2|) { + let v1 = $v1; + let v2 = $v2; + let len = v1.length(); + assert!(len == v2.length()); + len.do!(|i| $f(&v1[i], &v2[i])); + } + + /// Iterate through `v1` and `v2` and apply the function `f` to mutable references of each pair + /// of elements. The vectors may be modified. + /// Aborts if the vectors are not of the same length. + /// The order of elements in the vectors is preserved. + public macro fun zip_do_mut<$T1, $T2>( + $v1: &mut vector<$T1>, + $v2: &mut vector<$T2>, + $f: |&mut $T1, &mut $T2|, + ) { + let v1 = $v1; + let v2 = $v2; + let len = v1.length(); + assert!(len == v2.length()); + len.do!(|i| $f(&mut v1[i], &mut v2[i])); + } + + /// Destroys two vectors `v1` and `v2` by applying the function `f` to each pair of elements. + /// The returned values are collected into a new vector. + /// Aborts if the vectors are not of the same length. + /// The order of elements in the vectors is preserved. + public macro fun zip_map<$T1, $T2, $U>( + $v1: vector<$T1>, + $v2: vector<$T2>, + $f: |$T1, $T2| -> $U, + ): vector<$U> { + let mut r = vector[]; + zip_do!($v1, $v2, |el1, el2| r.push_back($f(el1, el2))); + r + } + + /// Iterate through `v1` and `v2` and apply the function `f` to references of each pair of + /// elements. The returned values are collected into a new vector. + /// Aborts if the vectors are not of the same length. + /// The order of elements in the vectors is preserved. + public macro fun zip_map_ref<$T1, $T2, $U>( + $v1: &vector<$T1>, + $v2: &vector<$T2>, + $f: |&$T1, &$T2| -> $U, + ): vector<$U> { + let mut r = vector[]; + zip_do_ref!($v1, $v2, |el1, el2| r.push_back($f(el1, el2))); + r + } +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/sources/m.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/sources/m.move new file mode 100644 index 0000000000000..193501e8267d0 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/build/compound/sources/m.move @@ -0,0 +1,54 @@ +// Test tracking values of compound type variables +// (structs, enums, vectors). 
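For illustration only, and not part of the patch above: a minimal, hypothetical sketch of how the positional functions (`insert`, `remove`, `swap_remove`) and the new `std::vector` macros could be exercised from a Move 2024 test. The module name/address and the test values are placeholders introduced here, not taken from this change.

#[test_only]
module examples::vector_usage_sketch;

#[test]
fun vector_api_sketch() {
    // Positional operations: insert/remove preserve order (O(n)), swap_remove does not (O(1)).
    let mut v = vector[1, 2, 3];
    v.insert(10, 1);            // v is now [1, 10, 2, 3]
    let _ten = v.remove(1);     // removes 10, order preserved
    let _one = v.swap_remove(0); // swaps index 0 with the last element, then pops

    // Macros: map!/filter!/fold! consume or borrow the vector as documented above.
    let base = vector[1, 2, 3];
    let doubled = base.map!(|e| e * 2);           // [2, 4, 6]
    let evens = doubled.filter!(|e| *e % 2 == 0); // [2, 4, 6]
    let sum = evens.fold!(0, |acc, e| acc + e);   // 12
    assert!(sum == 12);

    // zip_map! pairs up two vectors of equal length.
    let a = vector[1u64, 2];
    let b = vector[10u64, 20];
    let pair_sums = a.zip_map!(b, |x, y| x + y);
    assert!(pair_sums == vector[11, 22]);
}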
+module compound::m; + +public enum SomeEnum has drop { + PositionalVariant(u64, u64), + NamedVariant { field1: u64, field2: u64 }, +} + +public struct SomeStruct has drop { + simple_field: u64, + enum_field: SomeEnum, + another_enum_field: SomeEnum, + vec_simple_field: vector<u64>, + vec_struct_field: vector<SimpleStruct>, +} + +public struct SimpleStruct has drop, copy { + field: u64, +} + +fun foo(mut some_struct: SomeStruct, p: u64): SomeStruct { + let pos_variant = SomeEnum::PositionalVariant(p, p); + let named_variant = SomeEnum::NamedVariant { + field1: p, + field2: p, + }; + let v = vector::singleton(p); + let v_struct = vector::singleton(SimpleStruct { field: p }); + + some_struct.simple_field = p; + some_struct.enum_field = pos_variant; + some_struct.another_enum_field = named_variant; + some_struct.vec_simple_field = v; + some_struct.vec_struct_field = v_struct; + + some_struct +} + +fun some_struct(): SomeStruct { + SomeStruct { + simple_field: 0, + enum_field: SomeEnum::PositionalVariant(0, 0), + another_enum_field: SomeEnum::PositionalVariant(0, 0), + vec_simple_field: vector::singleton(0), + vec_struct_field: vector::singleton(SimpleStruct { field: 0 }), + } +} + +#[test] +fun test() { + let some_struct = some_struct(); + foo(some_struct, 42); +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/sources/m.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/sources/m.move new file mode 100644 index 0000000000000..193501e8267d0 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/sources/m.move @@ -0,0 +1,54 @@ +// Test tracking values of compound type variables +// (structs, enums, vectors). +module compound::m; + +public enum SomeEnum has drop { + PositionalVariant(u64, u64), + NamedVariant { field1: u64, field2: u64 }, +} + +public struct SomeStruct has drop { + simple_field: u64, + enum_field: SomeEnum, + another_enum_field: SomeEnum, + vec_simple_field: vector<u64>, + vec_struct_field: vector<SimpleStruct>, +} + +public struct SimpleStruct has drop, copy { + field: u64, +} + +fun foo(mut some_struct: SomeStruct, p: u64): SomeStruct { + let pos_variant = SomeEnum::PositionalVariant(p, p); + let named_variant = SomeEnum::NamedVariant { + field1: p, + field2: p, + }; + let v = vector::singleton(p); + let v_struct = vector::singleton(SimpleStruct { field: p }); + + some_struct.simple_field = p; + some_struct.enum_field = pos_variant; + some_struct.another_enum_field = named_variant; + some_struct.vec_simple_field = v; + some_struct.vec_struct_field = v_struct; + + some_struct +} + +fun some_struct(): SomeStruct { + SomeStruct { + simple_field: 0, + enum_field: SomeEnum::PositionalVariant(0, 0), + another_enum_field: SomeEnum::PositionalVariant(0, 0), + vec_simple_field: vector::singleton(0), + vec_struct_field: vector::singleton(SimpleStruct { field: 0 }), + } +} + +#[test] +fun test() { + let some_struct = some_struct(); + foo(some_struct, 42); +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/test.exp b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/test.exp new file mode 100644 index 0000000000000..f04773e0e1867 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/test.exp @@ -0,0 +1,55 @@ +current frame stack: + function: test (line 53) + scope 0 : + function: foo (line 23) + scope 0 : + some_struct : (0x0::m::SomeStruct) { + simple_field : 0 + enum_field : (0x0::m::SomeEnum::PositionalVariant) { + pos0 : 0 + pos1 : 
0 + } + another_enum_field : (0x0::m::SomeEnum::PositionalVariant) { + pos0 : 0 + pos1 : 0 + } + vec_simple_field : [ + 0 : 0 + ] + vec_struct_field : [ + 0 : (0x0::m::SimpleStruct) { + field : 0 + } + ] + } + type: 0x0::m::SomeStruct + + p : 42 + type: u64 + +current frame stack: + function: test (line 53) + scope 0 : + function: foo (line 37) + scope 0 : + some_struct : (0x0::m::SomeStruct) { + simple_field : 42 + enum_field : (0x0::m::SomeEnum::PositionalVariant) { + pos0 : 42 + pos1 : 42 + } + another_enum_field : (0x0::m::SomeEnum::NamedVariant) { + field1 : 42 + field2 : 42 + } + vec_simple_field : [ + 0 : 42 + ] + vec_struct_field : [ + 0 : (0x0::m::SimpleStruct) { + field : 42 + } + ] + } + type: 0x0::m::SomeStruct + diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/trace.spec.js b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/trace.spec.js new file mode 100644 index 0000000000000..9837778eaf084 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/trace.spec.js @@ -0,0 +1,23 @@ +let action = (runtime) => { + let res = ''; + // step over a function creating a complex struct + runtime.step(true); + // step into a function + runtime.step(false); + res += runtime.toString(); + // advance until all struct fields are updated + runtime.step(true); + runtime.step(true); + runtime.step(true); + runtime.step(true); + runtime.step(true); + runtime.step(true); + runtime.step(true); + runtime.step(true); + runtime.step(true); + runtime.step(true); + runtime.step(true); + res += runtime.toString(); + return res; +}; +run_spec(__dirname, action); diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/traces/compound__m__test.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/traces/compound__m__test.json new file mode 100644 index 0000000000000..1b4e077a6155d --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/compound/traces/compound__m__test.json @@ -0,0 +1 @@ 
+{"version":1,"events":[{"OpenFrame":{"frame":{"frame_id":0,"function_name":"test","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":2,"type_instantiation":[],"parameters":[],"return_types":[],"locals_types":[],"is_native":false},"gas_left":1000000000}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":1000000000,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":2,"function_name":"some_struct","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":1,"type_instantiation":[],"parameters":[],"return_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeStruct","type_args":[]}},"ref_type":null}],"locals_types":[],"is_native":false},"gas_left":1000000000}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999996,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999993,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999990,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999986,"instruction":"PACK_VARIANT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}}}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999983,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999980,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999976,"instruction":"PACK_VARIANT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}}}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999973,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999973,"instruction":"CALL_GENERIC"}},{"OpenFrame":{"frame":{"frame_id":24,"function_name":"singleton","module":{"address":"0000000000000000000000000000000000000000000000000000000000000001","name":"vector"},"binary_member_index":8,"type_instantiation":["u64"],"parameters":[{"RuntimeValue":{"value":0}}],"return_types":[{"type_":{"vector":"u64"},"ref_type":null}],"locals_types":[{"type_":"u64","ref_type":null},{"type_":{"vector":"u64"},"ref_type":null}],"is_native":false},"gas_left":999999973}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999962,"instruction":"VEC_PACK"}},{"Effect":{"Push":{"RuntimeValue":{"value":[]}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999961,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":[]}}}},{"Effect":{"Write":{"location":{"Local":[24,1]},"root_value_after_write":{"RuntimeValue":{"value":[]}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999951,"instruction":"MUT_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[24,1]},"root_value_read":{"RuntimeValue":{"value":[]}},"moved
":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[24,1]},"snapshot":[]}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999933,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[24,0]},"root_value_read":{"RuntimeValue":{"value":0}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999932,"instruction":"VEC_PUSH_BACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[24,1]},"snapshot":[]}}}},{"Effect":{"Write":{"location":{"Local":[24,1]},"root_value_after_write":{"RuntimeValue":{"value":[0]}}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999922,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[24,1]},"root_value_read":{"RuntimeValue":{"value":[0]}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":[0]}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999921,"instruction":"RET"}},{"CloseFrame":{"frame_id":24,"return_":[{"RuntimeValue":{"value":[0]}}],"gas_left":999999921}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999999918,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999999914,"instruction":"PACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x0::m::SimpleStruct","fields":{"field":0}}}}}},{"Instruction":{"type_parameters":[],"pc":11,"gas_left":999999914,"instruction":"CALL_GENERIC"}},{"OpenFrame":{"frame":{"frame_id":51,"function_name":"singleton","module":{"address":"0000000000000000000000000000000000000000000000000000000000000001","name":"vector"},"binary_member_index":8,"type_instantiation":[{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SimpleStruct","type_args":[]}}],"parameters":[{"RuntimeValue":{"value":{"type":"0x0::m::SimpleStruct","fields":{"field":0}}}}],"return_types":[{"type_":{"vector":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SimpleStruct","type_args":[]}}},"ref_type":null}],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SimpleStruct","type_args":[]}},"ref_type":null},{"type_":{"vector":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SimpleStruct","type_args":[]}}},"ref_type":null}],"is_native":false},"gas_left":999999914}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999903,"instruction":"VEC_PACK"}},{"Effect":{"Push":{"RuntimeValue":{"value":[]}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999902,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":[]}}}},{"Effect":{"Write":{"location":{"Local":[51,1]},"root_value_after_write":{"RuntimeValue":{"value":[]}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999892,"instruction":"MUT_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[51,1]},"root_value_read":{"RuntimeValue":{"value":[]}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[51,1]},"snapshot":[]}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999872,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[51,0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SimpleStruct","fields":{"field":0}}}},"moved":true}}},{"Effect":
{"Push":{"RuntimeValue":{"value":{"type":"0x0::m::SimpleStruct","fields":{"field":0}}}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999871,"instruction":"VEC_PUSH_BACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x0::m::SimpleStruct","fields":{"field":0}}}}}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[51,1]},"snapshot":[]}}}},{"Effect":{"Write":{"location":{"Local":[51,1]},"root_value_after_write":{"RuntimeValue":{"value":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999849,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[51,1]},"root_value_read":{"RuntimeValue":{"value":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999848,"instruction":"RET"}},{"CloseFrame":{"frame_id":51,"return_":[{"RuntimeValue":{"value":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}],"gas_left":999999848}},{"Instruction":{"type_parameters":[],"pc":12,"gas_left":999999844,"instruction":"PACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":[0]}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}}}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}}}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"simple_field":0,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Instruction":{"type_parameters":[],"pc":13,"gas_left":999999843,"instruction":"RET"}},{"CloseFrame":{"frame_id":2,"return_":[{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"simple_field":0,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}],"gas_left":999999843}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999840,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999840,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":84,"function_name":"foo","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"simple_field":0,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_fi
eld":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}},{"RuntimeValue":{"value":42}}],"return_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeStruct","type_args":[]}},"ref_type":null}],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeStruct","type_args":[]}},"ref_type":null},{"type_":"u64","ref_type":null},{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeEnum","type_args":[]}},"ref_type":null},{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeEnum","type_args":[]}},"ref_type":null},{"type_":{"vector":"u64"},"ref_type":null},{"type_":{"vector":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SimpleStruct","type_args":[]}}},"ref_type":null}],"is_native":false},"gas_left":999999840}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999821,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[84,1]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999803,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[84,1]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999799,"instruction":"PACK_VARIANT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}}}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999798,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}}}}}},{"Effect":{"Write":{"location":{"Local":[84,3]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}}}}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999780,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[84,1]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999762,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[84,1]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999758,"instruction":"PACK_VARIANT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}}}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999757,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}}}}}},{"Effect":{"Write":{"location":{"Local":[84,2]},"root_value
_after_write":{"RuntimeValue":{"value":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}}}}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999739,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[84,1]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999999739,"instruction":"CALL_GENERIC"}},{"OpenFrame":{"frame":{"frame_id":115,"function_name":"singleton","module":{"address":"0000000000000000000000000000000000000000000000000000000000000001","name":"vector"},"binary_member_index":8,"type_instantiation":["u64"],"parameters":[{"RuntimeValue":{"value":42}}],"return_types":[{"type_":{"vector":"u64"},"ref_type":null}],"locals_types":[{"type_":"u64","ref_type":null},{"type_":{"vector":"u64"},"ref_type":null}],"is_native":false},"gas_left":999999739}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999728,"instruction":"VEC_PACK"}},{"Effect":{"Push":{"RuntimeValue":{"value":[]}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999727,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":[]}}}},{"Effect":{"Write":{"location":{"Local":[115,1]},"root_value_after_write":{"RuntimeValue":{"value":[]}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999717,"instruction":"MUT_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[115,1]},"root_value_read":{"RuntimeValue":{"value":[]}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[115,1]},"snapshot":[]}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999699,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[115,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999698,"instruction":"VEC_PUSH_BACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[115,1]},"snapshot":[]}}}},{"Effect":{"Write":{"location":{"Local":[115,1]},"root_value_after_write":{"RuntimeValue":{"value":[42]}}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999688,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[115,1]},"root_value_read":{"RuntimeValue":{"value":[42]}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":[42]}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999687,"instruction":"RET"}},{"CloseFrame":{"frame_id":115,"return_":[{"RuntimeValue":{"value":[42]}}],"gas_left":999999687}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999999686,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":[42]}}}},{"Effect":{"Write":{"location":{"Local":[84,4]},"root_value_after_write":{"RuntimeValue":{"value":[42]}}}}},{"Instruction":{"type_parameters":[],"pc":11,"gas_left":999999668,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[84,1]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":12,"gas_left":999999664,"instruction":"PACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x0::m::SimpleStruct","fields":{"field":42}}}}}},{"Instruction":{"type_parameters":[],"pc":13,"gas_left":999999664,"instruction":"CALL_GENERIC"}},{"OpenFrame":{"frame":{"frame_id":146,"function_
name":"singleton","module":{"address":"0000000000000000000000000000000000000000000000000000000000000001","name":"vector"},"binary_member_index":8,"type_instantiation":[{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SimpleStruct","type_args":[]}}],"parameters":[{"RuntimeValue":{"value":{"type":"0x0::m::SimpleStruct","fields":{"field":42}}}}],"return_types":[{"type_":{"vector":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SimpleStruct","type_args":[]}}},"ref_type":null}],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SimpleStruct","type_args":[]}},"ref_type":null},{"type_":{"vector":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SimpleStruct","type_args":[]}}},"ref_type":null}],"is_native":false},"gas_left":999999664}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999653,"instruction":"VEC_PACK"}},{"Effect":{"Push":{"RuntimeValue":{"value":[]}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999652,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":[]}}}},{"Effect":{"Write":{"location":{"Local":[146,1]},"root_value_after_write":{"RuntimeValue":{"value":[]}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999642,"instruction":"MUT_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[146,1]},"root_value_read":{"RuntimeValue":{"value":[]}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[146,1]},"snapshot":[]}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999622,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[146,0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SimpleStruct","fields":{"field":42}}}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x0::m::SimpleStruct","fields":{"field":42}}}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999621,"instruction":"VEC_PUSH_BACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x0::m::SimpleStruct","fields":{"field":42}}}}}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[146,1]},"snapshot":[]}}}},{"Effect":{"Write":{"location":{"Local":[146,1]},"root_value_after_write":{"RuntimeValue":{"value":[{"type":"0x0::m::SimpleStruct","fields":{"field":42}}]}}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999599,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[146,1]},"root_value_read":{"RuntimeValue":{"value":[{"type":"0x0::m::SimpleStruct","fields":{"field":42}}]}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":[{"type":"0x0::m::SimpleStruct","fields":{"field":42}}]}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999598,"instruction":"RET"}},{"CloseFrame":{"frame_id":146,"return_":[{"RuntimeValue":{"value":[{"type":"0x0::m::SimpleStruct","fields":{"field":42}}]}}],"gas_left":999999598}},{"Instruction":{"type_parameters":[],"pc":14,"gas_left":999999597,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":[{"type":"0x0::m::SimpleStruct","fields":{"field":42}}]}}}},{"Effect":{"Write":{"location":{"Local":[84,5]},"root_value_after_write":{"RuntimeValue":{"value":[{"type":"0x0::m::SimpleStruct","fields":{"field":42}}]}}}}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999999579,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[84,1]},"roo
t_value_read":{"RuntimeValue":{"value":42}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":16,"gas_left":999999569,"instruction":"MUT_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[84,0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"simple_field":0,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[84,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"simple_field":0,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Instruction":{"type_parameters":[],"pc":17,"gas_left":999999559,"instruction":"MUT_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[84,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"simple_field":0,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Local":[84,0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"simple_field":0,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Instruction":{"type_parameters":[],"pc":18,"gas_left":999999541,"instruction":"WRITE_REF"}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Local":[84,0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"simple_field":0,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Write":{"location":{"Indexed":[{"Local":[84,0]},0]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}}},{"Instruction":{"type_parameters":[],"pc":19,"gas_left":999999505,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"
Local":[84,3]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}}}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}}}}}},{"Instruction":{"type_parameters":[],"pc":20,"gas_left":999999495,"instruction":"MUT_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[84,0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[84,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Instruction":{"type_parameters":[],"pc":21,"gas_left":999999485,"instruction":"MUT_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[84,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Local":[84,0]},1]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Instruction":{"type_parameters":[],"pc":22,"gas_left":999999449,"instruction":"WRITE_REF"}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Local":[84,0]},1]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}}}}}},{"Effect":{"Write":{"location":{"Indexed":[{"Local":[84,0]},1]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_en
um_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}}},{"Instruction":{"type_parameters":[],"pc":23,"gas_left":999999413,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[84,2]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}}}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}}}}}},{"Instruction":{"type_parameters":[],"pc":24,"gas_left":999999403,"instruction":"MUT_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[84,0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[84,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Instruction":{"type_parameters":[],"pc":25,"gas_left":999999393,"instruction":"MUT_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[84,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Local":[84,0]},2]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Instruction":{"type_parameters":[],"pc":26,"gas_left":999999357,"instruction":"WRITE_REF"}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Local":[84,0]},2]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":0,"pos1":0}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","varian
t_tag":1,"fields":{"field1":42,"field2":42}}}}}},{"Effect":{"Write":{"location":{"Indexed":[{"Local":[84,0]},2]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}}},{"Instruction":{"type_parameters":[],"pc":27,"gas_left":999999347,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[84,4]},"root_value_read":{"RuntimeValue":{"value":[42]}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":[42]}}}},{"Instruction":{"type_parameters":[],"pc":28,"gas_left":999999337,"instruction":"MUT_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[84,0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[84,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Instruction":{"type_parameters":[],"pc":29,"gas_left":999999327,"instruction":"MUT_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[84,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Local":[84,0]},3]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Instruction":{"type_parameters":[],"pc":30,"gas_left":999999317,"instruction":"WRITE_REF"}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Local":[84,0]},3]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}},"vec_simple_field":[0],"vec_struct_field":[{"type":"0x0::
m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":[42]}}}},{"Effect":{"Write":{"location":{"Indexed":[{"Local":[84,0]},3]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}},"vec_simple_field":[42],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}}},{"Instruction":{"type_parameters":[],"pc":31,"gas_left":999999295,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[84,5]},"root_value_read":{"RuntimeValue":{"value":[{"type":"0x0::m::SimpleStruct","fields":{"field":42}}]}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":[{"type":"0x0::m::SimpleStruct","fields":{"field":42}}]}}}},{"Instruction":{"type_parameters":[],"pc":32,"gas_left":999999285,"instruction":"MUT_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[84,0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}},"vec_simple_field":[42],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[84,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}},"vec_simple_field":[42],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Instruction":{"type_parameters":[],"pc":33,"gas_left":999999275,"instruction":"MUT_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[84,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}},"vec_simple_field":[42],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Local":[84,0]},4]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}},"vec_simple_field":[42],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Instruction":{"type_parameters":[],"pc":34,"gas_left":999999253,"instruction":"WRITE_REF"}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Local":[84,0]},4]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x
0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}},"vec_simple_field":[42],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":0}}]}}}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":[{"type":"0x0::m::SimpleStruct","fields":{"field":42}}]}}}},{"Effect":{"Write":{"location":{"Indexed":[{"Local":[84,0]},4]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}},"vec_simple_field":[42],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":42}}]}}}}}}},{"Instruction":{"type_parameters":[],"pc":35,"gas_left":999999137,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[84,0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}},"vec_simple_field":[42],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":42}}]}}}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}},"vec_simple_field":[42],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":42}}]}}}}}},{"Instruction":{"type_parameters":[],"pc":36,"gas_left":999999136,"instruction":"RET"}},{"CloseFrame":{"frame_id":84,"return_":[{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}},"vec_simple_field":[42],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":42}}]}}}}],"gas_left":999999136}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999135,"instruction":"POP"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"simple_field":42,"enum_field":{"type":"0x0::m::SomeEnum","variant_name":"PositionalVariant","variant_tag":0,"fields":{"pos0":42,"pos1":42}},"another_enum_field":{"type":"0x0::m::SomeEnum","variant_name":"NamedVariant","variant_tag":1,"fields":{"field1":42,"field2":42}},"vec_simple_field":[42],"vec_struct_field":[{"type":"0x0::m::SimpleStruct","fields":{"field":42}}]}}}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999134,"instruction":"RET"}},{"CloseFrame":{"frame_id":0,"return_":[],"gas_left":999999134}}]} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/Move.toml b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/Move.toml new file mode 100644 index 0000000000000..f8a896e2210ea --- /dev/null +++ 
b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/Move.toml @@ -0,0 +1,10 @@ +[package] +name = "global_loc" +edition = "2024.beta" + +[dependencies] +Sui = { git = "https://github.com/MystenLabs/sui.git", subdir = "crates/sui-framework/packages/sui-framework", rev = "framework/mainnet" } + +[addresses] +global_loc = "0x0" +Sui = "0x2" diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/bytecode_modules/m.mv b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/bytecode_modules/m.mv new file mode 100644 index 0000000000000..d24eb56fae5a6 Binary files /dev/null and b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/bytecode_modules/m.mv differ diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/source_maps/dependencies/MoveStdlib/bcs.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/source_maps/dependencies/MoveStdlib/bcs.json new file mode 100644 index 0000000000000..569ac0491d84c --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/source_maps/dependencies/MoveStdlib/bcs.json @@ -0,0 +1 @@ +{"definition_location":{"file_hash":[34,201,103,208,120,108,208,171,127,162,154,113,96,186,51,169,173,216,199,217,88,54,128,150,101,140,27,7,37,201,47,24],"start":395,"end":398},"module_name":["0000000000000000000000000000000000000000000000000000000000000001","bcs"],"struct_map":{},"enum_map":{},"function_map":{"0":{"definition_location":{"file_hash":[34,201,103,208,120,108,208,171,127,162,154,113,96,186,51,169,173,216,199,217,88,54,128,150,101,140,27,7,37,201,47,24],"start":510,"end":518},"type_parameters":[["MoveValue",{"file_hash":[34,201,103,208,120,108,208,171,127,162,154,113,96,186,51,169,173,216,199,217,88,54,128,150,101,140,27,7,37,201,47,24],"start":519,"end":528}]],"parameters":[["v#0#0",{"file_hash":[34,201,103,208,120,108,208,171,127,162,154,113,96,186,51,169,173,216,199,217,88,54,128,150,101,140,27,7,37,201,47,24],"start":530,"end":531}]],"returns":[{"file_hash":[34,201,103,208,120,108,208,171,127,162,154,113,96,186,51,169,173,216,199,217,88,54,128,150,101,140,27,7,37,201,47,24],"start":546,"end":556}],"locals":[],"nops":{},"code_map":{},"is_native":true},"1":{"definition_location":{"file_hash":[34,201,103,208,120,108,208,171,127,162,154,113,96,186,51,169,173,216,199,217,88,54,128,150,101,140,27,7,37,201,47,24],"start":383,"end":557},"type_parameters":[],"parameters":[],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[34,201,103,208,120,108,208,171,127,162,154,113,96,186,51,169,173,216,199,217,88,54,128,150,101,140,27,7,37,201,47,24],"start":383,"end":557}},"is_native":false}},"constant_map":{}} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/source_maps/dependencies/Sui/object.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/source_maps/dependencies/Sui/object.json new file mode 100644 index 0000000000000..34fa8e3bae45c --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/source_maps/dependencies/Sui/object.json @@ -0,0 +1 @@ 
+{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":114,"end":120},"module_name":["0000000000000000000000000000000000000000000000000000000000000002","object"],"struct_map":{"0":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":1986,"end":1988},"type_parameters":[],"fields":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":2345,"end":2350}]},"1":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":2882,"end":2885},"type_parameters":[],"fields":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":2902,"end":2904}]}},"enum_map":{},"function_map":{"0":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":2971,"end":2982},"type_parameters":[],"parameters":[["id#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":2983,"end":2985}]],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":2993,"end":3003}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3025,"end":3027},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3024,"end":3033},"2":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3010,"end":3034}},"is_native":false},"1":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3096,"end":3109},"type_parameters":[],"parameters":[["id#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3110,"end":3112}]],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3120,"end":3127}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3134,"end":3136},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3134,"end":3142}},"is_native":false},"2":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3190,"end":3203},"type_parameters":[],"parameters":[["bytes#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3204,"end":3209}]],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94
,29,103,80,209,55,36,30,13,204],"start":3224,"end":3226}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3253,"end":3258},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3233,"end":3259},"2":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3233,"end":3267}},"is_native":false},"3":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3316,"end":3331},"type_parameters":[],"parameters":[["bytes#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3332,"end":3337}]],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3349,"end":3351}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3363,"end":3368},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3358,"end":3370}},"is_native":false},"4":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3539,"end":3555},"type_parameters":[],"parameters":[["ctx#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3556,"end":3559}]],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3574,"end":3577}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3592,"end":3595},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3592,"end":3604},"2":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3608,"end":3612},"3":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3605,"end":3607},"4":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3584,"end":3632},"6":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3614,"end":3631},"7":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3584,"end":3632},"8":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3668,"end":3694},"9":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3656,"end":3696},"10":{"file_hash":[103,
169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3638,"end":3703}},"is_native":false},"5":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3832,"end":3837},"type_parameters":[],"parameters":[],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3841,"end":3844}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3881,"end":3900},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3869,"end":3902},"2":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":3851,"end":3909}},"is_native":false},"6":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4065,"end":4084},"type_parameters":[],"parameters":[],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4088,"end":4091}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4128,"end":4154},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4116,"end":4156},"2":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4098,"end":4163}},"is_native":false},"7":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4294,"end":4310},"type_parameters":[],"parameters":[],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4314,"end":4317}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4354,"end":4367},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4342,"end":4369},"2":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4324,"end":4376}},"is_native":false},"8":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4512,"end":4535},"type_parameters":[],"parameters":[],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4539,"end":4542}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4579,"end":4602},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,13
6,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4567,"end":4604},"2":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4549,"end":4611}},"is_native":false},"9":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4752,"end":4758},"type_parameters":[],"parameters":[],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4762,"end":4765}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4802,"end":4815},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4790,"end":4817},"2":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4772,"end":4824}},"is_native":false},"10":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4871,"end":4883},"type_parameters":[],"parameters":[["uid#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4884,"end":4887}]],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4896,"end":4899}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4907,"end":4910},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4906,"end":4913}},"is_native":false},"11":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4974,"end":4986},"type_parameters":[],"parameters":[["uid#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4987,"end":4990}]],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":4999,"end":5001}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5008,"end":5011},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5008,"end":5014}},"is_native":false},"12":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5062,"end":5074},"type_parameters":[],"parameters":[["uid#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5075,"end":5078}]],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30
,13,204],"start":5087,"end":5097}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5119,"end":5122},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5119,"end":5131},"2":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5118,"end":5131},"3":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5104,"end":5132}},"is_native":false},"13":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5194,"end":5208},"type_parameters":[],"parameters":[["uid#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5209,"end":5212}]],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5221,"end":5228}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5235,"end":5238},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5235,"end":5247}},"is_native":false},"14":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5408,"end":5411},"type_parameters":[],"parameters":[["ctx#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5412,"end":5415}]],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5434,"end":5437}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5474,"end":5477},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5474,"end":5500},"2":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5462,"end":5502},"3":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5444,"end":5509}},"is_native":false},"15":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5860,"end":5866},"type_parameters":[],"parameters":[["id#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5867,"end":5869}]],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5913,"end":5915},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,5
2,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5886,"end":5910},"2":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5896,"end":5908},"3":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5921,"end":5939}},"is_native":false},"16":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5991,"end":5993},"type_parameters":[["T",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":5994,"end":5995}]],"parameters":[["obj#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6002,"end":6005}]],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6012,"end":6014}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6032,"end":6035},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6021,"end":6036},"2":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6021,"end":6039}},"is_native":false},"17":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6094,"end":6103},"type_parameters":[["T",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6104,"end":6105}]],"parameters":[["obj#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6112,"end":6115}]],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6122,"end":6125}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6144,"end":6147},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6133,"end":6148},"2":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6132,"end":6151}},"is_native":false},"18":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6221,"end":6229},"type_parameters":[["T",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6230,"end":6231}]],"parameters":[["obj#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6238,"end":6241}]],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,
191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6248,"end":6258}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6291,"end":6294},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6280,"end":6295},"2":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6279,"end":6298},"3":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6265,"end":6299}},"is_native":false},"19":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6371,"end":6381},"type_parameters":[["T",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6382,"end":6383}]],"parameters":[["obj#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6390,"end":6393}]],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6400,"end":6407}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6425,"end":6428},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6414,"end":6429},"2":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6414,"end":6438}},"is_native":false},"20":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6761,"end":6771},"type_parameters":[["T",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6772,"end":6773}]],"parameters":[["obj#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6780,"end":6783}]],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6790,"end":6794}],"locals":[],"nops":{},"code_map":{},"is_native":true},"21":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6889,"end":6906},"type_parameters":[],"parameters":[["bytes#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6907,"end":6912}]],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6924,"end":6927}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6949,"end":6954},"1":{"file_hash":[103,169,24,233,3
1,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6934,"end":6955},"2":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6976,"end":6981},"3":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6971,"end":6983},"4":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":6961,"end":6985}},"is_native":false},"22":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":7052,"end":7063},"type_parameters":[],"parameters":[["id#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":7064,"end":7066}]],"returns":[],"locals":[],"nops":{},"code_map":{},"is_native":true},"23":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":7128,"end":7142},"type_parameters":[],"parameters":[["id#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":7143,"end":7145}]],"returns":[],"locals":[],"nops":{},"code_map":{},"is_native":true},"24":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":7228,"end":7240},"type_parameters":[],"parameters":[["ctx#0#0",{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":7241,"end":7244}]],"returns":[{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":7259,"end":7261}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":7280,"end":7283},"1":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":7280,"end":7308},"2":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":7268,"end":7310}},"is_native":false},"25":{"definition_location":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":102,"end":7312},"type_parameters":[],"parameters":[],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[103,169,24,233,31,232,113,209,174,110,82,17,119,136,52,139,67,161,201,191,142,219,94,29,103,80,209,55,36,30,13,204],"start":102,"end":7312}},"is_native":false}},"constant_map":{"ENotSystemAddress":6,"SUI_AUTHENTICATOR_STATE_ID":2,"SUI_BRIDGE_ID":5,"SUI_CLOCK_OBJECT_ID":1,"SUI_DENY_LIST_OBJECT_ID":4,"SUI_RANDOM_ID":3,"SUI_SYSTEM_STATE_OBJECT_ID":0}} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/source_maps/dependencies/Sui/tx_context.json 
b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/source_maps/dependencies/Sui/tx_context.json new file mode 100644 index 0000000000000..a85ccb124ed83 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/source_maps/dependencies/Sui/tx_context.json @@ -0,0 +1 @@ +{"definition_location":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":87,"end":97},"module_name":["0000000000000000000000000000000000000000000000000000000000000002","tx_context"],"struct_map":{"0":{"definition_location":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":705,"end":714},"type_parameters":[],"fields":[{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":798,"end":804},{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":859,"end":866},{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":917,"end":922},{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":977,"end":995},{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1146,"end":1157}]}},"enum_map":{},"function_map":{"0":{"definition_location":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1253,"end":1259},"type_parameters":[],"parameters":[["self#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1260,"end":1264}]],"returns":[{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1279,"end":1286}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1293,"end":1297},"1":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1293,"end":1304}},"is_native":false},"1":{"definition_location":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1432,"end":1438},"type_parameters":[],"parameters":[["self#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1439,"end":1443}]],"returns":[{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1458,"end":1469}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1477,"end":1481},"1":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1476,"end":1489}},"is_native":false},"2":{"definition_location":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131
,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1533,"end":1538},"type_parameters":[],"parameters":[["self#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1539,"end":1543}]],"returns":[{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1558,"end":1561}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1568,"end":1572},"1":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1568,"end":1578}},"is_native":false},"3":{"definition_location":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1662,"end":1680},"type_parameters":[],"parameters":[["self#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1681,"end":1685}]],"returns":[{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1700,"end":1703}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1710,"end":1714},"1":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1710,"end":1733}},"is_native":false},"4":{"definition_location":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1949,"end":1969},"type_parameters":[],"parameters":[["ctx#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1970,"end":1973}]],"returns":[{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":1992,"end":1999}],"locals":[["id#1#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2049,"end":2051}],["ids_created#1#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2010,"end":2021}]],"nops":{},"code_map":{"0":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2024,"end":2027},"1":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2024,"end":2039},"3":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2010,"end":2021},"4":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2066,"end":2069},"5":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2065,"end":2077},"6":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":206
4,"end":2077},"7":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2079,"end":2090},"8":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2054,"end":2091},"9":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2049,"end":2051},"10":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2115,"end":2126},"11":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2129,"end":2130},"12":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2127,"end":2128},"13":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2097,"end":2100},"14":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2097,"end":2112},"15":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2097,"end":2130},"16":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2136,"end":2138}},"is_native":false},"5":{"definition_location":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2279,"end":2290},"type_parameters":[],"parameters":[["self#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2291,"end":2295}]],"returns":[{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2310,"end":2313}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2320,"end":2324},"1":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2320,"end":2336}},"is_native":false},"6":{"definition_location":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2423,"end":2432},"type_parameters":[],"parameters":[["tx_hash#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2433,"end":2440}],["ids_created#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2454,"end":2465}]],"returns":[{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2473,"end":2480}],"locals":[],"nops":{},"code_map":{},"is_native":true},"7":{"definition_location":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2578,"end":2581},"type_parameters":[],"parameters":[["sender#0#0",{"file_hash":[222,177,27,65,112,21,18,238,2
44,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2587,"end":2593}],["tx_hash#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2608,"end":2615}],["epoch#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2633,"end":2638}],["epoch_timestamp_ms#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2649,"end":2667}],["ids_created#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2678,"end":2689}]],"returns":[{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2699,"end":2708}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2723,"end":2730},"1":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2723,"end":2739},"2":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2743,"end":2757},"3":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2740,"end":2742},"4":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2715,"end":2776},"6":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2759,"end":2775},"7":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2715,"end":2776},"8":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2794,"end":2800},"9":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2802,"end":2809},"10":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2811,"end":2816},"11":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2818,"end":2836},"12":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2838,"end":2849},"13":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2782,"end":2851}},"is_native":false},"8":{"definition_location":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2959,"end":2972},"type_parameters":[],"parameters":[["addr#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":2978,"end":2982}],["hint#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117
,173,96],"start":2997,"end":3001}],["epoch#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3012,"end":3017}],["epoch_timestamp_ms#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3028,"end":3046}],["ids_created#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3057,"end":3068}]],"returns":[{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3078,"end":3087}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3098,"end":3102},"1":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3128,"end":3132},"2":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3104,"end":3133},"3":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3135,"end":3140},"4":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3142,"end":3160},"5":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3162,"end":3173},"6":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3094,"end":3174}},"is_native":false},"9":{"definition_location":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3245,"end":3250},"type_parameters":[],"parameters":[],"returns":[{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3254,"end":3263}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3361,"end":3365},"1":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3284,"end":3351},"2":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3376,"end":3377},"3":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3379,"end":3380},"4":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3382,"end":3383},"5":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3357,"end":3384}},"is_native":false},"10":{"definition_location":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3527,"end":3550},"type_parameters":[],"parameters":[["hint#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,1
33,208,202,6,116,27,117,173,96],"start":3551,"end":3555}]],"returns":[{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3563,"end":3573}],"locals":[["tx_hash#1#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3588,"end":3595}]],"nops":{},"code_map":{"0":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3617,"end":3622},"1":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3598,"end":3623},"2":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3584,"end":3595},"3":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3636,"end":3643},"4":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3636,"end":3652},"5":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3655,"end":3669},"6":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3653,"end":3654},"7":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3629,"end":3691},"9":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3671,"end":3678},"10":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3689,"end":3690},"11":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3671,"end":3691},"12":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3629,"end":3691},"13":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3697,"end":3704}},"is_native":false},"11":{"definition_location":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3732,"end":3747},"type_parameters":[],"parameters":[["self#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3748,"end":3752}]],"returns":[{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3767,"end":3770}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3789,"end":3793},"1":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3777,"end":3794}},"is_native":false},"12":{"definition_location":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3868,
"end":3890},"type_parameters":[],"parameters":[["self#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3891,"end":3895}]],"returns":[{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3910,"end":3917}],"locals":[["ids_created#1#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3928,"end":3939}]],"nops":{},"code_map":{"0":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3942,"end":3946},"1":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3942,"end":3958},"3":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3928,"end":3939},"4":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3972,"end":3983},"5":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3986,"end":3987},"6":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3984,"end":3985},"7":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3964,"end":4003},"11":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3989,"end":4002},"12":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":3964,"end":4003},"13":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4021,"end":4025},"14":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4020,"end":4033},"15":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4019,"end":4033},"16":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4035,"end":4046},"17":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4049,"end":4050},"18":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4047,"end":4048},"19":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4009,"end":4051}},"is_native":false},"13":{"definition_location":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4079,"end":4101},"type_parameters":[],"parameters":[["self#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4102,"end":4106}]],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[22
2,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4143,"end":4147},"1":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4143,"end":4153},"3":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4156,"end":4157},"4":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4154,"end":4155},"5":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4130,"end":4134},"6":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4130,"end":4140},"7":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4130,"end":4157}},"is_native":false},"14":{"definition_location":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4185,"end":4210},"type_parameters":[],"parameters":[["self#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4211,"end":4215}],["delta_ms#0#0",{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4233,"end":4241}]],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4280,"end":4284},"1":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4280,"end":4303},"3":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4306,"end":4314},"4":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4304,"end":4305},"5":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4254,"end":4258},"6":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4254,"end":4277},"7":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":4254,"end":4314}},"is_native":false},"15":{"definition_location":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":75,"end":4316},"type_parameters":[],"parameters":[],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[222,177,27,65,112,21,18,238,244,38,237,149,174,129,82,131,37,75,210,46,15,2,202,133,208,202,6,116,27,117,173,96],"start":75,"end":4316}},"is_native":false}},"constant_map":{"EBadTxHashLength":1,"ENoIDsCreated":2,"TX_HASH_LENGTH":0}} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/source_maps/m.json 
b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/source_maps/m.json new file mode 100644 index 0000000000000..e82c0419837d9 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/source_maps/m.json @@ -0,0 +1 @@ +{"definition_location":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":70,"end":71},"module_name":["0000000000000000000000000000000000000000000000000000000000000000","m"],"struct_map":{"0":{"definition_location":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":88,"end":98},"type_parameters":[],"fields":[{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":113,"end":115},{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":126,"end":129}]}},"enum_map":{},"function_map":{"0":{"definition_location":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":142,"end":145},"type_parameters":[],"parameters":[["o#0#0",{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":146,"end":147}],["p#0#0",{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":161,"end":162}]],"returns":[{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":169,"end":172}],"locals":[["%#1",{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":187,"end":201}],["%#2",{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":187,"end":212}],["n#1#0",{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":183,"end":184}],["num#1#0",{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":242,"end":245}]],"nops":{},"code_map":{"0":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":198,"end":200},"1":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":187,"end":201},"4":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":187,"end":212},"6":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":187,"end":215},"7":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":213,"end":214},"8":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":187,"end":215},"10":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":183,"end":184},"11":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,
181,146,97,220,49,193,128,189,216,45,155,5],"start":250,"end":251},"12":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":225,"end":247},"13":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":242,"end":245},"14":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":257,"end":275},"15":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":282,"end":283},"16":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":282,"end":290},"17":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":295,"end":298},"18":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":295,"end":305},"19":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":292,"end":293},"20":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":310,"end":311},"21":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":310,"end":318},"22":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":307,"end":308},"23":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":281,"end":319}},"is_native":false},"1":{"definition_location":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":335,"end":339},"type_parameters":[],"parameters":[],"returns":[],"locals":[["ctx#1#0",{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":356,"end":359}]],"nops":{},"code_map":{"0":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":362,"end":381},"1":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":352,"end":359},"2":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":435,"end":443},"3":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":423,"end":444},"4":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":451,"end":453},"5":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":406,"end":455},"6":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":457,"end":459},"7":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":402,"end":460},"8":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54
,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":590,"end":598},"9":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":578,"end":599},"10":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":606,"end":608},"11":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":561,"end":610},"12":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":612,"end":614},"13":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":557,"end":615},"14":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":555,"end":556},"15":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":543,"end":547},"16":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":615,"end":616}},"is_native":false},"2":{"definition_location":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":51,"end":618},"type_parameters":[],"parameters":[],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[9,0,245,148,110,8,77,217,5,163,139,61,20,43,54,140,85,157,1,225,181,146,97,220,49,193,128,189,216,45,155,5],"start":51,"end":618}},"is_native":false}},"constant_map":{}} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/sources/dependencies/MoveStdlib/bcs.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/sources/dependencies/MoveStdlib/bcs.move new file mode 100644 index 0000000000000..7e0cec97d2a6d --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/sources/dependencies/MoveStdlib/bcs.move @@ -0,0 +1,11 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +/// Utility for converting a Move value to its binary representation in BCS (Binary Canonical +/// Serialization). BCS is the binary encoding for Move resources and other non-module values +/// published on-chain. See https://github.com/diem/bcs#binary-canonical-serialization-bcs for more +/// details on BCS. +module std::bcs; + +/// Return the binary representation of `v` in BCS (Binary Canonical Serialization) format +public native fun to_bytes(v: &MoveValue): vector; diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/sources/dependencies/Sui/object.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/sources/dependencies/Sui/object.move new file mode 100644 index 0000000000000..8bc0c67c38fc8 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/sources/dependencies/Sui/object.move @@ -0,0 +1,233 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +/// Sui object identifiers +module sui::object; + +use std::bcs; +use sui::address; + +/// Allows calling `.to_address` on an `ID` to get an `address`. 
+public use fun id_to_address as ID.to_address; + +/// Allows calling `.to_bytes` on an `ID` to get a `vector<u8>`. +public use fun id_to_bytes as ID.to_bytes; + +/// Allows calling `.as_inner` on a `UID` to get an `&ID`. +public use fun uid_as_inner as UID.as_inner; + +/// Allows calling `.to_inner` on a `UID` to get an `ID`. +public use fun uid_to_inner as UID.to_inner; + +/// Allows calling `.to_address` on a `UID` to get an `address`. +public use fun uid_to_address as UID.to_address; + +/// Allows calling `.to_bytes` on a `UID` to get a `vector<u8>`. +public use fun uid_to_bytes as UID.to_bytes; + +/// The hardcoded ID for the singleton Sui System State Object. +const SUI_SYSTEM_STATE_OBJECT_ID: address = @0x5; + +/// The hardcoded ID for the singleton Clock Object. +const SUI_CLOCK_OBJECT_ID: address = @0x6; + +/// The hardcoded ID for the singleton AuthenticatorState Object. +const SUI_AUTHENTICATOR_STATE_ID: address = @0x7; + +/// The hardcoded ID for the singleton Random Object. +const SUI_RANDOM_ID: address = @0x8; + +/// The hardcoded ID for the singleton DenyList. +const SUI_DENY_LIST_OBJECT_ID: address = @0x403; + +/// The hardcoded ID for the Bridge Object. +const SUI_BRIDGE_ID: address = @0x9; + +/// Sender is not @0x0 the system address. +const ENotSystemAddress: u64 = 0; + +/// An object ID. This is used to reference Sui Objects. +/// This is *not* guaranteed to be globally unique--anyone can create an `ID` from a `UID` or +/// from an object, and ID's can be freely copied and dropped. +/// Here, the values are not globally unique because there can be multiple values of type `ID` +/// with the same underlying bytes. For example, `object::id(&obj)` can be called as many times +/// as you want for a given `obj`, and each `ID` value will be identical. +public struct ID has copy, drop, store { + // We use `address` instead of `vector<u8>` here because `address` has a more + // compact serialization. `address` is serialized as a BCS fixed-length sequence, + // which saves us the length prefix we would pay for if this were `vector<u8>`. + // See https://github.com/diem/bcs#fixed-and-variable-length-sequences. + bytes: address, +} + +/// Globally unique IDs that define an object's ID in storage. Any Sui Object, that is a struct +/// with the `key` ability, must have `id: UID` as its first field. +/// These are globally unique in the sense that no two values of type `UID` are ever equal, in +/// other words for any two values `id1: UID` and `id2: UID`, `id1` != `id2`. +/// This is a privileged type that can only be derived from a `TxContext`. +/// `UID` doesn't have the `drop` ability, so deleting a `UID` requires a call to `delete`. +public struct UID has store { + id: ID, +} + +// === id === + +/// Get the raw bytes of an `ID` +public fun id_to_bytes(id: &ID): vector<u8> { + bcs::to_bytes(&id.bytes) +} + +/// Get the inner bytes of `id` as an address. +public fun id_to_address(id: &ID): address { + id.bytes +} + +/// Make an `ID` from raw bytes. +public fun id_from_bytes(bytes: vector<u8>): ID { + address::from_bytes(bytes).to_id() +} + +/// Make an `ID` from an address. +public fun id_from_address(bytes: address): ID { + ID { bytes } +} + +// === uid === + +#[allow(unused_function)] +/// Create the `UID` for the singleton `SuiSystemState` object. +/// This should only be called once from `sui_system`.
+fun sui_system_state(ctx: &TxContext): UID { + assert!(ctx.sender() == @0x0, ENotSystemAddress); + UID { + id: ID { bytes: SUI_SYSTEM_STATE_OBJECT_ID }, + } +} + +/// Create the `UID` for the singleton `Clock` object. +/// This should only be called once from `clock`. +public(package) fun clock(): UID { + UID { + id: ID { bytes: SUI_CLOCK_OBJECT_ID }, + } +} + +/// Create the `UID` for the singleton `AuthenticatorState` object. +/// This should only be called once from `authenticator_state`. +public(package) fun authenticator_state(): UID { + UID { + id: ID { bytes: SUI_AUTHENTICATOR_STATE_ID }, + } +} + +/// Create the `UID` for the singleton `Random` object. +/// This should only be called once from `random`. +public(package) fun randomness_state(): UID { + UID { + id: ID { bytes: SUI_RANDOM_ID }, + } +} + +/// Create the `UID` for the singleton `DenyList` object. +/// This should only be called once from `deny_list`. +public(package) fun sui_deny_list_object_id(): UID { + UID { + id: ID { bytes: SUI_DENY_LIST_OBJECT_ID }, + } +} + +#[allow(unused_function)] +/// Create the `UID` for the singleton `Bridge` object. +/// This should only be called once from `bridge`. +fun bridge(): UID { + UID { + id: ID { bytes: SUI_BRIDGE_ID }, + } +} + +/// Get the inner `ID` of `uid` +public fun uid_as_inner(uid: &UID): &ID { + &uid.id +} + +/// Get the raw bytes of a `uid`'s inner `ID` +public fun uid_to_inner(uid: &UID): ID { + uid.id +} + +/// Get the raw bytes of a `UID` +public fun uid_to_bytes(uid: &UID): vector<u8> { + bcs::to_bytes(&uid.id.bytes) +} + +/// Get the inner bytes of `id` as an address. +public fun uid_to_address(uid: &UID): address { + uid.id.bytes +} + +// === any object === + +/// Create a new object. Returns the `UID` that must be stored in a Sui object. +/// This is the only way to create `UID`s. +public fun new(ctx: &mut TxContext): UID { + UID { + id: ID { bytes: ctx.fresh_object_address() }, + } +} + +/// Delete the object and its `UID`. This is the only way to eliminate a `UID`. +// This exists to inform Sui of object deletions. When an object +// gets unpacked, the programmer will have to do something with its +// `UID`. The implementation of this function emits a deleted +// system event so Sui knows to process the object deletion +public fun delete(id: UID) { + let UID { id: ID { bytes } } = id; + delete_impl(bytes) +} + +/// Get the underlying `ID` of `obj` +public fun id<T: key>(obj: &T): ID { + borrow_uid(obj).id +} + +/// Borrow the underlying `ID` of `obj` +public fun borrow_id<T: key>(obj: &T): &ID { + &borrow_uid(obj).id +} + +/// Get the raw bytes for the underlying `ID` of `obj` +public fun id_bytes<T: key>(obj: &T): vector<u8> { + bcs::to_bytes(&borrow_uid(obj).id) +} + +/// Get the inner bytes for the underlying `ID` of `obj` +public fun id_address<T: key>(obj: &T): address { + borrow_uid(obj).id.bytes +} + +/// Get the `UID` for `obj`. +/// Safe because Sui has an extra bytecode verifier pass that forces every struct with +/// the `key` ability to have a distinguished `UID` field. +/// Cannot be made public as the access to `UID` for a given object must be privileged, and +/// restrictable in the object's module.
+native fun borrow_uid<T: key>(obj: &T): &UID; + +/// Generate a new UID specifically used for creating a UID from a hash +public(package) fun new_uid_from_hash(bytes: address): UID { + record_new_uid(bytes); + UID { id: ID { bytes } } +} + +// === internal functions === + +// helper for delete +native fun delete_impl(id: address); + +// marks newly created UIDs from hash +native fun record_new_uid(id: address); + +#[test_only] +/// Return the most recent created object ID. +public fun last_created(ctx: &TxContext): ID { + ID { bytes: ctx.last_created_object_id() } +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/sources/dependencies/Sui/tx_context.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/sources/dependencies/Sui/tx_context.move new file mode 100644 index 0000000000000..1fdef9ff83a81 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/sources/dependencies/Sui/tx_context.move @@ -0,0 +1,141 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +module sui::tx_context; + +#[test_only] +/// Number of bytes in a tx hash (which will be the transaction digest) +const TX_HASH_LENGTH: u64 = 32; + +#[test_only] +/// Expected a tx hash of length 32, but found a different length +const EBadTxHashLength: u64 = 0; + +#[test_only] +/// Attempt to get the most recent created object ID when none has been created. +const ENoIDsCreated: u64 = 1; + +/// Information about the transaction currently being executed. +/// This cannot be constructed by a transaction--it is a privileged object created by +/// the VM and passed in to the entrypoint of the transaction as `&mut TxContext`. +public struct TxContext has drop { + /// The address of the user that signed the current transaction + sender: address, + /// Hash of the current transaction + tx_hash: vector<u8>, + /// The current epoch number + epoch: u64, + /// Timestamp that the epoch started at + epoch_timestamp_ms: u64, + /// Counter recording the number of fresh id's created while executing + /// this transaction. Always 0 at the start of a transaction + ids_created: u64, +} + +/// Return the address of the user that signed the current +/// transaction +public fun sender(self: &TxContext): address { + self.sender +} + +/// Return the transaction digest (hash of transaction inputs). +/// Please do not use as a source of randomness. +public fun digest(self: &TxContext): &vector<u8> { + &self.tx_hash +} + +/// Return the current epoch +public fun epoch(self: &TxContext): u64 { + self.epoch +} + +/// Return the epoch start time as a unix timestamp in milliseconds. +public fun epoch_timestamp_ms(self: &TxContext): u64 { + self.epoch_timestamp_ms +} + +/// Create an `address` that has not been used. As it is an object address, it will never +/// occur as the address for a user. +/// In other words, the generated address is a globally unique object ID. +public fun fresh_object_address(ctx: &mut TxContext): address { + let ids_created = ctx.ids_created; + let id = derive_id(*&ctx.tx_hash, ids_created); + ctx.ids_created = ids_created + 1; + id +} + +#[allow(unused_function)] +/// Return the number of id's created by the current transaction.
+/// Hidden for now, but may expose later +fun ids_created(self: &TxContext): u64 { + self.ids_created +} + +/// Native function for deriving an ID via hash(tx_hash || ids_created) +native fun derive_id(tx_hash: vector<u8>, ids_created: u64): address; + +// ==== test-only functions ==== + +#[test_only] +/// Create a `TxContext` for testing +public fun new( + sender: address, + tx_hash: vector<u8>, + epoch: u64, + epoch_timestamp_ms: u64, + ids_created: u64, +): TxContext { + assert!(tx_hash.length() == TX_HASH_LENGTH, EBadTxHashLength); + TxContext { sender, tx_hash, epoch, epoch_timestamp_ms, ids_created } +} + +#[test_only] +/// Create a `TxContext` for testing, with a potentially non-zero epoch number. +public fun new_from_hint( + addr: address, + hint: u64, + epoch: u64, + epoch_timestamp_ms: u64, + ids_created: u64, +): TxContext { + new(addr, dummy_tx_hash_with_hint(hint), epoch, epoch_timestamp_ms, ids_created) +} + +#[test_only] +/// Create a dummy `TxContext` for testing +public fun dummy(): TxContext { + let tx_hash = x"3a985da74fe225b2045c172d6bd390bd855f086e3e9d525b46bfe24511431532"; + new(@0x0, tx_hash, 0, 0, 0) +} + +#[test_only] +/// Utility for creating 256 unique input hashes. +/// These hashes are guaranteed to be unique given a unique `hint: u64` +fun dummy_tx_hash_with_hint(hint: u64): vector<u8> { + let mut tx_hash = std::bcs::to_bytes(&hint); + while (tx_hash.length() < TX_HASH_LENGTH) tx_hash.push_back(0); + tx_hash +} + +#[test_only] +public fun get_ids_created(self: &TxContext): u64 { + ids_created(self) +} + +#[test_only] +/// Return the most recent created object ID. +public fun last_created_object_id(self: &TxContext): address { + let ids_created = self.ids_created; + assert!(ids_created > 0, ENoIDsCreated); + derive_id(*&self.tx_hash, ids_created - 1) +} + +#[test_only] +public fun increment_epoch_number(self: &mut TxContext) { + self.epoch = self.epoch + 1 +} + +#[test_only] +public fun increment_epoch_timestamp(self: &mut TxContext, delta_ms: u64) { + self.epoch_timestamp_ms = self.epoch_timestamp_ms + delta_ms +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/sources/m.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/sources/m.move new file mode 100644 index 0000000000000..86c1a237632e8 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/build/global_loc/sources/m.move @@ -0,0 +1,22 @@ +// Test handling of global locations in the trace. +module global_loc::m; + +public struct SomeObject has key { + id: UID, + num: u8, +} + +fun foo(o: SomeObject, p: u8): u64 { + let n = object::id(&o).to_bytes()[0]; + let SomeObject { id, num } = o; + object::delete(id); + (n as u64) + (num as u64) + (p as u64) +} + +#[test] +fun test() { + let mut ctx = tx_context::dummy(); + let mut _res = foo(SomeObject { id: object::new(&mut ctx), num: 42 }, 42); + // line below is to force another unoptimized read to keep `res` visible + _res = _res + foo(SomeObject { id: object::new(&mut ctx), num: 42 }, 42); +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/sources/m.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/sources/m.move new file mode 100644 index 0000000000000..86c1a237632e8 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/sources/m.move @@ -0,0 +1,22 @@ +// Test handling of global locations in the trace.
+module global_loc::m; + +public struct SomeObject has key { + id: UID, + num: u8, +} + +fun foo(o: SomeObject, p: u8): u64 { + let n = object::id(&o).to_bytes()[0]; + let SomeObject { id, num } = o; + object::delete(id); + (n as u64) + (num as u64) + (p as u64) +} + +#[test] +fun test() { + let mut ctx = tx_context::dummy(); + let mut _res = foo(SomeObject { id: object::new(&mut ctx), num: 42 }, 42); + // line below is to force another unoptimized read to keep `res` visible + _res = _res + foo(SomeObject { id: object::new(&mut ctx), num: 42 }, 42); +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/test.exp b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/test.exp new file mode 100644 index 0000000000000..fee44233ccc7a --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/test.exp @@ -0,0 +1,45 @@ +current frame stack: + function: test (line 21) + scope 0 : + ctx : (0x2::tx_context::TxContext) { + sender : 0000000000000000000000000000000000000000000000000000000000000000 + tx_hash : [ + 0 : 58 + 1 : 152 + 2 : 93 + 3 : 167 + 4 : 79 + 5 : 226 + 6 : 37 + 7 : 178 + 8 : 4 + 9 : 92 + 10 : 23 + 11 : 45 + 12 : 107 + 13 : 211 + 14 : 144 + 15 : 189 + 16 : 133 + 17 : 95 + 18 : 8 + 19 : 110 + 20 : 62 + 21 : 157 + 22 : 82 + 23 : 91 + 24 : 70 + 25 : 191 + 26 : 226 + 27 : 69 + 28 : 17 + 29 : 67 + 30 : 21 + 31 : 50 + ] + epoch : 0 + epoch_timestamp_ms : 0 + ids_created : 1 + } + type: 0x2::tx_context::TxContext + diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/trace.spec.js b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/trace.spec.js new file mode 100644 index 0000000000000..12de693d855d9 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/trace.spec.js @@ -0,0 +1,10 @@ +let action = (runtime) => { + let res = ''; + // step over context creation + runtime.step(true); + // step over function creating a global location + runtime.step(true); + res += runtime.toString(); + return res; +}; +run_spec(__dirname, action); diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/traces/global_loc__m__test.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/traces/global_loc__m__test.json new file mode 100644 index 0000000000000..844dd32b381ac --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/global_loc/traces/global_loc__m__test.json @@ -0,0 +1 @@ 
+{"version":1,"events":[{"OpenFrame":{"frame":{"frame_id":0,"function_name":"test","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":1,"type_instantiation":[],"parameters":[],"return_types":[],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000002","module":"tx_context","name":"TxContext","type_args":[]}},"ref_type":null}],"is_native":false},"gas_left":1000000000}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":1000000000,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":2,"function_name":"dummy","module":{"address":"0000000000000000000000000000000000000000000000000000000000000002","name":"tx_context"},"binary_member_index":9,"type_instantiation":[],"parameters":[],"return_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000002","module":"tx_context","name":"TxContext","type_args":[]}},"ref_type":null}],"locals_types":[],"is_native":false},"gas_left":1000000000}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999965,"instruction":"LD_CONST"}},{"Effect":{"Push":{"RuntimeValue":{"value":"0000000000000000000000000000000000000000000000000000000000000000"}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999930,"instruction":"LD_CONST"}},{"Effect":{"Push":{"RuntimeValue":{"value":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50]}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999927,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999924,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999921,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999921,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":14,"function_name":"new","module":{"address":"0000000000000000000000000000000000000000000000000000000000000002","name":"tx_context"},"binary_member_index":7,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":"0000000000000000000000000000000000000000000000000000000000000000"}},{"RuntimeValue":{"value":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50]}},{"RuntimeValue":{"value":0}},{"RuntimeValue":{"value":0}},{"RuntimeValue":{"value":0}}],"return_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000002","module":"tx_context","name":"TxContext","type_args":[]}},"ref_type":null}],"locals_types":[{"type_":"address","ref_type":null},{"type_":{"vector":"u8"},"ref_type":null},{"type_":"u64","ref_type":null},{"type_":"u64","ref_type":null},{"type_":"u64","ref_type":null}],"is_native":false},"gas_left":999999921}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999910,"instruction":"IMM_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[14,1]},"root_value_read":{"RuntimeValue":{"value":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50]}},"moved":false}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[14,1]},"snapshot":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50]}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":99999990
7,"instruction":"VEC_LEN"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Local":[14,1]},"snapshot":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50]}}}},{"Effect":{"Push":{"RuntimeValue":{"value":32}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999897,"instruction":"LD_CONST"}},{"Effect":{"Push":{"RuntimeValue":{"value":32}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999862,"instruction":"EQ"}},{"Effect":{"Pop":{"RuntimeValue":{"value":32}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":32}}}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999861,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999860,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999826,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[14,0]},"root_value_read":{"RuntimeValue":{"value":"0000000000000000000000000000000000000000000000000000000000000000"}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":"0000000000000000000000000000000000000000000000000000000000000000"}}}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999999792,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[14,1]},"root_value_read":{"RuntimeValue":{"value":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50]}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50]}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999999774,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[14,2]},"root_value_read":{"RuntimeValue":{"value":0}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":11,"gas_left":999999756,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[14,3]},"root_value_read":{"RuntimeValue":{"value":0}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":12,"gas_left":999999738,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[14,4]},"root_value_read":{"RuntimeValue":{"value":0}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":13,"gas_left":999999734,"instruction":"PACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50]}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":"0000000000000000000000000000000000000000000000000000000000000000"}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}}},{"Instruction":{"type_parameters":[],"pc":14,"gas_left":999999733,"instruction":"RET"}},{"CloseFrame":{"frame_id":14,"return_":[{"RuntimeValue":{"value":{"type":"0x2::tx_context::TxContext","fields":{"sender":"000000000000000000000000000000000
0000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}],"gas_left":999999733}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999732,"instruction":"RET"}},{"CloseFrame":{"frame_id":2,"return_":[{"RuntimeValue":{"value":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}],"gas_left":999999732}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999731,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}}},{"Effect":{"Write":{"location":{"Local":[0,0]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999721,"instruction":"MUT_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[0,0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999721,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":63,"function_name":"new","module":{"address":"0000000000000000000000000000000000000000000000000000000000000002","name":"object"},"binary_member_index":14,"type_instantiation":[],"parameters":[{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}],"return_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000002","module":"object","name":"UID","type_args":[]}},"ref_type":null}],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000002","module":"tx_context","name":"TxContext","type_args":[]}},"ref_type":"Mut"}],"is_native":false},"gas_left":999999721}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999710,"instruction":"MOVE_LOC"}},{"Effec
t":{"Read":{"location":{"Local":[63,0]},"root_value_read":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}},"moved":true}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999710,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":68,"function_name":"fresh_object_address","module":{"address":"0000000000000000000000000000000000000000000000000000000000000002","name":"tx_context"},"binary_member_index":4,"type_instantiation":[],"parameters":[{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}],"return_types":[{"type_":"address","ref_type":null}],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000002","module":"tx_context","name":"TxContext","type_args":[]}},"ref_type":"Mut"},{"type_":"address","ref_type":null},{"type_":"u64","ref_type":null}],"is_native":false},"gas_left":999999710}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999699,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[68,0]},"root_value_read":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999689,"instruction":"IMM_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Local":[0,0]},4]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}}},{"Instruction":{
"type_parameters":[],"pc":2,"gas_left":999999671,"instruction":"READ_REF"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Indexed":[{"Local":[0,0]},4]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}}},{"Effect":{"Read":{"location":{"Indexed":[{"Local":[0,0]},4]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999670,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Write":{"location":{"Local":[68,2]},"root_value_after_write":{"RuntimeValue":{"value":0}}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999660,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[68,0]},"root_value_read":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999650,"instruction":"IMM_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Local":[0,0]},1]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999616,"instruction":"READ_REF"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Indexed":[{"Local":[0,0]},1]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}}},{"Effect":{"Read":{"location":{"Indexed":[{"Local":[0,0]},1]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x2::tx_context::TxContext","fields":{"sender":"000000000000000000000000000000000
0000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50]}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999598,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[68,2]},"root_value_read":{"RuntimeValue":{"value":0}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999598,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":96,"function_name":"derive_id","module":{"address":"0000000000000000000000000000000000000000000000000000000000000002","name":"tx_context"},"binary_member_index":6,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50]}},{"RuntimeValue":{"value":0}}],"return_types":[{"type_":"address","ref_type":null}],"locals_types":[{"type_":{"vector":"u8"},"ref_type":null},{"type_":"u64","ref_type":null}],"is_native":true},"gas_left":999999598}},{"Effect":{"Push":{"RuntimeValue":{"value":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}},{"CloseFrame":{"frame_id":96,"return_":[{"RuntimeValue":{"value":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}],"gas_left":999999511}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999999510,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}},{"Effect":{"Write":{"location":{"Local":[68,1]},"root_value_after_write":{"RuntimeValue":{"value":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999999492,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[68,2]},"root_value_read":{"RuntimeValue":{"value":0}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":11,"gas_left":999999489,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":12,"gas_left":999999486,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":13,"gas_left":999999476,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[68,0]},"root_value_read":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}},"moved":true}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}}},{"Instruction":{"type_parameters":[],"pc":14,"gas_left":999999466,"instruction":"MUT_BORROW_FIELD"}
},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Local":[0,0]},4]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999999448,"instruction":"WRITE_REF"}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Local":[0,0]},4]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":0}}}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Write":{"location":{"Indexed":[{"Local":[0,0]},4]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}}}}},{"Instruction":{"type_parameters":[],"pc":16,"gas_left":999999414,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[68,1]},"root_value_read":{"RuntimeValue":{"value":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}},{"Instruction":{"type_parameters":[],"pc":17,"gas_left":999999413,"instruction":"RET"}},{"CloseFrame":{"frame_id":68,"return_":[{"RuntimeValue":{"value":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}],"gas_left":999999413}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999409,"instruction":"PACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999405,"instruction":"PACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999404,"instruction":"RET"}},{"CloseFrame":{"frame_id":63,"return_":[{"RuntimeValue":{"value":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}}],"gas_left":999999404}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999401,"instruction":"LD_U8"}},{"Effect":{"Pu
sh":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999397,"instruction":"PACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}},"num":42}}}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999394,"instruction":"LD_U8"}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999394,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":143,"function_name":"foo","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}},"num":42}}}},{"RuntimeValue":{"value":42}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeObject","type_args":[]}},"ref_type":null},{"type_":"u8","ref_type":null},{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000002","module":"object","name":"ID","type_args":[]}},"ref_type":null},{"type_":{"vector":"u8"},"ref_type":null},{"type_":"u8","ref_type":null},{"type_":"u8","ref_type":null}],"is_native":false},"gas_left":999999394}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999383,"instruction":"IMM_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[143,0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}},"num":42}}}},"moved":false}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[143,0]},"snapshot":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}},"num":42}}}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999383,"instruction":"CALL_GENERIC"}},{"OpenFrame":{"frame":{"frame_id":148,"function_name":"id","module":{"address":"0000000000000000000000000000000000000000000000000000000000000002","name":"object"},"binary_member_index":16,"type_instantiation":[{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeObject","type_args":[]}}],"parameters":[{"ImmRef":{"location":{"Local":[143,0]},"snapshot":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}},"num":42}}}}],"return_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000002","module":"object","name":"ID","type_args":[]}},"ref_type":null}],"locals_types":[{"type_":{
"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeObject","type_args":[]}},"ref_type":"Imm"}],"is_native":false},"gas_left":999999383}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999372,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[148,0]},"root_value_read":{"ImmRef":{"location":{"Local":[143,0]},"snapshot":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}},"num":42}}}},"moved":true}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[143,0]},"snapshot":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}},"num":42}}}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999372,"instruction":"CALL_GENERIC"}},{"OpenFrame":{"frame":{"frame_id":153,"function_name":"borrow_uid","module":{"address":"0000000000000000000000000000000000000000000000000000000000000002","name":"object"},"binary_member_index":20,"type_instantiation":[{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeObject","type_args":[]}}],"parameters":[{"ImmRef":{"location":{"Local":[143,0]},"snapshot":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}},"num":42}}}}],"return_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000002","module":"object","name":"UID","type_args":[]}},"ref_type":"Imm"}],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeObject","type_args":[]}},"ref_type":"Imm"}],"is_native":true},"gas_left":999999372}},{"Effect":{"DataLoad":{"ref_type":"Imm","location":{"Global":154},"snapshot":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Global":154},"snapshot":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}}}},{"CloseFrame":{"frame_id":153,"return_":[{"ImmRef":{"location":{"Global":154},"snapshot":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}}],"gas_left":999999309}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999299,"instruction":"IMM_BORROW_FIELD"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Global":154},"snapshot":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Global":154},0]},"snapshot":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999263,"instruction":"READ_REF"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Indexed":[{"Global":154},0]},"snapshot":{"type":"0x2::objec
t::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}}}},{"Effect":{"Read":{"location":{"Indexed":[{"Global":154},0]},"root_value_read":{"ImmRef":{"location":{"Global":154},"snapshot":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999262,"instruction":"RET"}},{"CloseFrame":{"frame_id":148,"return_":[{"RuntimeValue":{"value":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}],"gas_left":999999262}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999261,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}},{"Effect":{"Write":{"location":{"Local":[143,2]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999251,"instruction":"IMM_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[143,2]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}},"moved":false}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[143,2]},"snapshot":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999251,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":173,"function_name":"id_to_bytes","module":{"address":"0000000000000000000000000000000000000000000000000000000000000002","name":"object"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"ImmRef":{"location":{"Local":[143,2]},"snapshot":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}],"return_types":[{"type_":{"vector":"u8"},"ref_type":null}],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000002","module":"object","name":"ID","type_args":[]}},"ref_type":"Imm"}],"is_native":false},"gas_left":999999251}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999240,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[173,0]},"root_value_read":{"ImmRef":{"location":{"Local":[143,2]},"snapshot":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}},"moved":true}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[143,2]},"snapshot":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999230,"instruction":"IMM_BORROW_FIELD"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Local":[143,2]},"snapshot":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Local":[143,2]},0]},"snapshot":{"type":"0x2::object
::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999230,"instruction":"CALL_GENERIC"}},{"OpenFrame":{"frame":{"frame_id":181,"function_name":"to_bytes","module":{"address":"0000000000000000000000000000000000000000000000000000000000000001","name":"bcs"},"binary_member_index":0,"type_instantiation":["address"],"parameters":[{"ImmRef":{"location":{"Indexed":[{"Local":[143,2]},0]},"snapshot":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}],"return_types":[{"type_":{"vector":"u8"},"ref_type":null}],"locals_types":[{"type_":"address","ref_type":"Imm"}],"is_native":true},"gas_left":999999230}},{"Effect":{"Push":{"RuntimeValue":{"value":[56,29,217,7,140,50,42,70,99,195,146,118,26,2,17,181,39,193,39,178,149,131,133,18,23,249,72,214,33,49,244,9]}}}},{"CloseFrame":{"frame_id":181,"return_":[{"RuntimeValue":{"value":[56,29,217,7,140,50,42,70,99,195,146,118,26,2,17,181,39,193,39,178,149,131,133,18,23,249,72,214,33,49,244,9]}}],"gas_left":999999131}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999130,"instruction":"RET"}},{"CloseFrame":{"frame_id":173,"return_":[{"RuntimeValue":{"value":[56,29,217,7,140,50,42,70,99,195,146,118,26,2,17,181,39,193,39,178,149,131,133,18,23,249,72,214,33,49,244,9]}}],"gas_left":999999130}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999129,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":[56,29,217,7,140,50,42,70,99,195,146,118,26,2,17,181,39,193,39,178,149,131,133,18,23,249,72,214,33,49,244,9]}}}},{"Effect":{"Write":{"location":{"Local":[143,3]},"root_value_after_write":{"RuntimeValue":{"value":[56,29,217,7,140,50,42,70,99,195,146,118,26,2,17,181,39,193,39,178,149,131,133,18,23,249,72,214,33,49,244,9]}}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999119,"instruction":"IMM_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[143,3]},"root_value_read":{"RuntimeValue":{"value":[56,29,217,7,140,50,42,70,99,195,146,118,26,2,17,181,39,193,39,178,149,131,133,18,23,249,72,214,33,49,244,9]}},"moved":false}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[143,3]},"snapshot":[56,29,217,7,140,50,42,70,99,195,146,118,26,2,17,181,39,193,39,178,149,131,133,18,23,249,72,214,33,49,244,9]}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999116,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999106,"instruction":"VEC_IMM_BORROW"}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Pop":{"ImmRef":{"location":{"Local":[143,3]},"snapshot":[56,29,217,7,140,50,42,70,99,195,146,118,26,2,17,181,39,193,39,178,149,131,133,18,23,249,72,214,33,49,244,9]}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Local":[143,3]},0]},"snapshot":[56,29,217,7,140,50,42,70,99,195,146,118,26,2,17,181,39,193,39,178,149,131,133,18,23,249,72,214,33,49,244,9]}}}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999999088,"instruction":"READ_REF"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Indexed":[{"Local":[143,3]},0]},"snapshot":[56,29,217,7,140,50,42,70,99,195,146,118,26,2,17,181,39,193,39,178,149,131,133,18,23,249,72,214,33,49,244,9]}}}},{"Effect":{"Read":{"location":{"Indexed":[{"Local":[143,3]},0]},"root_value_read":{"RuntimeValue":{"value":[56,29,217,7,140,50,42,70,99,195,146,118,26,2,17,181,39,193,39,178,149,131,133,18,23,249,72,214,33,49,244,9]}},"moved":false}}
},{"Effect":{"Push":{"RuntimeValue":{"value":56}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999999087,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":56}}}},{"Effect":{"Write":{"location":{"Local":[143,4]},"root_value_after_write":{"RuntimeValue":{"value":56}}}}},{"Instruction":{"type_parameters":[],"pc":11,"gas_left":999999031,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[143,0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}},"num":42}}}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}},"num":42}}}}}},{"Instruction":{"type_parameters":[],"pc":12,"gas_left":999999028,"instruction":"UNPACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}},"num":42}}}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":13,"gas_left":999999027,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Write":{"location":{"Local":[143,5]},"root_value_after_write":{"RuntimeValue":{"value":42}}}}},{"Instruction":{"type_parameters":[],"pc":14,"gas_left":999999027,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":216,"function_name":"delete","module":{"address":"0000000000000000000000000000000000000000000000000000000000000002","name":"object"},"binary_member_index":15,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}}],"return_types":[],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000002","module":"object","name":"UID","type_args":[]}},"ref_type":null}],"is_native":false},"gas_left":999999027}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999998988,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[216,0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999998986,"instruction":"UNPACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851
217f948d62131f409"}}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999998984,"instruction":"UNPACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x2::object::ID","fields":{"bytes":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}}}},{"Effect":{"Push":{"RuntimeValue":{"value":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999998984,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":227,"function_name":"delete_impl","module":{"address":"0000000000000000000000000000000000000000000000000000000000000002","name":"object"},"binary_member_index":22,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":"381dd9078c322a4663c392761a0211b527c127b29583851217f948d62131f409"}}],"return_types":[],"locals_types":[{"type_":"address","ref_type":null}],"is_native":true},"gas_left":999998984}},{"CloseFrame":{"frame_id":227,"return_":[],"gas_left":999998930}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999998929,"instruction":"RET"}},{"CloseFrame":{"frame_id":216,"return_":[],"gas_left":999998929}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999998911,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[143,4]},"root_value_read":{"RuntimeValue":{"value":56}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":56}}}},{"Instruction":{"type_parameters":[],"pc":16,"gas_left":999998908,"instruction":"CAST_U64"}},{"Effect":{"Pop":{"RuntimeValue":{"value":56}}}},{"Effect":{"Push":{"RuntimeValue":{"value":56}}}},{"Instruction":{"type_parameters":[],"pc":17,"gas_left":999998890,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[143,5]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":18,"gas_left":999998887,"instruction":"CAST_U64"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":19,"gas_left":999998884,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":56}}}},{"Effect":{"Push":{"RuntimeValue":{"value":98}}}},{"Instruction":{"type_parameters":[],"pc":20,"gas_left":999998866,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[143,1]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":21,"gas_left":999998863,"instruction":"CAST_U64"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":22,"gas_left":999998860,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":98}}}},{"Effect":{"Push":{"RuntimeValue":{"value":140}}}},{"Instruction":{"type_parameters":[],"pc":23,"gas_left":999998859,"instruction":"RET"}},{"CloseFrame":{"frame_id":143,"return_":[{"RuntimeValue":{"value":140}}],"gas_left":999998859}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999998849,"instruction":"MUT_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[0,0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,6
7,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}}}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999998849,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":263,"function_name":"new","module":{"address":"0000000000000000000000000000000000000000000000000000000000000002","name":"object"},"binary_member_index":14,"type_instantiation":[],"parameters":[{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}}],"return_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000002","module":"object","name":"UID","type_args":[]}},"ref_type":null}],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000002","module":"tx_context","name":"TxContext","type_args":[]}},"ref_type":"Mut"}],"is_native":false},"gas_left":999998849}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999998838,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[263,0]},"root_value_read":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}},"moved":true}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999998838,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":268,"function_name":"fresh_object_address","module":{"address":"0000000000000000000000000000000000000000000000000000000000000002","name":"tx_context"},"binary_member_index":4,"type_instantiation":[],"parameters":[{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}}],"return_types":[{"type_":"address","ref_type":null}],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000002","module":"tx_context","name":"TxContext","type_args":[]}},"ref_type":"Mut"},{"type_":"address","ref_type":null},{"type_":"u64","ref_type":null}],"is_native":false},"gas_left":999998838}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999998827,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Lo
cal":[268,0]},"root_value_read":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999998817,"instruction":"IMM_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Local":[0,0]},4]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999998799,"instruction":"READ_REF"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Indexed":[{"Local":[0,0]},4]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}}}},{"Effect":{"Read":{"location":{"Indexed":[{"Local":[0,0]},4]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999998798,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Write":{"location":{"Local":[268,2]},"root_value_after_write":{"RuntimeValue":{"value":1}}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999998788,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[268,0]},"root_value_read":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189
,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999998778,"instruction":"IMM_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Local":[0,0]},1]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999998744,"instruction":"READ_REF"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Indexed":[{"Local":[0,0]},1]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}}}},{"Effect":{"Read":{"location":{"Indexed":[{"Local":[0,0]},1]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50]}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999998726,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[268,2]},"root_value_read":{"RuntimeValue":{"value":1}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999998726,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":296,"function_name":"derive_id","module":{"address":"0000000000000000000000000000000000000000000000000000000000000002","name":"tx_context"},"binary_member_index":6,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50]}},{"RuntimeValue":{"value":1}}],"return_types":[{"type_":"address","ref_type":null}],"locals_types":[{"type_":{"vector":"u8"},"ref_type":null},{"type_":"u64","ref_type":null}],"is_native":true},"gas_left":999998726}},{"Effect":{"Push":{"RuntimeValue":{"value":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}},{"CloseFrame":{"frame_id":296,"return_":[{"RuntimeValue":{"value":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}],"gas_left":999998639}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999998638,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}},{"Effect":{"Write":{"location":{"Local":[268,1]},"root_value_after_write":{"RuntimeValue":{"value":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da8
01b8387e6"}}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999998620,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[268,2]},"root_value_read":{"RuntimeValue":{"value":1}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":11,"gas_left":999998617,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"Instruction":{"type_parameters":[],"pc":12,"gas_left":999998614,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Push":{"RuntimeValue":{"value":2}}}},{"Instruction":{"type_parameters":[],"pc":13,"gas_left":999998604,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[268,0]},"root_value_read":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}},"moved":true}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}}}},{"Instruction":{"type_parameters":[],"pc":14,"gas_left":999998594,"instruction":"MUT_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Local":[0,0]},4]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}}}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999998576,"instruction":"WRITE_REF"}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Local":[0,0]},4]},"snapshot":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":1}}}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":2}}}},{"Effect":{"Write":{"location":{"Indexed":[{"Local":[0,0]},4]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x2::tx_context::TxContext","fields":{"sender":"0000000000000000000000000000000000000000000000000000000000000000","tx_hash":[58,152,93,167,79,226,37,178,4,92,23,45,107,211,144,189,133,95,8,110,62,157,82,91,70,191,226,69,17,67,21,50],"epoch":0,"epoch_timestamp_ms":0,"ids_created":2}}}}}}},{"Instruction":{"type_parameters":[],"pc":16,"gas_left":999998542,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[268,1]},"root_value_read":{"RuntimeValue":{"value":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}},"
moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}},{"Instruction":{"type_parameters":[],"pc":17,"gas_left":999998541,"instruction":"RET"}},{"CloseFrame":{"frame_id":268,"return_":[{"RuntimeValue":{"value":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}],"gas_left":999998541}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999998537,"instruction":"PACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999998533,"instruction":"PACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999998532,"instruction":"RET"}},{"CloseFrame":{"frame_id":263,"return_":[{"RuntimeValue":{"value":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}}],"gas_left":999998532}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999998529,"instruction":"LD_U8"}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":11,"gas_left":999998525,"instruction":"PACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}},"num":42}}}}}},{"Instruction":{"type_parameters":[],"pc":12,"gas_left":999998522,"instruction":"LD_U8"}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":13,"gas_left":999998522,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":343,"function_name":"foo","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}},"num":42}}}},{"RuntimeValue":{"value":42}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeObject","type_args":[]}},"ref_type":null},{"type_":"u8","ref_type":null},{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000002","module":"object","name":"ID","type_args":[]}},"ref_type":null},{"type_":{"vector":"u8"},"ref_type":null},{"type_":"u8","ref_type":null},{"type_":"u8","ref_type":null}],"is_native":false},"gas_left":999998522}},{"I
nstruction":{"type_parameters":[],"pc":0,"gas_left":999998511,"instruction":"IMM_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[343,0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}},"num":42}}}},"moved":false}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[343,0]},"snapshot":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}},"num":42}}}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999998511,"instruction":"CALL_GENERIC"}},{"OpenFrame":{"frame":{"frame_id":348,"function_name":"id","module":{"address":"0000000000000000000000000000000000000000000000000000000000000002","name":"object"},"binary_member_index":16,"type_instantiation":[{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeObject","type_args":[]}}],"parameters":[{"ImmRef":{"location":{"Local":[343,0]},"snapshot":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}},"num":42}}}}],"return_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000002","module":"object","name":"ID","type_args":[]}},"ref_type":null}],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeObject","type_args":[]}},"ref_type":"Imm"}],"is_native":false},"gas_left":999998511}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999998500,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[348,0]},"root_value_read":{"ImmRef":{"location":{"Local":[343,0]},"snapshot":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}},"num":42}}}},"moved":true}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[343,0]},"snapshot":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}},"num":42}}}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999998500,"instruction":"CALL_GENERIC"}},{"OpenFrame":{"frame":{"frame_id":353,"function_name":"borrow_uid","module":{"address":"0000000000000000000000000000000000000000000000000000000000000002","name":"object"},"binary_member_index":20,"type_instantiation":[{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeObject","type_args":[]}}],"parameters":[{"ImmRef":{"location":{"Local":[343,0]},"snapshot":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}},"num":42}}}}],"return_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000002","module":"object","name":"UID","type_args":[]}},"ref_type":"Imm"}],"locals_types":[{"type_":{"struct":{"address":"000000000000000000000000000000000000000000
0000000000000000000000","module":"m","name":"SomeObject","type_args":[]}},"ref_type":"Imm"}],"is_native":true},"gas_left":999998500}},{"Effect":{"DataLoad":{"ref_type":"Imm","location":{"Global":354},"snapshot":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Global":354},"snapshot":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}}}},{"CloseFrame":{"frame_id":353,"return_":[{"ImmRef":{"location":{"Global":354},"snapshot":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}}],"gas_left":999998437}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999998427,"instruction":"IMM_BORROW_FIELD"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Global":354},"snapshot":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Global":354},0]},"snapshot":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999998391,"instruction":"READ_REF"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Indexed":[{"Global":354},0]},"snapshot":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}}}},{"Effect":{"Read":{"location":{"Indexed":[{"Global":354},0]},"root_value_read":{"ImmRef":{"location":{"Global":354},"snapshot":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999998390,"instruction":"RET"}},{"CloseFrame":{"frame_id":348,"return_":[{"RuntimeValue":{"value":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}],"gas_left":999998390}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999998389,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}},{"Effect":{"Write":{"location":{"Local":[343,2]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999998379,"instruction":"IMM_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[343,2]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}},"moved":false}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[343,2]},"snapshot":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left"
:999998379,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":373,"function_name":"id_to_bytes","module":{"address":"0000000000000000000000000000000000000000000000000000000000000002","name":"object"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"ImmRef":{"location":{"Local":[343,2]},"snapshot":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}],"return_types":[{"type_":{"vector":"u8"},"ref_type":null}],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000002","module":"object","name":"ID","type_args":[]}},"ref_type":"Imm"}],"is_native":false},"gas_left":999998379}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999998368,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[373,0]},"root_value_read":{"ImmRef":{"location":{"Local":[343,2]},"snapshot":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}},"moved":true}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[343,2]},"snapshot":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999998358,"instruction":"IMM_BORROW_FIELD"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Local":[343,2]},"snapshot":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Local":[343,2]},0]},"snapshot":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999998358,"instruction":"CALL_GENERIC"}},{"OpenFrame":{"frame":{"frame_id":381,"function_name":"to_bytes","module":{"address":"0000000000000000000000000000000000000000000000000000000000000001","name":"bcs"},"binary_member_index":0,"type_instantiation":["address"],"parameters":[{"ImmRef":{"location":{"Indexed":[{"Local":[343,2]},0]},"snapshot":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}],"return_types":[{"type_":{"vector":"u8"},"ref_type":null}],"locals_types":[{"type_":"address","ref_type":"Imm"}],"is_native":true},"gas_left":999998358}},{"Effect":{"Push":{"RuntimeValue":{"value":[238,254,212,242,183,246,173,95,101,214,234,46,239,80,180,241,209,233,140,57,202,142,236,188,151,54,218,128,27,131,135,230]}}}},{"CloseFrame":{"frame_id":381,"return_":[{"RuntimeValue":{"value":[238,254,212,242,183,246,173,95,101,214,234,46,239,80,180,241,209,233,140,57,202,142,236,188,151,54,218,128,27,131,135,230]}}],"gas_left":999998259}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999998258,"instruction":"RET"}},{"CloseFrame":{"frame_id":373,"return_":[{"RuntimeValue":{"value":[238,254,212,242,183,246,173,95,101,214,234,46,239,80,180,241,209,233,140,57,202,142,236,188,151,54,218,128,27,131,135,230]}}],"gas_left":999998258}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999998257,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":[238,254,212,242,183,246,173,95,101,214,234,46,239,80,180,241,209,233,140,57,202,142,236,188,151,54,218,128,27,131,135,230]}}}},{"Effect":{"Write":{"location":{"Local":[343,3]},"root_value_after_write":{"RuntimeValue":{"value":[238,254,212,242,183,246,173,95,101,214,234,46,239,80,180,241,209,233,140,57,202,142,236,188,151,54,218,128,
27,131,135,230]}}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999998247,"instruction":"IMM_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[343,3]},"root_value_read":{"RuntimeValue":{"value":[238,254,212,242,183,246,173,95,101,214,234,46,239,80,180,241,209,233,140,57,202,142,236,188,151,54,218,128,27,131,135,230]}},"moved":false}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[343,3]},"snapshot":[238,254,212,242,183,246,173,95,101,214,234,46,239,80,180,241,209,233,140,57,202,142,236,188,151,54,218,128,27,131,135,230]}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999998244,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999998234,"instruction":"VEC_IMM_BORROW"}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Pop":{"ImmRef":{"location":{"Local":[343,3]},"snapshot":[238,254,212,242,183,246,173,95,101,214,234,46,239,80,180,241,209,233,140,57,202,142,236,188,151,54,218,128,27,131,135,230]}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Local":[343,3]},0]},"snapshot":[238,254,212,242,183,246,173,95,101,214,234,46,239,80,180,241,209,233,140,57,202,142,236,188,151,54,218,128,27,131,135,230]}}}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999998216,"instruction":"READ_REF"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Indexed":[{"Local":[343,3]},0]},"snapshot":[238,254,212,242,183,246,173,95,101,214,234,46,239,80,180,241,209,233,140,57,202,142,236,188,151,54,218,128,27,131,135,230]}}}},{"Effect":{"Read":{"location":{"Indexed":[{"Local":[343,3]},0]},"root_value_read":{"RuntimeValue":{"value":[238,254,212,242,183,246,173,95,101,214,234,46,239,80,180,241,209,233,140,57,202,142,236,188,151,54,218,128,27,131,135,230]}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":238}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999998215,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":238}}}},{"Effect":{"Write":{"location":{"Local":[343,4]},"root_value_after_write":{"RuntimeValue":{"value":238}}}}},{"Instruction":{"type_parameters":[],"pc":11,"gas_left":999998159,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[343,0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}},"num":42}}}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}},"num":42}}}}}},{"Instruction":{"type_parameters":[],"pc":12,"gas_left":999998156,"instruction":"UNPACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x0::m::SomeObject","fields":{"id":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}},"num":42}}}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":13,"gas_left":999998155,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Write":{"location":{"Local":[
343,5]},"root_value_after_write":{"RuntimeValue":{"value":42}}}}},{"Instruction":{"type_parameters":[],"pc":14,"gas_left":999998155,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":416,"function_name":"delete","module":{"address":"0000000000000000000000000000000000000000000000000000000000000002","name":"object"},"binary_member_index":15,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}}],"return_types":[],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000002","module":"object","name":"UID","type_args":[]}},"ref_type":null}],"is_native":false},"gas_left":999998155}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999998116,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[416,0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999998114,"instruction":"UNPACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x2::object::UID","fields":{"id":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999998112,"instruction":"UNPACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x2::object::ID","fields":{"bytes":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}}}},{"Effect":{"Push":{"RuntimeValue":{"value":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999998112,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":427,"function_name":"delete_impl","module":{"address":"0000000000000000000000000000000000000000000000000000000000000002","name":"object"},"binary_member_index":22,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":"eefed4f2b7f6ad5f65d6ea2eef50b4f1d1e98c39ca8eecbc9736da801b8387e6"}}],"return_types":[],"locals_types":[{"type_":"address","ref_type":null}],"is_native":true},"gas_left":999998112}},{"CloseFrame":{"frame_id":427,"return_":[],"gas_left":999998058}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999998057,"instruction":"RET"}},{"CloseFrame":{"frame_id":416,"return_":[],"gas_left":999998057}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999998039,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[343,4]},"root_value_read":{"RuntimeValue":{"value":238}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":238}}}},{"Instruction":{"type_parameters":[],"pc":16,"gas_left":999998036,"instruction":"CAST_U64"}},{"Effect":{"Pop":{"RuntimeValue":{"value":238}}}},{"Effect":{"Push":{"RuntimeValue":{"value":238}}}},{"Instruction":{"type_parameters":[],"pc":17,"gas_left":999998018,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[343,5]},"root_value_read":{"Runtime
Value":{"value":42}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":18,"gas_left":999998015,"instruction":"CAST_U64"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":19,"gas_left":999998012,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":238}}}},{"Effect":{"Push":{"RuntimeValue":{"value":280}}}},{"Instruction":{"type_parameters":[],"pc":20,"gas_left":999997994,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[343,1]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":21,"gas_left":999997991,"instruction":"CAST_U64"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":22,"gas_left":999997988,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":280}}}},{"Effect":{"Push":{"RuntimeValue":{"value":322}}}},{"Instruction":{"type_parameters":[],"pc":23,"gas_left":999997987,"instruction":"RET"}},{"CloseFrame":{"frame_id":343,"return_":[{"RuntimeValue":{"value":322}}],"gas_left":999997987}},{"Instruction":{"type_parameters":[],"pc":14,"gas_left":999997984,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":322}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":140}}}},{"Effect":{"Push":{"RuntimeValue":{"value":462}}}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999997983,"instruction":"POP"}},{"Effect":{"Pop":{"RuntimeValue":{"value":462}}}},{"Instruction":{"type_parameters":[],"pc":16,"gas_left":999997982,"instruction":"RET"}},{"CloseFrame":{"frame_id":0,"return_":[],"gas_left":999997982}}]} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/Move.toml b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/Move.toml new file mode 100644 index 0000000000000..bd6df8584e0bf --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/Move.toml @@ -0,0 +1,10 @@ +[package] +name = "native_fun" +edition = "2024.beta" + +[dependencies] +MoveStdlib = { local = "../../../../move-stdlib" } + +[addresses] +native_fun = "0x0" +std = "0x1" diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/bytecode_modules/m.mv b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/bytecode_modules/m.mv new file mode 100644 index 0000000000000..ed85ed416f69d Binary files /dev/null and b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/bytecode_modules/m.mv differ diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/source_maps/dependencies/MoveStdlib/ascii.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/source_maps/dependencies/MoveStdlib/ascii.json new file mode 100644 index 0000000000000..c096138bf4a7b --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/source_maps/dependencies/MoveStdlib/ascii.json @@ -0,0 +1 @@ 
+{"definition_location":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":263,"end":268},"module_name":["0000000000000000000000000000000000000000000000000000000000000001","ascii"],"struct_map":{"0":{"definition_location":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":1016,"end":1022},"type_parameters":[],"fields":[{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":1055,"end":1060}]},"1":{"definition_location":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":1127,"end":1131},"type_parameters":[],"fields":[{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":1164,"end":1168}]}},"enum_map":{},"function_map":{"0":{"definition_location":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":1283,"end":1287},"type_parameters":[],"parameters":[["byte#0#0",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":1288,"end":1292}]],"returns":[{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":1299,"end":1303}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":1336,"end":1340},"1":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":1322,"end":1341},"2":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":1314,"end":1366},"4":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":1343,"end":1365},"5":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":1314,"end":1366},"6":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":1383,"end":1387},"7":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":1376,"end":1389}},"is_native":false},"1":{"definition_location":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":1529,"end":1535},"type_parameters":[],"parameters":[["bytes#0#0",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":1536,"end":1541}]],"returns":[{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":1556,"end":1562}],"locals":[["x#1#0",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":1577,"end":1578}]],"nops":{},"code_map":{"0":{"file_hash":[9,21,52,55,2
27,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":1592,"end":1597},"1":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":1581,"end":1598},"2":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":1577,"end":1578},"3":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":1616,"end":1617},"4":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":1616,"end":1627},"5":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":1608,"end":1652},"7":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":1629,"end":1651},"8":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":1608,"end":1652},"9":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":1662,"end":1663},"10":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":1662,"end":1678}},"is_native":false},"2":{"definition_location":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":1886,"end":1896},"type_parameters":[],"parameters":[["bytes#0#0",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":1897,"end":1902}]],"returns":[{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":1917,"end":1931}],"locals":[["$stop#0#6",{"file_hash":[21,247,201,8,68,64,165,87,119,232,179,234,51,204,66,204,125,149,214,14,86,232,139,211,1,65,223,246,65,227,152,78],"start":2398,"end":2403}],["%#2",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":10748,"end":10840}],["%#4",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2006,"end":2078}],["i#1#12",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":7202,"end":7203}],["i#1#9",{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1709,"end":1710}],["stop#1#9",{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1733,"end":1737}],["v#1#3",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":7170,"end":7171}]],"nops":{},"code_map":{"0":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":1957,"end":1962},"1":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,2
44,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":7170,"end":7171},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":7186,"end":7187},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":7186,"end":7196},"4":{"file_hash":[21,247,201,8,68,64,165,87,119,232,179,234,51,204,66,204,125,149,214,14,86,232,139,211,1,65,223,246,65,227,152,78],"start":2398,"end":2403},"5":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":2473,"end":2474},"6":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1705,"end":1710},"7":{"file_hash":[21,247,201,8,68,64,165,87,119,232,179,234,51,204,66,204,125,149,214,14,86,232,139,211,1,65,223,246,65,227,152,78],"start":2448,"end":2453},"8":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1733,"end":1737},"9":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1762,"end":1763},"10":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1766,"end":1770},"11":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1764,"end":1765},"12":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1755,"end":1825},"13":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1789,"end":1790},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":7202,"end":7203},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":7209,"end":7210},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":7211,"end":7212},"17":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":7208,"end":7213},"18":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":1989,"end":1994},"19":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":1975,"end":1995},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":10786,"end":10787},"21":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":10782,"end":10811},"22":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":10806,"end":10811},"25":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,
79],"start":10748,"end":10840},"26":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":10794,"end":10811},"27":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1809,"end":1810},"28":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1813,"end":1814},"29":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1811,"end":1812},"30":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1805,"end":1806},"31":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1755,"end":1825},"32":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":2463,"end":2486},"34":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":10826,"end":10830},"35":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":10748,"end":10840},"37":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2006,"end":2078},"38":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2042,"end":2047},"39":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2033,"end":2049},"40":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2020,"end":2050},"41":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2006,"end":2078},"43":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2064,"end":2078},"44":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2006,"end":2078}},"is_native":false},"3":{"definition_location":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2255,"end":2279},"type_parameters":[],"parameters":[["string#0#0",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2280,"end":2286}]],"returns":[{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2298,"end":2302}],"locals":[["$stop#0#6",{"file_hash":[21,247,201,8,68,64,165,87,119,232,179,234,51,204,66,204,125,149,214,14,86,232,139,211,1,65,223,246,65,227,152,78],"start":2398,"end":2403}],["%#2",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":10748,"end":10840}],["i#1#12",{"file_hash":[42,57,43,60,190,49,164,194,57
,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":7202,"end":7203}],["i#1#9",{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1709,"end":1710}],["stop#1#9",{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1733,"end":1737}],["v#1#3",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":7170,"end":7171}]],"nops":{},"code_map":{"0":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2313,"end":2319},"1":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2313,"end":2325},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":7170,"end":7171},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":7186,"end":7187},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":7186,"end":7196},"5":{"file_hash":[21,247,201,8,68,64,165,87,119,232,179,234,51,204,66,204,125,149,214,14,86,232,139,211,1,65,223,246,65,227,152,78],"start":2398,"end":2403},"6":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":2473,"end":2474},"7":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1705,"end":1710},"8":{"file_hash":[21,247,201,8,68,64,165,87,119,232,179,234,51,204,66,204,125,149,214,14,86,232,139,211,1,65,223,246,65,227,152,78],"start":2448,"end":2453},"9":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1733,"end":1737},"10":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1762,"end":1763},"11":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1766,"end":1770},"12":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1764,"end":1765},"13":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1755,"end":1825},"14":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1789,"end":1790},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":7202,"end":7203},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":7209,"end":7210},"17":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":7211,"end":7212},"18":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,24
4,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":7208,"end":7213},"19":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2356,"end":2361},"20":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2338,"end":2362},"21":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":10786,"end":10787},"22":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":10782,"end":10811},"23":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":10806,"end":10811},"26":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":10748,"end":10840},"27":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":10794,"end":10811},"28":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1809,"end":1810},"29":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1813,"end":1814},"30":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1811,"end":1812},"31":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1805,"end":1806},"32":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1755,"end":1825},"33":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":2463,"end":2486},"35":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":10826,"end":10830},"36":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":10748,"end":10840},"38":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2313,"end":2363}},"is_native":false},"4":{"definition_location":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2436,"end":2445},"type_parameters":[],"parameters":[["string#0#0",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2446,"end":2452}],["char#0#0",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2467,"end":2471}]],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2489,"end":2495},"1":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,1
69,168],"start":2489,"end":2501},"2":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2512,"end":2521},"5":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2489,"end":2522},"6":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2522,"end":2523}},"is_native":false},"5":{"definition_location":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2597,"end":2605},"type_parameters":[],"parameters":[["string#0#0",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2606,"end":2612}]],"returns":[{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2628,"end":2632}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2656,"end":2662},"1":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2656,"end":2668},"2":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2656,"end":2679},"3":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2643,"end":2681}},"is_native":false},"6":{"definition_location":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2757,"end":2763},"type_parameters":[],"parameters":[["string#0#0",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2764,"end":2770}]],"returns":[{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2782,"end":2785}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2796,"end":2802},"1":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2796,"end":2813},"2":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2796,"end":2822}},"is_native":false},"7":{"definition_location":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2903,"end":2909},"type_parameters":[],"parameters":[["string#0#0",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2910,"end":2916}],["other#0#0",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2931,"end":2936}]],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152
,34,213,144,41,229,222,224,161,168,169,168],"start":2956,"end":2962},"1":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2956,"end":2968},"2":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2976,"end":2981},"3":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2976,"end":2994},"4":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":2956,"end":2995}},"is_native":false},"8":{"definition_location":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3083,"end":3089},"type_parameters":[],"parameters":[["s#0#0",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3090,"end":3091}],["at#0#0",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3106,"end":3108}],["o#0#0",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3115,"end":3116}]],"returns":[],"locals":[["e#1#2",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3211,"end":3212}],["v#1#1",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":6581,"end":6582}]],"nops":{},"code_map":{"0":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3144,"end":3146},"1":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3150,"end":3151},"3":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3150,"end":3160},"4":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3147,"end":3149},"5":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3136,"end":3176},"9":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3162,"end":3175},"10":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3136,"end":3176},"11":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3186,"end":3187},"12":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3186,"end":3200},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":6577,"end":6582},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":660
5,"end":6606},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":6605,"end":6617},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":6604,"end":6605},"17":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":6597,"end":6635},"18":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":6622,"end":6623},"19":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":6622,"end":6634},"20":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3211,"end":3212},"21":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3214,"end":3215},"22":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3214,"end":3221},"23":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3229,"end":3230},"24":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3232,"end":3234},"25":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3214,"end":3235},"27":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":6645,"end":6662},"29":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":6645,"end":6646},"30":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":6645,"end":6662},"31":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3236,"end":3237}},"is_native":false},"9":{"definition_location":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3336,"end":3345},"type_parameters":[],"parameters":[["string#0#0",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3346,"end":3352}],["i#0#0",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3363,"end":3364}],["j#0#0",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3371,"end":3372}]],"returns":[{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3380,"end":3386}],"locals":[["%#1",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3405,"end":3435}],["bytes#1#0",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179
,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3469,"end":3474}],["i#1#3",{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1709,"end":1710}],["i#1#6",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3511,"end":3512}],["stop#1#3",{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1733,"end":1737}]],"nops":{},"code_map":{"0":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3405,"end":3406},"1":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3410,"end":3411},"2":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3407,"end":3409},"3":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3405,"end":3435},"4":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3415,"end":3416},"5":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3420,"end":3426},"6":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3420,"end":3435},"7":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3417,"end":3419},"8":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3405,"end":3435},"13":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3397,"end":3451},"17":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3437,"end":3450},"18":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3397,"end":3451},"19":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3477,"end":3485},"20":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3465,"end":3474},"21":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3495,"end":3496},"22":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1705,"end":1710},"23":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3507,"end":3508},"24":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1733,"end":1737},"25":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,6
5,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1762,"end":1763},"26":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1766,"end":1770},"27":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1764,"end":1765},"28":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1755,"end":1825},"29":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1789,"end":1790},"30":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3511,"end":3512},"31":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3514,"end":3519},"32":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3530,"end":3536},"33":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3530,"end":3545},"34":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3543,"end":3544},"35":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3530,"end":3545},"37":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3514,"end":3546},"38":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1809,"end":1810},"39":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1813,"end":1814},"40":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1811,"end":1812},"41":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1805,"end":1806},"42":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1755,"end":1825},"43":{"file_hash":[21,247,201,8,68,64,165,87,119,232,179,234,51,204,66,204,125,149,214,14,86,232,139,211,1,65,223,246,65,227,152,78],"start":2039,"end":2080},"45":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3566,"end":3571},"46":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3557,"end":3573}},"is_native":false},"10":{"definition_location":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3655,"end":3663},"type_parameters":[],"parameters":[["string#0#0",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3664,"end":3670}]],"returns":[{"file_hash":[9,21,52,
55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3682,"end":3693}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3705,"end":3711},"1":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3704,"end":3717}},"is_native":false},"11":{"definition_location":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3793,"end":3803},"type_parameters":[],"parameters":[["string#0#0",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3804,"end":3810}]],"returns":[{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3821,"end":3831}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3865,"end":3871},"1":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3846,"end":3862},"2":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3881,"end":3886}},"is_native":false},"12":{"definition_location":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3962,"end":3966},"type_parameters":[],"parameters":[["char#0#0",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3967,"end":3971}]],"returns":[{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3980,"end":3982}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4013,"end":4017},"1":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":3997,"end":4010},"2":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4027,"end":4031}},"is_native":false},"13":{"definition_location":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4147,"end":4160},"type_parameters":[],"parameters":[["b#0#0",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4161,"end":4162}]],"returns":[{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4169,"end":4173}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4184,"end":4185},"1":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168]
,"start":4189,"end":4193},"2":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4186,"end":4188},"3":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4184,"end":4193}},"is_native":false},"14":{"definition_location":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4317,"end":4334},"type_parameters":[],"parameters":[["byte#0#0",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4335,"end":4339}]],"returns":[{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4346,"end":4350}],"locals":[["%#1",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4361,"end":4424}]],"nops":{},"code_map":{"0":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4361,"end":4365},"1":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4369,"end":4373},"2":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4366,"end":4368},"3":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4361,"end":4424},"4":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4412,"end":4416},"5":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4420,"end":4424},"6":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4417,"end":4419},"7":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4361,"end":4424}},"is_native":false},"15":{"definition_location":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4525,"end":4533},"type_parameters":[],"parameters":[["string#0#0",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4534,"end":4540}]],"returns":[{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4552,"end":4556}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4567,"end":4573},"1":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4567,"end":4579},"2":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4567,"end":4590}},"is_native":false},"16":{"definition_location":{"file_hash":[9,21,52,55,227,54,13
3,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4669,"end":4681},"type_parameters":[],"parameters":[["string#0#0",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4682,"end":4688}]],"returns":[{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4700,"end":4706}],"locals":[["$stop#0#6",{"file_hash":[21,247,201,8,68,64,165,87,119,232,179,234,51,204,66,204,125,149,214,14,86,232,139,211,1,65,223,246,65,227,152,78],"start":2398,"end":2403}],["%#2",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4763,"end":4787}],["%#3",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":8170,"end":8171}],["e#1#13",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":8167,"end":8168}],["i#1#12",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":7202,"end":7203}],["i#1#9",{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1709,"end":1710}],["r#1#1",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":8134,"end":8135}],["stop#1#9",{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1733,"end":1737}],["v#1#1",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":8110,"end":8111}],["v#1#3",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":7170,"end":7171}]],"nops":{},"code_map":{"0":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4729,"end":4735},"1":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4729,"end":4746},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":8110,"end":8111},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":8138,"end":8146},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":8130,"end":8135},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":8156,"end":8157},"6":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":7170,"end":7171},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":7186,"end":7187},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231
,168,248,67,118,176,80,79],"start":7186,"end":7196},"9":{"file_hash":[21,247,201,8,68,64,165,87,119,232,179,234,51,204,66,204,125,149,214,14,86,232,139,211,1,65,223,246,65,227,152,78],"start":2398,"end":2403},"10":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":2473,"end":2474},"11":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1705,"end":1710},"12":{"file_hash":[21,247,201,8,68,64,165,87,119,232,179,234,51,204,66,204,125,149,214,14,86,232,139,211,1,65,223,246,65,227,152,78],"start":2448,"end":2453},"13":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1733,"end":1737},"14":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1762,"end":1763},"15":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1766,"end":1770},"16":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1764,"end":1765},"17":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1755,"end":1825},"18":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1789,"end":1790},"19":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":7202,"end":7203},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":7209,"end":7210},"21":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":7211,"end":7212},"22":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":7208,"end":7213},"23":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":8167,"end":8168},"24":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":8170,"end":8171},"26":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":8185,"end":8186},"27":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4781,"end":4786},"28":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4763,"end":4787},"30":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":8170,"end":8171},"31":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4763,"end":4787},"32":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":8170,"end":8188},"33"
:{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1809,"end":1810},"34":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1813,"end":1814},"35":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1811,"end":1812},"36":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1805,"end":1806},"37":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1755,"end":1825},"38":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":2463,"end":2486},"40":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":8199,"end":8200},"41":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4798,"end":4814}},"is_native":false},"17":{"definition_location":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4893,"end":4905},"type_parameters":[],"parameters":[["string#0#0",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4906,"end":4912}]],"returns":[{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4924,"end":4930}],"locals":[["$stop#0#6",{"file_hash":[21,247,201,8,68,64,165,87,119,232,179,234,51,204,66,204,125,149,214,14,86,232,139,211,1,65,223,246,65,227,152,78],"start":2398,"end":2403}],["%#2",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4987,"end":5011}],["%#3",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":8170,"end":8171}],["e#1#13",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":8167,"end":8168}],["i#1#12",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":7202,"end":7203}],["i#1#9",{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1709,"end":1710}],["r#1#1",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":8134,"end":8135}],["stop#1#9",{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1733,"end":1737}],["v#1#1",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":8110,"end":8111}],["v#1#3",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":7170,"end":7171}]],"nops":{},"code_map":{"0":{"file_hash":[9,21,52,55,227,
54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4953,"end":4959},"1":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4953,"end":4970},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":8110,"end":8111},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":8138,"end":8146},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":8130,"end":8135},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":8156,"end":8157},"6":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":7170,"end":7171},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":7186,"end":7187},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":7186,"end":7196},"9":{"file_hash":[21,247,201,8,68,64,165,87,119,232,179,234,51,204,66,204,125,149,214,14,86,232,139,211,1,65,223,246,65,227,152,78],"start":2398,"end":2403},"10":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":2473,"end":2474},"11":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1705,"end":1710},"12":{"file_hash":[21,247,201,8,68,64,165,87,119,232,179,234,51,204,66,204,125,149,214,14,86,232,139,211,1,65,223,246,65,227,152,78],"start":2448,"end":2453},"13":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1733,"end":1737},"14":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1762,"end":1763},"15":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1766,"end":1770},"16":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1764,"end":1765},"17":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1755,"end":1825},"18":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1789,"end":1790},"19":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":7202,"end":7203},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":7209,"end":7210},"21":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":7211,"end":7212},"22":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,2
01,108,231,168,248,67,118,176,80,79],"start":7208,"end":7213},"23":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":8167,"end":8168},"24":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":8170,"end":8171},"26":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":8185,"end":8186},"27":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5005,"end":5010},"28":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4987,"end":5011},"30":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":8170,"end":8171},"31":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":4987,"end":5011},"32":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":8170,"end":8188},"33":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1809,"end":1810},"34":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1813,"end":1814},"35":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1811,"end":1812},"36":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1805,"end":1806},"37":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":1755,"end":1825},"38":{"file_hash":[42,206,130,167,224,5,97,164,247,47,100,189,7,65,58,212,160,115,149,147,11,222,37,216,228,153,123,248,85,93,30,48],"start":2463,"end":2486},"40":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":8199,"end":8200},"41":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5022,"end":5038}},"is_native":false},"18":{"definition_location":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5262,"end":5270},"type_parameters":[],"parameters":[["string#0#0",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5271,"end":5277}],["substr#0#0",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5288,"end":5294}]],"returns":[{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5306,"end":5309}],"locals":[["%#1",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5496,"end":5543}],["i#1#0",{"file_hash":[9,2
1,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5328,"end":5329}],["j#1#0",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5470,"end":5471}],["m#1#0",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5351,"end":5352}],["n#1#0",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5348,"end":5349}]],"nops":{},"code_map":{"0":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5332,"end":5333},"1":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5324,"end":5329},"2":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5357,"end":5363},"3":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5357,"end":5372},"4":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5374,"end":5380},"5":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5374,"end":5389},"6":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5351,"end":5352},"7":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5348,"end":5349},"8":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5404,"end":5405},"9":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5408,"end":5409},"10":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5406,"end":5407},"11":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5400,"end":5419},"12":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5411,"end":5419},"16":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5418,"end":5419},"17":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5411,"end":5419},"18":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5436,"end":5437},"19":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5441,"end":5442},"20":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5445,"end":5446},"21":{"file_hash":[9,21,52,5
5,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5443,"end":5444},"22":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5438,"end":5440},"23":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5429,"end":5622},"24":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5474,"end":5475},"25":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5466,"end":5471},"26":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5496,"end":5497},"27":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5500,"end":5501},"28":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5498,"end":5499},"29":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5496,"end":5543},"31":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5505,"end":5511},"32":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5505,"end":5524},"33":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5518,"end":5519},"34":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5522,"end":5523},"35":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5520,"end":5521},"36":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5505,"end":5524},"38":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5528,"end":5534},"39":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5528,"end":5543},"40":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5541,"end":5542},"41":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5528,"end":5543},"43":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5525,"end":5527},"44":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5496,"end":5543},"50":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5489,"end":5554},"51":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,
242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5549,"end":5550},"52":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5553,"end":5554},"53":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5551,"end":5552},"54":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5545,"end":5546},"55":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5489,"end":5554},"56":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5572,"end":5573},"57":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5577,"end":5578},"58":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5574,"end":5576},"59":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5568,"end":5588},"60":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5580,"end":5588},"64":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5587,"end":5588},"65":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5580,"end":5588},"66":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5606,"end":5607},"67":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5610,"end":5611},"68":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5608,"end":5609},"69":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5602,"end":5603},"70":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5429,"end":5622},"71":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5632,"end":5633}},"is_native":false},"19":{"definition_location":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5703,"end":5720},"type_parameters":[],"parameters":[["byte#0#0",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5721,"end":5725}]],"returns":[{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5732,"end":5734}],"locals":[["%#1",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5749,"end
":5777}],["%#2",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5745,"end":5808}]],"nops":{},"code_map":{"0":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5749,"end":5753},"1":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5757,"end":5761},"2":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5754,"end":5756},"3":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5749,"end":5777},"4":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5765,"end":5769},"5":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5773,"end":5777},"6":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5770,"end":5772},"7":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5749,"end":5777},"12":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5745,"end":5808},"13":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5779,"end":5783},"14":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5786,"end":5790},"15":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5784,"end":5785},"16":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5745,"end":5808},"18":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5804,"end":5808},"19":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5745,"end":5808}},"is_native":false},"20":{"definition_location":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5878,"end":5895},"type_parameters":[],"parameters":[["byte#0#0",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5896,"end":5900}]],"returns":[{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5907,"end":5909}],"locals":[["%#1",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5924,"end":5952}],["%#2",{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5920,"end":5983}]],"nops":{},"code_map":{"0":{"file_hash":[9,21,52,55,227,54
,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5924,"end":5928},"1":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5932,"end":5936},"2":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5929,"end":5931},"3":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5924,"end":5952},"4":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5940,"end":5944},"5":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5948,"end":5952},"6":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5945,"end":5947},"7":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5924,"end":5952},"12":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5920,"end":5983},"13":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5954,"end":5958},"14":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5961,"end":5965},"15":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5959,"end":5960},"16":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5920,"end":5983},"18":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5979,"end":5983},"19":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":5920,"end":5983}},"is_native":false},"21":{"definition_location":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":251,"end":5991},"type_parameters":[],"parameters":[],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[9,21,52,55,227,54,133,179,122,15,179,162,46,242,157,73,245,197,52,62,152,34,213,144,41,229,222,224,161,168,169,168],"start":251,"end":5991}},"is_native":false}},"constant_map":{"EInvalidASCIICharacter":0,"EInvalidIndex":1}} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/source_maps/dependencies/MoveStdlib/string.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/source_maps/dependencies/MoveStdlib/string.json new file mode 100644 index 0000000000000..6025b459b83b1 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/source_maps/dependencies/MoveStdlib/string.json @@ -0,0 +1 @@ 
+{"definition_location":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":180,"end":186},"module_name":["0000000000000000000000000000000000000000000000000000000000000001","string"],"struct_map":{"0":{"definition_location":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":454,"end":460},"type_parameters":[],"fields":[{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":493,"end":498}]}},"enum_map":{},"function_map":{"0":{"definition_location":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":646,"end":650},"type_parameters":[],"parameters":[["bytes#0#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":651,"end":656}]],"returns":[{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":671,"end":677}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":716,"end":722},"1":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":696,"end":723},"2":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":688,"end":738},"4":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":725,"end":737},"5":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":688,"end":738},"6":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":757,"end":762},"7":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":748,"end":764}},"is_native":false},"1":{"definition_location":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":836,"end":846},"type_parameters":[],"parameters":[["s#0#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":847,"end":848}]],"returns":[{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":866,"end":872}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":899,"end":900},"1":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":899,"end":913},"2":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":883,"end":915}},"is_native":false},"2":{"definition_location":{"
file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1030,"end":1038},"type_parameters":[],"parameters":[["s#0#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1039,"end":1040}]],"returns":[{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1051,"end":1064}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1098,"end":1099},"1":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1079,"end":1095},"2":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1109,"end":1132}},"is_native":false},"3":{"definition_location":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1218,"end":1226},"type_parameters":[],"parameters":[["bytes#0#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1227,"end":1232}]],"returns":[{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1247,"end":1261}],"locals":[["%#1",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1272,"end":1363}]],"nops":{},"code_map":{"0":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1296,"end":1302},"1":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1276,"end":1303},"2":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1272,"end":1363},"3":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1327,"end":1332},"4":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1318,"end":1334},"5":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1305,"end":1335},"6":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1272,"end":1363},"8":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1349,"end":1363},"9":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1272,"end":1363}},"is_native":false},"4":{"definition_location":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1445,"end":1453},"type_parameters":[],"parameters":[["s#0
#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1454,"end":1455}]],"returns":[{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1467,"end":1478}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1490,"end":1491},"1":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1489,"end":1497}},"is_native":false},"5":{"definition_location":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1577,"end":1587},"type_parameters":[],"parameters":[["s#0#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1588,"end":1589}]],"returns":[{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1600,"end":1610}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1644,"end":1645},"1":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1625,"end":1641},"2":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1655,"end":1660}},"is_native":false},"6":{"definition_location":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1728,"end":1736},"type_parameters":[],"parameters":[["s#0#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1737,"end":1738}]],"returns":[{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1750,"end":1754}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1765,"end":1766},"1":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1765,"end":1772},"2":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1765,"end":1783}},"is_native":false},"7":{"definition_location":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1859,"end":1865},"type_parameters":[],"parameters":[["s#0#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1866,"end":1867}]],"returns":[{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1879,"end":1882}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[248,228,120,123
,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1893,"end":1894},"1":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1893,"end":1900},"2":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1893,"end":1909}},"is_native":false},"8":{"definition_location":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1958,"end":1964},"type_parameters":[],"parameters":[["s#0#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1965,"end":1966}],["r#0#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":1981,"end":1982}]],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2002,"end":2003},"1":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2002,"end":2009},"2":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2017,"end":2024},"5":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2002,"end":2025}},"is_native":false},"9":{"definition_location":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2106,"end":2117},"type_parameters":[],"parameters":[["s#0#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2118,"end":2119}],["bytes#0#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2134,"end":2139}]],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2163,"end":2164},"1":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2177,"end":2182},"2":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2172,"end":2183},"3":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2163,"end":2184}},"is_native":false},"10":{"definition_location":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2331,"end":2337},"type_parameters":[],"parameters":[["s#0#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2338,"end":2339}],["at#0#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30
,182,122,250,107],"start":2354,"end":2356}],["o#0#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2363,"end":2364}]],"returns":[],"locals":[["%#1",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2435,"end":2495}],["bytes#1#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2388,"end":2393}],["end#1#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2619,"end":2622}],["front#1#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2579,"end":2584}],["l#1#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2547,"end":2548}]],"nops":{},"code_map":{"0":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2397,"end":2398},"1":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2396,"end":2404},"2":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2388,"end":2393},"3":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2435,"end":2437},"4":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2441,"end":2446},"5":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2441,"end":2455},"6":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2438,"end":2440},"7":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2435,"end":2495},"8":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2485,"end":2490},"9":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2492,"end":2494},"10":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2459,"end":2495},"11":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2435,"end":2495},"18":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2414,"end":2533},"22":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2509,"end":2522},"23":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2414,"end
":2533},"24":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2551,"end":2552},"26":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2551,"end":2561},"27":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2547,"end":2548},"28":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2587,"end":2588},"30":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2599,"end":2600},"31":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2602,"end":2604},"32":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2587,"end":2605},"33":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2575,"end":2584},"34":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2625,"end":2626},"36":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2637,"end":2639},"37":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2641,"end":2642},"38":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2625,"end":2643},"39":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2619,"end":2622},"40":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2653,"end":2658},"41":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2666,"end":2667},"42":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2653,"end":2668},"43":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2678,"end":2683},"44":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2691,"end":2694},"45":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2678,"end":2695},"46":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2710,"end":2715},"47":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2706,"end":2707},"48":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,6
8,236,180,20,20,30,182,122,250,107],"start":2705,"end":2715},"49":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":2715,"end":2716}},"is_native":false},"11":{"definition_location":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3037,"end":3046},"type_parameters":[],"parameters":[["s#0#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3047,"end":3048}],["i#0#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3059,"end":3060}],["j#0#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3067,"end":3068}]],"returns":[{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3076,"end":3082}],"locals":[["%#1",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3176,"end":3306}],["bytes#1#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3097,"end":3102}],["l#1#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3127,"end":3128}]],"nops":{},"code_map":{"0":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3106,"end":3107},"1":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3105,"end":3113},"2":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3097,"end":3102},"3":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3131,"end":3136},"4":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3131,"end":3145},"5":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3127,"end":3128},"6":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3176,"end":3177},"7":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3181,"end":3182},"8":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3178,"end":3180},"9":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3176,"end":3306},"10":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3198,"end":3199},"11":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,4
2,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3203,"end":3204},"12":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3200,"end":3202},"13":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3176,"end":3306},"14":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3246,"end":3251},"15":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3253,"end":3254},"16":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3220,"end":3255},"17":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3176,"end":3306},"18":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3297,"end":3302},"19":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3304,"end":3305},"20":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3271,"end":3306},"21":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3176,"end":3306},"32":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3155,"end":3344},"36":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3320,"end":3333},"37":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3155,"end":3344},"38":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3390,"end":3395},"39":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3397,"end":3398},"40":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3400,"end":3401},"41":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3370,"end":3402},"42":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3354,"end":3404}},"is_native":false},"12":{"definition_location":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3544,"end":3552},"type_parameters":[],"parameters":[["s#0#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3553,"end":3554}],["r#0#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66
,131,68,236,180,20,20,30,182,122,250,107],"start":3565,"end":3566}]],"returns":[{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3578,"end":3581}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3611,"end":3612},"1":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3610,"end":3618},"2":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3621,"end":3622},"3":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3620,"end":3628},"4":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3592,"end":3629}},"is_native":false},"13":{"definition_location":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3671,"end":3690},"type_parameters":[],"parameters":[["v#0#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3691,"end":3692}]],"returns":[{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3708,"end":3712}],"locals":[],"nops":{},"code_map":{},"is_native":true},"14":{"definition_location":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3729,"end":3754},"type_parameters":[],"parameters":[["v#0#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3755,"end":3756}],["i#0#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3771,"end":3772}]],"returns":[{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3780,"end":3784}],"locals":[],"nops":{},"code_map":{},"is_native":true},"15":{"definition_location":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3801,"end":3820},"type_parameters":[],"parameters":[["v#0#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3821,"end":3822}],["i#0#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3837,"end":3838}],["j#0#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3845,"end":3846}]],"returns":[{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3854,"end":3864}],"locals":[],"nops":{},"code_map":{},"is_native":true},"16":{"definition_location":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,1
88,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3881,"end":3898},"type_parameters":[],"parameters":[["v#0#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3899,"end":3900}],["r#0#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3915,"end":3916}]],"returns":[{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3932,"end":3935}],"locals":[],"nops":{},"code_map":{},"is_native":true},"17":{"definition_location":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":3970,"end":4001},"type_parameters":[],"parameters":[["v#0#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":4002,"end":4003}],["i#0#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":4018,"end":4019}],["j#0#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":4026,"end":4027}]],"returns":[{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":4035,"end":4045}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":4076,"end":4077},"1":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":4079,"end":4080},"2":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":4082,"end":4083},"3":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":4056,"end":4084}},"is_native":false},"18":{"definition_location":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":4200,"end":4205},"type_parameters":[],"parameters":[["s#0#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":4206,"end":4207}]],"returns":[{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":4219,"end":4230}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":4233,"end":4234},"1":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":4233,"end":4245}},"is_native":false},"19":{"definition_location":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":4331,"end":4341},"type_parameters":[],"parameters":[["s#0#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,16
1,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":4342,"end":4343}],["i#0#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":4354,"end":4355}],["j#0#0",{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":4362,"end":4363}]],"returns":[{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":4371,"end":4377}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":4388,"end":4389},"1":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":4400,"end":4401},"2":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":4403,"end":4404},"3":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":4388,"end":4405}},"is_native":false},"20":{"definition_location":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":168,"end":4413},"type_parameters":[],"parameters":[],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[248,228,120,123,204,53,211,17,237,231,207,64,188,156,42,189,161,175,188,111,66,131,68,236,180,20,20,30,182,122,250,107],"start":168,"end":4413}},"is_native":false}},"constant_map":{"EInvalidIndex":1,"EInvalidUTF8":0}} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/source_maps/dependencies/MoveStdlib/vector.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/source_maps/dependencies/MoveStdlib/vector.json new file mode 100644 index 0000000000000..126416a07eb7e --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/source_maps/dependencies/MoveStdlib/vector.json @@ -0,0 +1 @@ 
+{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":261,"end":267},"module_name":["0000000000000000000000000000000000000000000000000000000000000001","vector"],"struct_map":{},"enum_map":{},"function_map":{"0":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1202,"end":1207},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1208,"end":1215}]],"parameters":[],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1220,"end":1235}],"locals":[],"nops":{},"code_map":{},"is_native":true},"1":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1329,"end":1335},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1336,"end":1343}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1345,"end":1346}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1367,"end":1370}],"locals":[],"nops":{},"code_map":{},"is_native":true},"2":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1563,"end":1569},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1570,"end":1577}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1579,"end":1580}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1600,"end":1601}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1609,"end":1617}],"locals":[],"nops":{},"code_map":{},"is_native":true},"3":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1724,"end":1733},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1734,"end":1741}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1743,"end":1744}],["e#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1768,"end":1769}]],"returns":[],"locals":[],"nops":{},"code_map":{},"is_native":true},"4":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,17
6,80,79],"start":1968,"end":1978},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1979,"end":1986}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1988,"end":1989}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2013,"end":2014}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2022,"end":2034}],"locals":[],"nops":{},"code_map":{},"is_native":true},"5":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2170,"end":2178},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2179,"end":2186}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2188,"end":2189}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2214,"end":2221}],"locals":[],"nops":{},"code_map":{},"is_native":true},"6":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2342,"end":2355},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2356,"end":2363}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2365,"end":2366}]],"returns":[],"locals":[],"nops":{},"code_map":{},"is_native":true},"7":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2561,"end":2565},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2566,"end":2573}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2575,"end":2576}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2600,"end":2601}],["j#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2608,"end":2609}]],"returns":[],"locals":[],"nops":{},"code_map":{},"is_native":true},"8":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2694,"end":2703},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2704,"end":2711}]],"parameters":[["e#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,
126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2713,"end":2714}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2726,"end":2741}],"locals":[["v#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2760,"end":2761}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2764,"end":2771},"1":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2756,"end":2761},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2781,"end":2782},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2793,"end":2794},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2781,"end":2795},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2805,"end":2806}},"is_native":false},"9":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2900,"end":2907},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2908,"end":2915}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2917,"end":2918}]],"returns":[],"locals":[["back_index#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3057,"end":3067}],["front_index#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3024,"end":3035}],["len#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2956,"end":2959}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2962,"end":2963},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2962,"end":2972},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2956,"end":2959},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2986,"end":2989},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2993,"end":2994},"6":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2990,"end":2992},"7":{"file_hash":[42,57,43,60,190,49,1
64,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2982,"end":3005},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2996,"end":3005},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3038,"end":3039},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3020,"end":3035},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3070,"end":3073},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3076,"end":3077},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3074,"end":3075},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3053,"end":3067},"17":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3094,"end":3105},"18":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3108,"end":3118},"19":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3106,"end":3107},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3087,"end":3260},"21":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3134,"end":3135},"22":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3141,"end":3152},"23":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3154,"end":3164},"24":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3134,"end":3165},"25":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3193,"end":3204},"26":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3207,"end":3208},"27":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3205,"end":3206},"28":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3179,"end":3190},"29":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3235,"end":3245},"30":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3248,"end":3249},"31":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,24
4,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3246,"end":3247},"32":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3222,"end":3232},"33":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3087,"end":3260}},"is_native":false},"10":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3363,"end":3369},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3370,"end":3377}]],"parameters":[["lhs#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3379,"end":3382}],["other#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3410,"end":3415}]],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3444,"end":3449},"1":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3444,"end":3459},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3477,"end":3482},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3477,"end":3493},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3476,"end":3477},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3469,"end":3526},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3495,"end":3498},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3509,"end":3514},"9":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3509,"end":3525},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3495,"end":3526},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3469,"end":3526},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3536,"end":3557},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3536,"end":3541},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3536,"end":3557},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,
79],"start":3557,"end":3558}},"is_native":false},"11":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3660,"end":3668},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3669,"end":3676}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3678,"end":3679}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3700,"end":3704}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3715,"end":3716},"1":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3715,"end":3725},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3729,"end":3730},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3726,"end":3728},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3715,"end":3730}},"is_native":false},"12":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3836,"end":3844},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3845,"end":3852}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3854,"end":3855}],["e#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3875,"end":3876}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3889,"end":3893}],"locals":[["i#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3912,"end":3913}],["len#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3931,"end":3934}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3916,"end":3917},"1":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3908,"end":3913},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3937,"end":3938},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3937,"end":3947},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,1
70,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3931,"end":3934},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3964,"end":3965},"6":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3968,"end":3971},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3966,"end":3967},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3957,"end":4048},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3992,"end":3993},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3994,"end":3995},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3991,"end":3996},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4000,"end":4001},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3997,"end":3999},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3987,"end":4014},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4003,"end":4014},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4010,"end":4014},"21":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4003,"end":4014},"22":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4032,"end":4033},"23":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4036,"end":4037},"24":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4034,"end":4035},"25":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4028,"end":4029},"26":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3957,"end":4048},"27":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4058,"end":4063}},"is_native":false},"13":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4196,"end":4204},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4205,"end":4212}]],"parameters":[["v#0#0",{"fi
le_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4214,"end":4215}],["e#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4235,"end":4236}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4250,"end":4254},{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4256,"end":4259}],"locals":[["i#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4279,"end":4280}],["len#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4298,"end":4301}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4283,"end":4284},"1":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4275,"end":4280},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4304,"end":4305},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4304,"end":4314},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4298,"end":4301},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4331,"end":4332},"6":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4335,"end":4338},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4333,"end":4334},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4324,"end":4420},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4359,"end":4360},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4361,"end":4362},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4358,"end":4363},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4367,"end":4368},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4364,"end":4366},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4354,"end":4386},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4370,"end":4386}
,"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4378,"end":4382},"21":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4384,"end":4385},"22":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4370,"end":4386},"23":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4404,"end":4405},"24":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4408,"end":4409},"25":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4406,"end":4407},"26":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4400,"end":4401},"27":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4324,"end":4420},"28":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4430,"end":4440},"32":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4431,"end":4436},"33":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4438,"end":4439},"34":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4430,"end":4440}},"is_native":false},"14":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4660,"end":4666},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4667,"end":4674}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4676,"end":4677}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4705,"end":4706}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4714,"end":4721}],"locals":[["%#1",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4897,"end":4898}],["%#2",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4890,"end":4891}],["len#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4740,"end":4743}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4746,"end":4747},"2":{"file_hash":[42,57,43,60,190,49,164,194,57
,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4746,"end":4756},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4736,"end":4743},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4804,"end":4805},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4809,"end":4812},"6":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4806,"end":4808},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4800,"end":4840},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4814,"end":4840},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4820,"end":4840},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4814,"end":4840},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4857,"end":4860},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4863,"end":4864},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4861,"end":4862},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4851,"end":4854},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4881,"end":4882},"17":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4885,"end":4888},"18":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4883,"end":4884},"19":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4874,"end":4917},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4890,"end":4891},"22":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4897,"end":4898},"24":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4906,"end":4907},"25":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4910,"end":4911},"26":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4908,"end":4909},"27":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,1
20,5,88,201,108,231,168,248,67,118,176,80,79],"start":4902,"end":4903},"28":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4890,"end":4891},"29":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4897,"end":4898},"30":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4913,"end":4914},"31":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4890,"end":4917},"32":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4874,"end":4917},"33":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4927,"end":4928},"34":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4927,"end":4939}},"is_native":false},"15":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5290,"end":5296},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5297,"end":5304}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5306,"end":5307}],["e#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5331,"end":5332}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5347,"end":5348}]],"returns":[],"locals":[["len#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5369,"end":5372}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5375,"end":5376},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5375,"end":5385},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5369,"end":5372},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5426,"end":5427},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5430,"end":5433},"6":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5428,"end":5429},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5422,"end":5461},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"sta
rt":5435,"end":5461},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5441,"end":5461},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5435,"end":5461},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5472,"end":5473},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5484,"end":5485},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5472,"end":5486},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5503,"end":5504},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5507,"end":5510},"17":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5505,"end":5506},"18":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5496,"end":5573},"19":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5526,"end":5527},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5533,"end":5534},"21":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5536,"end":5539},"22":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5526,"end":5540},"23":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5558,"end":5559},"24":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5562,"end":5563},"25":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5560,"end":5561},"26":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5554,"end":5555},"27":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5496,"end":5573}},"is_native":false},"16":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5812,"end":5823},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5824,"end":5831}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5833,"end":5834}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,1
44,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5858,"end":5859}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5867,"end":5874}],"locals":[["last_idx#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5943,"end":5951}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5894,"end":5895},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5894,"end":5906},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5893,"end":5894},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5885,"end":5929},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5908,"end":5928},"9":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5885,"end":5929},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5954,"end":5955},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5954,"end":5964},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5967,"end":5968},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5965,"end":5966},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5943,"end":5951},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5978,"end":5979},"17":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5985,"end":5986},"18":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5988,"end":5996},"19":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5978,"end":5997},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":6007,"end":6008},"21":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":6007,"end":6019}},"is_native":false},"17":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":249,"end":13981},"type_parameters":[],"parameters":[],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,1
08,231,168,248,67,118,176,80,79],"start":249,"end":13981}},"is_native":false}},"constant_map":{"EINDEX_OUT_OF_BOUNDS":0}} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/source_maps/m.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/source_maps/m.json new file mode 100644 index 0000000000000..2f18d6290535c --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/source_maps/m.json @@ -0,0 +1 @@ +{"definition_location":{"file_hash":[96,202,232,81,251,88,80,137,130,45,249,50,192,96,183,155,242,16,21,43,196,137,239,206,72,116,144,91,239,92,58,221],"start":70,"end":71},"module_name":["0000000000000000000000000000000000000000000000000000000000000000","m"],"struct_map":{},"enum_map":{},"function_map":{"0":{"definition_location":{"file_hash":[96,202,232,81,251,88,80,137,130,45,249,50,192,96,183,155,242,16,21,43,196,137,239,206,72,116,144,91,239,92,58,221],"start":122,"end":125},"type_parameters":[],"parameters":[["s#0#0",{"file_hash":[96,202,232,81,251,88,80,137,130,45,249,50,192,96,183,155,242,16,21,43,196,137,239,206,72,116,144,91,239,92,58,221],"start":126,"end":127}],["sub#0#0",{"file_hash":[96,202,232,81,251,88,80,137,130,45,249,50,192,96,183,155,242,16,21,43,196,137,239,206,72,116,144,91,239,92,58,221],"start":137,"end":140}],["p#0#0",{"file_hash":[96,202,232,81,251,88,80,137,130,45,249,50,192,96,183,155,242,16,21,43,196,137,239,206,72,116,144,91,239,92,58,221],"start":154,"end":155}]],"returns":[{"file_hash":[96,202,232,81,251,88,80,137,130,45,249,50,192,96,183,155,242,16,21,43,196,137,239,206,72,116,144,91,239,92,58,221],"start":163,"end":166}],"locals":[["%#1",{"file_hash":[96,202,232,81,251,88,80,137,130,45,249,50,192,96,183,155,242,16,21,43,196,137,239,206,72,116,144,91,239,92,58,221],"start":185,"end":194}],["%#2",{"file_hash":[96,202,232,81,251,88,80,137,130,45,249,50,192,96,183,155,242,16,21,43,196,137,239,206,72,116,144,91,239,92,58,221],"start":173,"end":174}]],"nops":{},"code_map":{"0":{"file_hash":[96,202,232,81,251,88,80,137,130,45,249,50,192,96,183,155,242,16,21,43,196,137,239,206,72,116,144,91,239,92,58,221],"start":173,"end":174},"2":{"file_hash":[96,202,232,81,251,88,80,137,130,45,249,50,192,96,183,155,242,16,21,43,196,137,239,206,72,116,144,91,239,92,58,221],"start":190,"end":193},"3":{"file_hash":[96,202,232,81,251,88,80,137,130,45,249,50,192,96,183,155,242,16,21,43,196,137,239,206,72,116,144,91,239,92,58,221],"start":185,"end":194},"5":{"file_hash":[96,202,232,81,251,88,80,137,130,45,249,50,192,96,183,155,242,16,21,43,196,137,239,206,72,116,144,91,239,92,58,221],"start":173,"end":174},"6":{"file_hash":[96,202,232,81,251,88,80,137,130,45,249,50,192,96,183,155,242,16,21,43,196,137,239,206,72,116,144,91,239,92,58,221],"start":184,"end":194},"7":{"file_hash":[96,202,232,81,251,88,80,137,130,45,249,50,192,96,183,155,242,16,21,43,196,137,239,206,72,116,144,91,239,92,58,221],"start":173,"end":195},"8":{"file_hash":[96,202,232,81,251,88,80,137,130,45,249,50,192,96,183,155,242,16,21,43,196,137,239,206,72,116,144,91,239,92,58,221],"start":198,"end":199},"9":{"file_hash":[96,202,232,81,251,88,80,137,130,45,249,50,192,96,183,155,242,16,21,43,196,137,239,206,72,116,144,91,239,92,58,221],"start":196,"end":197},"10":{"file_hash":[96,202,232,81,251,88,80,137,130,45,249,50,192,96,183,155,242,16,21,43,196,137,239,206,72,116,144,91,239,92,58,221],"start":173,"end":199}},"is_native":false},"1":{"definition_loca
tion":{"file_hash":[96,202,232,81,251,88,80,137,130,45,249,50,192,96,183,155,242,16,21,43,196,137,239,206,72,116,144,91,239,92,58,221],"start":215,"end":219},"type_parameters":[],"parameters":[],"returns":[],"locals":[["_res#1#0",{"file_hash":[96,202,232,81,251,88,80,137,130,45,249,50,192,96,183,155,242,16,21,43,196,137,239,206,72,116,144,91,239,92,58,221],"start":236,"end":240}]],"nops":{},"code_map":{"0":{"file_hash":[96,202,232,81,251,88,80,137,130,45,249,50,192,96,183,155,242,16,21,43,196,137,239,206,72,116,144,91,239,92,58,221],"start":252,"end":260},"1":{"file_hash":[96,202,232,81,251,88,80,137,130,45,249,50,192,96,183,155,242,16,21,43,196,137,239,206,72,116,144,91,239,92,58,221],"start":247,"end":261},"2":{"file_hash":[96,202,232,81,251,88,80,137,130,45,249,50,192,96,183,155,242,16,21,43,196,137,239,206,72,116,144,91,239,92,58,221],"start":263,"end":267},"3":{"file_hash":[96,202,232,81,251,88,80,137,130,45,249,50,192,96,183,155,242,16,21,43,196,137,239,206,72,116,144,91,239,92,58,221],"start":269,"end":271},"4":{"file_hash":[96,202,232,81,251,88,80,137,130,45,249,50,192,96,183,155,242,16,21,43,196,137,239,206,72,116,144,91,239,92,58,221],"start":243,"end":272},"5":{"file_hash":[96,202,232,81,251,88,80,137,130,45,249,50,192,96,183,155,242,16,21,43,196,137,239,206,72,116,144,91,239,92,58,221],"start":232,"end":240},"6":{"file_hash":[96,202,232,81,251,88,80,137,130,45,249,50,192,96,183,155,242,16,21,43,196,137,239,206,72,116,144,91,239,92,58,221],"start":285,"end":289},"7":{"file_hash":[96,202,232,81,251,88,80,137,130,45,249,50,192,96,183,155,242,16,21,43,196,137,239,206,72,116,144,91,239,92,58,221],"start":301,"end":309},"8":{"file_hash":[96,202,232,81,251,88,80,137,130,45,249,50,192,96,183,155,242,16,21,43,196,137,239,206,72,116,144,91,239,92,58,221],"start":296,"end":310},"9":{"file_hash":[96,202,232,81,251,88,80,137,130,45,249,50,192,96,183,155,242,16,21,43,196,137,239,206,72,116,144,91,239,92,58,221],"start":312,"end":316},"10":{"file_hash":[96,202,232,81,251,88,80,137,130,45,249,50,192,96,183,155,242,16,21,43,196,137,239,206,72,116,144,91,239,92,58,221],"start":318,"end":322},"11":{"file_hash":[96,202,232,81,251,88,80,137,130,45,249,50,192,96,183,155,242,16,21,43,196,137,239,206,72,116,144,91,239,92,58,221],"start":292,"end":323},"12":{"file_hash":[96,202,232,81,251,88,80,137,130,45,249,50,192,96,183,155,242,16,21,43,196,137,239,206,72,116,144,91,239,92,58,221],"start":290,"end":291},"13":{"file_hash":[96,202,232,81,251,88,80,137,130,45,249,50,192,96,183,155,242,16,21,43,196,137,239,206,72,116,144,91,239,92,58,221],"start":278,"end":282},"14":{"file_hash":[96,202,232,81,251,88,80,137,130,45,249,50,192,96,183,155,242,16,21,43,196,137,239,206,72,116,144,91,239,92,58,221],"start":323,"end":324}},"is_native":false},"2":{"definition_location":{"file_hash":[96,202,232,81,251,88,80,137,130,45,249,50,192,96,183,155,242,16,21,43,196,137,239,206,72,116,144,91,239,92,58,221],"start":51,"end":385},"type_parameters":[],"parameters":[],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[96,202,232,81,251,88,80,137,130,45,249,50,192,96,183,155,242,16,21,43,196,137,239,206,72,116,144,91,239,92,58,221],"start":51,"end":385}},"is_native":false}},"constant_map":{}} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/sources/dependencies/MoveStdlib/ascii.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/sources/dependencies/MoveStdlib/ascii.move new file mode 
100644 index 0000000000000..60564b49893a1 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/sources/dependencies/MoveStdlib/ascii.move @@ -0,0 +1,166 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +/// The `ASCII` module defines basic string and char newtypes in Move that verify +/// that characters are valid ASCII, and that strings consist of only valid ASCII characters. +module std::ascii { + // Allows calling `.to_string()` to convert an `ascii::String` into as `string::String` + public use fun std::string::from_ascii as String.to_string; + + /// An invalid ASCII character was encountered when creating an ASCII string. + const EInvalidASCIICharacter: u64 = 0x10000; + /// An invalid index was encountered when creating a substring. + const EInvalidIndex: u64 = 0x10001; + + /// The `String` struct holds a vector of bytes that all represent + /// valid ASCII characters. Note that these ASCII characters may not all + /// be printable. To determine if a `String` contains only "printable" + /// characters you should use the `all_characters_printable` predicate + /// defined in this module. + public struct String has copy, drop, store { + bytes: vector<u8>, + } + + /// An ASCII character. + public struct Char has copy, drop, store { + byte: u8, + } + + /// Convert a `byte` into a `Char` that is checked to make sure it is valid ASCII. + public fun char(byte: u8): Char { + assert!(is_valid_char(byte), EInvalidASCIICharacter); + Char { byte } + } + + /// Convert a vector of bytes `bytes` into an `String`. Aborts if + /// `bytes` contains non-ASCII characters. + public fun string(bytes: vector<u8>): String { + let x = try_string(bytes); + assert!(x.is_some(), EInvalidASCIICharacter); + x.destroy_some() + } + + /// Convert a vector of bytes `bytes` into an `String`. Returns + /// `Some(<ascii_string>)` if the `bytes` contains all valid ASCII + /// characters. Otherwise returns `None`. + public fun try_string(bytes: vector<u8>): Option<String> { + let is_valid = bytes.all!(|byte| is_valid_char(*byte)); + if (is_valid) option::some(String { bytes }) + else option::none() + } + + /// Returns `true` if all characters in `string` are printable characters + /// Returns `false` otherwise. Not all `String`s are printable strings. + public fun all_characters_printable(string: &String): bool { + string.bytes.all!(|byte| is_printable_char(*byte)) + } + + /// Push a `Char` to the end of the `string`. + public fun push_char(string: &mut String, char: Char) { + string.bytes.push_back(char.byte); + } + + /// Pop a `Char` from the end of the `string`. + public fun pop_char(string: &mut String): Char { + Char { byte: string.bytes.pop_back() } + } + + /// Returns the length of the `string` in bytes. + public fun length(string: &String): u64 { + string.as_bytes().length() + } + + /// Append the `other` string to the end of `string`. + public fun append(string: &mut String, other: String) { + string.bytes.append(other.into_bytes()) + } + + /// Insert the `other` string at the `at` index of `string`. + public fun insert(s: &mut String, at: u64, o: String) { + assert!(at <= s.length(), EInvalidIndex); + o.into_bytes().destroy!(|e| s.bytes.insert(e, at)); + } + + /// Copy the slice of the `string` from `i` to `j` into a new `String`.
+ public fun substring(string: &String, i: u64, j: u64): String { + assert!(i <= j && j <= string.length(), EInvalidIndex); + let mut bytes = vector[]; + i.range_do!(j, |i| bytes.push_back(string.bytes[i])); + String { bytes } + } + + /// Get the inner bytes of the `string` as a reference + public fun as_bytes(string: &String): &vector<u8> { + &string.bytes + } + + /// Unpack the `string` to get its backing bytes + public fun into_bytes(string: String): vector<u8> { + let String { bytes } = string; + bytes + } + + /// Unpack the `char` into its underlying bytes. + public fun byte(char: Char): u8 { + let Char { byte } = char; + byte + } + + /// Returns `true` if `b` is a valid ASCII character. + /// Returns `false` otherwise. + public fun is_valid_char(b: u8): bool { + b <= 0x7F + } + + /// Returns `true` if `byte` is an printable ASCII character. + /// Returns `false` otherwise. + public fun is_printable_char(byte: u8): bool { + byte >= 0x20 && // Disallow metacharacters + byte <= 0x7E // Don't allow DEL metacharacter + } + + /// Returns `true` if `string` is empty. + public fun is_empty(string: &String): bool { + string.bytes.is_empty() + } + + /// Convert a `string` to its uppercase equivalent. + public fun to_uppercase(string: &String): String { + let bytes = string.as_bytes().map_ref!(|byte| char_to_uppercase(*byte)); + String { bytes } + } + + /// Convert a `string` to its lowercase equivalent. + public fun to_lowercase(string: &String): String { + let bytes = string.as_bytes().map_ref!(|byte| char_to_lowercase(*byte)); + String { bytes } + } + + /// Computes the index of the first occurrence of the `substr` in the `string`. + /// Returns the length of the `string` if the `substr` is not found. + /// Returns 0 if the `substr` is empty. + public fun index_of(string: &String, substr: &String): u64 { + let mut i = 0; + let (n, m) = (string.length(), substr.length()); + if (n < m) return n; + while (i <= n - m) { + let mut j = 0; + while (j < m && string.bytes[i + j] == substr.bytes[j]) j = j + 1; + if (j == m) return i; + i = i + 1; + }; + n + } + + /// Convert a `char` to its lowercase equivalent. + fun char_to_uppercase(byte: u8): u8 { + if (byte >= 0x61 && byte <= 0x7A) byte - 0x20 + else byte + } + + /// Convert a `char` to its lowercase equivalent. + fun char_to_lowercase(byte: u8): u8 { + if (byte >= 0x41 && byte <= 0x5A) byte + 0x20 + else byte + } +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/sources/dependencies/MoveStdlib/string.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/sources/dependencies/MoveStdlib/string.move new file mode 100644 index 0000000000000..0939b2cbe45f3 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/sources/dependencies/MoveStdlib/string.move @@ -0,0 +1,137 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +/// The `string` module defines the `String` type which represents UTF8 encoded +/// strings. +module std::string { + use std::ascii; + + /// An invalid UTF8 encoding. + const EInvalidUTF8: u64 = 1; + + /// Index out of range. + const EInvalidIndex: u64 = 2; + + /// A `String` holds a sequence of bytes which is guaranteed to be in utf8 + /// format. + public struct String has copy, drop, store { + bytes: vector<u8>, + } + + /// Creates a new string from a sequence of bytes. Aborts if the bytes do + /// not represent valid utf8.
+ public fun utf8(bytes: vector<u8>): String { + assert!(internal_check_utf8(&bytes), EInvalidUTF8); + String { bytes } + } + + /// Convert an ASCII string to a UTF8 string + public fun from_ascii(s: ascii::String): String { + String { bytes: s.into_bytes() } + } + + /// Convert an UTF8 string to an ASCII string. + /// Aborts if `s` is not valid ASCII + public fun to_ascii(s: String): ascii::String { + let String { bytes } = s; + bytes.to_ascii_string() + } + + /// Tries to create a new string from a sequence of bytes. + public fun try_utf8(bytes: vector<u8>): Option<String> { + if (internal_check_utf8(&bytes)) option::some(String { bytes }) + else option::none() + } + + /// Returns a reference to the underlying byte vector. + public fun as_bytes(s: &String): &vector<u8> { + &s.bytes + } + + /// Unpack the `string` to get its underlying bytes. + public fun into_bytes(s: String): vector<u8> { + let String { bytes } = s; + bytes + } + + /// Checks whether this string is empty. + public fun is_empty(s: &String): bool { + s.bytes.is_empty() + } + + /// Returns the length of this string, in bytes. + public fun length(s: &String): u64 { + s.bytes.length() + } + + /// Appends a string. + public fun append(s: &mut String, r: String) { + s.bytes.append(r.bytes) + } + + /// Appends bytes which must be in valid utf8 format. + public fun append_utf8(s: &mut String, bytes: vector<u8>) { + s.append(utf8(bytes)) + } + + /// Insert the other string at the byte index in given string. The index + /// must be at a valid utf8 char boundary. + public fun insert(s: &mut String, at: u64, o: String) { + let bytes = &s.bytes; + assert!( + at <= bytes.length() && internal_is_char_boundary(bytes, at), + EInvalidIndex, + ); + let l = s.length(); + let mut front = s.substring(0, at); + let end = s.substring(at, l); + front.append(o); + front.append(end); + *s = front; + } + + /// Returns a sub-string using the given byte indices, where `i` is the first + /// byte position and `j` is the start of the first byte not included (or the + /// length of the string). The indices must be at valid utf8 char boundaries, + /// guaranteeing that the result is valid utf8. + public fun substring(s: &String, i: u64, j: u64): String { + let bytes = &s.bytes; + let l = bytes.length(); + assert!( + j <= l && + i <= j && + internal_is_char_boundary(bytes, i) && + internal_is_char_boundary(bytes, j), + EInvalidIndex, + ); + String { bytes: internal_sub_string(bytes, i, j) } + } + + /// Computes the index of the first occurrence of a string. Returns `s.length()` + /// if no occurrence found.
+ public fun index_of(s: &String, r: &String): u64 { + internal_index_of(&s.bytes, &r.bytes) + } + + // Native API + + native fun internal_check_utf8(v: &vector<u8>): bool; + native fun internal_is_char_boundary(v: &vector<u8>, i: u64): bool; + native fun internal_sub_string(v: &vector<u8>, i: u64, j: u64): vector<u8>; + native fun internal_index_of(v: &vector<u8>, r: &vector<u8>): u64; + + #[test_only] + public fun internal_sub_string_for_testing(v: &vector<u8>, i: u64, j: u64): vector<u8> { + internal_sub_string(v, i, j) + } + + // === Deprecated === + + #[deprecated(note = b"Use `std::string::as_bytes` instead.")] + public fun bytes(s: &String): &vector<u8> { s.as_bytes() } + + #[deprecated(note = b"Use `std::string::substring` instead.")] + public fun sub_string(s: &String, i: u64, j: u64): String { + s.substring(i, j) + } +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/sources/dependencies/MoveStdlib/vector.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/sources/dependencies/MoveStdlib/vector.move new file mode 100644 index 0000000000000..55c1abac34b74 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/sources/dependencies/MoveStdlib/vector.move @@ -0,0 +1,364 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +#[defines_primitive(vector)] +/// A variable-sized container that can hold any type. Indexing is 0-based, and +/// vectors are growable. This module has many native functions. +module std::vector { + /// Allows calling `.to_string()` on a vector of `u8` to get a utf8 `String`. + public use fun std::string::utf8 as vector.to_string; + + /// Allows calling `.try_to_string()` on a vector of `u8` to get a utf8 `String`. + /// This will return `None` if the vector is not valid utf8. + public use fun std::string::try_utf8 as vector.try_to_string; + + /// Allows calling `.to_ascii_string()` on a vector of `u8` to get an `ascii::String`. + public use fun std::ascii::string as vector.to_ascii_string; + + /// Allows calling `.try_to_ascii_string()` on a vector of `u8` to get an + /// `ascii::String`. This will return `None` if the vector is not valid ascii. + public use fun std::ascii::try_string as vector.try_to_ascii_string; + + /// The index into the vector is out of bounds + const EINDEX_OUT_OF_BOUNDS: u64 = 0x20000; + + #[bytecode_instruction] + /// Create an empty vector. + public native fun empty<Element>(): vector<Element>; + + #[bytecode_instruction] + /// Return the length of the vector. + public native fun length<Element>(v: &vector<Element>): u64; + + #[syntax(index)] + #[bytecode_instruction] + /// Acquire an immutable reference to the `i`th element of the vector `v`. + /// Aborts if `i` is out of bounds. + public native fun borrow<Element>(v: &vector<Element>, i: u64): &Element; + + #[bytecode_instruction] + /// Add element `e` to the end of the vector `v`. + public native fun push_back<Element>(v: &mut vector<Element>, e: Element); + + #[syntax(index)] + #[bytecode_instruction] + /// Return a mutable reference to the `i`th element in the vector `v`. + /// Aborts if `i` is out of bounds. + public native fun borrow_mut<Element>(v: &mut vector<Element>, i: u64): &mut Element; + + #[bytecode_instruction] + /// Pop an element from the end of vector `v`. + /// Aborts if `v` is empty. + public native fun pop_back<Element>(v: &mut vector<Element>): Element; + + #[bytecode_instruction] + /// Destroy the vector `v`. + /// Aborts if `v` is not empty.
+ public native fun destroy_empty<Element>(v: vector<Element>); + + #[bytecode_instruction] + /// Swaps the elements at the `i`th and `j`th indices in the vector `v`. + /// Aborts if `i` or `j` is out of bounds. + public native fun swap<Element>(v: &mut vector<Element>, i: u64, j: u64); + + /// Return an vector of size one containing element `e`. + public fun singleton<Element>(e: Element): vector<Element> { + let mut v = empty(); + v.push_back(e); + v + } + + /// Reverses the order of the elements in the vector `v` in place. + public fun reverse<Element>(v: &mut vector<Element>) { + let len = v.length(); + if (len == 0) return (); + + let mut front_index = 0; + let mut back_index = len - 1; + while (front_index < back_index) { + v.swap(front_index, back_index); + front_index = front_index + 1; + back_index = back_index - 1; + } + } + + /// Pushes all of the elements of the `other` vector into the `lhs` vector. + public fun append<Element>(lhs: &mut vector<Element>, mut other: vector<Element>) { + other.reverse(); + while (!other.is_empty()) lhs.push_back(other.pop_back()); + other.destroy_empty(); + } + + /// Return `true` if the vector `v` has no elements and `false` otherwise. + public fun is_empty<Element>(v: &vector<Element>): bool { + v.length() == 0 + } + + /// Return true if `e` is in the vector `v`. + /// Otherwise, returns false. + public fun contains<Element>(v: &vector<Element>, e: &Element): bool { + let mut i = 0; + let len = v.length(); + while (i < len) { + if (&v[i] == e) return true; + i = i + 1; + }; + false + } + + /// Return `(true, i)` if `e` is in the vector `v` at index `i`. + /// Otherwise, returns `(false, 0)`. + public fun index_of<Element>(v: &vector<Element>, e: &Element): (bool, u64) { + let mut i = 0; + let len = v.length(); + while (i < len) { + if (&v[i] == e) return (true, i); + i = i + 1; + }; + (false, 0) + } + + /// Remove the `i`th element of the vector `v`, shifting all subsequent elements. + /// This is O(n) and preserves ordering of elements in the vector. + /// Aborts if `i` is out of bounds. + public fun remove<Element>(v: &mut vector<Element>, mut i: u64): Element { + let mut len = v.length(); + // i out of bounds; abort + if (i >= len) abort EINDEX_OUT_OF_BOUNDS; + + len = len - 1; + while (i < len) v.swap(i, { i = i + 1; i }); + v.pop_back() + } + + /// Insert `e` at position `i` in the vector `v`. + /// If `i` is in bounds, this shifts the old `v[i]` and all subsequent elements to the right. + /// If `i == v.length()`, this adds `e` to the end of the vector. + /// This is O(n) and preserves ordering of elements in the vector. + /// Aborts if `i > v.length()` + public fun insert<Element>(v: &mut vector<Element>, e: Element, mut i: u64) { + let len = v.length(); + // i too big abort + if (i > len) abort EINDEX_OUT_OF_BOUNDS; + + v.push_back(e); + while (i < len) { + v.swap(i, len); + i = i + 1 + } + } + + /// Swap the `i`th element of the vector `v` with the last element and then pop the vector. + /// This is O(1), but does not preserve ordering of elements in the vector. + /// Aborts if `i` is out of bounds. + public fun swap_remove<Element>(v: &mut vector<Element>, i: u64): Element { + assert!(!v.is_empty(), EINDEX_OUT_OF_BOUNDS); + let last_idx = v.length() - 1; + v.swap(i, last_idx); + v.pop_back() + } + + // === Macros === + + /// Create a vector of length `n` by calling the function `f` on each index. + public macro fun tabulate<$T>($n: u64, $f: |u64| -> $T): vector<$T> { + let mut v = vector[]; + let n = $n; + n.do!(|i| v.push_back($f(i))); + v + } + + /// Destroy the vector `v` by calling `f` on each element and then destroying the vector. + /// Does not preserve the order of elements in the vector (starts from the end of the vector).
+ public macro fun destroy<$T>($v: vector<$T>, $f: |$T|) { + let mut v = $v; + while (!v.is_empty()) $f(v.pop_back()); + v.destroy_empty(); + } + + /// Destroy the vector `v` by calling `f` on each element and then destroying the vector. + /// Preserves the order of elements in the vector. + public macro fun do<$T>($v: vector<$T>, $f: |$T|) { + let mut v = $v; + v.reverse(); + while (!v.is_empty()) $f(v.pop_back()); + v.destroy_empty(); + } + + /// Perform an action `f` on each element of the vector `v`. The vector is not modified. + public macro fun do_ref<$T>($v: &vector<$T>, $f: |&$T|) { + let v = $v; + v.length().do!(|i| $f(&v[i])) + } + + /// Perform an action `f` on each element of the vector `v`. + /// The function `f` takes a mutable reference to the element. + public macro fun do_mut<$T>($v: &mut vector<$T>, $f: |&mut $T|) { + let v = $v; + v.length().do!(|i| $f(&mut v[i])) + } + + /// Map the vector `v` to a new vector by applying the function `f` to each element. + /// Preserves the order of elements in the vector, first is called first. + public macro fun map<$T, $U>($v: vector<$T>, $f: |$T| -> $U): vector<$U> { + let v = $v; + let mut r = vector[]; + v.do!(|e| r.push_back($f(e))); + r + } + + /// Map the vector `v` to a new vector by applying the function `f` to each element. + /// Preserves the order of elements in the vector, first is called first. + public macro fun map_ref<$T, $U>($v: &vector<$T>, $f: |&$T| -> $U): vector<$U> { + let v = $v; + let mut r = vector[]; + v.do_ref!(|e| r.push_back($f(e))); + r + } + + /// Filter the vector `v` by applying the function `f` to each element. + /// Return a new vector containing only the elements for which `f` returns `true`. + public macro fun filter<$T: drop>($v: vector<$T>, $f: |&$T| -> bool): vector<$T> { + let v = $v; + let mut r = vector[]; + v.do!(|e| if ($f(&e)) r.push_back(e)); + r + } + + /// Split the vector `v` into two vectors by applying the function `f` to each element. + /// Return a tuple containing two vectors: the first containing the elements for which `f` returns `true`, + /// and the second containing the elements for which `f` returns `false`. + public macro fun partition<$T>($v: vector<$T>, $f: |&$T| -> bool): (vector<$T>, vector<$T>) { + let v = $v; + let mut r1 = vector[]; + let mut r2 = vector[]; + v.do!(|e| if ($f(&e)) r1.push_back(e) else r2.push_back(e)); + (r1, r2) + } + + /// Finds the index of the first element in the vector `v` that satisfies the predicate `f`. + /// Returns `some(index)` if such an element is found, otherwise `none()`. + public macro fun find_index<$T>($v: &vector<$T>, $f: |&$T| -> bool): Option<u64> { + let v = $v; + 'find_index: { + v.length().do!(|i| if ($f(&v[i])) return 'find_index option::some(i)); + option::none() + } + } + + /// Count how many elements in the vector `v` satisfy the predicate `f`. + public macro fun count<$T>($v: &vector<$T>, $f: |&$T| -> bool): u64 { + let v = $v; + let mut count = 0; + v.do_ref!(|e| if ($f(e)) count = count + 1); + count + } + + /// Reduce the vector `v` to a single value by applying the function `f` to each element. + /// Similar to `fold_left` in Rust and `reduce` in Python and JavaScript. + public macro fun fold<$T, $Acc>($v: vector<$T>, $init: $Acc, $f: |$Acc, $T| -> $Acc): $Acc { + let v = $v; + let mut acc = $init; + v.do!(|e| acc = $f(acc, e)); + acc + } + + /// Whether any element in the vector `v` satisfies the predicate `f`. + /// If the vector is empty, returns `false`.
+ public macro fun any<$T>($v: &vector<$T>, $f: |&$T| -> bool): bool { + let v = $v; + 'any: { + v.do_ref!(|e| if ($f(e)) return 'any true); + false + } + } + + /// Whether all elements in the vector `v` satisfy the predicate `f`. + /// If the vector is empty, returns `true`. + public macro fun all<$T>($v: &vector<$T>, $f: |&$T| -> bool): bool { + let v = $v; + 'all: { + v.do_ref!(|e| if (!$f(e)) return 'all false); + true + } + } + + /// Destroys two vectors `v1` and `v2` by calling `f` to each pair of elements. + /// Aborts if the vectors are not of the same length. + /// The order of elements in the vectors is preserved. + public macro fun zip_do<$T1, $T2>($v1: vector<$T1>, $v2: vector<$T2>, $f: |$T1, $T2|) { + let v1 = $v1; + let mut v2 = $v2; + v2.reverse(); + let len = v1.length(); + assert!(len == v2.length()); + v1.do!(|el1| $f(el1, v2.pop_back())); + } + + /// Destroys two vectors `v1` and `v2` by calling `f` to each pair of elements. + /// Aborts if the vectors are not of the same length. + /// Starts from the end of the vectors. + public macro fun zip_do_reverse<$T1, $T2>($v1: vector<$T1>, $v2: vector<$T2>, $f: |$T1, $T2|) { + let v1 = $v1; + let mut v2 = $v2; + let len = v1.length(); + assert!(len == v2.length()); + v1.destroy!(|el1| $f(el1, v2.pop_back())); + } + + /// Iterate through `v1` and `v2` and apply the function `f` to references of each pair of + /// elements. The vectors are not modified. + /// Aborts if the vectors are not of the same length. + /// The order of elements in the vectors is preserved. + public macro fun zip_do_ref<$T1, $T2>($v1: &vector<$T1>, $v2: &vector<$T2>, $f: |&$T1, &$T2|) { + let v1 = $v1; + let v2 = $v2; + let len = v1.length(); + assert!(len == v2.length()); + len.do!(|i| $f(&v1[i], &v2[i])); + } + + /// Iterate through `v1` and `v2` and apply the function `f` to mutable references of each pair + /// of elements. The vectors may be modified. + /// Aborts if the vectors are not of the same length. + /// The order of elements in the vectors is preserved. + public macro fun zip_do_mut<$T1, $T2>( + $v1: &mut vector<$T1>, + $v2: &mut vector<$T2>, + $f: |&mut $T1, &mut $T2|, + ) { + let v1 = $v1; + let v2 = $v2; + let len = v1.length(); + assert!(len == v2.length()); + len.do!(|i| $f(&mut v1[i], &mut v2[i])); + } + + /// Destroys two vectors `v1` and `v2` by applying the function `f` to each pair of elements. + /// The returned values are collected into a new vector. + /// Aborts if the vectors are not of the same length. + /// The order of elements in the vectors is preserved. + public macro fun zip_map<$T1, $T2, $U>( + $v1: vector<$T1>, + $v2: vector<$T2>, + $f: |$T1, $T2| -> $U, + ): vector<$U> { + let mut r = vector[]; + zip_do!($v1, $v2, |el1, el2| r.push_back($f(el1, el2))); + r + } + + /// Iterate through `v1` and `v2` and apply the function `f` to references of each pair of + /// elements. The returned values are collected into a new vector. + /// Aborts if the vectors are not of the same length. + /// The order of elements in the vectors is preserved. 
+ public macro fun zip_map_ref<$T1, $T2, $U>( + $v1: &vector<$T1>, + $v2: &vector<$T2>, + $f: |&$T1, &$T2| -> $U, + ): vector<$U> { + let mut r = vector[]; + zip_do_ref!($v1, $v2, |el1, el2| r.push_back($f(el1, el2))); + r + } +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/sources/m.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/sources/m.move new file mode 100644 index 0000000000000..1c64328ea361b --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/build/native_fun/sources/m.move @@ -0,0 +1,14 @@ +// Test native function execution (string index_of). +module native_fun::m; + +use std::string::{String, utf8, index_of}; + +fun foo(s: String, sub: vector<u8>, p: u64): u64 { + s.index_of(&utf8(sub)) + p +} + +#[test] +fun test() { + let mut _res = foo(utf8(b"hello"), b"e", 42); + _res = _res + foo(utf8(b"hello"), b"l", _res); // to force another unoptimized read to keep `res` visible +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/sources/m.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/sources/m.move new file mode 100644 index 0000000000000..1c64328ea361b --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/sources/m.move @@ -0,0 +1,14 @@ +// Test native function execution (string index_of). +module native_fun::m; + +use std::string::{String, utf8, index_of}; + +fun foo(s: String, sub: vector<u8>, p: u64): u64 { + s.index_of(&utf8(sub)) + p +} + +#[test] +fun test() { + let mut _res = foo(utf8(b"hello"), b"e", 42); + _res = _res + foo(utf8(b"hello"), b"l", _res); // to force another unoptimized read to keep `res` visible +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/test.exp b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/test.exp new file mode 100644 index 0000000000000..480d23d264c6f --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/test.exp @@ -0,0 +1,6 @@ +current frame stack: + function: test (line 13) + scope 0 : + _res : 43 + type: u64 + diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/trace.spec.js b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/trace.spec.js new file mode 100644 index 0000000000000..2c2fa18537ff9 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/trace.spec.js @@ -0,0 +1,8 @@ +let action = (runtime) => { + let res = ''; + // step over a function containing a native call + runtime.step(true); + res += runtime.toString(); + return res; +}; +run_spec(__dirname, action); diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/traces/native_fun__m__test.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/traces/native_fun__m__test.json new file mode 100644 index 0000000000000..979d3eac38d65 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/native_fun/traces/native_fun__m__test.json @@ -0,0 +1 @@
+{"version":1,"events":[{"OpenFrame":{"frame":{"frame_id":0,"function_name":"test","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":1,"type_instantiation":[],"parameters":[],"return_types":[],"locals_types":[{"type_":"u64","ref_type":null}],"is_native":false},"gas_left":1000000000}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999992,"instruction":"LD_CONST"}},{"Effect":{"Push":{"RuntimeValue":{"value":[104,101,108,108,111]}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999992,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":4,"function_name":"utf8","module":{"address":"0000000000000000000000000000000000000000000000000000000000000001","name":"string"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":[104,101,108,108,111]}}],"return_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000001","module":"string","name":"String","type_args":[]}},"ref_type":null}],"locals_types":[{"type_":{"vector":"u8"},"ref_type":null}],"is_native":false},"gas_left":999999992}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999981,"instruction":"IMM_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,0]},"root_value_read":{"RuntimeValue":{"value":[104,101,108,108,111]}},"moved":false}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[4,0]},"snapshot":[104,101,108,108,111]}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999981,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":9,"function_name":"internal_check_utf8","module":{"address":"0000000000000000000000000000000000000000000000000000000000000001","name":"string"},"binary_member_index":13,"type_instantiation":[],"parameters":[{"ImmRef":{"location":{"Local":[4,0]},"snapshot":[104,101,108,108,111]}}],"return_types":[{"type_":"bool","ref_type":null}],"locals_types":[{"type_":{"vector":"u8"},"ref_type":"Imm"}],"is_native":true},"gas_left":999999981}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"CloseFrame":{"frame_id":9,"return_":[{"RuntimeValue":{"value":true}}],"gas_left":999999900}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999899,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999898,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999891,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,0]},"root_value_read":{"RuntimeValue":{"value":[104,101,108,108,111]}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":[104,101,108,108,111]}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999887,"instruction":"PACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":[104,101,108,108,111]}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999886,"instruction":"RET"}},{"CloseFrame":{"frame_id":4,"return_":[{"RuntimeValue":{"value":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}}],"gas_left":999999886}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999882,"instruction":"LD_CONST"}},{"Effect":{"Push":{"RuntimeValue":{"value":[101]}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999879,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"
pc":4,"gas_left":999999879,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":28,"function_name":"foo","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}},{"RuntimeValue":{"value":[101]}},{"RuntimeValue":{"value":42}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000001","module":"string","name":"String","type_args":[]}},"ref_type":null},{"type_":{"vector":"u8"},"ref_type":null},{"type_":"u64","ref_type":null},{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000001","module":"string","name":"String","type_args":[]}},"ref_type":null},{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000001","module":"string","name":"String","type_args":[]}},"ref_type":"Imm"}],"is_native":false},"gas_left":999999879}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999868,"instruction":"IMM_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[28,0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}},"moved":false}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[28,0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999867,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Local":[28,0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}}}},{"Effect":{"Write":{"location":{"Local":[28,4]},"root_value_after_write":{"ImmRef":{"location":{"Local":[28,0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999864,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[28,1]},"root_value_read":{"RuntimeValue":{"value":[101]}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":[101]}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999864,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":39,"function_name":"utf8","module":{"address":"0000000000000000000000000000000000000000000000000000000000000001","name":"string"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":[101]}}],"return_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000001","module":"string","name":"String","type_args":[]}},"ref_type":null}],"locals_types":[{"type_":{"vector":"u8"},"ref_type":null}],"is_native":false},"gas_left":999999864}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999853,"instruction":"IMM_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[39,0]},"root_value_read":{"RuntimeValue":{"value":[101]}},"moved":false}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[39,0]},"snapshot":[101]}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999853,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":44,"function_name":"internal_check_utf8","module":{"address":"0000000000000000000000000000000000000000000000000000000000000001","name":"string"},"binary_member_index":13,"type_instantiation":[],"parameters":[{"ImmRef":{"location":{"Local":[39,0]},"snapshot":[101]}}],"return_types":[{"
type_":"bool","ref_type":null}],"locals_types":[{"type_":{"vector":"u8"},"ref_type":"Imm"}],"is_native":true},"gas_left":999999853}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"CloseFrame":{"frame_id":44,"return_":[{"RuntimeValue":{"value":true}}],"gas_left":999999780}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999779,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999778,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999775,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[39,0]},"root_value_read":{"RuntimeValue":{"value":[101]}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":[101]}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999771,"instruction":"PACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":[101]}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x1::string::String","fields":{"bytes":[101]}}}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999770,"instruction":"RET"}},{"CloseFrame":{"frame_id":39,"return_":[{"RuntimeValue":{"value":{"type":"0x1::string::String","fields":{"bytes":[101]}}}}],"gas_left":999999770}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999769,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x1::string::String","fields":{"bytes":[101]}}}}}},{"Effect":{"Write":{"location":{"Local":[28,3]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x1::string::String","fields":{"bytes":[101]}}}}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999759,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[28,4]},"root_value_read":{"ImmRef":{"location":{"Local":[28,0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}},"moved":true}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[28,0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999749,"instruction":"IMM_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[28,3]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x1::string::String","fields":{"bytes":[101]}}}},"moved":false}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[28,3]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[101]}}}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999749,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":68,"function_name":"index_of","module":{"address":"0000000000000000000000000000000000000000000000000000000000000001","name":"string"},"binary_member_index":12,"type_instantiation":[],"parameters":[{"ImmRef":{"location":{"Local":[28,0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}},{"ImmRef":{"location":{"Local":[28,3]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[101]}}}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000001","module":"string","name":"String","type_args":[]}},"ref_type":"Imm"},{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000001","module":"string","name":"String","type_args":[]}},"ref_type":"Imm"}],"is_native":false},"gas_left":999999749}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999738,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local"
:[68,0]},"root_value_read":{"ImmRef":{"location":{"Local":[28,0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}},"moved":true}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[28,0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999728,"instruction":"IMM_BORROW_FIELD"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Local":[28,0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Local":[28,0]},0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999718,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[68,1]},"root_value_read":{"ImmRef":{"location":{"Local":[28,3]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[101]}}}},"moved":true}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[28,3]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[101]}}}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999708,"instruction":"IMM_BORROW_FIELD"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Local":[28,3]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[101]}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Local":[28,3]},0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[101]}}}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999708,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":82,"function_name":"internal_index_of","module":{"address":"0000000000000000000000000000000000000000000000000000000000000001","name":"string"},"binary_member_index":16,"type_instantiation":[],"parameters":[{"ImmRef":{"location":{"Indexed":[{"Local":[28,0]},0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}},{"ImmRef":{"location":{"Indexed":[{"Local":[28,3]},0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[101]}}}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":{"vector":"u8"},"ref_type":"Imm"},{"type_":{"vector":"u8"},"ref_type":"Imm"}],"is_native":true},"gas_left":999999708}},{"Effect":{"Push":{"RuntimeValue":{"value":1}}}},{"CloseFrame":{"frame_id":82,"return_":[{"RuntimeValue":{"value":1}}],"gas_left":999999633}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999632,"instruction":"RET"}},{"CloseFrame":{"frame_id":68,"return_":[{"RuntimeValue":{"value":1}}],"gas_left":999999632}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999614,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[28,2]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999999611,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":1}}}},{"Effect":{"Push":{"RuntimeValue":{"value":43}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999999610,"instruction":"RET"}},{"CloseFrame":{"frame_id":28,"return_":[{"RuntimeValue":{"value":43}}],"gas_left":999999610}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999609,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":43}}}},{"Effect":{"Write":{"location":{"Local":[0,0]},"root_value_after_write":{"RuntimeValue":{"value":43}}}}},{"Instruction":{"type_parameters":[],"pc
":6,"gas_left":999999591,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[0,0]},"root_value_read":{"RuntimeValue":{"value":43}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":43}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999583,"instruction":"LD_CONST"}},{"Effect":{"Push":{"RuntimeValue":{"value":[104,101,108,108,111]}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999583,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":105,"function_name":"utf8","module":{"address":"0000000000000000000000000000000000000000000000000000000000000001","name":"string"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":[104,101,108,108,111]}}],"return_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000001","module":"string","name":"String","type_args":[]}},"ref_type":null}],"locals_types":[{"type_":{"vector":"u8"},"ref_type":null}],"is_native":false},"gas_left":999999583}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999572,"instruction":"IMM_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[105,0]},"root_value_read":{"RuntimeValue":{"value":[104,101,108,108,111]}},"moved":false}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[105,0]},"snapshot":[104,101,108,108,111]}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999572,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":110,"function_name":"internal_check_utf8","module":{"address":"0000000000000000000000000000000000000000000000000000000000000001","name":"string"},"binary_member_index":13,"type_instantiation":[],"parameters":[{"ImmRef":{"location":{"Local":[105,0]},"snapshot":[104,101,108,108,111]}}],"return_types":[{"type_":"bool","ref_type":null}],"locals_types":[{"type_":{"vector":"u8"},"ref_type":"Imm"}],"is_native":true},"gas_left":999999572}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"CloseFrame":{"frame_id":110,"return_":[{"RuntimeValue":{"value":true}}],"gas_left":999999491}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999490,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999489,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999482,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[105,0]},"root_value_read":{"RuntimeValue":{"value":[104,101,108,108,111]}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":[104,101,108,108,111]}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999478,"instruction":"PACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":[104,101,108,108,111]}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999477,"instruction":"RET"}},{"CloseFrame":{"frame_id":105,"return_":[{"RuntimeValue":{"value":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}}],"gas_left":999999477}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999999473,"instruction":"LD_CONST"}},{"Effect":{"Push":{"RuntimeValue":{"value":[108]}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999999455,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[0,0]},"root_value_read":{"RuntimeValue":{"value":43}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":43}}}},{"Instruction":{"type_parameters":[],"pc":11,"gas_le
ft":999999455,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":130,"function_name":"foo","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}},{"RuntimeValue":{"value":[108]}},{"RuntimeValue":{"value":43}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000001","module":"string","name":"String","type_args":[]}},"ref_type":null},{"type_":{"vector":"u8"},"ref_type":null},{"type_":"u64","ref_type":null},{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000001","module":"string","name":"String","type_args":[]}},"ref_type":null},{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000001","module":"string","name":"String","type_args":[]}},"ref_type":"Imm"}],"is_native":false},"gas_left":999999455}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999444,"instruction":"IMM_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[130,0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}},"moved":false}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[130,0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999443,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Local":[130,0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}}}},{"Effect":{"Write":{"location":{"Local":[130,4]},"root_value_after_write":{"ImmRef":{"location":{"Local":[130,0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999440,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[130,1]},"root_value_read":{"RuntimeValue":{"value":[108]}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":[108]}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999440,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":141,"function_name":"utf8","module":{"address":"0000000000000000000000000000000000000000000000000000000000000001","name":"string"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":[108]}}],"return_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000001","module":"string","name":"String","type_args":[]}},"ref_type":null}],"locals_types":[{"type_":{"vector":"u8"},"ref_type":null}],"is_native":false},"gas_left":999999440}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999429,"instruction":"IMM_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[141,0]},"root_value_read":{"RuntimeValue":{"value":[108]}},"moved":false}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[141,0]},"snapshot":[108]}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999429,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":146,"function_name":"internal_check_utf8","module":{"address":"0000000000000000000000000000000000000000000000000000000000000001","name":"string"},"binary_member_index":13,"type_instantiation":[],"parameters":[{"ImmRef":{"location":{"Local":[141,0]},"snapshot":[108]}}],"return_types":[{"t
ype_":"bool","ref_type":null}],"locals_types":[{"type_":{"vector":"u8"},"ref_type":"Imm"}],"is_native":true},"gas_left":999999429}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"CloseFrame":{"frame_id":146,"return_":[{"RuntimeValue":{"value":true}}],"gas_left":999999356}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999355,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999354,"instruction":"BRANCH"}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999351,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[141,0]},"root_value_read":{"RuntimeValue":{"value":[108]}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":[108]}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999347,"instruction":"PACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":[108]}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x1::string::String","fields":{"bytes":[108]}}}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999346,"instruction":"RET"}},{"CloseFrame":{"frame_id":141,"return_":[{"RuntimeValue":{"value":{"type":"0x1::string::String","fields":{"bytes":[108]}}}}],"gas_left":999999346}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999345,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x1::string::String","fields":{"bytes":[108]}}}}}},{"Effect":{"Write":{"location":{"Local":[130,3]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x1::string::String","fields":{"bytes":[108]}}}}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999335,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[130,4]},"root_value_read":{"ImmRef":{"location":{"Local":[130,0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}},"moved":true}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[130,0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999325,"instruction":"IMM_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[130,3]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x1::string::String","fields":{"bytes":[108]}}}},"moved":false}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[130,3]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[108]}}}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999325,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":170,"function_name":"index_of","module":{"address":"0000000000000000000000000000000000000000000000000000000000000001","name":"string"},"binary_member_index":12,"type_instantiation":[],"parameters":[{"ImmRef":{"location":{"Local":[130,0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}},{"ImmRef":{"location":{"Local":[130,3]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[108]}}}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000001","module":"string","name":"String","type_args":[]}},"ref_type":"Imm"},{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000001","module":"string","name":"String","type_args":[]}},"ref_type":"Imm"}],"is_native":false},"gas_left":999999325}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999314,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"locatio
n":{"Local":[170,0]},"root_value_read":{"ImmRef":{"location":{"Local":[130,0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}},"moved":true}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[130,0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999304,"instruction":"IMM_BORROW_FIELD"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Local":[130,0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Local":[130,0]},0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999294,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[170,1]},"root_value_read":{"ImmRef":{"location":{"Local":[130,3]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[108]}}}},"moved":true}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[130,3]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[108]}}}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999284,"instruction":"IMM_BORROW_FIELD"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Local":[130,3]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[108]}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Local":[130,3]},0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[108]}}}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999284,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":184,"function_name":"internal_index_of","module":{"address":"0000000000000000000000000000000000000000000000000000000000000001","name":"string"},"binary_member_index":16,"type_instantiation":[],"parameters":[{"ImmRef":{"location":{"Indexed":[{"Local":[130,0]},0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[104,101,108,108,111]}}}},{"ImmRef":{"location":{"Indexed":[{"Local":[130,3]},0]},"snapshot":{"type":"0x1::string::String","fields":{"bytes":[108]}}}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":{"vector":"u8"},"ref_type":"Imm"},{"type_":{"vector":"u8"},"ref_type":"Imm"}],"is_native":true},"gas_left":999999284}},{"Effect":{"Push":{"RuntimeValue":{"value":2}}}},{"CloseFrame":{"frame_id":184,"return_":[{"RuntimeValue":{"value":2}}],"gas_left":999999207}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999206,"instruction":"RET"}},{"CloseFrame":{"frame_id":170,"return_":[{"RuntimeValue":{"value":2}}],"gas_left":999999206}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999188,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[130,2]},"root_value_read":{"RuntimeValue":{"value":43}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":43}}}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999999185,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":43}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":2}}}},{"Effect":{"Push":{"RuntimeValue":{"value":45}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999999184,"instruction":"RET"}},{"CloseFrame":{"frame_id":130,"return_":[{"RuntimeValue":{"value":45}}],"gas_left":999999184}},{"Instruction":{"type_parameters":[],"pc":12,"gas_left":999999181,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":45}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":43}}}},{"Effect":{"Push":{"RuntimeValue":{"value":88}}}},{"Instruction":{"typ
e_parameters":[],"pc":13,"gas_left":999999180,"instruction":"POP"}},{"Effect":{"Pop":{"RuntimeValue":{"value":88}}}},{"Instruction":{"type_parameters":[],"pc":14,"gas_left":999999179,"instruction":"RET"}},{"CloseFrame":{"frame_id":0,"return_":[],"gas_left":999999179}}]} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/Move.toml b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/Move.toml new file mode 100644 index 0000000000000..441337ac1f606 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/Move.toml @@ -0,0 +1,10 @@ +[package] +name = "references" +edition = "2024.beta" + +[dependencies] +MoveStdlib = { local = "../../../../move-stdlib" } + +[addresses] +references = "0x0" +std = "0x1" diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/bytecode_modules/dependencies/MoveStdlib/vector.mv b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/bytecode_modules/dependencies/MoveStdlib/vector.mv new file mode 100644 index 0000000000000..79c6d2eb99157 Binary files /dev/null and b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/bytecode_modules/dependencies/MoveStdlib/vector.mv differ diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/bytecode_modules/m.mv b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/bytecode_modules/m.mv new file mode 100644 index 0000000000000..d9ae7daf44225 Binary files /dev/null and b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/bytecode_modules/m.mv differ diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/source_maps/dependencies/MoveStdlib/vector.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/source_maps/dependencies/MoveStdlib/vector.json new file mode 100644 index 0000000000000..126416a07eb7e --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/source_maps/dependencies/MoveStdlib/vector.json @@ -0,0 +1 @@ 
+{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":261,"end":267},"module_name":["0000000000000000000000000000000000000000000000000000000000000001","vector"],"struct_map":{},"enum_map":{},"function_map":{"0":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1202,"end":1207},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1208,"end":1215}]],"parameters":[],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1220,"end":1235}],"locals":[],"nops":{},"code_map":{},"is_native":true},"1":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1329,"end":1335},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1336,"end":1343}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1345,"end":1346}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1367,"end":1370}],"locals":[],"nops":{},"code_map":{},"is_native":true},"2":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1563,"end":1569},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1570,"end":1577}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1579,"end":1580}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1600,"end":1601}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1609,"end":1617}],"locals":[],"nops":{},"code_map":{},"is_native":true},"3":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1724,"end":1733},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1734,"end":1741}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1743,"end":1744}],["e#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1768,"end":1769}]],"returns":[],"locals":[],"nops":{},"code_map":{},"is_native":true},"4":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,17
6,80,79],"start":1968,"end":1978},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1979,"end":1986}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1988,"end":1989}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2013,"end":2014}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2022,"end":2034}],"locals":[],"nops":{},"code_map":{},"is_native":true},"5":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2170,"end":2178},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2179,"end":2186}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2188,"end":2189}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2214,"end":2221}],"locals":[],"nops":{},"code_map":{},"is_native":true},"6":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2342,"end":2355},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2356,"end":2363}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2365,"end":2366}]],"returns":[],"locals":[],"nops":{},"code_map":{},"is_native":true},"7":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2561,"end":2565},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2566,"end":2573}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2575,"end":2576}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2600,"end":2601}],["j#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2608,"end":2609}]],"returns":[],"locals":[],"nops":{},"code_map":{},"is_native":true},"8":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2694,"end":2703},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2704,"end":2711}]],"parameters":[["e#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,
126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2713,"end":2714}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2726,"end":2741}],"locals":[["v#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2760,"end":2761}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2764,"end":2771},"1":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2756,"end":2761},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2781,"end":2782},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2793,"end":2794},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2781,"end":2795},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2805,"end":2806}},"is_native":false},"9":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2900,"end":2907},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2908,"end":2915}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2917,"end":2918}]],"returns":[],"locals":[["back_index#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3057,"end":3067}],["front_index#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3024,"end":3035}],["len#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2956,"end":2959}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2962,"end":2963},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2962,"end":2972},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2956,"end":2959},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2986,"end":2989},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2993,"end":2994},"6":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2990,"end":2992},"7":{"file_hash":[42,57,43,60,190,49,1
64,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2982,"end":3005},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2996,"end":3005},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3038,"end":3039},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3020,"end":3035},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3070,"end":3073},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3076,"end":3077},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3074,"end":3075},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3053,"end":3067},"17":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3094,"end":3105},"18":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3108,"end":3118},"19":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3106,"end":3107},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3087,"end":3260},"21":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3134,"end":3135},"22":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3141,"end":3152},"23":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3154,"end":3164},"24":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3134,"end":3165},"25":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3193,"end":3204},"26":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3207,"end":3208},"27":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3205,"end":3206},"28":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3179,"end":3190},"29":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3235,"end":3245},"30":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3248,"end":3249},"31":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,24
4,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3246,"end":3247},"32":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3222,"end":3232},"33":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3087,"end":3260}},"is_native":false},"10":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3363,"end":3369},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3370,"end":3377}]],"parameters":[["lhs#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3379,"end":3382}],["other#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3410,"end":3415}]],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3444,"end":3449},"1":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3444,"end":3459},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3477,"end":3482},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3477,"end":3493},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3476,"end":3477},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3469,"end":3526},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3495,"end":3498},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3509,"end":3514},"9":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3509,"end":3525},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3495,"end":3526},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3469,"end":3526},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3536,"end":3557},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3536,"end":3541},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3536,"end":3557},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,
79],"start":3557,"end":3558}},"is_native":false},"11":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3660,"end":3668},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3669,"end":3676}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3678,"end":3679}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3700,"end":3704}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3715,"end":3716},"1":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3715,"end":3725},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3729,"end":3730},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3726,"end":3728},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3715,"end":3730}},"is_native":false},"12":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3836,"end":3844},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3845,"end":3852}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3854,"end":3855}],["e#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3875,"end":3876}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3889,"end":3893}],"locals":[["i#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3912,"end":3913}],["len#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3931,"end":3934}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3916,"end":3917},"1":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3908,"end":3913},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3937,"end":3938},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3937,"end":3947},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,1
70,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3931,"end":3934},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3964,"end":3965},"6":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3968,"end":3971},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3966,"end":3967},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3957,"end":4048},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3992,"end":3993},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3994,"end":3995},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3991,"end":3996},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4000,"end":4001},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3997,"end":3999},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3987,"end":4014},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4003,"end":4014},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4010,"end":4014},"21":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4003,"end":4014},"22":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4032,"end":4033},"23":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4036,"end":4037},"24":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4034,"end":4035},"25":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4028,"end":4029},"26":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3957,"end":4048},"27":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4058,"end":4063}},"is_native":false},"13":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4196,"end":4204},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4205,"end":4212}]],"parameters":[["v#0#0",{"fi
le_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4214,"end":4215}],["e#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4235,"end":4236}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4250,"end":4254},{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4256,"end":4259}],"locals":[["i#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4279,"end":4280}],["len#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4298,"end":4301}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4283,"end":4284},"1":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4275,"end":4280},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4304,"end":4305},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4304,"end":4314},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4298,"end":4301},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4331,"end":4332},"6":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4335,"end":4338},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4333,"end":4334},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4324,"end":4420},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4359,"end":4360},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4361,"end":4362},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4358,"end":4363},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4367,"end":4368},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4364,"end":4366},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4354,"end":4386},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4370,"end":4386}
,"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4378,"end":4382},"21":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4384,"end":4385},"22":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4370,"end":4386},"23":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4404,"end":4405},"24":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4408,"end":4409},"25":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4406,"end":4407},"26":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4400,"end":4401},"27":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4324,"end":4420},"28":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4430,"end":4440},"32":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4431,"end":4436},"33":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4438,"end":4439},"34":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4430,"end":4440}},"is_native":false},"14":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4660,"end":4666},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4667,"end":4674}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4676,"end":4677}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4705,"end":4706}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4714,"end":4721}],"locals":[["%#1",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4897,"end":4898}],["%#2",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4890,"end":4891}],["len#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4740,"end":4743}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4746,"end":4747},"2":{"file_hash":[42,57,43,60,190,49,164,194,57
,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4746,"end":4756},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4736,"end":4743},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4804,"end":4805},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4809,"end":4812},"6":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4806,"end":4808},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4800,"end":4840},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4814,"end":4840},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4820,"end":4840},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4814,"end":4840},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4857,"end":4860},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4863,"end":4864},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4861,"end":4862},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4851,"end":4854},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4881,"end":4882},"17":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4885,"end":4888},"18":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4883,"end":4884},"19":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4874,"end":4917},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4890,"end":4891},"22":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4897,"end":4898},"24":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4906,"end":4907},"25":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4910,"end":4911},"26":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4908,"end":4909},"27":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,1
20,5,88,201,108,231,168,248,67,118,176,80,79],"start":4902,"end":4903},"28":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4890,"end":4891},"29":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4897,"end":4898},"30":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4913,"end":4914},"31":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4890,"end":4917},"32":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4874,"end":4917},"33":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4927,"end":4928},"34":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4927,"end":4939}},"is_native":false},"15":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5290,"end":5296},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5297,"end":5304}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5306,"end":5307}],["e#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5331,"end":5332}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5347,"end":5348}]],"returns":[],"locals":[["len#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5369,"end":5372}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5375,"end":5376},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5375,"end":5385},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5369,"end":5372},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5426,"end":5427},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5430,"end":5433},"6":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5428,"end":5429},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5422,"end":5461},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"sta
rt":5435,"end":5461},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5441,"end":5461},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5435,"end":5461},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5472,"end":5473},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5484,"end":5485},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5472,"end":5486},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5503,"end":5504},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5507,"end":5510},"17":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5505,"end":5506},"18":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5496,"end":5573},"19":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5526,"end":5527},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5533,"end":5534},"21":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5536,"end":5539},"22":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5526,"end":5540},"23":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5558,"end":5559},"24":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5562,"end":5563},"25":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5560,"end":5561},"26":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5554,"end":5555},"27":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5496,"end":5573}},"is_native":false},"16":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5812,"end":5823},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5824,"end":5831}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5833,"end":5834}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,1
44,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5858,"end":5859}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5867,"end":5874}],"locals":[["last_idx#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5943,"end":5951}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5894,"end":5895},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5894,"end":5906},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5893,"end":5894},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5885,"end":5929},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5908,"end":5928},"9":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5885,"end":5929},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5954,"end":5955},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5954,"end":5964},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5967,"end":5968},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5965,"end":5966},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5943,"end":5951},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5978,"end":5979},"17":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5985,"end":5986},"18":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5988,"end":5996},"19":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5978,"end":5997},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":6007,"end":6008},"21":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":6007,"end":6019}},"is_native":false},"17":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":249,"end":13981},"type_parameters":[],"parameters":[],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,1
08,231,168,248,67,118,176,80,79],"start":249,"end":13981}},"is_native":false}},"constant_map":{"EINDEX_OUT_OF_BOUNDS":0}} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/source_maps/m.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/source_maps/m.json new file mode 100644 index 0000000000000..3b00c96ae5de9 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/source_maps/m.json @@ -0,0 +1 @@ +{"definition_location":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":54,"end":55},"module_name":["0000000000000000000000000000000000000000000000000000000000000000","m"],"struct_map":{"0":{"definition_location":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":72,"end":82},"type_parameters":[],"fields":[{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":98,"end":110},{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":130,"end":142},{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":153,"end":169}]},"1":{"definition_location":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":201,"end":213},"type_parameters":[],"fields":[{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":235,"end":240}]}},"enum_map":{},"function_map":{"0":{"definition_location":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":254,"end":257},"type_parameters":[],"parameters":[["some_struct_ref#0#0",{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":263,"end":278}],["vec_ref#0#0",{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":301,"end":308}],["num_ref#0#0",{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":332,"end":339}]],"returns":[{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":350,"end":353}],"locals":[["e1#1#0",{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":455,"end":457}],["e2#1#0",{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":545,"end":547}]],"nops":{},"code_map":{"0":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":397,"end":399},"1":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":360,"end":375},"2":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,20
1,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":360,"end":394},"4":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":360,"end":399},"5":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":437,"end":444},"6":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":436,"end":444},"7":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":405,"end":420},"8":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":405,"end":433},"9":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":405,"end":444},"10":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":484,"end":499},"11":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":479,"end":516},"12":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":518,"end":519},"13":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":460,"end":520},"14":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":455,"end":457},"15":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":532,"end":534},"16":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":527,"end":529},"17":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":526,"end":534},"18":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":569,"end":576},"19":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":578,"end":579},"20":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":550,"end":580},"21":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":545,"end":547},"22":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":592,"end":594},"23":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":587,"end":589},"24":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":586,"end":594},"25":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":601
,"end":608},"26":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":600,"end":608},"27":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":611,"end":626},"28":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":611,"end":639},"30":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":609,"end":610},"31":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":642,"end":649},"33":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":650,"end":651},"34":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":642,"end":652},"36":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":640,"end":641},"37":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":600,"end":652}},"is_native":false},"1":{"definition_location":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":660,"end":671},"type_parameters":[],"parameters":[],"returns":[{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":675,"end":685}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":749,"end":750},"1":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":727,"end":752},"2":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":776,"end":777},"3":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":823,"end":824},"4":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":805,"end":825},"5":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":692,"end":832}},"is_native":false},"2":{"definition_location":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":848,"end":852},"type_parameters":[],"parameters":[],"returns":[],"locals":[["num#1#0",{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":982,"end":985}],["some_struct#1#0",{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":869,"end":880}],["vec#1#0",{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"sta
rt":910,"end":913}]],"nops":{},"code_map":{"0":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":883,"end":896},"1":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":865,"end":880},"2":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":934,"end":935},"3":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":916,"end":936},"4":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":906,"end":913},"5":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":960,"end":968},"6":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":970,"end":971},"7":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":942,"end":972},"8":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":988,"end":990},"9":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":982,"end":985},"10":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":1000,"end":1016},"11":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":1018,"end":1026},"12":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":1028,"end":1032},"13":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":996,"end":1033},"15":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":1033,"end":1034}},"is_native":false},"3":{"definition_location":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":35,"end":1036},"type_parameters":[],"parameters":[],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[229,240,178,48,60,183,228,141,55,148,99,201,16,75,204,42,202,88,124,209,100,38,8,162,249,132,38,205,211,78,130,234],"start":35,"end":1036}},"is_native":false}},"constant_map":{}} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/sources/dependencies/MoveStdlib/vector.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/sources/dependencies/MoveStdlib/vector.move new file mode 100644 index 0000000000000..55c1abac34b74 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/sources/dependencies/MoveStdlib/vector.move @@ -0,0 +1,364 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +#[defines_primitive(vector)] +/// A variable-sized container that can hold any type. Indexing is 0-based, and +/// vectors are growable. This module has many native functions. +module std::vector { + /// Allows calling `.to_string()` on a vector of `u8` to get a utf8 `String`. + public use fun std::string::utf8 as vector.to_string; + + /// Allows calling `.try_to_string()` on a vector of `u8` to get a utf8 `String`. + /// This will return `None` if the vector is not valid utf8. + public use fun std::string::try_utf8 as vector.try_to_string; + + /// Allows calling `.to_ascii_string()` on a vector of `u8` to get an `ascii::String`. + public use fun std::ascii::string as vector.to_ascii_string; + + /// Allows calling `.try_to_ascii_string()` on a vector of `u8` to get an + /// `ascii::String`. This will return `None` if the vector is not valid ascii. + public use fun std::ascii::try_string as vector.try_to_ascii_string; + + /// The index into the vector is out of bounds + const EINDEX_OUT_OF_BOUNDS: u64 = 0x20000; + + #[bytecode_instruction] + /// Create an empty vector. + public native fun empty<Element>(): vector<Element>; + + #[bytecode_instruction] + /// Return the length of the vector. + public native fun length<Element>(v: &vector<Element>): u64; + + #[syntax(index)] + #[bytecode_instruction] + /// Acquire an immutable reference to the `i`th element of the vector `v`. + /// Aborts if `i` is out of bounds. + public native fun borrow<Element>(v: &vector<Element>, i: u64): &Element; + + #[bytecode_instruction] + /// Add element `e` to the end of the vector `v`. + public native fun push_back<Element>(v: &mut vector<Element>, e: Element); + + #[syntax(index)] + #[bytecode_instruction] + /// Return a mutable reference to the `i`th element in the vector `v`. + /// Aborts if `i` is out of bounds. + public native fun borrow_mut<Element>(v: &mut vector<Element>, i: u64): &mut Element; + + #[bytecode_instruction] + /// Pop an element from the end of vector `v`. + /// Aborts if `v` is empty. + public native fun pop_back<Element>(v: &mut vector<Element>): Element; + + #[bytecode_instruction] + /// Destroy the vector `v`. + /// Aborts if `v` is not empty. + public native fun destroy_empty<Element>(v: vector<Element>); + + #[bytecode_instruction] + /// Swaps the elements at the `i`th and `j`th indices in the vector `v`. + /// Aborts if `i` or `j` is out of bounds. + public native fun swap<Element>(v: &mut vector<Element>, i: u64, j: u64); + + /// Return an vector of size one containing element `e`. + public fun singleton<Element>(e: Element): vector<Element> { + let mut v = empty(); + v.push_back(e); + v + } + + /// Reverses the order of the elements in the vector `v` in place. + public fun reverse<Element>(v: &mut vector<Element>) { + let len = v.length(); + if (len == 0) return (); + + let mut front_index = 0; + let mut back_index = len - 1; + while (front_index < back_index) { + v.swap(front_index, back_index); + front_index = front_index + 1; + back_index = back_index - 1; + } + } + + /// Pushes all of the elements of the `other` vector into the `lhs` vector. + public fun append<Element>(lhs: &mut vector<Element>, mut other: vector<Element>) { + other.reverse(); + while (!other.is_empty()) lhs.push_back(other.pop_back()); + other.destroy_empty(); + } + + /// Return `true` if the vector `v` has no elements and `false` otherwise. + public fun is_empty<Element>(v: &vector<Element>): bool { + v.length() == 0 + } + + /// Return true if `e` is in the vector `v`. + /// Otherwise, returns false. 
+ public fun contains<Element>(v: &vector<Element>, e: &Element): bool { + let mut i = 0; + let len = v.length(); + while (i < len) { + if (&v[i] == e) return true; + i = i + 1; + }; + false + } + + /// Return `(true, i)` if `e` is in the vector `v` at index `i`. + /// Otherwise, returns `(false, 0)`. + public fun index_of<Element>(v: &vector<Element>, e: &Element): (bool, u64) { + let mut i = 0; + let len = v.length(); + while (i < len) { + if (&v[i] == e) return (true, i); + i = i + 1; + }; + (false, 0) + } + + /// Remove the `i`th element of the vector `v`, shifting all subsequent elements. + /// This is O(n) and preserves ordering of elements in the vector. + /// Aborts if `i` is out of bounds. + public fun remove<Element>(v: &mut vector<Element>, mut i: u64): Element { + let mut len = v.length(); + // i out of bounds; abort + if (i >= len) abort EINDEX_OUT_OF_BOUNDS; + + len = len - 1; + while (i < len) v.swap(i, { i = i + 1; i }); + v.pop_back() + } + + /// Insert `e` at position `i` in the vector `v`. + /// If `i` is in bounds, this shifts the old `v[i]` and all subsequent elements to the right. + /// If `i == v.length()`, this adds `e` to the end of the vector. + /// This is O(n) and preserves ordering of elements in the vector. + /// Aborts if `i > v.length()` + public fun insert<Element>(v: &mut vector<Element>, e: Element, mut i: u64) { + let len = v.length(); + // i too big abort + if (i > len) abort EINDEX_OUT_OF_BOUNDS; + + v.push_back(e); + while (i < len) { + v.swap(i, len); + i = i + 1 + } + } + + /// Swap the `i`th element of the vector `v` with the last element and then pop the vector. + /// This is O(1), but does not preserve ordering of elements in the vector. + /// Aborts if `i` is out of bounds. + public fun swap_remove<Element>(v: &mut vector<Element>, i: u64): Element { + assert!(!v.is_empty(), EINDEX_OUT_OF_BOUNDS); + let last_idx = v.length() - 1; + v.swap(i, last_idx); + v.pop_back() + } + + // === Macros === + + /// Create a vector of length `n` by calling the function `f` on each index. + public macro fun tabulate<$T>($n: u64, $f: |u64| -> $T): vector<$T> { + let mut v = vector[]; + let n = $n; + n.do!(|i| v.push_back($f(i))); + v + } + + /// Destroy the vector `v` by calling `f` on each element and then destroying the vector. + /// Does not preserve the order of elements in the vector (starts from the end of the vector). + public macro fun destroy<$T>($v: vector<$T>, $f: |$T|) { + let mut v = $v; + while (!v.is_empty()) $f(v.pop_back()); + v.destroy_empty(); + } + + /// Destroy the vector `v` by calling `f` on each element and then destroying the vector. + /// Preserves the order of elements in the vector. + public macro fun do<$T>($v: vector<$T>, $f: |$T|) { + let mut v = $v; + v.reverse(); + while (!v.is_empty()) $f(v.pop_back()); + v.destroy_empty(); + } + + /// Perform an action `f` on each element of the vector `v`. The vector is not modified. + public macro fun do_ref<$T>($v: &vector<$T>, $f: |&$T|) { + let v = $v; + v.length().do!(|i| $f(&v[i])) + } + + /// Perform an action `f` on each element of the vector `v`. + /// The function `f` takes a mutable reference to the element. + public macro fun do_mut<$T>($v: &mut vector<$T>, $f: |&mut $T|) { + let v = $v; + v.length().do!(|i| $f(&mut v[i])) + } + + /// Map the vector `v` to a new vector by applying the function `f` to each element. + /// Preserves the order of elements in the vector, first is called first. 
+ public macro fun map<$T, $U>($v: vector<$T>, $f: |$T| -> $U): vector<$U> { + let v = $v; + let mut r = vector[]; + v.do!(|e| r.push_back($f(e))); + r + } + + /// Map the vector `v` to a new vector by applying the function `f` to each element. + /// Preserves the order of elements in the vector, first is called first. + public macro fun map_ref<$T, $U>($v: &vector<$T>, $f: |&$T| -> $U): vector<$U> { + let v = $v; + let mut r = vector[]; + v.do_ref!(|e| r.push_back($f(e))); + r + } + + /// Filter the vector `v` by applying the function `f` to each element. + /// Return a new vector containing only the elements for which `f` returns `true`. + public macro fun filter<$T: drop>($v: vector<$T>, $f: |&$T| -> bool): vector<$T> { + let v = $v; + let mut r = vector[]; + v.do!(|e| if ($f(&e)) r.push_back(e)); + r + } + + /// Split the vector `v` into two vectors by applying the function `f` to each element. + /// Return a tuple containing two vectors: the first containing the elements for which `f` returns `true`, + /// and the second containing the elements for which `f` returns `false`. + public macro fun partition<$T>($v: vector<$T>, $f: |&$T| -> bool): (vector<$T>, vector<$T>) { + let v = $v; + let mut r1 = vector[]; + let mut r2 = vector[]; + v.do!(|e| if ($f(&e)) r1.push_back(e) else r2.push_back(e)); + (r1, r2) + } + + /// Finds the index of first element in the vector `v` that satisfies the predicate `f`. + /// Returns `some(index)` if such an element is found, otherwise `none()`. + public macro fun find_index<$T>($v: &vector<$T>, $f: |&$T| -> bool): Option<u64> { + let v = $v; + 'find_index: { + v.length().do!(|i| if ($f(&v[i])) return 'find_index option::some(i)); + option::none() + } + } + + /// Count how many elements in the vector `v` satisfy the predicate `f`. + public macro fun count<$T>($v: &vector<$T>, $f: |&$T| -> bool): u64 { + let v = $v; + let mut count = 0; + v.do_ref!(|e| if ($f(e)) count = count + 1); + count + } + + /// Reduce the vector `v` to a single value by applying the function `f` to each element. + /// Similar to `fold_left` in Rust and `reduce` in Python and JavaScript. + public macro fun fold<$T, $Acc>($v: vector<$T>, $init: $Acc, $f: |$Acc, $T| -> $Acc): $Acc { + let v = $v; + let mut acc = $init; + v.do!(|e| acc = $f(acc, e)); + acc + } + + /// Whether any element in the vector `v` satisfies the predicate `f`. + /// If the vector is empty, returns `false`. + public macro fun any<$T>($v: &vector<$T>, $f: |&$T| -> bool): bool { + let v = $v; + 'any: { + v.do_ref!(|e| if ($f(e)) return 'any true); + false + } + } + + /// Whether all elements in the vector `v` satisfy the predicate `f`. + /// If the vector is empty, returns `true`. + public macro fun all<$T>($v: &vector<$T>, $f: |&$T| -> bool): bool { + let v = $v; + 'all: { + v.do_ref!(|e| if (!$f(e)) return 'all false); + true + } + } + + /// Destroys two vectors `v1` and `v2` by calling `f` to each pair of elements. + /// Aborts if the vectors are not of the same length. + /// The order of elements in the vectors is preserved. + public macro fun zip_do<$T1, $T2>($v1: vector<$T1>, $v2: vector<$T2>, $f: |$T1, $T2|) { + let v1 = $v1; + let mut v2 = $v2; + v2.reverse(); + let len = v1.length(); + assert!(len == v2.length()); + v1.do!(|el1| $f(el1, v2.pop_back())); + } + + /// Destroys two vectors `v1` and `v2` by calling `f` to each pair of elements. + /// Aborts if the vectors are not of the same length. + /// Starts from the end of the vectors. 
+ public macro fun zip_do_reverse<$T1, $T2>($v1: vector<$T1>, $v2: vector<$T2>, $f: |$T1, $T2|) { + let v1 = $v1; + let mut v2 = $v2; + let len = v1.length(); + assert!(len == v2.length()); + v1.destroy!(|el1| $f(el1, v2.pop_back())); + } + + /// Iterate through `v1` and `v2` and apply the function `f` to references of each pair of + /// elements. The vectors are not modified. + /// Aborts if the vectors are not of the same length. + /// The order of elements in the vectors is preserved. + public macro fun zip_do_ref<$T1, $T2>($v1: &vector<$T1>, $v2: &vector<$T2>, $f: |&$T1, &$T2|) { + let v1 = $v1; + let v2 = $v2; + let len = v1.length(); + assert!(len == v2.length()); + len.do!(|i| $f(&v1[i], &v2[i])); + } + + /// Iterate through `v1` and `v2` and apply the function `f` to mutable references of each pair + /// of elements. The vectors may be modified. + /// Aborts if the vectors are not of the same length. + /// The order of elements in the vectors is preserved. + public macro fun zip_do_mut<$T1, $T2>( + $v1: &mut vector<$T1>, + $v2: &mut vector<$T2>, + $f: |&mut $T1, &mut $T2|, + ) { + let v1 = $v1; + let v2 = $v2; + let len = v1.length(); + assert!(len == v2.length()); + len.do!(|i| $f(&mut v1[i], &mut v2[i])); + } + + /// Destroys two vectors `v1` and `v2` by applying the function `f` to each pair of elements. + /// The returned values are collected into a new vector. + /// Aborts if the vectors are not of the same length. + /// The order of elements in the vectors is preserved. + public macro fun zip_map<$T1, $T2, $U>( + $v1: vector<$T1>, + $v2: vector<$T2>, + $f: |$T1, $T2| -> $U, + ): vector<$U> { + let mut r = vector[]; + zip_do!($v1, $v2, |el1, el2| r.push_back($f(el1, el2))); + r + } + + /// Iterate through `v1` and `v2` and apply the function `f` to references of each pair of + /// elements. The returned values are collected into a new vector. + /// Aborts if the vectors are not of the same length. + /// The order of elements in the vectors is preserved. + public macro fun zip_map_ref<$T1, $T2, $U>( + $v1: &vector<$T1>, + $v2: &vector<$T2>, + $f: |&$T1, &$T2| -> $U, + ): vector<$U> { + let mut r = vector[]; + zip_do_ref!($v1, $v2, |el1, el2| r.push_back($f(el1, el2))); + r + } +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/sources/m.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/sources/m.move new file mode 100644 index 0000000000000..3b4c1e24e4170 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/build/references/sources/m.move @@ -0,0 +1,45 @@ +// Test tracking reference values. 
+module references::m; + +public struct SomeStruct has drop { + struct_field: SimpleStruct, + simple_field: u64, + vec_simple_field: vector<u64>, +} + +public struct SimpleStruct has drop, copy { + field: u64, +} + +fun foo( + some_struct_ref: &mut SomeStruct, + vec_ref: &mut vector<u64>, + num_ref: &u64, +): u64 { + some_struct_ref.struct_field.field = 42; + some_struct_ref.simple_field = *num_ref; + + let e1 = vector::borrow_mut(&mut some_struct_ref.vec_simple_field, 0); + *e1 = 42; + + let e2 = vector::borrow_mut(vec_ref, 0); + *e2 = 42; + *num_ref + some_struct_ref.simple_field + vec_ref[0] +} + +fun some_struct(): SomeStruct { + SomeStruct { + struct_field: SimpleStruct { field: 0 }, + simple_field: 0, + vec_simple_field: vector::singleton(0), + } +} + +#[test] +fun test() { + let mut some_struct = some_struct(); + let mut vec = vector::singleton(0); + vector::push_back(&mut vec, 7); + let num = 42; + foo(&mut some_struct, &mut vec, &num); +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/sources/m.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/sources/m.move new file mode 100644 index 0000000000000..3b4c1e24e4170 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/sources/m.move @@ -0,0 +1,45 @@ +// Test tracking reference values. +module references::m; + +public struct SomeStruct has drop { + struct_field: SimpleStruct, + simple_field: u64, + vec_simple_field: vector<u64>, +} + +public struct SimpleStruct has drop, copy { + field: u64, +} + +fun foo( + some_struct_ref: &mut SomeStruct, + vec_ref: &mut vector<u64>, + num_ref: &u64, +): u64 { + some_struct_ref.struct_field.field = 42; + some_struct_ref.simple_field = *num_ref; + + let e1 = vector::borrow_mut(&mut some_struct_ref.vec_simple_field, 0); + *e1 = 42; + + let e2 = vector::borrow_mut(vec_ref, 0); + *e2 = 42; + *num_ref + some_struct_ref.simple_field + vec_ref[0] +} + +fun some_struct(): SomeStruct { + SomeStruct { + struct_field: SimpleStruct { field: 0 }, + simple_field: 0, + vec_simple_field: vector::singleton(0), + } +} + +#[test] +fun test() { + let mut some_struct = some_struct(); + let mut vec = vector::singleton(0); + vector::push_back(&mut vec, 7); + let num = 42; + foo(&mut some_struct, &mut vec, &num); +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/test.exp b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/test.exp new file mode 100644 index 0000000000000..77d861d3ef1d5 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/test.exp @@ -0,0 +1,90 @@ +current frame stack: + function: test (line 44) + scope 0 : + num : 42 + type: u64 + + some_struct : (0x0::m::SomeStruct) { + struct_field : (0x0::m::SimpleStruct) { + field : 0 + } + simple_field : 0 + vec_simple_field : [ + 0 : 0 + ] + } + type: 0x0::m::SomeStruct + + vec : [ + 0 : 0 + 1 : 7 + ] + type: vector<u64> + + function: foo (line 19) + scope 0 : + some_struct_ref : (0x0::m::SomeStruct) { + struct_field : (0x0::m::SimpleStruct) { + field : 0 + } + simple_field : 0 + vec_simple_field : [ + 0 : 0 + ] + } + type: &mut 0x0::m::SomeStruct + + vec_ref : [ + 0 : 0 + 1 : 7 + ] + type: &mut vector<u64> + + num_ref : 42 + type: &u64 + +current frame stack: + function: test (line 44) + scope 0 : + num : 42 + type: u64 + + some_struct : (0x0::m::SomeStruct) { + struct_field : (0x0::m::SimpleStruct) { + field : 42 + } + simple_field : 42 + vec_simple_field : [ + 0 : 42 + ] + } + type: 
0x0::m::SomeStruct + + vec : [ + 0 : 42 + 1 : 7 + ] + type: vector + + function: foo (line 27) + scope 0 : + some_struct_ref : (0x0::m::SomeStruct) { + struct_field : (0x0::m::SimpleStruct) { + field : 42 + } + simple_field : 42 + vec_simple_field : [ + 0 : 42 + ] + } + type: &mut 0x0::m::SomeStruct + + vec_ref : [ + 0 : 42 + 1 : 7 + ] + type: &mut vector + + num_ref : 42 + type: &u64 + diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/trace.spec.js b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/trace.spec.js new file mode 100644 index 0000000000000..6963fb9aafe86 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/trace.spec.js @@ -0,0 +1,21 @@ +let action = (runtime) => { + let res = ''; + // step over functions creating data to be referenced + runtime.step(true); + runtime.step(true); + runtime.step(true); + runtime.step(true); + // step into a function + runtime.step(false); + res += runtime.toString(); + // advance until all references are updated + runtime.step(true); + runtime.step(true); + runtime.step(true); + runtime.step(true); + runtime.step(true); + runtime.step(true); + res += runtime.toString(); + return res; +}; +run_spec(__dirname, action); diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/traces/references__m__test.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/traces/references__m__test.json new file mode 100644 index 0000000000000..15a2d2aade33f --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references/traces/references__m__test.json @@ -0,0 +1 @@ +{"version":1,"events":[{"OpenFrame":{"frame":{"frame_id":0,"function_name":"test","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":2,"type_instantiation":[],"parameters":[],"return_types":[],"locals_types":[{"type_":"u64","ref_type":null},{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeStruct","type_args":[]}},"ref_type":null},{"type_":{"vector":"u64"},"ref_type":null}],"is_native":false},"gas_left":1000000000}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":1000000000,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":2,"function_name":"some_struct","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":1,"type_instantiation":[],"parameters":[],"return_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeStruct","type_args":[]}},"ref_type":null}],"locals_types":[],"is_native":false},"gas_left":1000000000}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999996,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999992,"instruction":"PACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x0::m::SimpleStruct","fields":{"field":0}}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999989,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999986,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999986,"instruction"
:"CALL_GENERIC"}},{"OpenFrame":{"frame":{"frame_id":13,"function_name":"singleton","module":{"address":"0000000000000000000000000000000000000000000000000000000000000001","name":"vector"},"binary_member_index":8,"type_instantiation":["u64"],"parameters":[{"RuntimeValue":{"value":0}}],"return_types":[{"type_":{"vector":"u64"},"ref_type":null}],"locals_types":[{"type_":"u64","ref_type":null},{"type_":{"vector":"u64"},"ref_type":null}],"is_native":false},"gas_left":999999986}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999975,"instruction":"VEC_PACK"}},{"Effect":{"Push":{"RuntimeValue":{"value":[]}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999974,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":[]}}}},{"Effect":{"Write":{"location":{"Local":[13,1]},"root_value_after_write":{"RuntimeValue":{"value":[]}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999964,"instruction":"MUT_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[13,1]},"root_value_read":{"RuntimeValue":{"value":[]}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[13,1]},"snapshot":[]}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999946,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[13,0]},"root_value_read":{"RuntimeValue":{"value":0}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999945,"instruction":"VEC_PUSH_BACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[13,1]},"snapshot":[]}}}},{"Effect":{"Write":{"location":{"Local":[13,1]},"root_value_after_write":{"RuntimeValue":{"value":[0]}}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999935,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[13,1]},"root_value_read":{"RuntimeValue":{"value":[0]}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":[0]}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999934,"instruction":"RET"}},{"CloseFrame":{"frame_id":13,"return_":[{"RuntimeValue":{"value":[0]}}],"gas_left":999999934}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999930,"instruction":"PACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":[0]}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x0::m::SimpleStruct","fields":{"field":0}}}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":0}},"simple_field":0,"vec_simple_field":[0]}}}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999929,"instruction":"RET"}},{"CloseFrame":{"frame_id":2,"return_":[{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":0}},"simple_field":0,"vec_simple_field":[0]}}}}],"gas_left":999999929}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999928,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":0}},"simple_field":0,"vec_simple_field":[0]}}}}}},{"Effect":{"Write":{"location":{"Local":[0,1]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":0}},"simple_field":0,"vec_simple_field":[0]}}}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas
_left":999999925,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999925,"instruction":"CALL_GENERIC"}},{"OpenFrame":{"frame":{"frame_id":47,"function_name":"singleton","module":{"address":"0000000000000000000000000000000000000000000000000000000000000001","name":"vector"},"binary_member_index":8,"type_instantiation":["u64"],"parameters":[{"RuntimeValue":{"value":0}}],"return_types":[{"type_":{"vector":"u64"},"ref_type":null}],"locals_types":[{"type_":"u64","ref_type":null},{"type_":{"vector":"u64"},"ref_type":null}],"is_native":false},"gas_left":999999925}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999914,"instruction":"VEC_PACK"}},{"Effect":{"Push":{"RuntimeValue":{"value":[]}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999913,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":[]}}}},{"Effect":{"Write":{"location":{"Local":[47,1]},"root_value_after_write":{"RuntimeValue":{"value":[]}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999903,"instruction":"MUT_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[47,1]},"root_value_read":{"RuntimeValue":{"value":[]}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[47,1]},"snapshot":[]}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999885,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[47,0]},"root_value_read":{"RuntimeValue":{"value":0}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999884,"instruction":"VEC_PUSH_BACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[47,1]},"snapshot":[]}}}},{"Effect":{"Write":{"location":{"Local":[47,1]},"root_value_after_write":{"RuntimeValue":{"value":[0]}}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999874,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[47,1]},"root_value_read":{"RuntimeValue":{"value":[0]}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":[0]}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999873,"instruction":"RET"}},{"CloseFrame":{"frame_id":47,"return_":[{"RuntimeValue":{"value":[0]}}],"gas_left":999999873}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999872,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":[0]}}}},{"Effect":{"Write":{"location":{"Local":[0,2]},"root_value_after_write":{"RuntimeValue":{"value":[0]}}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999862,"instruction":"MUT_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[0,2]},"root_value_read":{"RuntimeValue":{"value":[0]}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,2]},"snapshot":[0]}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999859,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":7}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999858,"instruction":"VEC_PUSH_BACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":7}}}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[0,2]},"snapshot":[0]}}}},{"Effect":{"Write":{"location":{"Local":[0,2]},"root_value_after_write":{"RuntimeValue":{"value":[0,7]}}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999855,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999999854,"instruction":"ST_LO
C"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Write":{"location":{"Local":[0,0]},"root_value_after_write":{"RuntimeValue":{"value":42}}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999999844,"instruction":"MUT_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[0,1]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":0}},"simple_field":0,"vec_simple_field":[0]}}}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,1]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":0}},"simple_field":0,"vec_simple_field":[0]}}}}}},{"Instruction":{"type_parameters":[],"pc":11,"gas_left":999999834,"instruction":"MUT_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[0,2]},"root_value_read":{"RuntimeValue":{"value":[0,7]}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,2]},"snapshot":[0,7]}}}},{"Instruction":{"type_parameters":[],"pc":12,"gas_left":999999824,"instruction":"IMM_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[0,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":false}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[0,0]},"snapshot":42}}}},{"Instruction":{"type_parameters":[],"pc":13,"gas_left":999999824,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":95,"function_name":"foo","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"MutRef":{"location":{"Local":[0,1]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":0}},"simple_field":0,"vec_simple_field":[0]}}}},{"MutRef":{"location":{"Local":[0,2]},"snapshot":[0,7]}},{"ImmRef":{"location":{"Local":[0,0]},"snapshot":42}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeStruct","type_args":[]}},"ref_type":"Mut"},{"type_":{"vector":"u64"},"ref_type":"Mut"},{"type_":"u64","ref_type":"Imm"},{"type_":"u64","ref_type":"Mut"},{"type_":"u64","ref_type":"Mut"}],"is_native":false},"gas_left":999999824}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999820,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999810,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[95,0]},"root_value_read":{"MutRef":{"location":{"Local":[0,1]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":0}},"simple_field":0,"vec_simple_field":[0]}}}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,1]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":0}},"simple_field":0,"vec_simple_field":[0]}}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999800,"instruction":"MUT_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[0,1]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":0}},"simple_field":0,"vec_simple_field":[0]}}}}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Local":[0,1]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::Si
mpleStruct","fields":{"field":0}},"simple_field":0,"vec_simple_field":[0]}}}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999790,"instruction":"MUT_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Local":[0,1]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":0}},"simple_field":0,"vec_simple_field":[0]}}}}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,1]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":0}},"simple_field":0,"vec_simple_field":[0]}}}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999772,"instruction":"WRITE_REF"}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,1]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":0}},"simple_field":0,"vec_simple_field":[0]}}}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Write":{"location":{"Indexed":[{"Indexed":[{"Local":[0,1]},0]},0]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":0,"vec_simple_field":[0]}}}}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999762,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[95,2]},"root_value_read":{"ImmRef":{"location":{"Local":[0,0]},"snapshot":42}},"moved":false}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[0,0]},"snapshot":42}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999744,"instruction":"READ_REF"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Local":[0,0]},"snapshot":42}}}},{"Effect":{"Read":{"location":{"Local":[0,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999734,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[95,0]},"root_value_read":{"MutRef":{"location":{"Local":[0,1]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":0,"vec_simple_field":[0]}}}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,1]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":0,"vec_simple_field":[0]}}}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999724,"instruction":"MUT_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[0,1]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":0,"vec_simple_field":[0]}}}}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Local":[0,1]},1]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":0,"vec_simple_field":[0]}}}}}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999999706,"instruction":"WRITE_REF"}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Local":[0,1]},1]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":0,"vec_simple_field":[0]}}}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Write":{"location":{"Indexed":[{"Local":
[0,1]},1]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":42,"vec_simple_field":[0]}}}}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999999696,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[95,0]},"root_value_read":{"MutRef":{"location":{"Local":[0,1]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":42,"vec_simple_field":[0]}}}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,1]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":42,"vec_simple_field":[0]}}}}}},{"Instruction":{"type_parameters":[],"pc":11,"gas_left":999999686,"instruction":"MUT_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[0,1]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":42,"vec_simple_field":[0]}}}}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Local":[0,1]},2]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":42,"vec_simple_field":[0]}}}}}},{"Instruction":{"type_parameters":[],"pc":12,"gas_left":999999683,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":13,"gas_left":999999673,"instruction":"VEC_MUT_BORROW"}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Local":[0,1]},2]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":42,"vec_simple_field":[0]}}}}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,1]},2]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":42,"vec_simple_field":[0]}}}}}},{"Instruction":{"type_parameters":[],"pc":14,"gas_left":999999672,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,1]},2]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":42,"vec_simple_field":[0]}}}}}},{"Effect":{"Write":{"location":{"Local":[95,3]},"root_value_after_write":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,1]},2]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":42,"vec_simple_field":[0]}}}}}}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999999669,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":16,"gas_left":999999659,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[95,3]},"root_value_read":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,1]},2]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":42,"vec_simple_field":[0]}}}},"moved":true}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,1]},2]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"fie
ld":42}},"simple_field":42,"vec_simple_field":[0]}}}}}},{"Instruction":{"type_parameters":[],"pc":17,"gas_left":999999641,"instruction":"WRITE_REF"}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,1]},2]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":42,"vec_simple_field":[0]}}}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Write":{"location":{"Indexed":[{"Indexed":[{"Local":[0,1]},2]},0]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":42,"vec_simple_field":[42]}}}}}}},{"Instruction":{"type_parameters":[],"pc":18,"gas_left":999999631,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[95,1]},"root_value_read":{"MutRef":{"location":{"Local":[0,2]},"snapshot":[0,7]}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,2]},"snapshot":[0,7]}}}},{"Instruction":{"type_parameters":[],"pc":19,"gas_left":999999628,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":20,"gas_left":999999618,"instruction":"VEC_MUT_BORROW"}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[0,2]},"snapshot":[0,7]}}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Local":[0,2]},0]},"snapshot":[0,7]}}}},{"Instruction":{"type_parameters":[],"pc":21,"gas_left":999999617,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Local":[0,2]},0]},"snapshot":[0,7]}}}},{"Effect":{"Write":{"location":{"Local":[95,4]},"root_value_after_write":{"MutRef":{"location":{"Indexed":[{"Local":[0,2]},0]},"snapshot":[0,7]}}}}},{"Instruction":{"type_parameters":[],"pc":22,"gas_left":999999614,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":23,"gas_left":999999604,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[95,4]},"root_value_read":{"MutRef":{"location":{"Indexed":[{"Local":[0,2]},0]},"snapshot":[0,7]}},"moved":true}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Local":[0,2]},0]},"snapshot":[0,7]}}}},{"Instruction":{"type_parameters":[],"pc":24,"gas_left":999999586,"instruction":"WRITE_REF"}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Local":[0,2]},0]},"snapshot":[0,7]}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Write":{"location":{"Indexed":[{"Local":[0,2]},0]},"root_value_after_write":{"RuntimeValue":{"value":[42,7]}}}}},{"Instruction":{"type_parameters":[],"pc":25,"gas_left":999999576,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[95,2]},"root_value_read":{"ImmRef":{"location":{"Local":[0,0]},"snapshot":42}},"moved":true}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[0,0]},"snapshot":42}}}},{"Instruction":{"type_parameters":[],"pc":26,"gas_left":999999558,"instruction":"READ_REF"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Local":[0,0]},"snapshot":42}}}},{"Effect":{"Read":{"location":{"Local":[0,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":27,"gas_left":999999548,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[95,0]},"root_value_read":{"MutRef":{"location":{"Local":[0,1]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"str
uct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":42,"vec_simple_field":[42]}}}},"moved":true}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,1]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":42,"vec_simple_field":[42]}}}}}},{"Instruction":{"type_parameters":[],"pc":28,"gas_left":999999538,"instruction":"IMM_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[0,1]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":42,"vec_simple_field":[42]}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Local":[0,1]},1]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":42,"vec_simple_field":[42]}}}}}},{"Instruction":{"type_parameters":[],"pc":29,"gas_left":999999520,"instruction":"READ_REF"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Indexed":[{"Local":[0,1]},1]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":42,"vec_simple_field":[42]}}}}}},{"Effect":{"Read":{"location":{"Indexed":[{"Local":[0,1]},1]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::SimpleStruct","fields":{"field":42}},"simple_field":42,"vec_simple_field":[42]}}}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":30,"gas_left":999999517,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Push":{"RuntimeValue":{"value":84}}}},{"Instruction":{"type_parameters":[],"pc":31,"gas_left":999999507,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[95,1]},"root_value_read":{"MutRef":{"location":{"Local":[0,2]},"snapshot":[42,7]}},"moved":true}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,2]},"snapshot":[42,7]}}}},{"Instruction":{"type_parameters":[],"pc":32,"gas_left":999999497,"instruction":"FREEZE_REF"}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[0,2]},"snapshot":[42,7]}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Local":[0,2]},"snapshot":[42,7]}}}},{"Instruction":{"type_parameters":[],"pc":33,"gas_left":999999494,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":34,"gas_left":999999484,"instruction":"VEC_IMM_BORROW"}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Pop":{"ImmRef":{"location":{"Local":[0,2]},"snapshot":[42,7]}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Local":[0,2]},0]},"snapshot":[42,7]}}}},{"Instruction":{"type_parameters":[],"pc":35,"gas_left":999999466,"instruction":"READ_REF"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Indexed":[{"Local":[0,2]},0]},"snapshot":[42,7]}}}},{"Effect":{"Read":{"location":{"Indexed":[{"Local":[0,2]},0]},"root_value_read":{"RuntimeValue":{"value":[42,7]}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":36,"gas_left":999999463,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":84}}}},{"Effect":{"Push":{"RuntimeValue":{"value":126}}}},{"Instruction":{"type_parameters":[],"pc":37,"gas_left":999999462,"instruction":"RET"}},{"CloseFrame":{"frame_id":95,"return_":[
{"RuntimeValue":{"value":126}}],"gas_left":999999462}},{"Instruction":{"type_parameters":[],"pc":14,"gas_left":999999461,"instruction":"POP"}},{"Effect":{"Pop":{"RuntimeValue":{"value":126}}}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999999460,"instruction":"RET"}},{"CloseFrame":{"frame_id":0,"return_":[],"gas_left":999999460}}]} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/Move.toml b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/Move.toml new file mode 100644 index 0000000000000..f6707c4981583 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/Move.toml @@ -0,0 +1,10 @@ +[package] +name = "references_deep" +edition = "2024.beta" + +[dependencies] +MoveStdlib = { local = "../../../../move-stdlib" } + +[addresses] +references_deep = "0x0" +std = "0x1" diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/bytecode_modules/dependencies/MoveStdlib/vector.mv b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/bytecode_modules/dependencies/MoveStdlib/vector.mv new file mode 100644 index 0000000000000..79c6d2eb99157 Binary files /dev/null and b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/bytecode_modules/dependencies/MoveStdlib/vector.mv differ diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/bytecode_modules/m.mv b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/bytecode_modules/m.mv new file mode 100644 index 0000000000000..2f61b892a81f7 Binary files /dev/null and b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/bytecode_modules/m.mv differ diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/source_maps/dependencies/MoveStdlib/vector.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/source_maps/dependencies/MoveStdlib/vector.json new file mode 100644 index 0000000000000..126416a07eb7e --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/source_maps/dependencies/MoveStdlib/vector.json @@ -0,0 +1 @@ 
+{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":261,"end":267},"module_name":["0000000000000000000000000000000000000000000000000000000000000001","vector"],"struct_map":{},"enum_map":{},"function_map":{"0":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1202,"end":1207},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1208,"end":1215}]],"parameters":[],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1220,"end":1235}],"locals":[],"nops":{},"code_map":{},"is_native":true},"1":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1329,"end":1335},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1336,"end":1343}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1345,"end":1346}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1367,"end":1370}],"locals":[],"nops":{},"code_map":{},"is_native":true},"2":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1563,"end":1569},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1570,"end":1577}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1579,"end":1580}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1600,"end":1601}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1609,"end":1617}],"locals":[],"nops":{},"code_map":{},"is_native":true},"3":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1724,"end":1733},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1734,"end":1741}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1743,"end":1744}],["e#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1768,"end":1769}]],"returns":[],"locals":[],"nops":{},"code_map":{},"is_native":true},"4":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,17
6,80,79],"start":1968,"end":1978},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1979,"end":1986}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":1988,"end":1989}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2013,"end":2014}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2022,"end":2034}],"locals":[],"nops":{},"code_map":{},"is_native":true},"5":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2170,"end":2178},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2179,"end":2186}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2188,"end":2189}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2214,"end":2221}],"locals":[],"nops":{},"code_map":{},"is_native":true},"6":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2342,"end":2355},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2356,"end":2363}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2365,"end":2366}]],"returns":[],"locals":[],"nops":{},"code_map":{},"is_native":true},"7":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2561,"end":2565},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2566,"end":2573}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2575,"end":2576}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2600,"end":2601}],["j#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2608,"end":2609}]],"returns":[],"locals":[],"nops":{},"code_map":{},"is_native":true},"8":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2694,"end":2703},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2704,"end":2711}]],"parameters":[["e#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,
126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2713,"end":2714}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2726,"end":2741}],"locals":[["v#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2760,"end":2761}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2764,"end":2771},"1":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2756,"end":2761},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2781,"end":2782},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2793,"end":2794},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2781,"end":2795},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2805,"end":2806}},"is_native":false},"9":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2900,"end":2907},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2908,"end":2915}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2917,"end":2918}]],"returns":[],"locals":[["back_index#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3057,"end":3067}],["front_index#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3024,"end":3035}],["len#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2956,"end":2959}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2962,"end":2963},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2962,"end":2972},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2956,"end":2959},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2986,"end":2989},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2993,"end":2994},"6":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2990,"end":2992},"7":{"file_hash":[42,57,43,60,190,49,1
64,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2982,"end":3005},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":2996,"end":3005},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3038,"end":3039},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3020,"end":3035},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3070,"end":3073},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3076,"end":3077},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3074,"end":3075},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3053,"end":3067},"17":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3094,"end":3105},"18":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3108,"end":3118},"19":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3106,"end":3107},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3087,"end":3260},"21":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3134,"end":3135},"22":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3141,"end":3152},"23":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3154,"end":3164},"24":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3134,"end":3165},"25":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3193,"end":3204},"26":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3207,"end":3208},"27":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3205,"end":3206},"28":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3179,"end":3190},"29":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3235,"end":3245},"30":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3248,"end":3249},"31":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,24
4,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3246,"end":3247},"32":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3222,"end":3232},"33":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3087,"end":3260}},"is_native":false},"10":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3363,"end":3369},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3370,"end":3377}]],"parameters":[["lhs#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3379,"end":3382}],["other#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3410,"end":3415}]],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3444,"end":3449},"1":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3444,"end":3459},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3477,"end":3482},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3477,"end":3493},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3476,"end":3477},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3469,"end":3526},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3495,"end":3498},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3509,"end":3514},"9":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3509,"end":3525},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3495,"end":3526},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3469,"end":3526},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3536,"end":3557},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3536,"end":3541},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3536,"end":3557},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,
79],"start":3557,"end":3558}},"is_native":false},"11":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3660,"end":3668},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3669,"end":3676}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3678,"end":3679}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3700,"end":3704}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3715,"end":3716},"1":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3715,"end":3725},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3729,"end":3730},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3726,"end":3728},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3715,"end":3730}},"is_native":false},"12":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3836,"end":3844},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3845,"end":3852}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3854,"end":3855}],["e#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3875,"end":3876}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3889,"end":3893}],"locals":[["i#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3912,"end":3913}],["len#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3931,"end":3934}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3916,"end":3917},"1":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3908,"end":3913},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3937,"end":3938},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3937,"end":3947},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,1
70,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3931,"end":3934},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3964,"end":3965},"6":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3968,"end":3971},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3966,"end":3967},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3957,"end":4048},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3992,"end":3993},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3994,"end":3995},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3991,"end":3996},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4000,"end":4001},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3997,"end":3999},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3987,"end":4014},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4003,"end":4014},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4010,"end":4014},"21":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4003,"end":4014},"22":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4032,"end":4033},"23":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4036,"end":4037},"24":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4034,"end":4035},"25":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4028,"end":4029},"26":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":3957,"end":4048},"27":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4058,"end":4063}},"is_native":false},"13":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4196,"end":4204},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4205,"end":4212}]],"parameters":[["v#0#0",{"fi
le_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4214,"end":4215}],["e#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4235,"end":4236}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4250,"end":4254},{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4256,"end":4259}],"locals":[["i#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4279,"end":4280}],["len#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4298,"end":4301}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4283,"end":4284},"1":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4275,"end":4280},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4304,"end":4305},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4304,"end":4314},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4298,"end":4301},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4331,"end":4332},"6":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4335,"end":4338},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4333,"end":4334},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4324,"end":4420},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4359,"end":4360},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4361,"end":4362},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4358,"end":4363},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4367,"end":4368},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4364,"end":4366},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4354,"end":4386},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4370,"end":4386}
,"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4378,"end":4382},"21":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4384,"end":4385},"22":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4370,"end":4386},"23":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4404,"end":4405},"24":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4408,"end":4409},"25":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4406,"end":4407},"26":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4400,"end":4401},"27":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4324,"end":4420},"28":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4430,"end":4440},"32":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4431,"end":4436},"33":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4438,"end":4439},"34":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4430,"end":4440}},"is_native":false},"14":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4660,"end":4666},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4667,"end":4674}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4676,"end":4677}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4705,"end":4706}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4714,"end":4721}],"locals":[["%#1",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4897,"end":4898}],["%#2",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4890,"end":4891}],["len#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4740,"end":4743}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4746,"end":4747},"2":{"file_hash":[42,57,43,60,190,49,164,194,57
,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4746,"end":4756},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4736,"end":4743},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4804,"end":4805},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4809,"end":4812},"6":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4806,"end":4808},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4800,"end":4840},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4814,"end":4840},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4820,"end":4840},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4814,"end":4840},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4857,"end":4860},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4863,"end":4864},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4861,"end":4862},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4851,"end":4854},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4881,"end":4882},"17":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4885,"end":4888},"18":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4883,"end":4884},"19":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4874,"end":4917},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4890,"end":4891},"22":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4897,"end":4898},"24":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4906,"end":4907},"25":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4910,"end":4911},"26":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4908,"end":4909},"27":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,1
20,5,88,201,108,231,168,248,67,118,176,80,79],"start":4902,"end":4903},"28":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4890,"end":4891},"29":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4897,"end":4898},"30":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4913,"end":4914},"31":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4890,"end":4917},"32":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4874,"end":4917},"33":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4927,"end":4928},"34":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":4927,"end":4939}},"is_native":false},"15":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5290,"end":5296},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5297,"end":5304}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5306,"end":5307}],["e#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5331,"end":5332}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5347,"end":5348}]],"returns":[],"locals":[["len#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5369,"end":5372}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5375,"end":5376},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5375,"end":5385},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5369,"end":5372},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5426,"end":5427},"5":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5430,"end":5433},"6":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5428,"end":5429},"7":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5422,"end":5461},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"sta
rt":5435,"end":5461},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5441,"end":5461},"11":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5435,"end":5461},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5472,"end":5473},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5484,"end":5485},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5472,"end":5486},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5503,"end":5504},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5507,"end":5510},"17":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5505,"end":5506},"18":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5496,"end":5573},"19":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5526,"end":5527},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5533,"end":5534},"21":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5536,"end":5539},"22":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5526,"end":5540},"23":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5558,"end":5559},"24":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5562,"end":5563},"25":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5560,"end":5561},"26":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5554,"end":5555},"27":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5496,"end":5573}},"is_native":false},"16":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5812,"end":5823},"type_parameters":[["Element",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5824,"end":5831}]],"parameters":[["v#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5833,"end":5834}],["i#0#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,1
44,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5858,"end":5859}]],"returns":[{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5867,"end":5874}],"locals":[["last_idx#1#0",{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5943,"end":5951}]],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5894,"end":5895},"2":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5894,"end":5906},"3":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5893,"end":5894},"4":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5885,"end":5929},"8":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5908,"end":5928},"9":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5885,"end":5929},"10":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5954,"end":5955},"12":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5954,"end":5964},"13":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5967,"end":5968},"14":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5965,"end":5966},"15":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5943,"end":5951},"16":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5978,"end":5979},"17":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5985,"end":5986},"18":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5988,"end":5996},"19":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":5978,"end":5997},"20":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":6007,"end":6008},"21":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":6007,"end":6019}},"is_native":false},"17":{"definition_location":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,108,231,168,248,67,118,176,80,79],"start":249,"end":13981},"type_parameters":[],"parameters":[],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[42,57,43,60,190,49,164,194,57,126,218,170,200,152,239,244,35,144,147,120,5,88,201,1
08,231,168,248,67,118,176,80,79],"start":249,"end":13981}},"is_native":false}},"constant_map":{"EINDEX_OUT_OF_BOUNDS":0}} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/source_maps/m.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/source_maps/m.json new file mode 100644 index 0000000000000..e9db4cf3eeb04 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/source_maps/m.json @@ -0,0 +1 @@ +{"definition_location":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":107,"end":108},"module_name":["0000000000000000000000000000000000000000000000000000000000000000","m"],"struct_map":{"0":{"definition_location":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":125,"end":135},"type_parameters":[],"fields":[{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":151,"end":163}]},"1":{"definition_location":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":193,"end":202},"type_parameters":[],"fields":[{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":224,"end":233}]}},"enum_map":{},"function_map":{"0":{"definition_location":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":255,"end":258},"type_parameters":[],"parameters":[["vec_ref#0#0",{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":259,"end":266}]],"returns":[{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":287,"end":290}],"locals":[["e#1#0",{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":301,"end":302}]],"nops":{},"code_map":{"0":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":324,"end":331},"1":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":333,"end":334},"2":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":305,"end":335},"3":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":301,"end":302},"4":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":346,"end":348},"5":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":342,"end":343},"6":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":341,"end":348},"7":{"fil
e_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":354,"end":361},"9":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":362,"end":363},"10":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":354,"end":364}},"is_native":false},"1":{"definition_location":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":372,"end":375},"type_parameters":[],"parameters":[["some_struct_ref#0#0",{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":376,"end":391}]],"returns":[{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":411,"end":414}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":440,"end":455},"1":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":440,"end":478},"2":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":435,"end":478},"3":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":431,"end":479},"4":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":491,"end":506},"5":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":491,"end":532},"7":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":530,"end":531},"8":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":491,"end":532},"10":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":489,"end":490},"11":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":485,"end":532}},"is_native":false},"2":{"definition_location":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":540,"end":551},"type_parameters":[],"parameters":[],"returns":[{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":555,"end":565}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":648,"end":649},"1":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":630,"end":650},"2":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209
,204,76,225,26,126,10,201,143,108,136],"start":607,"end":652},"3":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":572,"end":658}},"is_native":false},"3":{"definition_location":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":674,"end":678},"type_parameters":[],"parameters":[],"returns":[],"locals":[["some_struct#1#0",{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":695,"end":706}]],"nops":{},"code_map":{"0":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":709,"end":722},"1":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":691,"end":706},"2":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":728,"end":762},"5":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":773,"end":774},"6":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":728,"end":775},"7":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":785,"end":801},"8":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":781,"end":802},"10":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":802,"end":803}},"is_native":false},"4":{"definition_location":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":83,"end":805},"type_parameters":[],"parameters":[],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[191,124,226,117,80,25,18,135,252,240,29,246,133,93,154,101,125,56,138,28,145,209,204,76,225,26,126,10,201,143,108,136],"start":83,"end":805}},"is_native":false}},"constant_map":{}} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/sources/dependencies/MoveStdlib/vector.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/sources/dependencies/MoveStdlib/vector.move new file mode 100644 index 0000000000000..55c1abac34b74 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/sources/dependencies/MoveStdlib/vector.move @@ -0,0 +1,364 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +#[defines_primitive(vector)] +/// A variable-sized container that can hold any type. Indexing is 0-based, and +/// vectors are growable. This module has many native functions. +module std::vector { + /// Allows calling `.to_string()` on a vector of `u8` to get a utf8 `String`. + public use fun std::string::utf8 as vector.to_string; + + /// Allows calling `.try_to_string()` on a vector of `u8` to get a utf8 `String`. 
+ /// This will return `None` if the vector is not valid utf8. + public use fun std::string::try_utf8 as vector.try_to_string; + + /// Allows calling `.to_ascii_string()` on a vector of `u8` to get an `ascii::String`. + public use fun std::ascii::string as vector.to_ascii_string; + + /// Allows calling `.try_to_ascii_string()` on a vector of `u8` to get an + /// `ascii::String`. This will return `None` if the vector is not valid ascii. + public use fun std::ascii::try_string as vector.try_to_ascii_string; + + /// The index into the vector is out of bounds + const EINDEX_OUT_OF_BOUNDS: u64 = 0x20000; + + #[bytecode_instruction] + /// Create an empty vector. + public native fun empty<Element>(): vector<Element>; + + #[bytecode_instruction] + /// Return the length of the vector. + public native fun length<Element>(v: &vector<Element>): u64; + + #[syntax(index)] + #[bytecode_instruction] + /// Acquire an immutable reference to the `i`th element of the vector `v`. + /// Aborts if `i` is out of bounds. + public native fun borrow<Element>(v: &vector<Element>, i: u64): &Element; + + #[bytecode_instruction] + /// Add element `e` to the end of the vector `v`. + public native fun push_back<Element>(v: &mut vector<Element>, e: Element); + + #[syntax(index)] + #[bytecode_instruction] + /// Return a mutable reference to the `i`th element in the vector `v`. + /// Aborts if `i` is out of bounds. + public native fun borrow_mut<Element>(v: &mut vector<Element>, i: u64): &mut Element; + + #[bytecode_instruction] + /// Pop an element from the end of vector `v`. + /// Aborts if `v` is empty. + public native fun pop_back<Element>(v: &mut vector<Element>): Element; + + #[bytecode_instruction] + /// Destroy the vector `v`. + /// Aborts if `v` is not empty. + public native fun destroy_empty<Element>(v: vector<Element>); + + #[bytecode_instruction] + /// Swaps the elements at the `i`th and `j`th indices in the vector `v`. + /// Aborts if `i` or `j` is out of bounds. + public native fun swap<Element>(v: &mut vector<Element>, i: u64, j: u64); + + /// Return a vector of size one containing element `e`. + public fun singleton<Element>(e: Element): vector<Element> { + let mut v = empty(); + v.push_back(e); + v + } + + /// Reverses the order of the elements in the vector `v` in place. + public fun reverse<Element>(v: &mut vector<Element>) { + let len = v.length(); + if (len == 0) return (); + + let mut front_index = 0; + let mut back_index = len - 1; + while (front_index < back_index) { + v.swap(front_index, back_index); + front_index = front_index + 1; + back_index = back_index - 1; + } + } + + /// Pushes all of the elements of the `other` vector into the `lhs` vector. + public fun append<Element>(lhs: &mut vector<Element>, mut other: vector<Element>) { + other.reverse(); + while (!other.is_empty()) lhs.push_back(other.pop_back()); + other.destroy_empty(); + } + + /// Return `true` if the vector `v` has no elements and `false` otherwise. + public fun is_empty<Element>(v: &vector<Element>): bool { + v.length() == 0 + } + + /// Return true if `e` is in the vector `v`. + /// Otherwise, returns false. + public fun contains<Element>(v: &vector<Element>, e: &Element): bool { + let mut i = 0; + let len = v.length(); + while (i < len) { + if (&v[i] == e) return true; + i = i + 1; + }; + false + } + + /// Return `(true, i)` if `e` is in the vector `v` at index `i`. + /// Otherwise, returns `(false, 0)`. + public fun index_of<Element>(v: &vector<Element>, e: &Element): (bool, u64) { + let mut i = 0; + let len = v.length(); + while (i < len) { + if (&v[i] == e) return (true, i); + i = i + 1; + }; + (false, 0) + } + + /// Remove the `i`th element of the vector `v`, shifting all subsequent elements. + /// This is O(n) and preserves ordering of elements in the vector.
+ /// Aborts if `i` is out of bounds. + public fun remove<Element>(v: &mut vector<Element>, mut i: u64): Element { + let mut len = v.length(); + // i out of bounds; abort + if (i >= len) abort EINDEX_OUT_OF_BOUNDS; + + len = len - 1; + while (i < len) v.swap(i, { i = i + 1; i }); + v.pop_back() + } + + /// Insert `e` at position `i` in the vector `v`. + /// If `i` is in bounds, this shifts the old `v[i]` and all subsequent elements to the right. + /// If `i == v.length()`, this adds `e` to the end of the vector. + /// This is O(n) and preserves ordering of elements in the vector. + /// Aborts if `i > v.length()` + public fun insert<Element>(v: &mut vector<Element>, e: Element, mut i: u64) { + let len = v.length(); + // i too big abort + if (i > len) abort EINDEX_OUT_OF_BOUNDS; + + v.push_back(e); + while (i < len) { + v.swap(i, len); + i = i + 1 + } + } + + /// Swap the `i`th element of the vector `v` with the last element and then pop the vector. + /// This is O(1), but does not preserve ordering of elements in the vector. + /// Aborts if `i` is out of bounds. + public fun swap_remove<Element>(v: &mut vector<Element>, i: u64): Element { + assert!(!v.is_empty(), EINDEX_OUT_OF_BOUNDS); + let last_idx = v.length() - 1; + v.swap(i, last_idx); + v.pop_back() + } + + // === Macros === + + /// Create a vector of length `n` by calling the function `f` on each index. + public macro fun tabulate<$T>($n: u64, $f: |u64| -> $T): vector<$T> { + let mut v = vector[]; + let n = $n; + n.do!(|i| v.push_back($f(i))); + v + } + + /// Destroy the vector `v` by calling `f` on each element and then destroying the vector. + /// Does not preserve the order of elements in the vector (starts from the end of the vector). + public macro fun destroy<$T>($v: vector<$T>, $f: |$T|) { + let mut v = $v; + while (!v.is_empty()) $f(v.pop_back()); + v.destroy_empty(); + } + + /// Destroy the vector `v` by calling `f` on each element and then destroying the vector. + /// Preserves the order of elements in the vector. + public macro fun do<$T>($v: vector<$T>, $f: |$T|) { + let mut v = $v; + v.reverse(); + while (!v.is_empty()) $f(v.pop_back()); + v.destroy_empty(); + } + + /// Perform an action `f` on each element of the vector `v`. The vector is not modified. + public macro fun do_ref<$T>($v: &vector<$T>, $f: |&$T|) { + let v = $v; + v.length().do!(|i| $f(&v[i])) + } + + /// Perform an action `f` on each element of the vector `v`. + /// The function `f` takes a mutable reference to the element. + public macro fun do_mut<$T>($v: &mut vector<$T>, $f: |&mut $T|) { + let v = $v; + v.length().do!(|i| $f(&mut v[i])) + } + + /// Map the vector `v` to a new vector by applying the function `f` to each element. + /// Preserves the order of elements in the vector, first is called first. + public macro fun map<$T, $U>($v: vector<$T>, $f: |$T| -> $U): vector<$U> { + let v = $v; + let mut r = vector[]; + v.do!(|e| r.push_back($f(e))); + r + } + + /// Map the vector `v` to a new vector by applying the function `f` to each element. + /// Preserves the order of elements in the vector, first is called first. + public macro fun map_ref<$T, $U>($v: &vector<$T>, $f: |&$T| -> $U): vector<$U> { + let v = $v; + let mut r = vector[]; + v.do_ref!(|e| r.push_back($f(e))); + r + } + + /// Filter the vector `v` by applying the function `f` to each element. + /// Return a new vector containing only the elements for which `f` returns `true`.
+ public macro fun filter<$T: drop>($v: vector<$T>, $f: |&$T| -> bool): vector<$T> { + let v = $v; + let mut r = vector[]; + v.do!(|e| if ($f(&e)) r.push_back(e)); + r + } + + /// Split the vector `v` into two vectors by applying the function `f` to each element. + /// Return a tuple containing two vectors: the first containing the elements for which `f` returns `true`, + /// and the second containing the elements for which `f` returns `false`. + public macro fun partition<$T>($v: vector<$T>, $f: |&$T| -> bool): (vector<$T>, vector<$T>) { + let v = $v; + let mut r1 = vector[]; + let mut r2 = vector[]; + v.do!(|e| if ($f(&e)) r1.push_back(e) else r2.push_back(e)); + (r1, r2) + } + + /// Finds the index of the first element in the vector `v` that satisfies the predicate `f`. + /// Returns `some(index)` if such an element is found, otherwise `none()`. + public macro fun find_index<$T>($v: &vector<$T>, $f: |&$T| -> bool): Option<u64> { + let v = $v; + 'find_index: { + v.length().do!(|i| if ($f(&v[i])) return 'find_index option::some(i)); + option::none() + } + } + + /// Count how many elements in the vector `v` satisfy the predicate `f`. + public macro fun count<$T>($v: &vector<$T>, $f: |&$T| -> bool): u64 { + let v = $v; + let mut count = 0; + v.do_ref!(|e| if ($f(e)) count = count + 1); + count + } + + /// Reduce the vector `v` to a single value by applying the function `f` to each element. + /// Similar to `fold_left` in Rust and `reduce` in Python and JavaScript. + public macro fun fold<$T, $Acc>($v: vector<$T>, $init: $Acc, $f: |$Acc, $T| -> $Acc): $Acc { + let v = $v; + let mut acc = $init; + v.do!(|e| acc = $f(acc, e)); + acc + } + + /// Whether any element in the vector `v` satisfies the predicate `f`. + /// If the vector is empty, returns `false`. + public macro fun any<$T>($v: &vector<$T>, $f: |&$T| -> bool): bool { + let v = $v; + 'any: { + v.do_ref!(|e| if ($f(e)) return 'any true); + false + } + } + + /// Whether all elements in the vector `v` satisfy the predicate `f`. + /// If the vector is empty, returns `true`. + public macro fun all<$T>($v: &vector<$T>, $f: |&$T| -> bool): bool { + let v = $v; + 'all: { + v.do_ref!(|e| if (!$f(e)) return 'all false); + true + } + } + + /// Destroys two vectors `v1` and `v2` by calling `f` on each pair of elements. + /// Aborts if the vectors are not of the same length. + /// The order of elements in the vectors is preserved. + public macro fun zip_do<$T1, $T2>($v1: vector<$T1>, $v2: vector<$T2>, $f: |$T1, $T2|) { + let v1 = $v1; + let mut v2 = $v2; + v2.reverse(); + let len = v1.length(); + assert!(len == v2.length()); + v1.do!(|el1| $f(el1, v2.pop_back())); + } + + /// Destroys two vectors `v1` and `v2` by calling `f` on each pair of elements. + /// Aborts if the vectors are not of the same length. + /// Starts from the end of the vectors. + public macro fun zip_do_reverse<$T1, $T2>($v1: vector<$T1>, $v2: vector<$T2>, $f: |$T1, $T2|) { + let v1 = $v1; + let mut v2 = $v2; + let len = v1.length(); + assert!(len == v2.length()); + v1.destroy!(|el1| $f(el1, v2.pop_back())); + } + + /// Iterate through `v1` and `v2` and apply the function `f` to references of each pair of + /// elements. The vectors are not modified. + /// Aborts if the vectors are not of the same length. + /// The order of elements in the vectors is preserved.
+ public macro fun zip_do_ref<$T1, $T2>($v1: &vector<$T1>, $v2: &vector<$T2>, $f: |&$T1, &$T2|) { + let v1 = $v1; + let v2 = $v2; + let len = v1.length(); + assert!(len == v2.length()); + len.do!(|i| $f(&v1[i], &v2[i])); + } + + /// Iterate through `v1` and `v2` and apply the function `f` to mutable references of each pair + /// of elements. The vectors may be modified. + /// Aborts if the vectors are not of the same length. + /// The order of elements in the vectors is preserved. + public macro fun zip_do_mut<$T1, $T2>( + $v1: &mut vector<$T1>, + $v2: &mut vector<$T2>, + $f: |&mut $T1, &mut $T2|, + ) { + let v1 = $v1; + let v2 = $v2; + let len = v1.length(); + assert!(len == v2.length()); + len.do!(|i| $f(&mut v1[i], &mut v2[i])); + } + + /// Destroys two vectors `v1` and `v2` by applying the function `f` to each pair of elements. + /// The returned values are collected into a new vector. + /// Aborts if the vectors are not of the same length. + /// The order of elements in the vectors is preserved. + public macro fun zip_map<$T1, $T2, $U>( + $v1: vector<$T1>, + $v2: vector<$T2>, + $f: |$T1, $T2| -> $U, + ): vector<$U> { + let mut r = vector[]; + zip_do!($v1, $v2, |el1, el2| r.push_back($f(el1, el2))); + r + } + + /// Iterate through `v1` and `v2` and apply the function `f` to references of each pair of + /// elements. The returned values are collected into a new vector. + /// Aborts if the vectors are not of the same length. + /// The order of elements in the vectors is preserved. + public macro fun zip_map_ref<$T1, $T2, $U>( + $v1: &vector<$T1>, + $v2: &vector<$T2>, + $f: |&$T1, &$T2| -> $U, + ): vector<$U> { + let mut r = vector[]; + zip_do_ref!($v1, $v2, |el1, el2| r.push_back($f(el1, el2))); + r + } +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/sources/m.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/sources/m.move new file mode 100644 index 0000000000000..f8e266674ec44 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/build/references_deep/sources/m.move @@ -0,0 +1,34 @@ +// Test tracking reference values when multiple levels of references are involved. +module references_deep::m; + +public struct SomeStruct has drop { + struct_field: VecStruct, +} + +public struct VecStruct has drop, copy { + vec_field: vector<u64>, +} + +fun bar(vec_ref: &mut vector<u64>): u64 { + let e = vector::borrow_mut(vec_ref, 0); + *e = 42; + vec_ref[0] +} + +fun foo(some_struct_ref: &mut SomeStruct): u64 { + let res = bar(&mut some_struct_ref.struct_field.vec_field); + res + some_struct_ref.struct_field.vec_field[0] +} + +fun some_struct(): SomeStruct { + SomeStruct { + struct_field: VecStruct { vec_field: vector::singleton(0) } + } +} + +#[test] +fun test() { + let mut some_struct = some_struct(); + some_struct.struct_field.vec_field.push_back(7); + foo(&mut some_struct); +}
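Note: the trace fixture added further below records every reference as a location chain, where {"Local":[frame_id, local_index]} names a frame local and {"Indexed":[base, offset]} names a field or vector element reached through it. The following is a minimal illustrative sketch (not one of the files in this diff) of how such chains could be decoded with Node.js; the script name, default path, and printed format are assumptions made for the example.

// decode_writes.js -- hypothetical helper, not part of this change set.
// Prints every Write effect in a trace-adapter trace file together with its
// decoded location chain, e.g. local(frame=0, idx=0)[0][0][0].
const fs = require('fs');

function formatLocation(loc) {
    // {"Local":[frame_id, local_index]} is the root of a location chain.
    if (loc.Local) return `local(frame=${loc.Local[0]}, idx=${loc.Local[1]})`;
    // {"Indexed":[base, offset]} selects a field or vector element of `base`.
    if (loc.Indexed) return `${formatLocation(loc.Indexed[0])}[${loc.Indexed[1]}]`;
    return JSON.stringify(loc);
}

const path = process.argv[2] || 'traces/references_deep__m__test.json';
const trace = JSON.parse(fs.readFileSync(path, 'utf8'));
for (const event of trace.events) {
    const write = event.Effect && event.Effect.Write;
    if (write) console.log(`write -> ${formatLocation(write.location)}`);
}

Run, for instance, as `node decode_writes.js traces/references_deep__m__test.json`; for the trace in this test it would show, among other writes, the VEC_PUSH_BACK in `test` landing at local(frame=0, idx=0)[0][0] and the WRITE_REF in `bar` landing at local(frame=0, idx=0)[0][0][0], which is the multi-level reference behavior this fixture exercises.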
diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/sources/m.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/sources/m.move new file mode 100644 index 0000000000000..f8e266674ec44 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/sources/m.move @@ -0,0 +1,34 @@ +// Test tracking reference values when multiple levels of references are involved. +module references_deep::m; + +public struct SomeStruct has drop { + struct_field: VecStruct, +} + +public struct VecStruct has drop, copy { + vec_field: vector<u64>, +} + +fun bar(vec_ref: &mut vector<u64>): u64 { + let e = vector::borrow_mut(vec_ref, 0); + *e = 42; + vec_ref[0] +} + +fun foo(some_struct_ref: &mut SomeStruct): u64 { + let res = bar(&mut some_struct_ref.struct_field.vec_field); + res + some_struct_ref.struct_field.vec_field[0] +} + +fun some_struct(): SomeStruct { + SomeStruct { + struct_field: VecStruct { vec_field: vector::singleton(0) } + } +} + +#[test] +fun test() { + let mut some_struct = some_struct(); + some_struct.struct_field.vec_field.push_back(7); + foo(&mut some_struct); +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/test.exp b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/test.exp new file mode 100644 index 0000000000000..5327fe3b67341 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/test.exp @@ -0,0 +1,74 @@ +current frame stack: + function: test (line 33) + scope 0 : + some_struct : (0x0::m::SomeStruct) { + struct_field : (0x0::m::VecStruct) { + vec_field : [ + 0 : 0 + 1 : 7 + ] + } + } + type: 0x0::m::SomeStruct + + function: foo (line 19) + scope 0 : + some_struct_ref : (0x0::m::SomeStruct) { + struct_field : (0x0::m::VecStruct) { + vec_field : [ + 0 : 0 + 1 : 7 + ] + } + } + type: &mut 0x0::m::SomeStruct + + function: bar (line 13) + scope 0 : + vec_ref : (0x0::m::SomeStruct) { + struct_field : (0x0::m::VecStruct) { + vec_field : [ + 0 : 0 + 1 : 7 + ] + } + } + type: &mut vector<u64> + +current frame stack: + function: test (line 33) + scope 0 : + some_struct : (0x0::m::SomeStruct) { + struct_field : (0x0::m::VecStruct) { + vec_field : [ + 0 : 42 + 1 : 7 + ] + } + } + type: 0x0::m::SomeStruct + + function: foo (line 19) + scope 0 : + some_struct_ref : (0x0::m::SomeStruct) { + struct_field : (0x0::m::VecStruct) { + vec_field : [ + 0 : 42 + 1 : 7 + ] + } + } + type: &mut 0x0::m::SomeStruct + + function: bar (line 15) + scope 0 : + vec_ref : (0x0::m::SomeStruct) { + struct_field : (0x0::m::VecStruct) { + vec_field : [ + 0 : 42 + 1 : 7 + ] + } + } + type: &mut vector<u64> + diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/trace.spec.js b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/trace.spec.js new file mode 100644 index 0000000000000..88ef8e7c14f28 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/trace.spec.js @@ -0,0 +1,17 @@ +let action = (runtime) => { + let res = ''; + // step over functions creating data to be referenced + runtime.step(true); + runtime.step(true); + // step into a function taking a reference as an argument + runtime.step(false); + // step into another function taking a reference as an argument + runtime.step(false); + res += runtime.toString(); + // advance until all references are updated + runtime.step(true); + runtime.step(true); + res += runtime.toString(); + return res; +}; +run_spec(__dirname, action); diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/traces/references_deep__m__test.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/traces/references_deep__m__test.json new file mode 100644 index 0000000000000..78359c58a39b8 --- /dev/null +++
b/external-crates/move/crates/move-analyzer/trace-adapter/tests/references_deep/traces/references_deep__m__test.json @@ -0,0 +1 @@ +{"version":1,"events":[{"OpenFrame":{"frame":{"frame_id":0,"function_name":"test","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":3,"type_instantiation":[],"parameters":[],"return_types":[],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeStruct","type_args":[]}},"ref_type":null}],"is_native":false},"gas_left":1000000000}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":1000000000,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":2,"function_name":"some_struct","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":2,"type_instantiation":[],"parameters":[],"return_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeStruct","type_args":[]}},"ref_type":null}],"locals_types":[],"is_native":false},"gas_left":1000000000}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999996,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999996,"instruction":"CALL_GENERIC"}},{"OpenFrame":{"frame":{"frame_id":6,"function_name":"singleton","module":{"address":"0000000000000000000000000000000000000000000000000000000000000001","name":"vector"},"binary_member_index":8,"type_instantiation":["u64"],"parameters":[{"RuntimeValue":{"value":0}}],"return_types":[{"type_":{"vector":"u64"},"ref_type":null}],"locals_types":[{"type_":"u64","ref_type":null},{"type_":{"vector":"u64"},"ref_type":null}],"is_native":false},"gas_left":999999996}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999985,"instruction":"VEC_PACK"}},{"Effect":{"Push":{"RuntimeValue":{"value":[]}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999984,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":[]}}}},{"Effect":{"Write":{"location":{"Local":[6,1]},"root_value_after_write":{"RuntimeValue":{"value":[]}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999974,"instruction":"MUT_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[6,1]},"root_value_read":{"RuntimeValue":{"value":[]}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[6,1]},"snapshot":[]}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999956,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[6,0]},"root_value_read":{"RuntimeValue":{"value":0}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999955,"instruction":"VEC_PUSH_BACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[6,1]},"snapshot":[]}}}},{"Effect":{"Write":{"location":{"Local":[6,1]},"root_value_after_write":{"RuntimeValue":{"value":[0]}}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999945,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[6,1]},"root_value_read":{"RuntimeValue":{"value":[0]}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":[0]}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999944,"instruction":"RET"}},{"CloseFrame":{"frame_id":6,"return_":[{"RuntimeValue":{"value":[0]}}],"gas_left":999999944}},{"In
struction":{"type_parameters":[],"pc":2,"gas_left":999999940,"instruction":"PACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":[0]}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0]}}}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999936,"instruction":"PACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0]}}}}}},{"Effect":{"Push":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0]}}}}}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999935,"instruction":"RET"}},{"CloseFrame":{"frame_id":2,"return_":[{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0]}}}}}}],"gas_left":999999935}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999934,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0]}}}}}}}},{"Effect":{"Write":{"location":{"Local":[0,0]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0]}}}}}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999924,"instruction":"MUT_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[0,0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0]}}}}}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0]}}}}}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999914,"instruction":"MUT_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0]}}}}}}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Local":[0,0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0]}}}}}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999904,"instruction":"MUT_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Local":[0,0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0]}}}}}}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0]}}}}}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999901,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":7}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999900,"instruction":"VEC_PUSH_BACK"}},{"Effect":{"Pop":{"RuntimeValue":{"value":7}}}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0]}}}}}}}},{"Effect":{"Write":{"location":{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec
_field":[0,7]}}}}}}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999890,"instruction":"MUT_BORROW_LOC"}},{"Effect":{"Read":{"location":{"Local":[0,0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0,7]}}}}}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0,7]}}}}}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999890,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":57,"function_name":"foo","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":1,"type_instantiation":[],"parameters":[{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0,7]}}}}}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":{"struct":{"address":"0000000000000000000000000000000000000000000000000000000000000000","module":"m","name":"SomeStruct","type_args":[]}},"ref_type":"Mut"}],"is_native":false},"gas_left":999999890}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999879,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[57,0]},"root_value_read":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0,7]}}}}}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0,7]}}}}}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999869,"instruction":"MUT_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0,7]}}}}}}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Local":[0,0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0,7]}}}}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999859,"instruction":"MUT_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Local":[0,0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0,7]}}}}}}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0,7]}}}}}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999859,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":68,"function_name":"bar","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0,7]}}}}}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":{"vector":"u64"},"ref_type":"Mut"},{"type_":"u64","ref_type":"Mut"}],"is_native":false},"gas_left":999999859}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":99999984
8,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[68,0]},"root_value_read":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0,7]}}}}}},"moved":false}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0,7]}}}}}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999845,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999835,"instruction":"VEC_MUT_BORROW"}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0,7]}}}}}}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0,7]}}}}}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999834,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0,7]}}}}}}}},{"Effect":{"Write":{"location":{"Local":[68,1]},"root_value_after_write":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0,7]}}}}}}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999831,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999821,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[68,1]},"root_value_read":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0,7]}}}}}},"moved":true}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0,7]}}}}}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999803,"instruction":"WRITE_REF"}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[0,7]}}}}}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Write":{"location":{"Indexed":[{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},0]},"root_value_after_write":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[42,7]}}}}}}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999793,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[68,0]},"root_value_read":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_fi
eld":{"type":"0x0::m::VecStruct","fields":{"vec_field":[42,7]}}}}}},"moved":true}}},{"Effect":{"Push":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[42,7]}}}}}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999783,"instruction":"FREEZE_REF"}},{"Effect":{"Pop":{"MutRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[42,7]}}}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[42,7]}}}}}}}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999999780,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999999770,"instruction":"VEC_IMM_BORROW"}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Pop":{"ImmRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[42,7]}}}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[42,7]}}}}}}}},{"Instruction":{"type_parameters":[],"pc":11,"gas_left":999999752,"instruction":"READ_REF"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Indexed":[{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[42,7]}}}}}}}},{"Effect":{"Read":{"location":{"Indexed":[{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[42,7]}}}}}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":12,"gas_left":999999751,"instruction":"RET"}},{"CloseFrame":{"frame_id":68,"return_":[{"RuntimeValue":{"value":42}}],"gas_left":999999751}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999741,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[57,0]},"root_value_read":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[42,7]}}}}}},"moved":true}}},{"Effect":{"Push":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[42,7]}}}}}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999731,"instruction":"IMM_BORROW_FIELD"}},{"Effect":{"Pop":{"MutRef":{"location":{"Local":[0,0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[42,7]}}}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Local":[0,0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[42,7]}}}}}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999721,"instruction":"IMM_BORROW_FIELD"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Index
ed":[{"Local":[0,0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[42,7]}}}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[42,7]}}}}}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999718,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999708,"instruction":"VEC_IMM_BORROW"}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Pop":{"ImmRef":{"location":{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[42,7]}}}}}}}},{"Effect":{"Push":{"ImmRef":{"location":{"Indexed":[{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[42,7]}}}}}}}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999999690,"instruction":"READ_REF"}},{"Effect":{"Pop":{"ImmRef":{"location":{"Indexed":[{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},0]},"snapshot":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[42,7]}}}}}}}},{"Effect":{"Read":{"location":{"Indexed":[{"Indexed":[{"Indexed":[{"Local":[0,0]},0]},0]},0]},"root_value_read":{"RuntimeValue":{"value":{"type":"0x0::m::SomeStruct","fields":{"struct_field":{"type":"0x0::m::VecStruct","fields":{"vec_field":[42,7]}}}}}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999999687,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Push":{"RuntimeValue":{"value":84}}}},{"Instruction":{"type_parameters":[],"pc":11,"gas_left":999999686,"instruction":"RET"}},{"CloseFrame":{"frame_id":57,"return_":[{"RuntimeValue":{"value":84}}],"gas_left":999999686}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999999685,"instruction":"POP"}},{"Effect":{"Pop":{"RuntimeValue":{"value":84}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999999684,"instruction":"RET"}},{"CloseFrame":{"frame_id":0,"return_":[],"gas_left":999999684}}]} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/run_spec.js b/external-crates/move/crates/move-analyzer/trace-adapter/tests/run_spec.js new file mode 100644 index 0000000000000..07c3c12b10f3e --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/run_spec.js @@ -0,0 +1,51 @@ +// Copyright (c) The Move Contributors +// SPDX-License-Identifier: Apache-2.0 + +const assert = require('assert'); +const linediff = require('line-diff'); +const fs = require('fs'); +const path = require('path'); +const runtime = require('../out/runtime'); + +const UB = process.env['UB']; + +/** + * Testing harness, assuming that the tested function + * is the `test` function in the `m` module. It executes + * a given callback function and compares its result with + * the expected one stored in a file. 
+ * + * @param dirname the directory where the test (its manifest file) is located + * @param action a function to be executed by the harness that + * takes DAP runtime as argument and returns a string representing + * test result + */ +global.run_spec = function (dirname, action) { + const test_dir = path.basename(dirname); + describe(test_dir, () => { + it(test_dir, () => { + const rt = new runtime.Runtime(); + // assume that the test is always in the `test` function + // of the `m` module + const traceInfo = test_dir + '::' + 'm::test'; + return rt.start(path.join(dirname, 'sources', `m.move`), traceInfo, true).then(() => { + const result = action(rt); + const exp_file = 'test.exp'; + const exp_path = path.join(dirname, exp_file); + if (UB === '1') { + // user asked to regenerate output + fs.writeFileSync(exp_path, result, 'utf8'); + return; + } + if (!fs.existsSync(exp_path)) { + assert.fail(`\n${result}\nNo expected output file`); + } + const exp_out = fs.readFileSync(exp_path, { encoding: 'utf8' }); + if (result !== exp_out) { + const out_diff = new linediff(exp_out, result).toString(); + assert.fail(`${out_diff}\nCurrent output does not match the expected one (run with UB=1 to save the current output)`); + } + }); + }); + }); +}; diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/Move.toml b/external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/Move.toml new file mode 100644 index 0000000000000..2eb5111ce9d09 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/Move.toml @@ -0,0 +1,10 @@ +[package] +name = "shadowing" +edition = "2024.beta" + +[dependencies] +MoveStdlib = { local = "../../../../move-stdlib" } + +[addresses] +shadowing = "0x0" +std = "0x1" diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/build/shadowing/bytecode_modules/m.mv b/external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/build/shadowing/bytecode_modules/m.mv new file mode 100644 index 0000000000000..67ca2edde8a59 Binary files /dev/null and b/external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/build/shadowing/bytecode_modules/m.mv differ diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/build/shadowing/source_maps/m.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/build/shadowing/source_maps/m.json new file mode 100644 index 0000000000000..3741ca8f7e731 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/build/shadowing/source_maps/m.json @@ -0,0 +1 @@ 
+{"definition_location":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":105,"end":106},"module_name":["0000000000000000000000000000000000000000000000000000000000000000","m"],"struct_map":{},"enum_map":{},"function_map":{"0":{"definition_location":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":113,"end":116},"type_parameters":[],"parameters":[["p#0#0",{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":117,"end":118}],["val1#0#0",{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":126,"end":130}],["val2#0#0",{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":137,"end":141}],["shadowed_var#0#0",{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":148,"end":160}]],"returns":[{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":168,"end":171}],"locals":[["res#1#0",{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":186,"end":189}],["shadowed_var#1#0",{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":221,"end":233}],["shadowed_var#2#0",{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":306,"end":318}]],"nops":{},"code_map":{"0":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":192,"end":193},"1":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":182,"end":189},"2":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":204,"end":205},"3":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":200,"end":523},"4":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":236,"end":240},"5":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":243,"end":255},"6":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":241,"end":242},"7":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":221,"end":233},"8":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":269,"end":281},"9":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"s
tart":284,"end":286},"10":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":282,"end":283},"11":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":265,"end":482},"12":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":321,"end":325},"13":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":328,"end":340},"14":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":326,"end":327},"15":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":306,"end":318},"16":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":358,"end":370},"17":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":373,"end":375},"18":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":371,"end":372},"19":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":354,"end":433},"20":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":401,"end":403},"21":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":406,"end":418},"22":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":404,"end":405},"23":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":395,"end":398},"24":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":453,"end":456},"25":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":459,"end":471},"26":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":457,"end":458},"27":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":447,"end":450},"28":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":498,"end":501},"29":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":504,"end":516},"30":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":502,"end":503},"31":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,1
45,135,198,77,248,2,136,160],"start":492,"end":495},"32":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":530,"end":533},"33":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":536,"end":548},"34":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":534,"end":535},"35":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":530,"end":548}},"is_native":false},"1":{"definition_location":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":564,"end":568},"type_parameters":[],"parameters":[],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":581,"end":585},"1":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":587,"end":588},"2":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":590,"end":591},"3":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":593,"end":594},"4":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":577,"end":595},"6":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":595,"end":596}},"is_native":false},"2":{"definition_location":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":87,"end":598},"type_parameters":[],"parameters":[],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[200,116,202,144,197,72,240,71,227,157,234,136,204,22,234,123,245,203,253,178,185,5,137,144,145,135,198,77,248,2,136,160],"start":87,"end":598}},"is_native":false}},"constant_map":{}} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/build/shadowing/sources/m.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/build/shadowing/sources/m.move new file mode 100644 index 0000000000000..33c71b4c07a69 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/build/shadowing/sources/m.move @@ -0,0 +1,26 @@ +// Test variable shadowing: creation and disposal of shadowed variables +// and scopes. 
+module shadowing::m; + +fun foo(p: bool, val1: u64, val2: u64, shadowed_var: u64): u64 { + let mut res = 0; + + if (p) { + let shadowed_var = val1 + shadowed_var; + if (shadowed_var < 42) { + let shadowed_var = val2 + shadowed_var; + if (shadowed_var < 42) { + res = 42 + shadowed_var; + }; + res = res + shadowed_var; + }; + res = res + shadowed_var; + }; + + res + shadowed_var +} + +#[test] +fun test() { + foo(true, 7, 7, 7); +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/sources/m.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/sources/m.move new file mode 100644 index 0000000000000..33c71b4c07a69 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/sources/m.move @@ -0,0 +1,26 @@ +// Test variable shadowing: creation and disposal of shadowed variables +// and scopes. +module shadowing::m; + +fun foo(p: bool, val1: u64, val2: u64, shadowed_var: u64): u64 { + let mut res = 0; + + if (p) { + let shadowed_var = val1 + shadowed_var; + if (shadowed_var < 42) { + let shadowed_var = val2 + shadowed_var; + if (shadowed_var < 42) { + res = 42 + shadowed_var; + }; + res = res + shadowed_var; + }; + res = res + shadowed_var; + }; + + res + shadowed_var +} + +#[test] +fun test() { + foo(true, 7, 7, 7); +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/test.exp b/external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/test.exp new file mode 100644 index 0000000000000..6390d24cbbd49 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/test.exp @@ -0,0 +1,63 @@ +current frame stack: + function: test (line 25) + scope 0 : + function: foo (line 10) + scope 0 : + val2 : 7 + type: u64 + + shadowed_var : 7 + type: u64 + + res : 0 + type: u64 + + scope 1 : + shadowed_var : 14 + type: u64 + +current frame stack: + function: test (line 25) + scope 0 : + function: foo (line 12) + scope 0 : + shadowed_var : 7 + type: u64 + + res : 0 + type: u64 + + scope 1 : + shadowed_var : 14 + type: u64 + + scope 2 : + shadowed_var : 21 + type: u64 + +current frame stack: + function: test (line 25) + scope 0 : + function: foo (line 17) + scope 0 : + shadowed_var : 7 + type: u64 + + res : 84 + type: u64 + + scope 1 : + shadowed_var : 14 + type: u64 + +current frame stack: + function: test (line 25) + scope 0 : + function: foo (line 20) + scope 0 : + shadowed_var : 7 + type: u64 + + res : 98 + type: u64 + diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/trace.spec.js b/external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/trace.spec.js new file mode 100644 index 0000000000000..70a22ea9fa435 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/trace.spec.js @@ -0,0 +1,25 @@ +let action = (runtime) => { + let res = ''; + // step into a function + runtime.step(false); + // advance until first shadowed variable is created + runtime.step(true); + runtime.step(true); + runtime.step(true); + res += runtime.toString(); + // advance until second shadowed variable is created + runtime.step(true); + runtime.step(true); + res += runtime.toString(); + // advance until second shadowed variable disappears + runtime.step(true); + runtime.step(true); + runtime.step(true); + res += runtime.toString(); + // advance until first shadowed variable disappears + runtime.step(true); + res += runtime.toString(); + + return res; +}; +run_spec(__dirname, action); 
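For context, the shadowed_var and res values recorded in shadowing/test.exp and in the trace below follow directly from foo(true, 7, 7, 7) in the shadowing sources above; a quick hand calculation (labels like "outer if" / "inner if" are descriptive only, not names from the fixtures):

    shadowed_var (outer if)   = val1 + shadowed_var = 7 + 7 = 14
    shadowed_var (inner if)   = val2 + 14 = 21
    res = 42 + 21 = 63, then 63 + 21 = 84, then 84 + 14 = 98 as each shadowing scope closes
    return value = res + shadowed_var (parameter) = 98 + 7 = 105

These are exactly the values the trace.spec.js steps observe: 14 and 21 while the shadowed copies are live, then res of 84 and 98 after they go out of scope, with 105 as the value returned to test.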
diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/traces/shadowing__m__test.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/traces/shadowing__m__test.json new file mode 100644 index 0000000000000..e7b4dad767010 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/shadowing/traces/shadowing__m__test.json @@ -0,0 +1 @@ +{"version":1,"events":[{"OpenFrame":{"frame":{"frame_id":0,"function_name":"test","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":1,"type_instantiation":[],"parameters":[],"return_types":[],"locals_types":[],"is_native":false},"gas_left":1000000000}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999997,"instruction":"LD_TRUE"}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999994,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":7}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999991,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":7}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999988,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":7}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999988,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":10,"function_name":"foo","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":true}},{"RuntimeValue":{"value":7}},{"RuntimeValue":{"value":7}},{"RuntimeValue":{"value":7}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":"bool","ref_type":null},{"type_":"u64","ref_type":null},{"type_":"u64","ref_type":null},{"type_":"u64","ref_type":null},{"type_":"u64","ref_type":null},{"type_":"u64","ref_type":null},{"type_":"u64","ref_type":null}],"is_native":false},"gas_left":999999988}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999984,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":0}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999983,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":0}}}},{"Effect":{"Write":{"location":{"Local":[10,4]},"root_value_after_write":{"RuntimeValue":{"value":0}}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999965,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[10,0]},"root_value_read":{"RuntimeValue":{"value":true}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999964,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999946,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[10,1]},"root_value_read":{"RuntimeValue":{"value":7}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":7}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999928,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[10,3]},"root_value_read":{"RuntimeValue":{"value":7}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":7}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999925,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":7}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":7}}}},{"Effect":{"Push":{"Runtime
Value":{"value":14}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999924,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":14}}}},{"Effect":{"Write":{"location":{"Local":[10,5]},"root_value_after_write":{"RuntimeValue":{"value":14}}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999906,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[10,5]},"root_value_read":{"RuntimeValue":{"value":14}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":14}}}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999999903,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999999900,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":14}}}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":11,"gas_left":999999899,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":12,"gas_left":999999881,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[10,2]},"root_value_read":{"RuntimeValue":{"value":7}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":7}}}},{"Instruction":{"type_parameters":[],"pc":13,"gas_left":999999863,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[10,5]},"root_value_read":{"RuntimeValue":{"value":14}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":14}}}},{"Instruction":{"type_parameters":[],"pc":14,"gas_left":999999860,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":14}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":7}}}},{"Effect":{"Push":{"RuntimeValue":{"value":21}}}},{"Instruction":{"type_parameters":[],"pc":15,"gas_left":999999859,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":21}}}},{"Effect":{"Write":{"location":{"Local":[10,6]},"root_value_after_write":{"RuntimeValue":{"value":21}}}}},{"Instruction":{"type_parameters":[],"pc":16,"gas_left":999999841,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[10,6]},"root_value_read":{"RuntimeValue":{"value":21}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":21}}}},{"Instruction":{"type_parameters":[],"pc":17,"gas_left":999999838,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":18,"gas_left":999999835,"instruction":"LT"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":21}}}},{"Effect":{"Push":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":19,"gas_left":999999834,"instruction":"BR_FALSE"}},{"Effect":{"Pop":{"RuntimeValue":{"value":true}}}},{"Instruction":{"type_parameters":[],"pc":20,"gas_left":999999831,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":21,"gas_left":999999813,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[10,6]},"root_value_read":{"RuntimeValue":{"value":21}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":21}}}},{"Instruction":{"type_parameters":[],"pc":22,"gas_left":999999810,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":21}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Push":{"RuntimeValue":{"value":63}}}},{"Instruction":{"type_parameters":[],"pc":23,"gas_left":999999809,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"Runti
meValue":{"value":63}}}},{"Effect":{"Write":{"location":{"Local":[10,4]},"root_value_after_write":{"RuntimeValue":{"value":63}}}}},{"Instruction":{"type_parameters":[],"pc":24,"gas_left":999999791,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[10,4]},"root_value_read":{"RuntimeValue":{"value":63}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":63}}}},{"Instruction":{"type_parameters":[],"pc":25,"gas_left":999999773,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[10,6]},"root_value_read":{"RuntimeValue":{"value":21}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":21}}}},{"Instruction":{"type_parameters":[],"pc":26,"gas_left":999999770,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":21}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":63}}}},{"Effect":{"Push":{"RuntimeValue":{"value":84}}}},{"Instruction":{"type_parameters":[],"pc":27,"gas_left":999999769,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":84}}}},{"Effect":{"Write":{"location":{"Local":[10,4]},"root_value_after_write":{"RuntimeValue":{"value":84}}}}},{"Instruction":{"type_parameters":[],"pc":28,"gas_left":999999751,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[10,4]},"root_value_read":{"RuntimeValue":{"value":84}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":84}}}},{"Instruction":{"type_parameters":[],"pc":29,"gas_left":999999733,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[10,5]},"root_value_read":{"RuntimeValue":{"value":14}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":14}}}},{"Instruction":{"type_parameters":[],"pc":30,"gas_left":999999730,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":14}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":84}}}},{"Effect":{"Push":{"RuntimeValue":{"value":98}}}},{"Instruction":{"type_parameters":[],"pc":31,"gas_left":999999729,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":98}}}},{"Effect":{"Write":{"location":{"Local":[10,4]},"root_value_after_write":{"RuntimeValue":{"value":98}}}}},{"Instruction":{"type_parameters":[],"pc":32,"gas_left":999999711,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[10,4]},"root_value_read":{"RuntimeValue":{"value":98}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":98}}}},{"Instruction":{"type_parameters":[],"pc":33,"gas_left":999999693,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[10,3]},"root_value_read":{"RuntimeValue":{"value":7}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":7}}}},{"Instruction":{"type_parameters":[],"pc":34,"gas_left":999999690,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":7}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":98}}}},{"Effect":{"Push":{"RuntimeValue":{"value":105}}}},{"Instruction":{"type_parameters":[],"pc":35,"gas_left":999999689,"instruction":"RET"}},{"CloseFrame":{"frame_id":10,"return_":[{"RuntimeValue":{"value":105}}],"gas_left":999999689}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999688,"instruction":"POP"}},{"Effect":{"Pop":{"RuntimeValue":{"value":105}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999687,"instruction":"RET"}},{"CloseFrame":{"frame_id":0,"return_":[],"gas_left":999999687}}]} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/Move.toml b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/Move.toml new file mode 100644 
index 0000000000000..7eccf6f05afef --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/Move.toml @@ -0,0 +1,10 @@ +[package] +name = "stepping" +edition = "2024.beta" + +[dependencies] +MoveStdlib = { local = "../../../../move-stdlib" } + +[addresses] +stepping = "0x0" +std = "0x1" diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/build/stepping/bytecode_modules/m.mv b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/build/stepping/bytecode_modules/m.mv new file mode 100644 index 0000000000000..dcd6f02ab7588 Binary files /dev/null and b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/build/stepping/bytecode_modules/m.mv differ diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/build/stepping/source_maps/m.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/build/stepping/source_maps/m.json new file mode 100644 index 0000000000000..29f3badfbee25 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/build/stepping/source_maps/m.json @@ -0,0 +1 @@ +{"definition_location":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":136,"end":137},"module_name":["0000000000000000000000000000000000000000000000000000000000000000","m"],"struct_map":{},"enum_map":{},"function_map":{"0":{"definition_location":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":144,"end":147},"type_parameters":[],"parameters":[["p#0#0",{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":148,"end":149}]],"returns":[{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":157,"end":160}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":167,"end":168},"1":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":171,"end":172},"2":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":169,"end":170},"3":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":167,"end":172}},"is_native":false},"1":{"definition_location":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":188,"end":192},"type_parameters":[],"parameters":[],"returns":[],"locals":[["_res#1#0",{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":209,"end":213}]],"nops":{},"code_map":{"0":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":220,"end":222},"1":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":216,"end":223},"2":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,1
21,32,208,84,152],"start":205,"end":213},"3":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":236,"end":240},"4":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":247,"end":251},"5":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":243,"end":252},"6":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":241,"end":242},"7":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":229,"end":233},"8":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":265,"end":269},"9":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":276,"end":280},"10":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":272,"end":281},"11":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":270,"end":271},"12":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":258,"end":262},"13":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":281,"end":282}},"is_native":false},"2":{"definition_location":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":119,"end":343},"type_parameters":[],"parameters":[],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[167,251,62,162,22,201,105,5,139,246,239,203,25,24,39,142,137,164,92,138,52,96,179,60,228,96,111,121,32,208,84,152],"start":119,"end":343}},"is_native":false}},"constant_map":{}} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/build/stepping/sources/m.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/build/stepping/sources/m.move new file mode 100644 index 0000000000000..55709169e5686 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/build/stepping/sources/m.move @@ -0,0 +1,16 @@ +// Test simple stepping functionality: +// - step into a function +// - step out of a function +// - step over a function +module stepping::m; + +fun foo(p: u64): u64 { + p + p +} + +#[test] +fun test() { + let mut _res = foo(42); + _res = _res + foo(_res); + _res = _res + foo(_res); // to force another unoptimized read to keep `res` visible +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/sources/m.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/sources/m.move new file mode 100644 index 0000000000000..55709169e5686 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/sources/m.move @@ -0,0 +1,16 @@ +// Test simple stepping functionality: +// - step into a function +// - step out of a function +// - step over a function +module stepping::m; + +fun foo(p: 
u64): u64 { + p + p +} + +#[test] +fun test() { + let mut _res = foo(42); + _res = _res + foo(_res); + _res = _res + foo(_res); // to force another unoptimized read to keep `res` visible +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/test.exp b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/test.exp new file mode 100644 index 0000000000000..69556b2e44ac9 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/test.exp @@ -0,0 +1,20 @@ +current frame stack: + function: test (line 13) + scope 0 : + function: foo (line 8) + scope 0 : + p : 42 + type: u64 + +current frame stack: + function: test (line 14) + scope 0 : + _res : 84 + type: u64 + +current frame stack: + function: test (line 15) + scope 0 : + _res : 252 + type: u64 + diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/trace.spec.js b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/trace.spec.js new file mode 100644 index 0000000000000..f5296cd01981c --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/trace.spec.js @@ -0,0 +1,14 @@ +let action = (runtime) => { + let res = ''; + // step into a function + runtime.step(false); + res += runtime.toString(); + // step out of a function + runtime.stepOut(false); + res += runtime.toString(); + // step over a function + runtime.step(true); + res += runtime.toString(); + return res; +}; +run_spec(__dirname, action); diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/traces/stepping__m__test.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/traces/stepping__m__test.json new file mode 100644 index 0000000000000..028dbd2996d64 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping/traces/stepping__m__test.json @@ -0,0 +1 @@ 
+{"version":1,"events":[{"OpenFrame":{"frame":{"frame_id":0,"function_name":"test","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":1,"type_instantiation":[],"parameters":[],"return_types":[],"locals_types":[{"type_":"u64","ref_type":null}],"is_native":false},"gas_left":1000000000}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999997,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999997,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":4,"function_name":"foo","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":42}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":"u64","ref_type":null}],"is_native":false},"gas_left":999999997}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999978,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999960,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999957,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Push":{"RuntimeValue":{"value":84}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999956,"instruction":"RET"}},{"CloseFrame":{"frame_id":4,"return_":[{"RuntimeValue":{"value":84}}],"gas_left":999999956}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999955,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":84}}}},{"Effect":{"Write":{"location":{"Local":[0,0]},"root_value_after_write":{"RuntimeValue":{"value":84}}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999937,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[0,0]},"root_value_read":{"RuntimeValue":{"value":84}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":84}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999919,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[0,0]},"root_value_read":{"RuntimeValue":{"value":84}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":84}}}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999919,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":27,"function_name":"foo","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":84}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":"u64","ref_type":null}],"is_native":false},"gas_left":999999919}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999900,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[27,0]},"root_value_read":{"RuntimeValue":{"value":84}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":84}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999882,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[27,0]},"root_value_read":{"RuntimeValue":{"valu
e":84}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":84}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999879,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":84}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":84}}}},{"Effect":{"Push":{"RuntimeValue":{"value":168}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999878,"instruction":"RET"}},{"CloseFrame":{"frame_id":27,"return_":[{"RuntimeValue":{"value":168}}],"gas_left":999999878}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":999999875,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":168}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":84}}}},{"Effect":{"Push":{"RuntimeValue":{"value":252}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999874,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":252}}}},{"Effect":{"Write":{"location":{"Local":[0,0]},"root_value_after_write":{"RuntimeValue":{"value":252}}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999856,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[0,0]},"root_value_read":{"RuntimeValue":{"value":252}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":252}}}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999999838,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[0,0]},"root_value_read":{"RuntimeValue":{"value":252}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":252}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999999838,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":54,"function_name":"foo","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":252}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":"u64","ref_type":null}],"is_native":false},"gas_left":999999838}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999819,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[54,0]},"root_value_read":{"RuntimeValue":{"value":252}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":252}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999801,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[54,0]},"root_value_read":{"RuntimeValue":{"value":252}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":252}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999798,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":252}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":252}}}},{"Effect":{"Push":{"RuntimeValue":{"value":504}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999797,"instruction":"RET"}},{"CloseFrame":{"frame_id":54,"return_":[{"RuntimeValue":{"value":504}}],"gas_left":999999797}},{"Instruction":{"type_parameters":[],"pc":11,"gas_left":999999794,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":504}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":252}}}},{"Effect":{"Push":{"RuntimeValue":{"value":756}}}},{"Instruction":{"type_parameters":[],"pc":12,"gas_left":999999793,"instruction":"POP"}},{"Effect":{"Pop":{"RuntimeValue":{"value":756}}}},{"Instruction":{"type_parameters":[],"pc":13,"gas_left":999999792,"instruction":"RET"}},{"CloseFrame":{"frame_id":0,"return_":[],"gas_left":999999792}}]} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/Move.toml 
b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/Move.toml new file mode 100644 index 0000000000000..e79c6841d9106 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/Move.toml @@ -0,0 +1,10 @@ +[package] +name = "stepping_call" +edition = "2024.beta" + +[dependencies] +MoveStdlib = { local = "../../../../move-stdlib" } + +[addresses] +stepping_call = "0x0" +std = "0x1" diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/build/stepping_call/bytecode_modules/m.mv b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/build/stepping_call/bytecode_modules/m.mv new file mode 100644 index 0000000000000..d86d93f238378 Binary files /dev/null and b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/build/stepping_call/bytecode_modules/m.mv differ diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/build/stepping_call/source_maps/m.json b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/build/stepping_call/source_maps/m.json new file mode 100644 index 0000000000000..94b0650f11d24 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/build/stepping_call/source_maps/m.json @@ -0,0 +1 @@ +{"definition_location":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":334,"end":335},"module_name":["0000000000000000000000000000000000000000000000000000000000000000","m"],"struct_map":{},"enum_map":{},"function_map":{"0":{"definition_location":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":342,"end":345},"type_parameters":[],"parameters":[["p#0#0",{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":346,"end":347}]],"returns":[{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":355,"end":358}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":365,"end":366}},"is_native":false},"1":{"definition_location":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":374,"end":377},"type_parameters":[],"parameters":[["p#0#0",{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":378,"end":379}]],"returns":[{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":387,"end":390}],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":397,"end":398}},"is_native":false},"2":{"definition_location":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":406,"end":409},"type_parameters":[],"parameters":[["p#0#0",{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":410,"end":411}]],"returns":[{"file_hash":[155,146,46,135,32,
157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":419,"end":422}],"locals":[["v1#1#0",{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":433,"end":435}],["v2#1#0",{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":470,"end":472}],["v3#1#0",{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":500,"end":502}]],"nops":{},"code_map":{"0":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":438,"end":439},"1":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":442,"end":443},"2":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":440,"end":441},"3":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":450,"end":451},"4":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":446,"end":452},"5":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":444,"end":445},"6":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":455,"end":456},"7":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":453,"end":454},"8":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":459,"end":460},"9":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":457,"end":458},"10":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":433,"end":435},"11":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":479,"end":480},"12":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":475,"end":481},"13":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":488,"end":489},"14":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":484,"end":490},"15":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":482,"end":483},"16":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":470,"end":472},"17":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":509,"end":510},"18":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":505,"end":511},"19":{"f
ile_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":518,"end":519},"20":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":514,"end":520},"21":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":512,"end":513},"22":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":500,"end":502},"23":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":526,"end":528},"24":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":531,"end":533},"25":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":529,"end":530},"26":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":536,"end":538},"27":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":534,"end":535},"28":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":526,"end":538}},"is_native":false},"3":{"definition_location":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":554,"end":558},"type_parameters":[],"parameters":[],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":571,"end":573},"1":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":567,"end":574},"3":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":574,"end":575}},"is_native":false},"4":{"definition_location":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":312,"end":577},"type_parameters":[],"parameters":[],"returns":[],"locals":[],"nops":{},"code_map":{"0":{"file_hash":[155,146,46,135,32,157,133,216,70,224,37,199,86,224,28,188,137,176,105,255,56,8,75,65,33,14,81,44,202,92,140,44],"start":312,"end":577}},"is_native":false}},"constant_map":{}} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/build/stepping_call/sources/m.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/build/stepping_call/sources/m.move new file mode 100644 index 0000000000000..d1e63ee566f45 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/build/stepping_call/sources/m.move @@ -0,0 +1,26 @@ +// Test stepping functionality in presence of function calls: +// - with other instructions on the same line a call, step over line in one go +// - with two calls on the same line, step over both in one go +// - with two calls on the same line, step into the first and +// after stepping out, step over the second +module stepping_call::m; 
+ +fun baz(p: u64): u64 { + p +} + +fun bar(p: u64): u64 { + p +} + +fun foo(p: u64): u64 { + let v1 = p + p + bar(p) + p + p; + let v2 = baz(p) + bar(p); + let v3 = baz(p) + bar(p); + v1 + v2 + v3 +} + +#[test] +fun test() { + foo(42); +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/sources/m.move b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/sources/m.move new file mode 100644 index 0000000000000..d1e63ee566f45 --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/sources/m.move @@ -0,0 +1,26 @@ +// Test stepping functionality in presence of function calls: +// - with other instructions on the same line a call, step over line in one go +// - with two calls on the same line, step over both in one go +// - with two calls on the same line, step into the first and +// after stepping out, step over the second +module stepping_call::m; + +fun baz(p: u64): u64 { + p +} + +fun bar(p: u64): u64 { + p +} + +fun foo(p: u64): u64 { + let v1 = p + p + bar(p) + p + p; + let v2 = baz(p) + bar(p); + let v3 = baz(p) + bar(p); + v1 + v2 + v3 +} + +#[test] +fun test() { + foo(42); +} diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/test.exp b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/test.exp new file mode 100644 index 0000000000000..01039187c0a5e --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/test.exp @@ -0,0 +1,58 @@ +current frame stack: + function: test (line 25) + scope 0 : + function: foo (line 17) + scope 0 : + p : 42 + type: u64 + +current frame stack: + function: test (line 25) + scope 0 : + function: foo (line 18) + scope 0 : + p : 42 + type: u64 + + v1 : 210 + type: u64 + +current frame stack: + function: test (line 25) + scope 0 : + function: foo (line 19) + scope 0 : + p : 42 + type: u64 + + v1 : 210 + type: u64 + + v2 : 84 + type: u64 + +current frame stack: + function: test (line 25) + scope 0 : + function: foo (line 19) + scope 0 : + v1 : 210 + type: u64 + + v2 : 84 + type: u64 + +current frame stack: + function: test (line 25) + scope 0 : + function: foo (line 20) + scope 0 : + v1 : 210 + type: u64 + + v2 : 84 + type: u64 + + v3 : 84 + type: u64 + diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/trace.spec.js b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/trace.spec.js new file mode 100644 index 0000000000000..80eb43a64fb4d --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/trace.spec.js @@ -0,0 +1,27 @@ +let action = (runtime) => { + let res = ''; + // step into the main test function + runtime.step(false); + res += runtime.toString(); + + // step over a function to the next line + runtime.step(true); + res += runtime.toString(); + + // step over two functions to the next line + runtime.step(true); + res += runtime.toString(); + + // step into a function + runtime.step(false); + // step out of the function to the same line + runtime.stepOut(false); + res += runtime.toString(); + // step into a function + runtime.step(false); + // step out of the function to the next line + runtime.stepOut(false); + res += runtime.toString(); + return res; +}; +run_spec(__dirname, action); diff --git a/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/traces/stepping_call__m__test.json 
b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/traces/stepping_call__m__test.json new file mode 100644 index 0000000000000..568616f20037b --- /dev/null +++ b/external-crates/move/crates/move-analyzer/trace-adapter/tests/stepping_call/traces/stepping_call__m__test.json @@ -0,0 +1 @@ +{"version":1,"events":[{"OpenFrame":{"frame":{"frame_id":0,"function_name":"test","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":3,"type_instantiation":[],"parameters":[],"return_types":[],"locals_types":[],"is_native":false},"gas_left":1000000000}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999997,"instruction":"LD_U64"}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999997,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":4,"function_name":"foo","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":2,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":42}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":"u64","ref_type":null},{"type_":"u64","ref_type":null},{"type_":"u64","ref_type":null},{"type_":"u64","ref_type":null}],"is_native":false},"gas_left":999999997}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999978,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999960,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999957,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Push":{"RuntimeValue":{"value":84}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999939,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":4,"gas_left":999999939,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":19,"function_name":"bar","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":1,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":42}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":"u64","ref_type":null}],"is_native":false},"gas_left":999999939}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999920,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[19,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999919,"instruction":"RET"}},{"CloseFrame":{"frame_id":19,"return_":[{"RuntimeValue":{"value":42}}],"gas_left":999999919}},{"Instruction":{"type_parameters":[],"pc":5,"gas_left":999999916,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":84}}}},{"Effect":{"Push":{"RuntimeValue":{"value":126}}}},{"Instruction":{"type_parameters":[],"pc":6,"gas_left":9999
99898,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":7,"gas_left":999999895,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":126}}}},{"Effect":{"Push":{"RuntimeValue":{"value":168}}}},{"Instruction":{"type_parameters":[],"pc":8,"gas_left":999999877,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":9,"gas_left":999999874,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":168}}}},{"Effect":{"Push":{"RuntimeValue":{"value":210}}}},{"Instruction":{"type_parameters":[],"pc":10,"gas_left":999999873,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":210}}}},{"Effect":{"Write":{"location":{"Local":[4,1]},"root_value_after_write":{"RuntimeValue":{"value":210}}}}},{"Instruction":{"type_parameters":[],"pc":11,"gas_left":999999855,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":12,"gas_left":999999855,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":50,"function_name":"baz","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":42}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":"u64","ref_type":null}],"is_native":false},"gas_left":999999855}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999836,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[50,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999835,"instruction":"RET"}},{"CloseFrame":{"frame_id":50,"return_":[{"RuntimeValue":{"value":42}}],"gas_left":999999835}},{"Instruction":{"type_parameters":[],"pc":13,"gas_left":999999817,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":14,"gas_left":999999817,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":60,"function_name":"bar","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":1,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":42}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":"u64","ref_type":null}],"is_native":false},"gas_left":999999817}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999798,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[60,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999797,"instruction":"RET"}},{"CloseFrame":{"frame_id":60,"return_":[{"RuntimeValue":{"value":42}}],"gas_left":999999797}},{"Instruction":{"type_parameters":[],"pc":15,"gas_l
eft":999999794,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Push":{"RuntimeValue":{"value":84}}}},{"Instruction":{"type_parameters":[],"pc":16,"gas_left":999999793,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":84}}}},{"Effect":{"Write":{"location":{"Local":[4,2]},"root_value_after_write":{"RuntimeValue":{"value":84}}}}},{"Instruction":{"type_parameters":[],"pc":17,"gas_left":999999775,"instruction":"COPY_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":false}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":18,"gas_left":999999775,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":77,"function_name":"baz","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":0,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":42}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":"u64","ref_type":null}],"is_native":false},"gas_left":999999775}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999756,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[77,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999755,"instruction":"RET"}},{"CloseFrame":{"frame_id":77,"return_":[{"RuntimeValue":{"value":42}}],"gas_left":999999755}},{"Instruction":{"type_parameters":[],"pc":19,"gas_left":999999737,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":20,"gas_left":999999737,"instruction":"CALL"}},{"OpenFrame":{"frame":{"frame_id":87,"function_name":"bar","module":{"address":"0000000000000000000000000000000000000000000000000000000000000000","name":"m"},"binary_member_index":1,"type_instantiation":[],"parameters":[{"RuntimeValue":{"value":42}}],"return_types":[{"type_":"u64","ref_type":null}],"locals_types":[{"type_":"u64","ref_type":null}],"is_native":false},"gas_left":999999737}},{"Instruction":{"type_parameters":[],"pc":0,"gas_left":999999718,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[87,0]},"root_value_read":{"RuntimeValue":{"value":42}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":42}}}},{"Instruction":{"type_parameters":[],"pc":1,"gas_left":999999717,"instruction":"RET"}},{"CloseFrame":{"frame_id":87,"return_":[{"RuntimeValue":{"value":42}}],"gas_left":999999717}},{"Instruction":{"type_parameters":[],"pc":21,"gas_left":999999714,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":42}}}},{"Effect":{"Push":{"RuntimeValue":{"value":84}}}},{"Instruction":{"type_parameters":[],"pc":22,"gas_left":999999713,"instruction":"ST_LOC"}},{"Effect":{"Pop":{"RuntimeValue":{"value":84}}}},{"Effect":{"Write":{"location":{"Local":[4,3]},"root_value_after_write":{"RuntimeValue":{"value":84}}}}},{"Instruction":{"type_parameters":[],"pc":23,"gas_left":999999695,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,1]},"root_value_read":{"RuntimeValue":{"value":210}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":210}}}},{"Instruction":{"type_parameters":[],"pc":24,"gas_left
":999999677,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,2]},"root_value_read":{"RuntimeValue":{"value":84}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":84}}}},{"Instruction":{"type_parameters":[],"pc":25,"gas_left":999999674,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":84}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":210}}}},{"Effect":{"Push":{"RuntimeValue":{"value":294}}}},{"Instruction":{"type_parameters":[],"pc":26,"gas_left":999999656,"instruction":"MOVE_LOC"}},{"Effect":{"Read":{"location":{"Local":[4,3]},"root_value_read":{"RuntimeValue":{"value":84}},"moved":true}}},{"Effect":{"Push":{"RuntimeValue":{"value":84}}}},{"Instruction":{"type_parameters":[],"pc":27,"gas_left":999999653,"instruction":"ADD"}},{"Effect":{"Pop":{"RuntimeValue":{"value":84}}}},{"Effect":{"Pop":{"RuntimeValue":{"value":294}}}},{"Effect":{"Push":{"RuntimeValue":{"value":378}}}},{"Instruction":{"type_parameters":[],"pc":28,"gas_left":999999652,"instruction":"RET"}},{"CloseFrame":{"frame_id":4,"return_":[{"RuntimeValue":{"value":378}}],"gas_left":999999652}},{"Instruction":{"type_parameters":[],"pc":2,"gas_left":999999651,"instruction":"POP"}},{"Effect":{"Pop":{"RuntimeValue":{"value":378}}}},{"Instruction":{"type_parameters":[],"pc":3,"gas_left":999999650,"instruction":"RET"}},{"CloseFrame":{"frame_id":0,"return_":[],"gas_left":999999650}}]} \ No newline at end of file diff --git a/external-crates/move/crates/move-analyzer/trace-debug/package.json b/external-crates/move/crates/move-analyzer/trace-debug/package.json index 3ae00120245d6..2a9c6eccf648c 100644 --- a/external-crates/move/crates/move-analyzer/trace-debug/package.json +++ b/external-crates/move/crates/move-analyzer/trace-debug/package.json @@ -20,6 +20,7 @@ ], "main": "./out/extension.js", "contributes": { + "breakpoints": [{ "language": "move" }], "debuggers": [ { "type": "move-debug", diff --git a/external-crates/move/crates/move-binary-format/src/compatibility_mode.rs b/external-crates/move/crates/move-binary-format/src/compatibility_mode.rs index a0848682ae405..d273f5a3d396b 100644 --- a/external-crates/move/crates/move-binary-format/src/compatibility_mode.rs +++ b/external-crates/move/crates/move-binary-format/src/compatibility_mode.rs @@ -100,7 +100,7 @@ pub trait CompatibilityMode: Default { ); /// Finish the compatibility check and return the error if one has been accumulated from individual errors. - fn finish(&self, _: &Compatibility) -> Result<(), Self::Error>; + fn finish(self, _: &Compatibility) -> Result<(), Self::Error>; } /// Compatibility mode impl for execution compatibility checks. @@ -240,7 +240,7 @@ impl CompatibilityMode for ExecutionCompatibilityMode { } /// Finish by comparing against the compatibility flags. 
- fn finish(&self, compatability: &Compatibility) -> Result<(), ()> { + fn finish(self, compatability: &Compatibility) -> Result<(), ()> { if !self.datatype_and_function_linking { return Err(()); } diff --git a/external-crates/move/crates/move-cli/Cargo.toml b/external-crates/move/crates/move-cli/Cargo.toml index 128c9493155fa..fae90ee1126d8 100644 --- a/external-crates/move/crates/move-cli/Cargo.toml +++ b/external-crates/move/crates/move-cli/Cargo.toml @@ -64,4 +64,4 @@ harness = false [features] tiered-gas = ["move-vm-test-utils/tiered-gas"] -gas-profiler = ["move-vm-runtime/gas-profiler"] +tracing = ["move-vm-runtime/tracing"] diff --git a/external-crates/move/crates/move-cli/src/base/test.rs b/external-crates/move/crates/move-cli/src/base/test.rs index e37309a8e9b1a..3133b799a9b10 100644 --- a/external-crates/move/crates/move-cli/src/base/test.rs +++ b/external-crates/move/crates/move-cli/src/base/test.rs @@ -196,7 +196,7 @@ pub fn run_move_unit_tests( let (files, comments_and_compiler_res) = compiler.run::().unwrap(); let (_, compiler) = diagnostics::unwrap_or_report_pass_diagnostics(&files, comments_and_compiler_res); - let (mut compiler, cfgir) = compiler.into_ast(); + let (compiler, cfgir) = compiler.into_ast(); let compilation_env = compiler.compilation_env(); let built_test_plan = construct_test_plan(compilation_env, Some(root_package), &cfgir); let mapped_files = compilation_env.mapped_files().clone(); diff --git a/external-crates/move/crates/move-cli/src/sandbox/cli.rs b/external-crates/move/crates/move-cli/src/sandbox/cli.rs index 30a4053bfdfb0..e8aee6887e169 100644 --- a/external-crates/move/crates/move-cli/src/sandbox/cli.rs +++ b/external-crates/move/crates/move-cli/src/sandbox/cli.rs @@ -11,15 +11,19 @@ use crate::{ }; use anyhow::Result; use clap::Parser; -use move_core_types::{ - language_storage::TypeTag, parser, transaction_argument::TransactionArgument, -}; +use move_core_types::parsing::values::ParsedValue; +use move_core_types::{language_storage::TypeTag, transaction_argument::TransactionArgument}; use move_package::compilation::package_layout::CompiledPackageLayout; use move_vm_test_utils::gas_schedule::CostTable; use std::{ fs, path::{Path, PathBuf}, }; +fn parse_transaction_argument(s: &str) -> Result { + let x: ParsedValue<()> = ParsedValue::parse(s)?; + let move_value = x.into_concrete_value(&|_| None)?; + TransactionArgument::try_from(move_value) +} #[derive(Parser)] pub enum SandboxCommand { @@ -75,7 +79,7 @@ pub enum SandboxCommand { /// ASCII strings (e.g., 'b"hi" will parse as the vector value [68, 69]). #[clap( long = "args", - value_parser = parser::parse_transaction_argument, + value_parser = parse_transaction_argument, num_args(1..), action = clap::ArgAction::Append, )] @@ -84,7 +88,6 @@ pub enum SandboxCommand { /// `main()`). Must match the type arguments kinds expected by `script_file`. #[clap( long = "type-args", - value_parser = parser::parse_type_tag, num_args(1..), action = clap::ArgAction::Append, )] @@ -155,7 +158,6 @@ pub struct StructLayoutOptions { /// Generate layout bindings for `struct` bound to these type arguments. 
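With move_core_types::parser removed, --args values are parsed through ParsedValue, as in the parse_transaction_argument helper added to sandbox/cli.rs above, reconstructed here with an explicit Result over TransactionArgument. A standalone sketch of that path plus a hypothetical invocation (the literals in main are assumptions about the accepted grammar, not taken from this diff):

use anyhow::Result;
use move_core_types::parsing::values::ParsedValue;
use move_core_types::transaction_argument::TransactionArgument;

// Mirrors the helper added above: parse a CLI literal into a
// TransactionArgument without the removed parser module.
fn parse_transaction_argument(s: &str) -> Result<TransactionArgument> {
    let parsed: ParsedValue<()> = ParsedValue::parse(s)?;
    let move_value = parsed.into_concrete_value(&|_| None)?;
    Ok(TransactionArgument::try_from(move_value)?)
}

fn main() -> Result<()> {
    // Hypothetical inputs; the exact literal grammar is defined by ParsedValue.
    for raw in ["42", "true"] {
        println!("{raw} -> {:?}", parse_transaction_argument(raw)?);
    }
    Ok(())
}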
#[clap( long = "type-args", - value_parser = parser::parse_type_tag, requires="struct", action = clap::ArgAction::Append, num_args(1..), diff --git a/external-crates/move/crates/move-cli/src/sandbox/commands/run.rs b/external-crates/move/crates/move-cli/src/sandbox/commands/run.rs index 5f3c556b6745c..61458bbb817ce 100644 --- a/external-crates/move/crates/move-cli/src/sandbox/commands/run.rs +++ b/external-crates/move/crates/move-cli/src/sandbox/commands/run.rs @@ -83,7 +83,7 @@ pub fn run( // script fun. parse module, extract script ID to pass to VM let module = CompiledModule::deserialize_with_defaults(&bytecode) .map_err(|e| anyhow!("Error deserializing module: {:?}", e))?; - move_vm_profiler::gas_profiler_feature_enabled! { + move_vm_profiler::tracing_feature_enabled! { use move_vm_profiler::GasProfiler; use move_vm_types::gas::GasMeter; diff --git a/external-crates/move/crates/move-cli/tests/tracing_testsuite.rs b/external-crates/move/crates/move-cli/tests/tracing_testsuite.rs index cfe95ec02ac3a..e59c522afac15 100644 --- a/external-crates/move/crates/move-cli/tests/tracing_testsuite.rs +++ b/external-crates/move/crates/move-cli/tests/tracing_testsuite.rs @@ -6,7 +6,7 @@ use std::path::Path; #[allow(unused_variables)] fn run_all(args_path: &Path) -> datatest_stable::Result<()> { - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] { use move_cli::sandbox::commands::test; use std::path::PathBuf; diff --git a/external-crates/move/crates/move-command-line-common/Cargo.toml b/external-crates/move/crates/move-command-line-common/Cargo.toml index 08679ba8a6e31..83aa969a204af 100644 --- a/external-crates/move/crates/move-command-line-common/Cargo.toml +++ b/external-crates/move/crates/move-command-line-common/Cargo.toml @@ -15,7 +15,6 @@ difference.workspace = true walkdir.workspace = true sha2.workspace = true hex.workspace = true -num-bigint.workspace = true once_cell.workspace = true serde.workspace = true dirs-next.workspace = true @@ -27,9 +26,3 @@ move-binary-format.workspace = true [dev-dependencies] proptest.workspace = true -# Ok to do this since: -# edition = 2021 ==> resolver = 2 -# * https://doc.rust-lang.org/edition-guide/rust-2021/default-cargo-resolver.html#summary -# resolver = 2 ==> feature-resolver-version-2 which allows dev-dependencies to set features -# * https://doc.rust-lang.org/cargo/reference/resolver.html#feature-resolver-version-2 -move-core-types = { workspace = true, features = ["fuzzing"] } diff --git a/external-crates/move/crates/move-command-line-common/src/lib.rs b/external-crates/move/crates/move-command-line-common/src/lib.rs index 2b087266cf27f..6014194ab13ef 100644 --- a/external-crates/move/crates/move-command-line-common/src/lib.rs +++ b/external-crates/move/crates/move-command-line-common/src/lib.rs @@ -4,14 +4,10 @@ #![forbid(unsafe_code)] -pub mod address; pub mod character_sets; pub mod display; pub mod env; pub mod error_bitset; pub mod files; pub mod interactive; -pub mod parser; pub mod testing; -pub mod types; -pub mod values; diff --git a/external-crates/move/crates/move-compiler-transactional-tests/tests/constants/derived_line_number_raw_abort.exp b/external-crates/move/crates/move-compiler-transactional-tests/tests/constants/derived_line_number_raw_abort.exp new file mode 100644 index 0000000000000..b980699ad1b88 --- /dev/null +++ b/external-crates/move/crates/move-compiler-transactional-tests/tests/constants/derived_line_number_raw_abort.exp @@ -0,0 +1,11 @@ +processed 2 tasks + +task 1, lines 6-11: +//# run +Error: Function 
execution failed with VMError: { + major_status: ABORTED, + sub_status: Some(9223372079804448767), + location: 0x42::m, + indices: [], + offsets: [(FunctionDefinitionIndex(0), 1)], +} diff --git a/external-crates/move/crates/move-compiler-transactional-tests/tests/constants/derived_line_number_raw_abort.move b/external-crates/move/crates/move-compiler-transactional-tests/tests/constants/derived_line_number_raw_abort.move new file mode 100644 index 0000000000000..c9e4464e3ad8b --- /dev/null +++ b/external-crates/move/crates/move-compiler-transactional-tests/tests/constants/derived_line_number_raw_abort.move @@ -0,0 +1,11 @@ +// NB: Do _not_ change the number of lines in this file. Any changes to the +// number of lines in this file may break the expected output of this test. + +//# init --edition 2024.beta + +//# run +module 0x42::m { + fun f() { + abort + } +} diff --git a/external-crates/move/crates/move-compiler-transactional-tests/tests/constants/macro_call_line_number_abort.exp b/external-crates/move/crates/move-compiler-transactional-tests/tests/constants/macro_call_line_number_abort.exp new file mode 100644 index 0000000000000..760fa19edda51 --- /dev/null +++ b/external-crates/move/crates/move-compiler-transactional-tests/tests/constants/macro_call_line_number_abort.exp @@ -0,0 +1,21 @@ +processed 4 tasks + +task 2, line 25: +//# run 0x42::m::t_a +Error: Function execution failed with VMError: { + major_status: ABORTED, + sub_status: Some(9223372105574252543), + location: 0x42::m, + indices: [], + offsets: [(FunctionDefinitionIndex(0), 1)], +} + +task 3, line 27: +//# run 0x42::m::t_calls_a +Error: Function execution failed with VMError: { + major_status: ABORTED, + sub_status: Some(9223372118459154431), + location: 0x42::m, + indices: [], + offsets: [(FunctionDefinitionIndex(1), 1)], +} diff --git a/external-crates/move/crates/move-compiler-transactional-tests/tests/constants/macro_call_line_number_abort.move b/external-crates/move/crates/move-compiler-transactional-tests/tests/constants/macro_call_line_number_abort.move new file mode 100644 index 0000000000000..fc2069b6d0a0c --- /dev/null +++ b/external-crates/move/crates/move-compiler-transactional-tests/tests/constants/macro_call_line_number_abort.move @@ -0,0 +1,27 @@ +// NB: Do _not_ change the number of lines in this file. Any changes to the +// number of lines in this file may break the expected output of this test. 
+ +//# init --edition 2024.beta + +//# publish +module 0x42::m { + macro fun a() { + abort + } + + macro fun calls_a() { + a!() + } + + entry fun t_a() { + a!() // assert should point to this line + } + + entry fun t_calls_a() { + calls_a!() // assert should point to this line + } +} + +//# run 0x42::m::t_a + +//# run 0x42::m::t_calls_a diff --git a/external-crates/move/crates/move-compiler/src/cfgir/ast.rs b/external-crates/move/crates/move-compiler/src/cfgir/ast.rs index 72507507b8142..e18c8b4e0233c 100644 --- a/external-crates/move/crates/move-compiler/src/cfgir/ast.rs +++ b/external-crates/move/crates/move-compiler/src/cfgir/ast.rs @@ -3,7 +3,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - diagnostics::WarningFilters, + diagnostics::warning_filters::WarningFilters, expansion::ast::{Attributes, Friend, ModuleIdent, Mutability, TargetKind}, hlir::ast::{ BaseType, Command, Command_, EnumDefinition, FunctionSignature, Label, SingleType, diff --git a/external-crates/move/crates/move-compiler/src/cfgir/borrows/mod.rs b/external-crates/move/crates/move-compiler/src/cfgir/borrows/mod.rs index 59e44f130a78b..3952c47e44f32 100644 --- a/external-crates/move/crates/move-compiler/src/cfgir/borrows/mod.rs +++ b/external-crates/move/crates/move-compiler/src/cfgir/borrows/mod.rs @@ -13,7 +13,7 @@ use crate::{ translate::{display_var, DisplayVar}, }, parser::ast::BinOp_, - shared::{unique_map::UniqueMap, CompilationEnv}, + shared::unique_map::UniqueMap, }; use move_proc_macros::growing_stack; @@ -90,7 +90,6 @@ impl TransferFunctions for BorrowSafety { impl AbstractInterpreter for BorrowSafety {} pub fn verify( - compilation_env: &mut CompilationEnv, context: &super::CFGContext, cfg: &super::cfg::MutForwardCFG, ) -> BTreeMap { @@ -100,21 +99,17 @@ pub fn verify( let mut safety = BorrowSafety::new(locals); // check for existing errors - let has_errors = compilation_env.has_errors(); + let has_errors = context.env.has_errors(); let mut initial_state = BorrowState::initial(locals, safety.mutably_used.clone(), has_errors); initial_state.bind_arguments(&signature.parameters); initial_state.canonicalize_locals(&safety.local_numbers); let (final_state, ds) = safety.analyze_function(cfg, initial_state); - compilation_env.add_diags(ds); - unused_mut_borrows(compilation_env, context, safety.mutably_used); + context.add_diags(ds); + unused_mut_borrows(context, safety.mutably_used); final_state } -fn unused_mut_borrows( - compilation_env: &mut CompilationEnv, - context: &super::CFGContext, - mutably_used: RefExpInfoMap, -) { +fn unused_mut_borrows(context: &super::CFGContext, mutably_used: RefExpInfoMap) { const MSG: &str = "Mutable reference is never used mutably, \ consider switching to an immutable reference '&' instead"; @@ -143,7 +138,7 @@ fn unused_mut_borrows( } else { diag!(UnusedItem::MutReference, (*loc, MSG)) }; - compilation_env.add_diag(diag) + context.add_diag(diag) } } } diff --git a/external-crates/move/crates/move-compiler/src/cfgir/liveness/mod.rs b/external-crates/move/crates/move-compiler/src/cfgir/liveness/mod.rs index fd32f21714754..f5aa20f2619b3 100644 --- a/external-crates/move/crates/move-compiler/src/cfgir/liveness/mod.rs +++ b/external-crates/move/crates/move-compiler/src/cfgir/liveness/mod.rs @@ -13,7 +13,7 @@ use crate::{ diagnostics::Diagnostics, expansion::ast::Mutability, hlir::ast::{self as H, *}, - shared::{unique_map::UniqueMap, CompilationEnv}, + shared::unique_map::UniqueMap, }; use move_ir_types::location::*; use move_proc_macros::growing_stack; @@ -168,11 +168,7 @@ 
fn exp(state: &mut LivenessState, parent_e: &Exp) { /// - Reports an error if an assignment/let was not used /// Switches it to an `Ignore` if it has the drop ability (helps with error messages for borrows) -pub fn last_usage( - compilation_env: &mut CompilationEnv, - context: &super::CFGContext, - cfg: &mut MutForwardCFG, -) { +pub fn last_usage(context: &super::CFGContext, cfg: &mut MutForwardCFG) { let super::CFGContext { infinite_loop_starts, .. @@ -183,7 +179,7 @@ pub fn last_usage( .get(lbl) .unwrap_or_else(|| panic!("ICE no liveness states for {}", lbl)); let command_states = per_command_states.get(lbl).unwrap(); - last_usage::block(compilation_env, final_invariant, command_states, block) + last_usage::block(context, final_invariant, command_states, block) } } @@ -191,30 +187,29 @@ mod last_usage { use move_proc_macros::growing_stack; use crate::{ - cfgir::liveness::state::LivenessState, + cfgir::{liveness::state::LivenessState, CFGContext}, diag, hlir::{ ast::*, translate::{display_var, DisplayVar}, }, - shared::*, }; use std::collections::{BTreeSet, VecDeque}; struct Context<'a, 'b> { - env: &'a mut CompilationEnv, + outer: &'a CFGContext<'a>, next_live: &'b BTreeSet, dropped_live: BTreeSet, } impl<'a, 'b> Context<'a, 'b> { fn new( - env: &'a mut CompilationEnv, + outer: &'a CFGContext<'a>, next_live: &'b BTreeSet, dropped_live: BTreeSet, ) -> Self { Context { - env, + outer, next_live, dropped_live, } @@ -222,7 +217,7 @@ mod last_usage { } pub fn block( - compilation_env: &mut CompilationEnv, + context: &CFGContext, final_invariant: &LivenessState, command_states: &VecDeque, block: &mut BasicBlock, @@ -245,10 +240,7 @@ mod last_usage { .difference(next_data) .cloned() .collect::>(); - command( - &mut Context::new(compilation_env, next_data, dropped_live), - cmd, - ) + command(&mut Context::new(context, next_data, dropped_live), cmd) } } @@ -300,7 +292,7 @@ mod last_usage { '_{vstr}')", ); context - .env + .outer .add_diag(diag!(UnusedItem::Assignment, (l.loc, msg))); } *unused_assignment = true; diff --git a/external-crates/move/crates/move-compiler/src/cfgir/locals/mod.rs b/external-crates/move/crates/move-compiler/src/cfgir/locals/mod.rs index 1793e58dc9139..31a302ece25e1 100644 --- a/external-crates/move/crates/move-compiler/src/cfgir/locals/mod.rs +++ b/external-crates/move/crates/move-compiler/src/cfgir/locals/mod.rs @@ -6,6 +6,7 @@ pub mod state; use super::absint::*; use crate::{ + cfgir::CFGContext, diag, diagnostics::{Diagnostic, Diagnostics}, editions::Edition, @@ -16,15 +17,10 @@ use crate::{ }, naming::ast::{self as N, TParam}, parser::ast::{Ability_, DatatypeName}, - shared::{ - program_info::{DatatypeKind, TypingProgramInfo}, - unique_map::UniqueMap, - *, - }, + shared::{program_info::DatatypeKind, unique_map::UniqueMap}, }; use move_ir_types::location::*; use move_proc_macros::growing_stack; -use move_symbol_pool::Symbol; use state::*; use std::collections::BTreeMap; @@ -33,9 +29,7 @@ use std::collections::BTreeMap; //************************************************************************************************** struct LocalsSafety<'a> { - env: &'a CompilationEnv, - info: &'a TypingProgramInfo, - package: Option, + context: &'a CFGContext<'a>, local_types: &'a UniqueMap, signature: &'a FunctionSignature, unused_mut: BTreeMap, @@ -43,9 +37,7 @@ struct LocalsSafety<'a> { impl<'a> LocalsSafety<'a> { fn new( - env: &'a CompilationEnv, - info: &'a TypingProgramInfo, - package: Option, + context: &'a CFGContext<'a>, local_types: &'a UniqueMap, signature: &'a 
FunctionSignature, ) -> Self { @@ -60,9 +52,7 @@ impl<'a> LocalsSafety<'a> { }) .collect(); Self { - env, - info, - package, + context, local_types, signature, unused_mut, @@ -71,9 +61,7 @@ impl<'a> LocalsSafety<'a> { } struct Context<'a, 'b> { - env: &'a CompilationEnv, - info: &'a TypingProgramInfo, - package: Option, + outer: &'a CFGContext<'a>, local_types: &'a UniqueMap, unused_mut: &'a mut BTreeMap, local_states: &'b mut LocalStates, @@ -83,15 +71,12 @@ struct Context<'a, 'b> { impl<'a, 'b> Context<'a, 'b> { fn new(locals_safety: &'a mut LocalsSafety, local_states: &'b mut LocalStates) -> Self { - let env = locals_safety.env; - let info = locals_safety.info; + let outer = locals_safety.context; let local_types = locals_safety.local_types; let signature = locals_safety.signature; let unused_mut = &mut locals_safety.unused_mut; Self { - env, - info, - package: locals_safety.package, + outer, local_types, unused_mut, local_states, @@ -154,18 +139,18 @@ impl<'a, 'b> Context<'a, 'b> { // .unwrap(); fn datatype_decl_loc(&self, m: &ModuleIdent, n: &DatatypeName) -> Loc { - let kind = self.info.datatype_kind(m, n); + let kind = self.outer.info.datatype_kind(m, n); match kind { - DatatypeKind::Struct => self.info.struct_declared_loc(m, n), - DatatypeKind::Enum => self.info.enum_declared_loc(m, n), + DatatypeKind::Struct => self.outer.info.struct_declared_loc(m, n), + DatatypeKind::Enum => self.outer.info.enum_declared_loc(m, n), } } fn datatype_declared_abilities(&self, m: &ModuleIdent, n: &DatatypeName) -> &'a AbilitySet { - let kind = self.info.datatype_kind(m, n); + let kind = self.outer.info.datatype_kind(m, n); match kind { - DatatypeKind::Struct => self.info.struct_declared_abilities(m, n), - DatatypeKind::Enum => self.info.enum_declared_abilities(m, n), + DatatypeKind::Struct => self.outer.info.struct_declared_abilities(m, n), + DatatypeKind::Enum => self.outer.info.enum_declared_abilities(m, n), } } } @@ -189,7 +174,6 @@ impl<'a> TransferFunctions for LocalsSafety<'a> { impl<'a> AbstractInterpreter for LocalsSafety<'a> {} pub fn verify( - compilation_env: &mut CompilationEnv, context: &super::CFGContext, cfg: &super::cfg::MutForwardCFG, ) -> BTreeMap { @@ -197,22 +181,16 @@ pub fn verify( signature, locals, .. } = context; let initial_state = LocalStates::initial(&signature.parameters, locals); - let mut locals_safety = LocalsSafety::new( - compilation_env, - context.info, - context.package, - locals, - signature, - ); + let mut locals_safety = LocalsSafety::new(context, locals, signature); let (final_state, ds) = locals_safety.analyze_function(cfg, initial_state); - unused_let_muts(compilation_env, locals, locals_safety.unused_mut); - compilation_env.add_diags(ds); + unused_let_muts(context, locals, locals_safety.unused_mut); + context.add_diags(ds); final_state } /// Generates warnings for unused mut declarations fn unused_let_muts( - env: &mut CompilationEnv, + context: &CFGContext, locals: &UniqueMap, unused_mut_locals: BTreeMap, ) { @@ -226,7 +204,7 @@ fn unused_let_muts( let decl_loc = *locals.get_loc(&v).unwrap(); let decl_msg = format!("The variable '{vstr}' is never used mutably"); let mut_msg = "Consider removing the 'mut' declaration here"; - env.add_diag(diag!( + context.add_diag(diag!( UnusedItem::MutModifier, (decl_loc, decl_msg), (mut_loc, mut_msg) @@ -524,7 +502,7 @@ fn check_mutability( let usage_msg = format!("Invalid {usage} of immutable variable '{vstr}'"); let decl_msg = format!("To use the variable mutably, it must be declared 'mut', e.g. 
'mut {vstr}'"); - if context.env.edition(context.package) == Edition::E2024_MIGRATION { + if context.outer.env.edition(context.outer.package) == Edition::E2024_MIGRATION { context.add_diag(diag!(Migration::NeedsLetMut, (decl_loc, decl_msg.clone()))) } else { let mut diag = diag!( diff --git a/external-crates/move/crates/move-compiler/src/cfgir/mod.rs b/external-crates/move/crates/move-compiler/src/cfgir/mod.rs index 958e2a3abb3fc..5dd26eca887e6 100644 --- a/external-crates/move/crates/move-compiler/src/cfgir/mod.rs +++ b/external-crates/move/crates/move-compiler/src/cfgir/mod.rs @@ -15,6 +15,7 @@ pub mod visitor; mod optimize; use crate::{ + diagnostics::warning_filters::WarningFiltersScope, expansion::ast::{Attributes, ModuleIdent, Mutability}, hlir::ast::{FunctionSignature, Label, SingleType, Var, Visibility}, shared::{program_info::TypingProgramInfo, unique_map::UniqueMap, CompilationEnv, Name}, @@ -26,6 +27,8 @@ use optimize::optimize; use std::collections::BTreeSet; pub struct CFGContext<'a> { + pub env: &'a CompilationEnv, + pub warning_filters_scope: WarningFiltersScope, pub info: &'a TypingProgramInfo, pub package: Option, pub module: ModuleIdent, @@ -43,16 +46,22 @@ pub enum MemberName { Function(Name), } -pub fn refine_inference_and_verify( - env: &mut CompilationEnv, - context: &CFGContext, - cfg: &mut MutForwardCFG, -) { - liveness::last_usage(env, context, cfg); - let locals_states = locals::verify(env, context, cfg); +pub fn refine_inference_and_verify(context: &CFGContext, cfg: &mut MutForwardCFG) { + liveness::last_usage(context, cfg); + let locals_states = locals::verify(context, cfg); liveness::release_dead_refs(context, &locals_states, cfg); - borrows::verify(env, context, cfg); + borrows::verify(context, cfg); +} + +impl CFGContext<'_> { + fn add_diag(&self, diag: crate::diagnostics::Diagnostic) { + self.env.add_diag(&self.warning_filters_scope, diag); + } + + fn add_diags(&self, diags: crate::diagnostics::Diagnostics) { + self.env.add_diags(&self.warning_filters_scope, diags); + } } impl MemberName { diff --git a/external-crates/move/crates/move-compiler/src/cfgir/optimize/mod.rs b/external-crates/move/crates/move-compiler/src/cfgir/optimize/mod.rs index db73ab7f19c72..72cd2c0e72ffc 100644 --- a/external-crates/move/crates/move-compiler/src/cfgir/optimize/mod.rs +++ b/external-crates/move/crates/move-compiler/src/cfgir/optimize/mod.rs @@ -43,7 +43,7 @@ const MOVE_2024_OPTIMIZATIONS: &[Optimization] = &[ #[growing_stack] pub fn optimize( - env: &mut CompilationEnv, + env: &CompilationEnv, package: Option, signature: &FunctionSignature, locals: &UniqueMap, diff --git a/external-crates/move/crates/move-compiler/src/cfgir/translate.rs b/external-crates/move/crates/move-compiler/src/cfgir/translate.rs index 93ec88edf4396..fb9692eacd400 100644 --- a/external-crates/move/crates/move-compiler/src/cfgir/translate.rs +++ b/external-crates/move/crates/move-compiler/src/cfgir/translate.rs @@ -10,9 +10,13 @@ use crate::{ visitor::{CFGIRVisitor, CFGIRVisitorConstructor, CFGIRVisitorContext}, }, diag, - diagnostics::Diagnostics, + diagnostics::{ + warning_filters::{WarningFilters, WarningFiltersScope}, + Diagnostic, Diagnostics, + }, expansion::ast::{Attributes, ModuleIdent, Mutability}, hlir::ast::{self as H, BlockLabel, Label, Value, Value_, Var}, + ice_assert, parser::ast::{ConstantName, FunctionName}, shared::{program_info::TypingProgramInfo, unique_map::UniqueMap, CompilationEnv}, FullyCompiledProgram, @@ -42,8 +46,9 @@ enum NamedBlockType { } struct Context<'env> { - env: &'env 
mut CompilationEnv, + env: &'env CompilationEnv, info: &'env TypingProgramInfo, + warning_filters_scope: WarningFiltersScope, current_package: Option, label_count: usize, named_blocks: UniqueMap, @@ -52,9 +57,11 @@ struct Context<'env> { } impl<'env> Context<'env> { - pub fn new(env: &'env mut CompilationEnv, info: &'env TypingProgramInfo) -> Self { + pub fn new(env: &'env CompilationEnv, info: &'env TypingProgramInfo) -> Self { + let warning_filters_scope = env.top_level_warning_filter_scope().clone(); Context { env, + warning_filters_scope, info, current_package: None, label_count: 0, @@ -63,6 +70,22 @@ impl<'env> Context<'env> { } } + pub fn add_diag(&self, diag: Diagnostic) { + self.env.add_diag(&self.warning_filters_scope, diag); + } + + pub fn add_diags(&self, diags: Diagnostics) { + self.env.add_diags(&self.warning_filters_scope, diags); + } + + pub fn push_warning_filter_scope(&mut self, filters: WarningFilters) { + self.warning_filters_scope.push(filters) + } + + pub fn pop_warning_filter_scope(&mut self) { + self.warning_filters_scope.pop() + } + fn new_label(&mut self) -> Label { let count = self.label_count; self.label_count += 1; @@ -121,7 +144,7 @@ impl<'env> Context<'env> { //************************************************************************************************** pub fn program( - compilation_env: &mut CompilationEnv, + compilation_env: &CompilationEnv, _pre_compiled_lib: Option>, prog: H::Program, ) -> G::Program { @@ -170,10 +193,10 @@ fn module( constants: hconstants, } = mdef; context.current_package = package_name; - context.env.add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); let constants = constants(context, module_ident, hconstants); let functions = hfunctions.map(|name, f| function(context, module_ident, name, f)); - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); context.current_package = None; ( module_ident, @@ -238,7 +261,7 @@ fn constants( "Cyclic constant defined here", )); } - context.env.add_diag(diag); + context.add_diag(diag); cycle_nodes.append(&mut scc.into_iter().collect()); } } @@ -251,7 +274,7 @@ fn constants( .filter(|node| !cycle_nodes.contains(node) && graph.contains_node(*node)) .collect(); for node in neighbors { - context.env.add_diag(diag!( + context.add_diag(diag!( BytecodeGeneration::UnfoldableConstant, ( *consts.get_loc(&node).unwrap(), @@ -402,7 +425,7 @@ fn constant( value: (locals, block), } = c; - context.env.add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); let final_value = constant_( context, constant_values, @@ -427,7 +450,7 @@ fn constant( _ => None, }; - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); G::Constant { warning_filter, index, @@ -471,6 +494,8 @@ fn constant_( }; let fake_infinite_loop_starts = BTreeSet::new(); let function_context = super::CFGContext { + env: context.env, + warning_filters_scope: context.warning_filters_scope.clone(), info: context.info, package: context.current_package, module, @@ -482,9 +507,11 @@ fn constant_( locals: &locals, infinite_loop_starts: &fake_infinite_loop_starts, }; - cfgir::refine_inference_and_verify(context.env, &function_context, &mut cfg); - assert!( + cfgir::refine_inference_and_verify(&function_context, &mut cfg); + ice_assert!( + context.env, num_previous_errors == context.env.count_diags(), + full_loc, "{}", ICE_MSG ); @@ -498,7 +525,7 @@ fn constant_( ); if blocks.len() != 1 { - 
context.env.add_diag(diag!( + context.add_diag(diag!( BytecodeGeneration::UnfoldableConstant, (full_loc, CANNOT_FOLD) )); @@ -510,7 +537,7 @@ fn constant_( let e = match cmd_ { C::IgnoreAndPop { exp, .. } => exp, _ => { - context.env.add_diag(diag!( + context.add_diag(diag!( BytecodeGeneration::UnfoldableConstant, (*cloc, CANNOT_FOLD) )); @@ -532,7 +559,7 @@ fn check_constant_value(context: &mut Context, e: &H::Exp) { use H::UnannotatedExp_ as E; match &e.exp.value { E::Value(_) => (), - _ => context.env.add_diag(diag!( + _ => context.add_diag(diag!( BytecodeGeneration::UnfoldableConstant, (e.exp.loc, CANNOT_FOLD) )), @@ -579,7 +606,7 @@ fn function( signature, body, } = f; - context.env.add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); let body = function_body( context, module, @@ -590,7 +617,7 @@ fn function( &signature, body, ); - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); G::Function { warning_filter, index, @@ -627,9 +654,11 @@ fn function_body( let (mut cfg, infinite_loop_starts, diags) = MutForwardCFG::new(start, &mut blocks, binfo); - context.env.add_diags(diags); + context.add_diags(diags); let function_context = super::CFGContext { + env: context.env, + warning_filters_scope: context.warning_filters_scope.clone(), info: context.info, package: context.current_package, module, @@ -641,7 +670,7 @@ fn function_body( locals: &locals, infinite_loop_starts: &infinite_loop_starts, }; - cfgir::refine_inference_and_verify(context.env, &function_context, &mut cfg); + cfgir::refine_inference_and_verify(&function_context, &mut cfg); // do not optimize if there are errors, warnings are okay if !context.env.has_errors() { cfgir::optimize( @@ -977,7 +1006,8 @@ fn visit_program(context: &mut Context, prog: &mut G::Program) { struct AbsintVisitor; struct AbsintVisitorContext<'a> { - env: &'a mut CompilationEnv, + env: &'a CompilationEnv, + warning_filters_scope: WarningFiltersScope, info: Arc, current_package: Option, } @@ -985,22 +1015,35 @@ struct AbsintVisitorContext<'a> { impl CFGIRVisitorConstructor for AbsintVisitor { type Context<'a> = AbsintVisitorContext<'a>; - fn context<'a>(env: &'a mut CompilationEnv, program: &G::Program) -> Self::Context<'a> { + fn context<'a>(env: &'a CompilationEnv, program: &G::Program) -> Self::Context<'a> { + let warning_filters_scope = env.top_level_warning_filter_scope().clone(); AbsintVisitorContext { env, + warning_filters_scope, info: program.info.clone(), current_package: None, } } } +impl AbsintVisitorContext<'_> { + #[allow(unused)] + fn add_diag(&self, diag: crate::diagnostics::Diagnostic) { + self.env.add_diag(&self.warning_filters_scope, diag); + } + + fn add_diags(&self, diags: crate::diagnostics::Diagnostics) { + self.env.add_diags(&self.warning_filters_scope, diags); + } +} + impl<'a> CFGIRVisitorContext for AbsintVisitorContext<'a> { - fn add_warning_filter_scope(&mut self, filter: crate::diagnostics::WarningFilters) { - self.env.add_warning_filter_scope(filter) + fn push_warning_filter_scope(&mut self, filters: WarningFilters) { + self.warning_filters_scope.push(filters) } fn pop_warning_filter_scope(&mut self) { - self.env.pop_warning_filter_scope() + self.warning_filters_scope.pop() } fn visit_module_custom(&mut self, _ident: ModuleIdent, mdef: &G::ModuleDefinition) -> bool { @@ -1035,6 +1078,8 @@ impl<'a> CFGIRVisitorContext for AbsintVisitorContext<'a> { }; let (cfg, infinite_loop_starts) = ImmForwardCFG::new(*start, blocks, block_info.iter()); let 
function_context = super::CFGContext { + env: self.env, + warning_filters_scope: self.warning_filters_scope.clone(), info: &self.info, package: self.current_package, module: mident, @@ -1048,9 +1093,9 @@ impl<'a> CFGIRVisitorContext for AbsintVisitorContext<'a> { }; let mut ds = Diagnostics::new(); for v in &self.env.visitors().abs_int { - ds.extend(v.verify(self.env, &function_context, &cfg)); + ds.extend(v.verify(&function_context, &cfg)); } - self.env.add_diags(ds); + self.add_diags(ds); true } } diff --git a/external-crates/move/crates/move-compiler/src/cfgir/visitor.rs b/external-crates/move/crates/move-compiler/src/cfgir/visitor.rs index 1ec55b3cd3f82..7bebfa4435b02 100644 --- a/external-crates/move/crates/move-compiler/src/cfgir/visitor.rs +++ b/external-crates/move/crates/move-compiler/src/cfgir/visitor.rs @@ -11,7 +11,7 @@ use crate::{ CFGContext, }, command_line::compiler::Visitor, - diagnostics::{Diagnostic, Diagnostics, WarningFilters}, + diagnostics::{warning_filters::WarningFilters, Diagnostic, Diagnostics}, expansion::ast::ModuleIdent, hlir::ast::{self as H, Command, Exp, LValue, LValue_, Label, ModuleCall, Type, Type_, Var}, parser::ast::{ConstantName, DatatypeName, FunctionName}, @@ -24,7 +24,7 @@ pub type AbsIntVisitorObj = Box; pub type CFGIRVisitorObj = Box; pub trait CFGIRVisitor: Send + Sync { - fn visit(&self, env: &mut CompilationEnv, program: &G::Program); + fn visit(&self, env: &CompilationEnv, program: &G::Program); fn visitor(self) -> Visitor where @@ -35,12 +35,7 @@ pub trait CFGIRVisitor: Send + Sync { } pub trait AbstractInterpreterVisitor: Send + Sync { - fn verify( - &self, - env: &CompilationEnv, - context: &CFGContext, - cfg: &ImmForwardCFG, - ) -> Diagnostics; + fn verify(&self, context: &CFGContext, cfg: &ImmForwardCFG) -> Diagnostics; fn visitor(self) -> Visitor where @@ -57,16 +52,16 @@ pub trait AbstractInterpreterVisitor: Send + Sync { pub trait CFGIRVisitorConstructor: Send { type Context<'a>: Sized + CFGIRVisitorContext; - fn context<'a>(env: &'a mut CompilationEnv, program: &G::Program) -> Self::Context<'a>; + fn context<'a>(env: &'a CompilationEnv, program: &G::Program) -> Self::Context<'a>; - fn visit(env: &mut CompilationEnv, program: &G::Program) { + fn visit(env: &CompilationEnv, program: &G::Program) { let mut context = Self::context(env, program); context.visit(program); } } pub trait CFGIRVisitorContext { - fn add_warning_filter_scope(&mut self, filter: WarningFilters); + fn push_warning_filter_scope(&mut self, filters: WarningFilters); fn pop_warning_filter_scope(&mut self); fn visit_module_custom(&mut self, _ident: ModuleIdent, _mdef: &G::ModuleDefinition) -> bool { @@ -78,7 +73,7 @@ pub trait CFGIRVisitorContext { /// required. 
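The visitor context now tracks warning suppression with an explicit push/pop scope (push_warning_filter_scope / pop_warning_filter_scope) instead of mutating the CompilationEnv. A standalone sketch of that stack discipline (the type and filter names are illustrative, not the compiler's):

/// Illustrative scope stack: a module's warning filters are pushed before its
/// members are visited and popped afterwards, so nested scopes compose.
#[derive(Default)]
struct FilterScope(Vec<&'static str>);

impl FilterScope {
    fn push(&mut self, filter: &'static str) {
        self.0.push(filter);
    }
    fn pop(&mut self) {
        self.0.pop().expect("push/pop must stay balanced");
    }
    fn is_filtered(&self, warning: &str) -> bool {
        self.0.iter().any(|f| *f == warning || *f == "all")
    }
}

fn main() {
    let mut scope = FilterScope::default();
    scope.push("unused_mut"); // e.g. entering a module with #[allow(unused_mut)]
    assert!(scope.is_filtered("unused_mut"));
    scope.pop(); // leaving the module restores the outer scope
    assert!(!scope.is_filtered("unused_mut"));
}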
fn visit(&mut self, program: &G::Program) { for (mident, mdef) in program.modules.key_cloned_iter() { - self.add_warning_filter_scope(mdef.warning_filter.clone()); + self.push_warning_filter_scope(mdef.warning_filter.clone()); if self.visit_module_custom(mident, mdef) { self.pop_warning_filter_scope(); continue; @@ -117,7 +112,7 @@ pub trait CFGIRVisitorContext { struct_name: DatatypeName, sdef: &H::StructDefinition, ) { - self.add_warning_filter_scope(sdef.warning_filter.clone()); + self.push_warning_filter_scope(sdef.warning_filter.clone()); if self.visit_struct_custom(module, struct_name, sdef) { self.pop_warning_filter_scope(); return; @@ -139,7 +134,7 @@ pub trait CFGIRVisitorContext { enum_name: DatatypeName, edef: &H::EnumDefinition, ) { - self.add_warning_filter_scope(edef.warning_filter.clone()); + self.push_warning_filter_scope(edef.warning_filter.clone()); if self.visit_enum_custom(module, enum_name, edef) { self.pop_warning_filter_scope(); return; @@ -161,7 +156,7 @@ pub trait CFGIRVisitorContext { constant_name: ConstantName, cdef: &G::Constant, ) { - self.add_warning_filter_scope(cdef.warning_filter.clone()); + self.push_warning_filter_scope(cdef.warning_filter.clone()); if self.visit_constant_custom(module, constant_name, cdef) { self.pop_warning_filter_scope(); return; @@ -183,7 +178,7 @@ pub trait CFGIRVisitorContext { function_name: FunctionName, fdef: &G::Function, ) { - self.add_warning_filter_scope(fdef.warning_filter.clone()); + self.push_warning_filter_scope(fdef.warning_filter.clone()); if self.visit_function_custom(module, function_name, fdef) { self.pop_warning_filter_scope(); return; @@ -322,11 +317,62 @@ impl From for CFGIRVisitorObj { } impl CFGIRVisitor for V { - fn visit(&self, env: &mut CompilationEnv, program: &G::Program) { + fn visit(&self, env: &CompilationEnv, program: &G::Program) { Self::visit(env, program) } } +macro_rules! 
simple_visitor { + ($visitor:ident, $($overrides:item),*) => { + pub struct $visitor; + + pub struct Context<'a> { + env: &'a crate::shared::CompilationEnv, + warning_filters_scope: crate::diagnostics::warning_filters::WarningFiltersScope, + } + + impl crate::cfgir::visitor::CFGIRVisitorConstructor for $visitor { + type Context<'a> = Context<'a>; + + fn context<'a>(env: &'a crate::shared::CompilationEnv, _program: &crate::cfgir::ast::Program) -> Self::Context<'a> { + let warning_filters_scope = env.top_level_warning_filter_scope().clone(); + Context { + env, + warning_filters_scope, + } + } + } + + impl Context<'_> { + #[allow(unused)] + fn add_diag(&self, diag: crate::diagnostics::Diagnostic) { + self.env.add_diag(&self.warning_filters_scope, diag); + } + + #[allow(unused)] + fn add_diags(&self, diags: crate::diagnostics::Diagnostics) { + self.env.add_diags(&self.warning_filters_scope, diags); + } + } + + impl crate::cfgir::visitor::CFGIRVisitorContext for Context<'_> { + fn push_warning_filter_scope( + &mut self, + filters: crate::diagnostics::warning_filters::WarningFilters, + ) { + self.warning_filters_scope.push(filters) + } + + fn pop_warning_filter_scope(&mut self) { + self.warning_filters_scope.pop() + } + + $($overrides)* + } + } +} +pub(crate) use simple_visitor; + //************************************************************************************************** // simple absint visitor //************************************************************************************************** @@ -448,13 +494,12 @@ pub trait SimpleAbsIntConstructor: Sized { /// Given the initial state/domain, construct a new abstract interpreter. /// Return None if it should not be run given this context fn new<'a>( - env: &CompilationEnv, context: &'a CFGContext<'a>, cfg: &ImmForwardCFG, init_state: &mut as SimpleAbsInt>::State, ) -> Option>; - fn verify(env: &CompilationEnv, context: &CFGContext, cfg: &ImmForwardCFG) -> Diagnostics { + fn verify(context: &CFGContext, cfg: &ImmForwardCFG) -> Diagnostics { let mut locals = context .locals .key_cloned_iter() @@ -473,7 +518,7 @@ pub trait SimpleAbsIntConstructor: Sized { ); } let mut init_state = as SimpleAbsInt>::State::new(context, locals); - let Some(mut ai) = Self::new(env, context, cfg, &mut init_state) else { + let Some(mut ai) = Self::new(context, cfg, &mut init_state) else { return Diagnostics::new(); }; let (final_state, ds) = ai.analyze_function(cfg, init_state); @@ -760,13 +805,8 @@ impl From for AbsIntVisitorObj { } impl AbstractInterpreterVisitor for V { - fn verify( - &self, - env: &CompilationEnv, - context: &CFGContext, - cfg: &ImmForwardCFG, - ) -> Diagnostics { - ::verify(env, context, cfg) + fn verify(&self, context: &CFGContext, cfg: &ImmForwardCFG) -> Diagnostics { + ::verify(context, cfg) } } diff --git a/external-crates/move/crates/move-compiler/src/command_line/compiler.rs b/external-crates/move/crates/move-compiler/src/command_line/compiler.rs index 5ab1794628a81..22e7521153301 100644 --- a/external-crates/move/crates/move-compiler/src/command_line/compiler.rs +++ b/external-crates/move/crates/move-compiler/src/command_line/compiler.rs @@ -10,7 +10,8 @@ use crate::{ command_line::{DEFAULT_OUTPUT_DIR, MOVE_COMPILED_INTERFACES_DIR}, compiled_unit::{self, AnnotatedCompiledUnit}, diagnostics::{ - codes::{Severity, WarningFilter}, + codes::Severity, + warning_filters::{WarningFilter, WarningFilters}, *, }, editions::Edition, @@ -376,17 +377,19 @@ impl Compiler { interface_files_dir_opt, &compiled_module_named_address_mapping, )?; - 
let mut compilation_env = - CompilationEnv::new(flags, visitors, save_hooks, package_configs, default_config); - if let Some(filter) = warning_filter { - compilation_env.add_warning_filter_scope(filter); - } + let mut compilation_env = CompilationEnv::new( + flags, + visitors, + save_hooks, + warning_filter, + package_configs, + default_config, + ); for (prefix, filters) in known_warning_filters { compilation_env.add_custom_known_filters(prefix, filters)?; } - let (source_text, pprog, comments) = - parse_program(&mut compilation_env, maps, targets, deps)?; + let (source_text, pprog, comments) = parse_program(&compilation_env, maps, targets, deps)?; for (fhash, (fname, contents)) in &source_text { // TODO better support for bytecode interface file paths @@ -480,12 +483,12 @@ impl SteppedCompiler
{ "Invalid pass for run_to. Target pass precedes the current pass" ); let Self { - mut compilation_env, + compilation_env, pre_compiled_lib, program, } = self; let new_prog = run( - &mut compilation_env, + &compilation_env, pre_compiled_lib.clone(), program.unwrap(), TARGET, @@ -498,10 +501,7 @@ impl SteppedCompiler
{ }) } - pub fn compilation_env(&mut self) -> &mut CompilationEnv { - &mut self.compilation_env - } - pub fn compilation_env_ref(&self) -> &CompilationEnv { + pub fn compilation_env(&self) -> &CompilationEnv { &self.compilation_env } } @@ -657,9 +657,9 @@ pub fn construct_pre_compiled_lib, NamedAddress: Into Ok(Err((files, errors))), Ok(PassResult::Compilation(compiled, _)) => Ok(Ok(FullyCompiledProgram { files, @@ -886,7 +886,7 @@ pub fn move_check_for_errors( ) -> Result<(Vec, Diagnostics), (Pass, Diagnostics)> { let (_, compiler) = comments_and_compiler_res?; - let (mut compiler, cfgir) = compiler.run::()?.into_ast(); + let (compiler, cfgir) = compiler.run::()?.into_ast(); let compilation_env = compiler.compilation_env(); if compilation_env.flags().is_testing() { unit_test::plan_builder::construct_test_plan(compilation_env, None, &cfgir); @@ -922,7 +922,7 @@ impl PassResult { } } - pub fn save(&self, compilation_env: &mut CompilationEnv) { + pub fn save(&self, compilation_env: &CompilationEnv) { match self { PassResult::Parser(prog) => { compilation_env.save_parser_ast(prog); @@ -949,14 +949,14 @@ impl PassResult { } fn run( - compilation_env: &mut CompilationEnv, + compilation_env: &CompilationEnv, pre_compiled_lib: Option>, cur: PassResult, until: Pass, ) -> Result { #[growing_stack] fn rec( - compilation_env: &mut CompilationEnv, + compilation_env: &CompilationEnv, pre_compiled_lib: Option>, cur: PassResult, until: Pass, diff --git a/external-crates/move/crates/move-compiler/src/diagnostics/codes.rs b/external-crates/move/crates/move-compiler/src/diagnostics/codes.rs index a3d73b45893f9..436f81cfd82f3 100644 --- a/external-crates/move/crates/move-compiler/src/diagnostics/codes.rs +++ b/external-crates/move/crates/move-compiler/src/diagnostics/codes.rs @@ -6,8 +6,6 @@ // Main types //************************************************************************************************** -use crate::shared::FILTER_ALL; - #[derive(PartialEq, Eq, Clone, Copy, Debug, Hash, PartialOrd, Ord)] pub enum Severity { Note = 0, @@ -20,8 +18,6 @@ pub enum Severity { /// A an optional prefix to distinguish between different types of warnings (internal vs. possibly /// multiple externally provided ones). pub type ExternalPrefix = Option<&'static str>; -/// The name for a well-known filter. -pub type WellKnownFilterName = &'static str; /// The ID for a diagnostic, consisting of an optional prefix, a category, and a code. pub type DiagnosticsID = (ExternalPrefix, u8, u8); @@ -55,26 +51,6 @@ pub(crate) trait DiagnosticCode: Copy { } } -#[derive(PartialEq, Eq, Clone, Copy, Debug, PartialOrd, Ord)] -/// Represents a single annotation for a diagnostic filter -pub enum WarningFilter { - /// Filters all warnings - All(ExternalPrefix), - /// Filters all warnings of a specific category. Only known filters have names. - Category { - prefix: ExternalPrefix, - category: u8, - name: Option, - }, - /// Filters a single warning, as defined by codes below. Only known filters have names. 
- Code { - prefix: ExternalPrefix, - category: u8, - code: u8, - name: Option, - }, -} - //************************************************************************************************** // Categories and Codes //************************************************************************************************** @@ -387,45 +363,6 @@ codes!( ], ); -//************************************************************************************************** -// Warning Filter -//************************************************************************************************** - -impl WarningFilter { - pub fn to_str(self) -> Option<&'static str> { - match self { - Self::All(_) => Some(FILTER_ALL), - Self::Category { name, .. } | Self::Code { name, .. } => name, - } - } - - pub fn code( - prefix: ExternalPrefix, - category: u8, - code: u8, - name: Option, - ) -> Self { - Self::Code { - prefix, - category, - code, - name, - } - } - - pub fn category( - prefix: ExternalPrefix, - category: u8, - name: Option, - ) -> Self { - Self::Category { - prefix, - category, - name, - } - } -} - //************************************************************************************************** // impls //************************************************************************************************** diff --git a/external-crates/move/crates/move-compiler/src/diagnostics/mod.rs b/external-crates/move/crates/move-compiler/src/diagnostics/mod.rs index 3c7cbe46de462..5c64209378d30 100644 --- a/external-crates/move/crates/move-compiler/src/diagnostics/mod.rs +++ b/external-crates/move/crates/move-compiler/src/diagnostics/mod.rs @@ -3,19 +3,12 @@ // SPDX-License-Identifier: Apache-2.0 pub mod codes; +pub mod warning_filters; use crate::{ command_line::COLOR_MODE_ENV_VAR, - diagnostics::codes::{ - Category, DiagnosticCode, DiagnosticInfo, ExternalPrefix, Severity, WarningFilter, - WellKnownFilterName, - }, - shared::{ - ast_debug::AstDebug, - files::{ByteSpan, FileByteSpan, FileId, MappedFiles}, - known_attributes, FILTER_UNUSED_CONST, FILTER_UNUSED_FUNCTION, FILTER_UNUSED_MUT_PARAM, - FILTER_UNUSED_MUT_REF, FILTER_UNUSED_STRUCT_FIELD, FILTER_UNUSED_TYPE_PARAMETER, - }, + diagnostics::codes::{Category, DiagnosticCode, DiagnosticInfo, Severity}, + shared::files::{ByteSpan, FileByteSpan, FileId, MappedFiles}, }; use codespan_reporting::{ self as csr, @@ -37,8 +30,6 @@ use std::{ path::PathBuf, }; -use self::codes::UnusedItem; - //************************************************************************************************** // Types //************************************************************************************************** @@ -84,28 +75,6 @@ struct JsonDiagnostic { msg: String, } -#[derive(PartialEq, Eq, Clone, Debug)] -/// Used to filter out diagnostics, specifically used for warning suppression -pub struct WarningFilters { - filters: BTreeMap, - for_dependency: bool, // if false, the filters are used for source code -} - -#[derive(PartialEq, Eq, Clone, Debug)] -/// Filters split by category and code -enum UnprefixedWarningFilters { - /// Remove all warnings - All, - Specified { - /// Remove all diags of this category with optional known name - categories: BTreeMap>, - /// Remove specific diags with optional known filter name - codes: BTreeMap<(u8, u8), Option>, - }, - /// No filter - Empty, -} - #[derive(PartialEq, Eq, Clone, Debug, PartialOrd, Ord)] enum MigrationChange { AddMut, @@ -776,7 +745,7 @@ macro_rules! ice { macro_rules! 
ice_assert { ($env: expr, $cond: expr, $loc: expr, $($arg:tt)*) => {{ if !$cond { - $env.add_diag($crate::ice!(( + $env.add_error_diag($crate::ice!(( $loc, format!($($arg)*), ))); @@ -797,180 +766,6 @@ pub fn print_stack_trace() { } } -impl WarningFilters { - pub fn new_for_source() -> Self { - Self { - filters: BTreeMap::new(), - for_dependency: false, - } - } - - pub fn new_for_dependency() -> Self { - Self { - filters: BTreeMap::new(), - for_dependency: true, - } - } - - pub fn is_filtered(&self, diag: &Diagnostic) -> bool { - self.is_filtered_by_info(&diag.info) - } - - fn is_filtered_by_info(&self, info: &DiagnosticInfo) -> bool { - let prefix = info.external_prefix(); - self.filters - .get(&prefix) - .is_some_and(|filters| filters.is_filtered_by_info(info)) - } - - pub fn union(&mut self, other: &Self) { - for (prefix, filters) in &other.filters { - self.filters - .entry(*prefix) - .or_insert_with(UnprefixedWarningFilters::new) - .union(filters); - } - // if there is a dependency code filter on the stack, it means we are filtering dependent - // code and this information must be preserved when stacking up additional filters (which - // involves union of the current filter with the new one) - self.for_dependency = self.for_dependency || other.for_dependency; - } - - pub fn add(&mut self, filter: WarningFilter) { - let (prefix, category, code, name) = match filter { - WarningFilter::All(prefix) => { - self.filters.insert(prefix, UnprefixedWarningFilters::All); - return; - } - WarningFilter::Category { - prefix, - category, - name, - } => (prefix, category, None, name), - WarningFilter::Code { - prefix, - category, - code, - name, - } => (prefix, category, Some(code), name), - }; - self.filters - .entry(prefix) - .or_insert(UnprefixedWarningFilters::Empty) - .add(category, code, name) - } - - pub fn unused_warnings_filter_for_test() -> Self { - Self { - filters: BTreeMap::from([( - None, - UnprefixedWarningFilters::unused_warnings_filter_for_test(), - )]), - for_dependency: false, - } - } - - pub fn for_dependency(&self) -> bool { - self.for_dependency - } -} - -impl UnprefixedWarningFilters { - pub fn new() -> Self { - Self::Empty - } - - fn is_filtered_by_info(&self, info: &DiagnosticInfo) -> bool { - match self { - Self::All => info.severity() <= Severity::Warning, - Self::Specified { categories, codes } => { - info.severity() <= Severity::Warning - && (categories.contains_key(&info.category()) - || codes.contains_key(&(info.category(), info.code()))) - } - Self::Empty => false, - } - } - - pub fn union(&mut self, other: &Self) { - match (self, other) { - // if self is empty, just take the other filter - (s @ Self::Empty, _) => *s = other.clone(), - // if other is empty, or self is ALL, no change to the filter - (_, Self::Empty) => (), - (Self::All, _) => (), - // if other is all, self is now all - (s, Self::All) => *s = Self::All, - // category and code level union - ( - Self::Specified { categories, codes }, - Self::Specified { - categories: other_categories, - codes: other_codes, - }, - ) => { - categories.extend(other_categories); - // remove any codes covered by the category level filter - codes.extend( - other_codes - .iter() - .filter(|((category, _), _)| !categories.contains_key(category)), - ); - } - } - } - - /// Add a specific filter to the filter map. - /// If filter_code is None, then the filter applies to all codes in the filter_category. 
- fn add( - &mut self, - filter_category: u8, - filter_code: Option, - filter_name: Option, - ) { - match self { - Self::All => (), - Self::Empty => { - *self = Self::Specified { - categories: BTreeMap::new(), - codes: BTreeMap::new(), - }; - self.add(filter_category, filter_code, filter_name) - } - Self::Specified { categories, .. } if categories.contains_key(&filter_category) => (), - Self::Specified { categories, codes } => { - if let Some(filter_code) = filter_code { - codes.insert((filter_category, filter_code), filter_name); - } else { - categories.insert(filter_category, filter_name); - codes.retain(|(category, _), _| *category != filter_category); - } - } - } - } - - pub fn unused_warnings_filter_for_test() -> Self { - let filtered_codes = [ - (UnusedItem::Function, FILTER_UNUSED_FUNCTION), - (UnusedItem::StructField, FILTER_UNUSED_STRUCT_FIELD), - (UnusedItem::FunTypeParam, FILTER_UNUSED_TYPE_PARAMETER), - (UnusedItem::Constant, FILTER_UNUSED_CONST), - (UnusedItem::MutReference, FILTER_UNUSED_MUT_REF), - (UnusedItem::MutParam, FILTER_UNUSED_MUT_PARAM), - ] - .into_iter() - .map(|(item, filter)| { - let info = item.into_info(); - ((info.category(), info.code()), Some(filter)) - }) - .collect(); - Self::Specified { - categories: BTreeMap::new(), - codes: filtered_codes, - } - } -} - impl Migration { pub fn new( mapped_files: MappedFiles, @@ -1207,43 +1002,6 @@ impl From> for Diagnostics { } } -impl AstDebug for WarningFilters { - fn ast_debug(&self, w: &mut crate::shared::ast_debug::AstWriter) { - for (prefix, filters) in &self.filters { - let prefix_str = prefix.unwrap_or(known_attributes::DiagnosticAttribute::ALLOW); - match filters { - UnprefixedWarningFilters::All => w.write(format!( - "#[{}({})]", - prefix_str, - WarningFilter::All(*prefix).to_str().unwrap(), - )), - UnprefixedWarningFilters::Specified { categories, codes } => { - w.write(format!("#[{}(", prefix_str)); - let items = categories - .iter() - .map(|(cat, n)| WarningFilter::Category { - prefix: *prefix, - category: *cat, - name: *n, - }) - .chain(codes.iter().map(|((cat, code), n)| WarningFilter::Code { - prefix: *prefix, - category: *cat, - code: *code, - name: *n, - })); - w.list(items, ",", |w, filter| { - w.write(filter.to_str().unwrap()); - false - }); - w.write(")]") - } - UnprefixedWarningFilters::Empty => (), - } - } - } -} - impl From for DiagnosticInfo { fn from(value: C) -> Self { value.into_info() diff --git a/external-crates/move/crates/move-compiler/src/diagnostics/warning_filters.rs b/external-crates/move/crates/move-compiler/src/diagnostics/warning_filters.rs new file mode 100644 index 0000000000000..556277f5c5238 --- /dev/null +++ b/external-crates/move/crates/move-compiler/src/diagnostics/warning_filters.rs @@ -0,0 +1,442 @@ +use crate::{ + diagnostics::{ + codes::{Category, DiagnosticInfo, ExternalPrefix, Severity, UnusedItem}, + Diagnostic, DiagnosticCode, + }, + shared::{known_attributes, AstDebug}, +}; +use move_symbol_pool::Symbol; +use std::collections::{BTreeMap, BTreeSet}; + +pub const FILTER_ALL: &str = "all"; +pub const FILTER_UNUSED: &str = "unused"; +pub const FILTER_MISSING_PHANTOM: &str = "missing_phantom"; +pub const FILTER_UNUSED_USE: &str = "unused_use"; +pub const FILTER_UNUSED_VARIABLE: &str = "unused_variable"; +pub const FILTER_UNUSED_ASSIGNMENT: &str = "unused_assignment"; +pub const FILTER_UNUSED_TRAILING_SEMI: &str = "unused_trailing_semi"; +pub const FILTER_UNUSED_ATTRIBUTE: &str = "unused_attribute"; +pub const FILTER_UNUSED_TYPE_PARAMETER: &str = "unused_type_parameter"; 
+pub const FILTER_UNUSED_FUNCTION: &str = "unused_function"; +pub const FILTER_UNUSED_STRUCT_FIELD: &str = "unused_field"; +pub const FILTER_UNUSED_CONST: &str = "unused_const"; +pub const FILTER_DEAD_CODE: &str = "dead_code"; +pub const FILTER_UNUSED_LET_MUT: &str = "unused_let_mut"; +pub const FILTER_UNUSED_MUT_REF: &str = "unused_mut_ref"; +pub const FILTER_UNUSED_MUT_PARAM: &str = "unused_mut_parameter"; +pub const FILTER_IMPLICIT_CONST_COPY: &str = "implicit_const_copy"; +pub const FILTER_DUPLICATE_ALIAS: &str = "duplicate_alias"; +pub const FILTER_DEPRECATED: &str = "deprecated_usage"; +pub const FILTER_IDE_PATH_AUTOCOMPLETE: &str = "ide_path_autocomplete"; +pub const FILTER_IDE_DOT_AUTOCOMPLETE: &str = "ide_dot_autocomplete"; + +macro_rules! known_code_filter { + ($name:ident, $category:ident::$code:ident) => {{ + use crate::diagnostics::codes::*; + ( + move_symbol_pool::Symbol::from($name), + std::collections::BTreeSet::from([ + crate::diagnostics::warning_filters::WarningFilter::Code { + prefix: None, + category: Category::$category as u8, + code: $category::$code as u8, + name: Some($name), + }, + ]), + ) + }}; +} +pub(crate) use known_code_filter; + +//************************************************************************************************** +// Types +//************************************************************************************************** + +/// None for the default 'allow'. +/// Some(prefix) for a custom set of warnings, e.g. 'allow(lint(_))'. +pub type FilterPrefix = Option; +pub type FilterName = Symbol; + +#[derive(PartialEq, Eq, Clone, Debug)] +pub struct WarningFiltersScope { + scopes: Vec, +} + +#[derive(PartialEq, Eq, Clone, Debug)] +/// Used to filter out diagnostics, specifically used for warning suppression +pub struct WarningFilters { + filters: BTreeMap, + for_dependency: bool, // if false, the filters are used for source code +} + +#[derive(PartialEq, Eq, Clone, Debug)] +/// Filters split by category and code +enum UnprefixedWarningFilters { + /// Remove all warnings + All, + Specified { + /// Remove all diags of this category with optional known name + categories: BTreeMap>, + /// Remove specific diags with optional known filter name + codes: BTreeMap<(u8, u8), Option>, + }, + /// No filter + Empty, +} + +#[derive(PartialEq, Eq, Clone, Copy, Debug, PartialOrd, Ord)] +/// Represents a single annotation for a diagnostic filter +pub enum WarningFilter { + /// Filters all warnings + All(ExternalPrefix), + /// Filters all warnings of a specific category. Only known filters have names. + Category { + prefix: ExternalPrefix, + category: u8, + name: Option, + }, + /// Filters a single warning, as defined by codes below. Only known filters have names. + Code { + prefix: ExternalPrefix, + category: u8, + code: u8, + name: Option, + }, +} + +/// The name for a well-known filter. 
+pub type WellKnownFilterName = &'static str; + +//************************************************************************************************** +// impls +//************************************************************************************************** + +impl WarningFiltersScope { + /// Unsafe and should be used only for internal purposes, such as ide annotations + pub(crate) const EMPTY: &'static Self = &WarningFiltersScope { scopes: vec![] }; + + pub(crate) fn new(top_level_warning_filter: Option) -> Self { + Self { + scopes: top_level_warning_filter.into_iter().collect(), + } + } + + pub fn push(&mut self, filters: WarningFilters) { + self.scopes.push(filters) + } + + pub fn pop(&mut self) { + self.scopes.pop().unwrap(); + } + + pub fn is_filtered(&self, diag: &Diagnostic) -> bool { + self.scopes.iter().any(|filters| filters.is_filtered(diag)) + } + + pub fn is_filtered_for_dependency(&self) -> bool { + self.scopes.iter().any(|filters| filters.for_dependency()) + } +} + +impl WarningFilters { + pub const fn new_for_source() -> Self { + Self { + filters: BTreeMap::new(), + for_dependency: false, + } + } + + pub const fn new_for_dependency() -> Self { + Self { + filters: BTreeMap::new(), + for_dependency: true, + } + } + + pub fn is_filtered(&self, diag: &Diagnostic) -> bool { + self.is_filtered_by_info(&diag.info) + } + + fn is_filtered_by_info(&self, info: &DiagnosticInfo) -> bool { + let prefix = info.external_prefix(); + self.filters + .get(&prefix) + .is_some_and(|filters| filters.is_filtered_by_info(info)) + } + + pub fn union(&mut self, other: &Self) { + for (prefix, filters) in &other.filters { + self.filters + .entry(*prefix) + .or_insert_with(UnprefixedWarningFilters::new) + .union(filters); + } + // if there is a dependency code filter on the stack, it means we are filtering dependent + // code and this information must be preserved when stacking up additional filters (which + // involves union of the current filter with the new one) + self.for_dependency = self.for_dependency || other.for_dependency; + } + + pub fn add(&mut self, filter: WarningFilter) { + let (prefix, category, code, name) = match filter { + WarningFilter::All(prefix) => { + self.filters.insert(prefix, UnprefixedWarningFilters::All); + return; + } + WarningFilter::Category { + prefix, + category, + name, + } => (prefix, category, None, name), + WarningFilter::Code { + prefix, + category, + code, + name, + } => (prefix, category, Some(code), name), + }; + self.filters + .entry(prefix) + .or_insert(UnprefixedWarningFilters::Empty) + .add(category, code, name) + } + + pub fn unused_warnings_filter_for_test() -> Self { + Self { + filters: BTreeMap::from([( + None, + UnprefixedWarningFilters::unused_warnings_filter_for_test(), + )]), + for_dependency: false, + } + } + + pub fn for_dependency(&self) -> bool { + self.for_dependency + } +} + +impl UnprefixedWarningFilters { + pub fn new() -> Self { + Self::Empty + } + + fn is_filtered_by_info(&self, info: &DiagnosticInfo) -> bool { + match self { + Self::All => info.severity() <= Severity::Warning, + Self::Specified { categories, codes } => { + info.severity() <= Severity::Warning + && (categories.contains_key(&info.category()) + || codes.contains_key(&(info.category(), info.code()))) + } + Self::Empty => false, + } + } + + pub fn union(&mut self, other: &Self) { + match (self, other) { + // if self is empty, just take the other filter + (s @ Self::Empty, _) => *s = other.clone(), + // if other is empty, or self is ALL, no change to the filter + (_, 
Self::Empty) => (), + (Self::All, _) => (), + // if other is all, self is now all + (s, Self::All) => *s = Self::All, + // category and code level union + ( + Self::Specified { categories, codes }, + Self::Specified { + categories: other_categories, + codes: other_codes, + }, + ) => { + categories.extend(other_categories); + // remove any codes covered by the category level filter + codes.extend( + other_codes + .iter() + .filter(|((category, _), _)| !categories.contains_key(category)), + ); + } + } + } + + /// Add a specific filter to the filter map. + /// If filter_code is None, then the filter applies to all codes in the filter_category. + fn add( + &mut self, + filter_category: u8, + filter_code: Option, + filter_name: Option, + ) { + match self { + Self::All => (), + Self::Empty => { + *self = Self::Specified { + categories: BTreeMap::new(), + codes: BTreeMap::new(), + }; + self.add(filter_category, filter_code, filter_name) + } + Self::Specified { categories, .. } if categories.contains_key(&filter_category) => (), + Self::Specified { categories, codes } => { + if let Some(filter_code) = filter_code { + codes.insert((filter_category, filter_code), filter_name); + } else { + categories.insert(filter_category, filter_name); + codes.retain(|(category, _), _| *category != filter_category); + } + } + } + } + + pub fn unused_warnings_filter_for_test() -> Self { + let filtered_codes = [ + (UnusedItem::Function, FILTER_UNUSED_FUNCTION), + (UnusedItem::StructField, FILTER_UNUSED_STRUCT_FIELD), + (UnusedItem::FunTypeParam, FILTER_UNUSED_TYPE_PARAMETER), + (UnusedItem::Constant, FILTER_UNUSED_CONST), + (UnusedItem::MutReference, FILTER_UNUSED_MUT_REF), + (UnusedItem::MutParam, FILTER_UNUSED_MUT_PARAM), + ] + .into_iter() + .map(|(item, filter)| { + let info = item.into_info(); + ((info.category(), info.code()), Some(filter)) + }) + .collect(); + Self::Specified { + categories: BTreeMap::new(), + codes: filtered_codes, + } + } +} + +impl WarningFilter { + pub fn to_str(self) -> Option<&'static str> { + match self { + Self::All(_) => Some(FILTER_ALL), + Self::Category { name, .. } | Self::Code { name, .. 
} => name, + } + } + + pub fn code( + prefix: ExternalPrefix, + category: u8, + code: u8, + name: Option, + ) -> Self { + Self::Code { + prefix, + category, + code, + name, + } + } + + pub fn category( + prefix: ExternalPrefix, + category: u8, + name: Option, + ) -> Self { + Self::Category { + prefix, + category, + name, + } + } + + pub fn compiler_known_filters() -> BTreeMap> { + BTreeMap::from([ + ( + FILTER_ALL.into(), + BTreeSet::from([WarningFilter::All(None)]), + ), + ( + FILTER_UNUSED.into(), + BTreeSet::from([WarningFilter::Category { + prefix: None, + category: Category::UnusedItem as u8, + name: Some(FILTER_UNUSED), + }]), + ), + known_code_filter!(FILTER_MISSING_PHANTOM, Declarations::InvalidNonPhantomUse), + known_code_filter!(FILTER_UNUSED_USE, UnusedItem::Alias), + known_code_filter!(FILTER_UNUSED_VARIABLE, UnusedItem::Variable), + known_code_filter!(FILTER_UNUSED_ASSIGNMENT, UnusedItem::Assignment), + known_code_filter!(FILTER_UNUSED_TRAILING_SEMI, UnusedItem::TrailingSemi), + known_code_filter!(FILTER_UNUSED_ATTRIBUTE, UnusedItem::Attribute), + known_code_filter!(FILTER_UNUSED_FUNCTION, UnusedItem::Function), + known_code_filter!(FILTER_UNUSED_STRUCT_FIELD, UnusedItem::StructField), + ( + FILTER_UNUSED_TYPE_PARAMETER.into(), + BTreeSet::from([ + WarningFilter::Code { + prefix: None, + category: Category::UnusedItem as u8, + code: UnusedItem::StructTypeParam as u8, + name: Some(FILTER_UNUSED_TYPE_PARAMETER), + }, + WarningFilter::Code { + prefix: None, + category: Category::UnusedItem as u8, + code: UnusedItem::FunTypeParam as u8, + name: Some(FILTER_UNUSED_TYPE_PARAMETER), + }, + ]), + ), + known_code_filter!(FILTER_UNUSED_CONST, UnusedItem::Constant), + known_code_filter!(FILTER_DEAD_CODE, UnusedItem::DeadCode), + known_code_filter!(FILTER_UNUSED_LET_MUT, UnusedItem::MutModifier), + known_code_filter!(FILTER_UNUSED_MUT_REF, UnusedItem::MutReference), + known_code_filter!(FILTER_UNUSED_MUT_PARAM, UnusedItem::MutParam), + known_code_filter!(FILTER_IMPLICIT_CONST_COPY, TypeSafety::ImplicitConstantCopy), + known_code_filter!(FILTER_DUPLICATE_ALIAS, Declarations::DuplicateAlias), + known_code_filter!(FILTER_DEPRECATED, TypeSafety::DeprecatedUsage), + ]) + } + + pub fn ide_known_filters() -> BTreeMap> { + BTreeMap::from([ + known_code_filter!(FILTER_IDE_PATH_AUTOCOMPLETE, IDE::PathAutocomplete), + known_code_filter!(FILTER_IDE_DOT_AUTOCOMPLETE, IDE::DotAutocomplete), + ]) + } +} + +impl AstDebug for WarningFilters { + fn ast_debug(&self, w: &mut crate::shared::ast_debug::AstWriter) { + for (prefix, filters) in &self.filters { + let prefix_str = prefix.unwrap_or(known_attributes::DiagnosticAttribute::ALLOW); + match filters { + UnprefixedWarningFilters::All => w.write(format!( + "#[{}({})]", + prefix_str, + WarningFilter::All(*prefix).to_str().unwrap(), + )), + UnprefixedWarningFilters::Specified { categories, codes } => { + w.write(format!("#[{}(", prefix_str)); + let items = categories + .iter() + .map(|(cat, n)| WarningFilter::Category { + prefix: *prefix, + category: *cat, + name: *n, + }) + .chain(codes.iter().map(|((cat, code), n)| WarningFilter::Code { + prefix: *prefix, + category: *cat, + code: *code, + name: *n, + })); + w.list(items, ",", |w, filter| { + w.write(filter.to_str().unwrap()); + false + }); + w.write(")]") + } + UnprefixedWarningFilters::Empty => (), + } + } + } +} diff --git a/external-crates/move/crates/move-compiler/src/editions/mod.rs b/external-crates/move/crates/move-compiler/src/editions/mod.rs index 5cfea4d99d59d..910a977791337 100644 --- 
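The new `diagnostics::warning_filters` module above gathers the suppression machinery in one place: `WarningFilter` describes a single `#[allow(...)]`-style annotation, `WarningFilters` groups them per external prefix, and `WarningFiltersScope` stacks them as the compiler enters and leaves annotated items, with `is_filtered` suppressing a diagnostic if any enclosing frame filters it. The standalone program below models only that stacking idea with simplified stand-in types (it is not the crate's implementation) to show why a frame pushed by an outer item still suppresses warnings raised inside an inner one.

```rust
use std::collections::BTreeSet;

/// Simplified stand-in for WarningFilters: just a set of (category, code) pairs.
#[derive(Default, Clone)]
struct Filters(BTreeSet<(u8, u8)>);

/// Simplified stand-in for WarningFiltersScope: a stack of filter frames.
struct FilterScope {
    frames: Vec<Filters>,
}

impl FilterScope {
    fn new() -> Self {
        Self { frames: Vec::new() }
    }
    /// Entering an item (module, function, ...) pushes its `#[allow]` filters.
    fn push(&mut self, f: Filters) {
        self.frames.push(f);
    }
    /// Leaving the item pops them again; push/pop must stay balanced.
    fn pop(&mut self) {
        self.frames.pop().expect("unbalanced push/pop");
    }
    /// A diagnostic is suppressed if *any* enclosing frame filters it,
    /// mirroring how `WarningFiltersScope::is_filtered` scans every scope.
    fn is_filtered(&self, category: u8, code: u8) -> bool {
        self.frames.iter().any(|f| f.0.contains(&(category, code)))
    }
}

fn main() {
    let mut scope = FilterScope::new();
    scope.push(Filters([(4, 1)].into_iter().collect())); // e.g. a module-level allow
    scope.push(Filters::default()); // a nested function with no allows of its own
    assert!(scope.is_filtered(4, 1)); // still suppressed by the outer frame
    scope.pop();
    scope.pop();
    assert!(!scope.is_filtered(4, 1)); // nothing left on the stack
}
```

In the real module, frames are built from `WarningFilter` values (for example via the `code`/`category` constructors above), keyed by an optional external prefix such as a lint namespace.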
a/external-crates/move/crates/move-compiler/src/editions/mod.rs +++ b/external-crates/move/crates/move-compiler/src/editions/mod.rs @@ -72,13 +72,13 @@ pub const UPGRADE_NOTE: &str = /// Returns true if the feature is present in the given edition. /// Adds an error to the environment. pub fn check_feature_or_error( - env: &mut CompilationEnv, + env: &CompilationEnv, edition: Edition, feature: FeatureGate, loc: Loc, ) -> bool { if !edition.supports(feature) { - env.add_diag(create_feature_error(edition, feature, loc)); + env.add_error_diag(create_feature_error(edition, feature, loc)); false } else { true diff --git a/external-crates/move/crates/move-compiler/src/expansion/ast.rs b/external-crates/move/crates/move-compiler/src/expansion/ast.rs index 1fb7b04aad199..9a04042389e1a 100644 --- a/external-crates/move/crates/move-compiler/src/expansion/ast.rs +++ b/external-crates/move/crates/move-compiler/src/expansion/ast.rs @@ -3,7 +3,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - diagnostics::WarningFilters, + diagnostics::warning_filters::WarningFilters, parser::ast::{ self as P, Ability, Ability_, BinOp, BlockLabel, ConstantName, DatatypeName, Field, FunctionName, ModuleName, QuantKind, UnaryOp, Var, VariantName, ENTRY_MODIFIER, @@ -397,7 +397,7 @@ pub enum Exp_ { Pack(ModuleAccess, Option>, Fields), Vector(Loc, Option>, Spanned>), - IfElse(Box, Box, Box), + IfElse(Box, Box, Option>), Match(Box, Spanned>), While(Option, Box, Box), Loop(Option, Box), @@ -414,7 +414,7 @@ pub enum Exp_ { Assign(LValueList, Box), FieldMutate(Box, Box), Mutate(Box, Box), - Abort(Box), + Abort(Option>), Return(Option, Box), Break(Option, Box), Continue(Option), @@ -1571,13 +1571,15 @@ impl AstDebug for Exp_ { w.comma(elems, |w, e| e.ast_debug(w)); w.write("]"); } - E::IfElse(b, t, f) => { + E::IfElse(b, t, f_opt) => { w.write("if ("); b.ast_debug(w); w.write(") "); t.ast_debug(w); - w.write(" else "); - f.ast_debug(w); + if let Some(f) = f_opt { + w.write(" else "); + f.ast_debug(w); + } } E::Match(subject, arms) => { w.write("match ("); @@ -1650,8 +1652,11 @@ impl AstDebug for Exp_ { } E::Abort(e) => { - w.write("abort "); - e.ast_debug(w); + w.write("abort"); + if let Some(e) = e { + w.write(" "); + e.ast_debug(w); + } } E::Return(name, e) => { w.write("return "); diff --git a/external-crates/move/crates/move-compiler/src/expansion/name_validation.rs b/external-crates/move/crates/move-compiler/src/expansion/name_validation.rs index 1991ab59a7f98..dcd9de94e0d93 100644 --- a/external-crates/move/crates/move-compiler/src/expansion/name_validation.rs +++ b/external-crates/move/crates/move-compiler/src/expansion/name_validation.rs @@ -103,7 +103,7 @@ impl NameCase { #[allow(clippy::result_unit_err)] pub fn check_valid_address_name( - env: &mut CompilationEnv, + env: &CompilationEnv, sp!(_, ln_): &P::LeadingNameAccess, ) -> Result<(), ()> { use P::LeadingNameAccess_ as LN; @@ -120,11 +120,7 @@ pub fn valid_local_variable_name(s: Symbol) -> bool { } #[allow(clippy::result_unit_err)] -pub fn check_valid_function_parameter_name( - env: &mut CompilationEnv, - is_macro: Option, - v: &Var, -) { +pub fn check_valid_function_parameter_name(env: &CompilationEnv, is_macro: Option, v: &Var) { const SYNTAX_IDENTIFIER_NOTE: &str = "'macro' parameters start with '$' to indicate that their arguments are not evaluated \ before the macro is expanded, meaning the entire expression is substituted. 
\ @@ -144,7 +140,7 @@ pub fn check_valid_function_parameter_name( (macro_loc, macro_msg), ); diag.add_note(SYNTAX_IDENTIFIER_NOTE); - env.add_diag(diag); + env.add_error_diag(diag); } } else if is_syntax_identifier { let msg = format!( @@ -153,26 +149,26 @@ pub fn check_valid_function_parameter_name( ); let mut diag = diag!(Declarations::InvalidName, (v.loc(), msg)); diag.add_note(SYNTAX_IDENTIFIER_NOTE); - env.add_diag(diag); + env.add_error_diag(diag); } else if !is_valid_local_variable_name(v.value()) { let msg = format!( "Invalid parameter name '{}'. Local variable names must start with 'a'..'z', '_', \ or be a valid name quoted with backticks (`name`)", v, ); - env.add_diag(diag!(Declarations::InvalidName, (v.loc(), msg))); + env.add_error_diag(diag!(Declarations::InvalidName, (v.loc(), msg))); } let _ = check_restricted_name_all_cases(env, NameCase::Variable, &v.0); } -pub fn check_valid_local_name(env: &mut CompilationEnv, v: &Var) { +pub fn check_valid_local_name(env: &CompilationEnv, v: &Var) { if !is_valid_local_variable_name(v.value()) { let msg = format!( "Invalid local name '{}'. Local variable names must start with 'a'..'z', '_', \ or be a valid name quoted with backticks (`name`)", v, ); - env.add_diag(diag!(Declarations::InvalidName, (v.loc(), msg))); + env.add_error_diag(diag!(Declarations::InvalidName, (v.loc(), msg))); } let _ = check_restricted_name_all_cases(env, NameCase::Variable, &v.0); } @@ -182,7 +178,7 @@ fn is_valid_local_variable_name(s: Symbol) -> bool { } pub fn check_valid_module_member_name( - env: &mut CompilationEnv, + env: &CompilationEnv, member: ModuleMemberKind, name: Name, ) -> Option { @@ -193,7 +189,7 @@ pub fn check_valid_module_member_name( } pub fn check_valid_module_member_alias( - env: &mut CompilationEnv, + env: &CompilationEnv, member: ModuleMemberKind, alias: Name, ) -> Option { @@ -209,7 +205,7 @@ pub fn check_valid_module_member_alias( } fn check_valid_module_member_name_impl( - env: &mut CompilationEnv, + env: &CompilationEnv, member: ModuleMemberKind, n: &Name, case: NameCase, @@ -231,7 +227,7 @@ fn check_valid_module_member_name_impl( n, upper_first_letter(case.name()), ); - env.add_diag(diag!(Declarations::InvalidName, (n.loc, msg))); + env.add_error_diag(diag!(Declarations::InvalidName, (n.loc, msg))); return Err(()); } } @@ -243,7 +239,7 @@ fn check_valid_module_member_name_impl( n, upper_first_letter(case.name()), ); - env.add_diag(diag!(Declarations::InvalidName, (n.loc, msg))); + env.add_error_diag(diag!(Declarations::InvalidName, (n.loc, msg))); return Err(()); } } @@ -272,14 +268,14 @@ fn check_valid_module_member_name_impl( #[allow(clippy::result_unit_err)] pub fn check_valid_type_parameter_name( - env: &mut CompilationEnv, + env: &CompilationEnv, is_macro: Option, n: &Name, ) -> Result<(), ()> { // TODO move these names to a more central place? 
if n.value == symbol!("_") { let diag = restricted_name_error(NameCase::TypeParameter, n.loc, "_"); - env.add_diag(diag); + env.add_error_diag(diag); return Err(()); } @@ -302,7 +298,7 @@ pub fn check_valid_type_parameter_name( (macro_loc, macro_msg), ); diag.add_note(SYNTAX_IDENTIFIER_NOTE); - env.add_diag(diag); + env.add_error_diag(diag); } else { let next_char = n.value.chars().nth(1).unwrap(); if !next_char.is_ascii_alphabetic() { @@ -314,7 +310,7 @@ pub fn check_valid_type_parameter_name( ); let mut diag = diag!(Declarations::InvalidName, (n.loc, msg)); diag.add_note(SYNTAX_IDENTIFIER_NOTE); - env.add_diag(diag); + env.add_error_diag(diag); } } } else if is_syntax_ident { @@ -325,7 +321,7 @@ pub fn check_valid_type_parameter_name( ); let mut diag = diag!(Declarations::InvalidName, (n.loc, msg)); diag.add_note(SYNTAX_IDENTIFIER_NOTE); - env.add_diag(diag); + env.add_error_diag(diag); } // TODO move these names to a more central place? @@ -353,7 +349,7 @@ pub fn is_valid_datatype_or_constant_name(s: &str) -> bool { // Checks for a restricted name in any decl case // Self and vector are not allowed pub fn check_restricted_name_all_cases( - env: &mut CompilationEnv, + env: &CompilationEnv, case: NameCase, n: &Name, ) -> Result<(), ()> { @@ -373,7 +369,7 @@ pub fn check_restricted_name_all_cases( case.name(), n, ); - env.add_diag(diag!(Declarations::InvalidName, (n.loc, msg))); + env.add_error_diag(diag!(Declarations::InvalidName, (n.loc, msg))); return Err(()); } } @@ -385,7 +381,7 @@ pub fn check_restricted_name_all_cases( if n_str == ModuleName::SELF_NAME || (!can_be_vector && n_str == crate::naming::ast::BuiltinTypeName_::VECTOR) { - env.add_diag(restricted_name_error(case, n.loc, n_str)); + env.add_error_diag(restricted_name_error(case, n.loc, n_str)); Err(()) } else { Ok(()) @@ -393,13 +389,13 @@ pub fn check_restricted_name_all_cases( } fn check_restricted_names( - env: &mut CompilationEnv, + env: &CompilationEnv, case: NameCase, sp!(loc, n_): &Name, all_names: &BTreeSet, ) -> Result<(), ()> { if all_names.contains(n_) { - env.add_diag(restricted_name_error(case, *loc, n_)); + env.add_error_diag(restricted_name_error(case, *loc, n_)); Err(()) } else { Ok(()) diff --git a/external-crates/move/crates/move-compiler/src/expansion/path_expander.rs b/external-crates/move/crates/move-compiler/src/expansion/path_expander.rs index 70e7cde321745..3c5c2ebadd262 100644 --- a/external-crates/move/crates/move-compiler/src/expansion/path_expander.rs +++ b/external-crates/move/crates/move-compiler/src/expansion/path_expander.rs @@ -247,7 +247,7 @@ impl Move2024PathExpander { NR::Address(name.loc, make_address(context, name, name.loc, address)) } Some(AliasEntry::TypeParam(_)) => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( name.loc, "ICE alias map misresolved name as type param" ))); @@ -270,7 +270,7 @@ impl Move2024PathExpander { NR::ModuleAccess(name.loc, mident, mem) } AliasEntry::TypeParam(_) => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( name.loc, "ICE alias map misresolved name as type param" ))); @@ -318,7 +318,7 @@ impl Move2024PathExpander { .join(","); diag.add_note(format!("Type arguments are used with the enum, as '{mident}::{name}<{tys}>::{variant}'")) } - context.env.add_diag(diag); + context.add_diag(diag); } } } @@ -326,7 +326,7 @@ impl Move2024PathExpander { fn check_is_macro(context: &mut DefnContext, is_macro: &Option, result: &NR) { if let NR::Address(_, _) | NR::ModuleIdent(_, _) = result { if let Some(loc) = is_macro { - context.env.add_diag(diag!( 
+ context.add_diag(diag!( NameResolution::InvalidTypeParameter, ( *loc, @@ -385,7 +385,7 @@ impl Move2024PathExpander { && root.tyargs.is_none() => { if let Some(address) = top_level_address_opt(context, root.name) { - context.env.add_diag(diag!( + context.add_diag(diag!( Migration::NeedsGlobalQualification, (root.name.loc, "Must globally qualify name") )); @@ -467,9 +467,7 @@ impl Move2024PathExpander { is_macro = entry.is_macro; } NR::UnresolvedName(_, _) => { - context - .env - .add_diag(ice!((loc, "ICE access chain expansion failed"))); + context.add_diag(ice!((loc, "ICE access chain expansion failed"))); break; } NR::ResolutionFailure(_, _) => break, @@ -553,7 +551,6 @@ impl PathExpander for Move2024PathExpander { m_res.err_name() ); context - .env .add_diag(diag!(Attributes::AmbiguousAttributeValue, (loc, msg))); return None; } @@ -561,7 +558,7 @@ impl PathExpander for Move2024PathExpander { match result { NR::ModuleIdent(_, mident) => { if context.module_members.get(&mident).is_none() { - context.env.add_diag(diag!( + context.add_diag(diag!( NameResolution::UnboundModule, (loc, format!("Unbound module '{}'", mident)) )); @@ -581,11 +578,11 @@ impl PathExpander for Move2024PathExpander { } NR::Address(_, a) => EV::Address(a), result @ NR::ResolutionFailure(_, _) => { - context.env.add_diag(access_chain_resolution_error(result)); + context.add_diag(access_chain_resolution_error(result)); return None; } NR::IncompleteChain(loc) => { - context.env.add_diag(access_chain_incomplete_error(loc)); + context.add_diag(access_chain_incomplete_error(loc)); return None; } } @@ -628,7 +625,7 @@ impl PathExpander for Move2024PathExpander { access, ); diag.add_note("Variants may not be used as types. Use the enum instead."); - context.env.add_diag(diag); + context.add_diag(diag); // We could try to use the member access to try to keep going. 
return None; } @@ -637,7 +634,7 @@ impl PathExpander for Move2024PathExpander { (access, tyargs, is_macro) } NR::Address(_, _) => { - context.env.add_diag(unexpected_access_error( + context.add_diag(unexpected_access_error( resolved_name.loc(), resolved_name.name(), access, @@ -658,15 +655,15 @@ impl PathExpander for Move2024PathExpander { base_str, realized_str )); } - context.env.add_diag(diag); + context.add_diag(diag); return None; } result @ NR::ResolutionFailure(_, _) => { - context.env.add_diag(access_chain_resolution_error(result)); + context.add_diag(access_chain_resolution_error(result)); return None; } NR::IncompleteChain(loc) => { - context.env.add_diag(access_chain_incomplete_error(loc)); + context.add_diag(access_chain_incomplete_error(loc)); return None; } } @@ -692,7 +689,7 @@ impl PathExpander for Move2024PathExpander { (access, tyargs, is_macro) } NR::Address(_, _) | NR::ModuleIdent(_, _) => { - context.env.add_diag(unexpected_access_error( + context.add_diag(unexpected_access_error( resolved_name.loc(), resolved_name.name(), access, @@ -700,18 +697,18 @@ impl PathExpander for Move2024PathExpander { return None; } result @ NR::ResolutionFailure(_, _) => { - context.env.add_diag(access_chain_resolution_error(result)); + context.add_diag(access_chain_resolution_error(result)); return None; } NR::IncompleteChain(loc) => { - context.env.add_diag(access_chain_incomplete_error(loc)); + context.add_diag(access_chain_incomplete_error(loc)); return None; } } } }, Access::Module => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( loc, "ICE module access should never resolve to a module member" ))); @@ -734,11 +731,11 @@ impl PathExpander for Move2024PathExpander { match resolved_name { NR::ModuleIdent(_, mident) => Some(mident), NR::UnresolvedName(_, name) => { - context.env.add_diag(unbound_module_error(name)); + context.add_diag(unbound_module_error(name)); None } NR::Address(_, _) => { - context.env.add_diag(unexpected_access_error( + context.add_diag(unexpected_access_error( resolved_name.loc(), "address".to_string(), Access::Module, @@ -746,7 +743,7 @@ impl PathExpander for Move2024PathExpander { None } NR::ModuleAccess(_, _, _) | NR::Variant(_, _, _) => { - context.env.add_diag(unexpected_access_error( + context.add_diag(unexpected_access_error( resolved_name.loc(), "module member".to_string(), Access::Module, @@ -754,11 +751,11 @@ impl PathExpander for Move2024PathExpander { None } result @ NR::ResolutionFailure(_, _) => { - context.env.add_diag(access_chain_resolution_error(result)); + context.add_diag(access_chain_resolution_error(result)); None } NR::IncompleteChain(loc) => { - context.env.add_diag(access_chain_incomplete_error(loc)); + context.add_diag(access_chain_incomplete_error(loc)); None } } @@ -767,9 +764,7 @@ impl PathExpander for Move2024PathExpander { fn ide_autocomplete_suggestion(&mut self, context: &mut DefnContext, loc: Loc) { if context.env.ide_mode() { let info = self.aliases.get_ide_alias_information(); - context - .env - .add_ide_annotation(loc, IDEAnnotation::PathAutocompleteInfo(Box::new(info))); + context.add_ide_annotation(loc, IDEAnnotation::PathAutocompleteInfo(Box::new(info))); } } } @@ -934,7 +929,7 @@ impl PathExpander for LegacyPathExpander { let sp!(_, mident_) = self.aliases.module_alias_get(&name).unwrap(); let mident = sp(ident_loc, mident_); if context.module_members.get(&mident).is_none() { - context.env.add_diag(diag!( + context.add_diag(diag!( NameResolution::UnboundModule, (ident_loc, format!("Unbound module '{}'", mident)) 
)); @@ -949,7 +944,7 @@ impl PathExpander for LegacyPathExpander { let addr = Address::anonymous(*aloc, *a); let mident = sp(ident_loc, ModuleIdent_::new(addr, ModuleName(n.name))); if context.module_members.get(&mident).is_none() { - context.env.add_diag(diag!( + context.add_diag(diag!( NameResolution::UnboundModule, (ident_loc, format!("Unbound module '{}'", mident)) )); @@ -971,7 +966,7 @@ impl PathExpander for LegacyPathExpander { let mident = sp(ident_loc, ModuleIdent_::new(addr, ModuleName(n2.name))); if context.module_members.get(&mident).is_none() { - context.env.add_diag(diag!( + context.add_diag(diag!( NameResolution::UnboundModule, (ident_loc, format!("Unbound module '{}'", mident)) )); @@ -1007,7 +1002,7 @@ impl PathExpander for LegacyPathExpander { let tn_: ModuleAccessResult = match (access, ptn_) { (Access::Pattern, _) => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( loc, "Attempted to expand a variant with the legacy path expander" ))); @@ -1042,7 +1037,7 @@ impl PathExpander for LegacyPathExpander { make_access_result(sp(name.loc, EN::Name(name)), tyargs, is_macro) } (Access::Module, single_entry!(_name, _tyargs, _is_macro)) => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( loc, "ICE path resolution produced an impossible path for a module" ))); @@ -1056,7 +1051,7 @@ impl PathExpander for LegacyPathExpander { // Error cases (sp!(aloc, LN::AnonymousAddress(_)), [_]) => { let diag = unexpected_address_module_error(loc, *aloc, access); - context.env.add_diag(diag); + context.add_diag(diag); return None; } (sp!(_aloc, LN::GlobalAddress(_)), [_]) => { @@ -1069,7 +1064,7 @@ impl PathExpander for LegacyPathExpander { loc, "Paths that start with `::` are not valid in legacy move.", )); - context.env.add_diag(diag); + context.add_diag(diag); return None; } // Others @@ -1077,7 +1072,7 @@ impl PathExpander for LegacyPathExpander { self.ide_autocomplete_suggestion(context, n1.loc); if let Some(mident) = self.aliases.module_alias_get(n1) { let n2_name = n2.name; - let (tyargs, is_macro) = if !(path.has_tyargs_last()) { + let (tyargs, is_macro) = if !path.has_tyargs_last() { let mut diag = diag!( Syntax::InvalidName, (path.tyargs_loc().unwrap(), "Invalid type argument position") @@ -1085,7 +1080,7 @@ impl PathExpander for LegacyPathExpander { diag.add_note( "Type arguments may only be used with module members", ); - context.env.add_diag(diag); + context.add_diag(diag); (None, path.is_macro()) } else { (path.take_tyargs(), path.is_macro()) @@ -1096,7 +1091,7 @@ impl PathExpander for LegacyPathExpander { is_macro.copied(), ) } else { - context.env.add_diag(diag!( + context.add_diag(diag!( NameResolution::UnboundModule, (n1.loc, format!("Unbound module alias '{}'", n1)) )); @@ -1120,7 +1115,7 @@ impl PathExpander for LegacyPathExpander { (path.tyargs_loc().unwrap(), "Invalid type argument position") ); diag.add_note("Type arguments may only be used with module members"); - context.env.add_diag(diag); + context.add_diag(diag); (None, path.is_macro()) } else { (path.take_tyargs(), path.is_macro()) @@ -1129,14 +1124,14 @@ impl PathExpander for LegacyPathExpander { } (_ln, []) => { let diag = ice!((loc, "Found a root path with no additional entries")); - context.env.add_diag(diag); + context.add_diag(diag); return None; } (ln, [_n1, _n2, ..]) => { self.ide_autocomplete_suggestion(context, ln.loc); let mut diag = diag!(Syntax::InvalidName, (loc, "Too many name segments")); diag.add_note("Names may only have 0, 1, or 2 segments separated by '::'"); - 
context.env.add_diag(diag); + context.add_diag(diag); return None; } } @@ -1157,7 +1152,7 @@ impl PathExpander for LegacyPathExpander { ice_assert!(context.env, single.is_macro.is_none(), loc, "Found macro"); match self.aliases.module_alias_get(&single.name) { None => { - context.env.add_diag(diag!( + context.add_diag(diag!( NameResolution::UnboundModule, ( single.name.loc, @@ -1182,9 +1177,7 @@ impl PathExpander for LegacyPathExpander { } // Error cases (_ln, []) => { - context - .env - .add_diag(ice!((loc, "Found path with no path entries"))); + context.add_diag(ice!((loc, "Found path with no path entries"))); None } (ln, [n, m, ..]) => { @@ -1199,7 +1192,7 @@ impl PathExpander for LegacyPathExpander { module: ModuleName(n.name), }; let _ = module_ident(context, sp(ident_loc, pmident_)); - context.env.add_diag(diag!( + context.add_diag(diag!( NameResolution::NamePositionMismatch, if path.entries.len() < 3 { (m.name.loc, "Unexpected module member access. Expected a module identifier only") @@ -1227,7 +1220,7 @@ impl PathExpander for LegacyPathExpander { info.members.insert((*name, *mident, *member)); } let annotation = IDEAnnotation::PathAutocompleteInfo(Box::new(info)); - context.env.add_ide_annotation(loc, annotation) + context.add_ide_annotation(loc, annotation) } } } diff --git a/external-crates/move/crates/move-compiler/src/expansion/primitive_definers.rs b/external-crates/move/crates/move-compiler/src/expansion/primitive_definers.rs index cc1b92013c358..503a7c656fd51 100644 --- a/external-crates/move/crates/move-compiler/src/expansion/primitive_definers.rs +++ b/external-crates/move/crates/move-compiler/src/expansion/primitive_definers.rs @@ -20,7 +20,7 @@ use super::ast::Attribute_; /// Gather primitive defines from module declarations, erroring on duplicates for a given base /// type or for unknown base types. pub fn modules( - env: &mut CompilationEnv, + env: &CompilationEnv, pre_compiled_lib_opt: Option>, modules: &UniqueMap, ) { @@ -49,7 +49,7 @@ pub fn modules( } fn check_prim_definer( - env: &mut CompilationEnv, + env: &CompilationEnv, allow_shadowing: bool, definers: &mut BTreeMap, mident: ModuleIdent, @@ -61,12 +61,16 @@ fn check_prim_definer( let Some(sp!(attr_loc, attr_)) = defines_prim_attr else { return; }; + let warning_filters = env.top_level_warning_filter_scope(); let Attribute_::Parameterized(_, params) = attr_ else { let msg = format!( "Expected a primitive type parameterization, e.g. '{}()'", DefinesPrimitive::DEFINES_PRIM ); - env.add_diag(diag!(Attributes::InvalidUsage, (*attr_loc, msg))); + env.add_diag( + warning_filters, + diag!(Attributes::InvalidUsage, (*attr_loc, msg)), + ); return; }; if params.len() != 1 { @@ -74,7 +78,10 @@ fn check_prim_definer( "Expected a single primitive type parameterization, e.g. '{}()'", DefinesPrimitive::DEFINES_PRIM ); - env.add_diag(diag!(Attributes::InvalidUsage, (*attr_loc, msg))); + env.add_diag( + warning_filters, + diag!(Attributes::InvalidUsage, (*attr_loc, msg)), + ); return; } let (_, _, sp!(param_loc, param_)) = params.into_iter().next().unwrap(); @@ -83,7 +90,10 @@ fn check_prim_definer( "Expected a primitive type parameterization, e.g. 
'{}()'", DefinesPrimitive::DEFINES_PRIM ); - env.add_diag(diag!(Attributes::InvalidUsage, (*param_loc, msg))); + env.add_diag( + warning_filters, + diag!(Attributes::InvalidUsage, (*param_loc, msg)), + ); return; }; let Some(prim) = BuiltinTypeName_::resolve(name.value.as_str()) else { @@ -92,18 +102,24 @@ fn check_prim_definer( DefinesPrimitive::DEFINES_PRIM, name, ); - env.add_diag(diag!(Attributes::InvalidUsage, (name.loc, msg))); + env.add_diag( + warning_filters, + diag!(Attributes::InvalidUsage, (name.loc, msg)), + ); return; }; if let Some(prev) = definers.get(&prim) { if !allow_shadowing { let msg = format!("Duplicate definer annotated for primitive type '{}'", prim); - env.add_diag(diag!( - Attributes::InvalidUsage, - (*attr_loc, msg), - (prev.loc, "Previously declared here") - )); + env.add_diag( + warning_filters, + diag!( + Attributes::InvalidUsage, + (*attr_loc, msg), + (prev.loc, "Previously declared here") + ), + ); } } else { definers.insert(prim, mident); diff --git a/external-crates/move/crates/move-compiler/src/expansion/translate.rs b/external-crates/move/crates/move-compiler/src/expansion/translate.rs index 40eb01fe7a98c..baa9438dd8ecf 100644 --- a/external-crates/move/crates/move-compiler/src/expansion/translate.rs +++ b/external-crates/move/crates/move-compiler/src/expansion/translate.rs @@ -4,7 +4,12 @@ use crate::{ diag, - diagnostics::{codes::WarningFilter, Diagnostic, WarningFilters}, + diagnostics::{ + warning_filters::{ + WarningFilter, WarningFilters, WarningFiltersScope, FILTER_ALL, FILTER_UNUSED, + }, + Diagnostic, Diagnostics, + }, editions::{self, Edition, FeatureGate, Flavor}, expansion::{ alias_map_builder::{ @@ -33,6 +38,7 @@ use crate::{ NATIVE_MODIFIER, }, shared::{ + ide::{IDEAnnotation, IDEInfo}, known_attributes::AttributePosition, string_utils::{is_pascal_case, is_upper_snake_case}, unique_map::UniqueMap, @@ -40,8 +46,8 @@ use crate::{ }, FullyCompiledProgram, }; -use move_command_line_common::parser::{parse_u16, parse_u256, parse_u32}; use move_core_types::account_address::AccountAddress; +use move_core_types::parsing::parser::{parse_u16, parse_u256, parse_u32}; use move_ir_types::location::*; use move_proc_macros::growing_stack; use move_symbol_pool::Symbol; @@ -65,10 +71,11 @@ type ModuleMembers = BTreeMap; pub(super) struct DefnContext<'env, 'map> { pub(super) named_address_mapping: Option<&'map NamedAddressMap>, pub(super) module_members: UniqueMap, - pub(super) env: &'env mut CompilationEnv, + pub(super) env: &'env CompilationEnv, pub(super) address_conflicts: BTreeSet, pub(super) current_package: Option, pub(super) is_source_definition: bool, + warning_filters_scope: WarningFiltersScope, } struct Context<'env, 'map> { @@ -82,7 +89,7 @@ struct Context<'env, 'map> { impl<'env, 'map> Context<'env, 'map> { fn new( - compilation_env: &'env mut CompilationEnv, + compilation_env: &'env CompilationEnv, module_members: UniqueMap, address_conflicts: BTreeSet, ) -> Self { @@ -92,6 +99,7 @@ impl<'env, 'map> Context<'env, 'map> { all_filter_alls.add(f); } } + let warning_filters_scope = compilation_env.top_level_warning_filter_scope().clone(); let defn_context = DefnContext { env: compilation_env, named_address_mapping: None, @@ -99,6 +107,7 @@ impl<'env, 'map> Context<'env, 'map> { module_members, current_package: None, is_source_definition: false, + warning_filters_scope, }; Context { defn_context, @@ -108,7 +117,7 @@ impl<'env, 'map> Context<'env, 'map> { } } - fn env(&mut self) -> &mut CompilationEnv { + fn env(&mut self) -> &CompilationEnv { 
self.defn_context.env } @@ -141,7 +150,7 @@ impl<'env, 'map> Context<'env, 'map> { .unwrap() .push_alias_scope(loc, new_scope); match res { - Err(diag) => self.env().add_diag(*diag), + Err(diag) => self.add_diag(*diag), Ok(unnecessaries) => unnecessary_alias_errors(self, unnecessaries), } } @@ -242,7 +251,7 @@ impl<'env, 'map> Context<'env, 'map> { pub fn spec_deprecated(&mut self, loc: Loc, is_error: bool) { let diag = self.spec_deprecated_diag(loc, is_error); - self.env().add_diag(diag); + self.add_diag(diag); } pub fn spec_deprecated_diag(&mut self, loc: Loc, is_error: bool) -> Diagnostic { @@ -258,6 +267,60 @@ impl<'env, 'map> Context<'env, 'map> { ) ) } + + pub fn add_diag(&self, diag: Diagnostic) { + self.defn_context.add_diag(diag); + } + + #[allow(unused)] + pub fn add_diags(&self, diags: Diagnostics) { + self.defn_context.add_diags(diags); + } + + #[allow(unused)] + pub fn extend_ide_info(&self, info: IDEInfo) { + self.defn_context.extend_ide_info(info); + } + + #[allow(unused)] + pub fn add_ide_annotation(&self, loc: Loc, info: IDEAnnotation) { + self.defn_context.add_ide_annotation(loc, info); + } + + pub fn push_warning_filter_scope(&mut self, filters: WarningFilters) { + self.defn_context.push_warning_filter_scope(filters) + } + + pub fn pop_warning_filter_scope(&mut self) { + self.defn_context.pop_warning_filter_scope() + } +} + +impl DefnContext<'_, '_> { + pub(super) fn add_diag(&self, diag: Diagnostic) { + self.env.add_diag(&self.warning_filters_scope, diag); + } + + pub(super) fn add_diags(&self, diags: Diagnostics) { + self.env.add_diags(&self.warning_filters_scope, diags); + } + + pub(super) fn extend_ide_info(&self, info: IDEInfo) { + self.env.extend_ide_info(&self.warning_filters_scope, info); + } + + pub(super) fn add_ide_annotation(&self, loc: Loc, info: IDEAnnotation) { + self.env + .add_ide_annotation(&self.warning_filters_scope, loc, info); + } + + pub(super) fn push_warning_filter_scope(&mut self, filters: WarningFilters) { + self.warning_filters_scope.push(filters) + } + + pub(super) fn pop_warning_filter_scope(&mut self) { + self.warning_filters_scope.pop() + } } fn unnecessary_alias_errors(context: &mut Context, unnecessaries: Vec) { @@ -297,7 +360,7 @@ fn unnecessary_alias_error(context: &mut Context, unnecessary: UnnecessaryAlias) // nothing to point to for the default case diag.add_secondary_label((prev, "The same alias was previously declared here")) } - context.env().add_diag(diag); + context.add_diag(diag); } /// We mark named addresses as having a conflict if there is not a bidirectional mapping between @@ -402,12 +465,13 @@ fn default_aliases(context: &mut Context) -> AliasMapBuilder { //************************************************************************************************** pub fn program( - compilation_env: &mut CompilationEnv, + compilation_env: &CompilationEnv, pre_compiled_lib: Option>, prog: P::Program, ) -> E::Program { let address_conflicts = compute_address_conflicts(pre_compiled_lib.clone(), &prog); + let warning_filters_scope = compilation_env.top_level_warning_filter_scope().clone(); let mut member_computation_context = DefnContext { env: compilation_env, named_address_mapping: None, @@ -415,6 +479,7 @@ pub fn program( address_conflicts, current_package: None, is_source_definition: false, + warning_filters_scope, }; let module_members = { @@ -477,7 +542,7 @@ pub fn program( // should never fail if let Err(diag) = path_expander.push_alias_scope(Loc::invalid(), aliases) { - context.env().add_diag(*diag); + context.add_diag(*diag); 
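Across expansion (and the other passes touched by this change), diagnostic reporting moves from methods on `&mut CompilationEnv` to context helpers such as `DefnContext::add_diag` above, which forward to `env.add_diag(&self.warning_filters_scope, diag)`; since the environment is now held by shared reference, its diagnostic sink must be writable through `&self`. The toy program below is a minimal standalone model of that shape, using `RefCell` purely for illustration; the crate's actual interior-mutability choice is not shown in this diff.

```rust
use std::cell::RefCell;

/// Toy diagnostic and filter types, for illustration only.
#[derive(Debug)]
struct Diagnostic(&'static str);

struct WarningFiltersScope; // stand-in for the real stack of filters
impl WarningFiltersScope {
    fn is_filtered(&self, _d: &Diagnostic) -> bool {
        false
    }
}

/// Minimal model of an environment whose diagnostic sink is writable through a
/// shared reference, which is what lets passes and contexts hold `&CompilationEnv`.
struct Env {
    diags: RefCell<Vec<Diagnostic>>,
}

impl Env {
    fn add_diag(&self, scope: &WarningFiltersScope, d: Diagnostic) {
        if !scope.is_filtered(&d) {
            self.diags.borrow_mut().push(d);
        }
    }
}

fn main() {
    let env = Env { diags: RefCell::new(Vec::new()) };
    let scope = WarningFiltersScope;
    env.add_diag(&scope, Diagnostic("unused variable"));
    assert_eq!(env.diags.borrow().len(), 1);
}
```

A per-pass context then simply pairs the shared environment with its own scope and pushes or pops filter frames as it walks items, exactly as the `Context`/`DefnContext` helpers above do.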
} context.defn_context.named_address_mapping = Some(named_address_map); @@ -511,7 +576,7 @@ pub fn program( let aliases = named_addr_map_to_alias_map_builder(&mut context, named_address_map); // should never fail if let Err(diag) = path_expander.push_alias_scope(Loc::invalid(), aliases) { - context.env().add_diag(*diag); + context.add_diag(*diag); } context.defn_context.named_address_mapping = Some(named_address_map); context.path_expander = Some(Box::new(path_expander)); @@ -611,7 +676,7 @@ fn top_level_address_( // This should have been handled elsewhere in alias resolution for user-provided paths, and // should never occur in compiler-generated ones. P::LeadingNameAccess_::GlobalAddress(name) => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( loc, "Found an address in top-level address position that uses a global name" ))); @@ -622,7 +687,7 @@ fn top_level_address_( Some(addr) => make_address(context, name, loc, addr), None => { if name_res.is_ok() { - context.env.add_diag(address_without_value_error( + context.add_diag(address_without_value_error( suggest_declaration, loc, &name, @@ -650,7 +715,7 @@ pub(super) fn top_level_address_opt( // This should have been handled elsewhere in alias resolution for user-provided paths, and // should never occur in compiler-generated ones. P::LeadingNameAccess_::GlobalAddress(_) => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( loc, "Found an address in top-level address position that uses a global name" ))); @@ -730,7 +795,7 @@ fn check_module_address( } else { "Multiple addresses specified for module" }; - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::DuplicateItem, (other_loc, msg), (loc, "Address previously specified here") @@ -750,7 +815,7 @@ fn duplicate_module( let old_mident = module_map.get_key(&mident).unwrap(); let dup_msg = format!("Duplicate definition for module '{}'", mident); let prev_msg = format!("Module previously defined here, with '{}'", old_mident); - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::DuplicateItem, (mident.loc, dup_msg), (old_loc, prev_msg), @@ -791,9 +856,7 @@ fn set_module_address( address 'module
<address>
::{}''", module_name ); - context - .env() - .add_diag(diag!(Declarations::InvalidModule, (loc, msg))); + context.add_diag(diag!(Declarations::InvalidModule, (loc, msg))); Address::anonymous(loc, NumericalAddress::DEFAULT_ERROR_ADDRESS) } }) @@ -819,9 +882,7 @@ fn module_( let config = context.env().package_config(package_name); warning_filter.union(&config.warning_filter); - context - .env() - .add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); assert!(context.address.is_none()); assert!(address.is_none()); set_module_address(context, &name, module_address); @@ -831,9 +892,7 @@ fn module_( "Invalid module name '{}'. Module names cannot start with '_'", name, ); - context - .env() - .add_diag(diag!(Declarations::InvalidName, (name.loc(), msg))); + context.add_diag(diag!(Declarations::InvalidName, (name.loc(), msg))); } let name_loc = name.0.loc; @@ -906,7 +965,7 @@ fn module_( functions, warning_filter, }; - context.env().pop_warning_filter_scope(); + context.pop_warning_filter_scope(); (current_module, def) } @@ -936,15 +995,13 @@ fn check_visibility_modifiers( let loc = friend_decl.loc; let diag = if edition == Edition::E2024_MIGRATION { for aloc in &friend_decl.attr_locs { - context - .env() - .add_diag(diag!(Migration::RemoveFriend, (*aloc, friend_msg))); + context.add_diag(diag!(Migration::RemoveFriend, (*aloc, friend_msg))); } diag!(Migration::RemoveFriend, (loc, friend_msg)) } else { diag!(Editions::DeprecatedFeature, (loc, friend_msg)) }; - context.env().add_diag(diag); + context.add_diag(diag); } for (_, _, function) in functions { let E::Visibility::Friend(loc) = function.visibility else { @@ -955,7 +1012,7 @@ fn check_visibility_modifiers( } else { diag!(Editions::DeprecatedFeature, (loc, pub_msg)) }; - context.env().add_diag(diag); + context.add_diag(diag); } } @@ -985,7 +1042,7 @@ fn check_visibility_modifiers( ); let package_definition_msg = format!("'{}' visibility used here", E::Visibility::PACKAGE); for (_, _, friend) in friends { - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidVisibilityModifier, (friend.loc, friend_error_msg.clone()), ( @@ -1007,7 +1064,7 @@ fn check_visibility_modifiers( for (_, _, function) in functions { match function.visibility { E::Visibility::Friend(loc) => { - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidVisibilityModifier, (loc, friend_error_msg.clone()), ( @@ -1017,7 +1074,7 @@ fn check_visibility_modifiers( )); } E::Visibility::Package(loc) => { - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidVisibilityModifier, (loc, package_error_msg.clone()), ( @@ -1058,9 +1115,7 @@ fn known_attributes( e.g. 
#[{ext}({n})]", ext = known_attributes::ExternalAttribute::EXTERNAL ); - context - .env() - .add_diag(diag!(Declarations::UnknownAttribute, (loc, msg))); + context.add_diag(diag!(Declarations::UnknownAttribute, (loc, msg))); None } sp!(loc, E::AttributeName_::Known(n)) => { @@ -1111,9 +1166,7 @@ fn unique_attributes( let msg = format!( "Known attribute '{known}' is not expected in a nested attribute position" ); - context - .env() - .add_diag(diag!(Declarations::InvalidAttribute, (nloc, msg))); + context.add_diag(diag!(Declarations::InvalidAttribute, (nloc, msg))); continue; } @@ -1133,7 +1186,7 @@ fn unique_attributes( "Expected to be used with one of the following: {}", all_expected ); - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidAttribute, (nloc, msg), (nloc, expected_msg) @@ -1151,7 +1204,7 @@ fn unique_attributes( } if let Err((_, old_loc)) = attr_map.add(sp(nloc, name_), sp(loc, attr_)) { let msg = format!("Duplicate attribute '{}' attached to the same item", name_); - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::DuplicateItem, (loc, msg), (old_loc, "Attribute previously given here"), @@ -1235,9 +1288,7 @@ fn warning_filter(context: &mut Context, attributes: &E::Attributes) -> WarningF DiagnosticAttribute::ALLOW, n ); - context - .env() - .add_diag(diag!(Declarations::InvalidAttribute, (inner_attr_loc, msg))); + context.add_diag(diag!(Declarations::InvalidAttribute, (inner_attr_loc, msg))); (None, vec![*n]) } }; @@ -1262,9 +1313,7 @@ fn warning_filter(context: &mut Context, attributes: &E::Attributes) -> WarningF ) } }; - context - .env() - .add_diag(diag!(Attributes::ValueWarning, (nloc, msg))); + context.add_diag(diag!(Attributes::ValueWarning, (nloc, msg))); continue; }; for f in filters { @@ -1295,9 +1344,7 @@ fn get_allow_attribute_inners<'a>( .to_str() .unwrap(), ); - context - .env() - .add_diag(diag!(Attributes::ValueWarning, (allow_attr.loc, msg))); + context.add_diag(diag!(Attributes::ValueWarning, (allow_attr.loc, msg))); None } } @@ -1322,9 +1369,7 @@ fn prefixed_warning_filters( prefix, n ); - context - .env() - .add_diag(diag!(Attributes::ValueWarning, (*loc, msg))); + context.add_diag(diag!(Attributes::ValueWarning, (*loc, msg))); *n } }) @@ -1534,7 +1579,7 @@ fn use_( otherwise they must internal to declared scope.", P::Visibility::PUBLIC ); - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidUseFun, (loc, msg), (vis_loc, vis_msg) @@ -1592,7 +1637,7 @@ fn module_use( P::ModuleUse::Module(alias_opt) => { let mident = module_ident(&mut context.defn_context, in_mident); if !context.defn_context.module_members.contains_key(&mident) { - context.env().add_diag(unbound_module(&mident)); + context.add_diag(unbound_module(&mident)); return; }; let alias = alias_opt @@ -1605,7 +1650,7 @@ fn module_use( let members = match context.defn_context.module_members.get(&mident) { Some(members) => members, None => { - context.env().add_diag(unbound_module(&mident)); + context.add_diag(unbound_module(&mident)); return; } }; @@ -1644,7 +1689,7 @@ fn module_use( "Invalid 'use'. Unbound member '{}' in module '{}'", member, mident ); - context.env().add_diag(diag!( + context.add_diag(diag!( NameResolution::UnboundModuleMember, (member.loc, msg), (mloc, format!("Module '{}' declared here", mident)), @@ -1686,7 +1731,7 @@ fn module_use( P::ModuleUse::Partial { .. 
} => { let mident = module_ident(&mut context.defn_context, in_mident); if !context.defn_context.module_members.contains_key(&mident) { - context.env().add_diag(unbound_module(&mident)); + context.add_diag(unbound_module(&mident)); return; }; add_module_alias!(mident, mident.value.module.0) @@ -1762,7 +1807,7 @@ fn duplicate_module_alias(context: &mut Context, old_loc: Loc, alias: Name) { "Duplicate module alias '{}'. Module aliases must be unique within a given namespace", alias ); - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::DuplicateItem, (alias.loc, msg), (old_loc, "Alias previously defined here"), @@ -1774,7 +1819,7 @@ fn duplicate_module_member(context: &mut Context, old_loc: Loc, alias: Name) { "Duplicate module member or alias '{}'. Top level names in a namespace must be unique", alias ); - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::DuplicateItem, (alias.loc, msg), (old_loc, "Alias previously defined here"), @@ -1803,7 +1848,7 @@ fn unused_alias(context: &mut Context, _kind: &str, alias: Name) { alias )); } - context.env().add_diag(diag); + context.add_diag(diag); } //************************************************************************************************** @@ -1836,9 +1881,7 @@ fn struct_def_( } = pstruct; let attributes = flatten_attributes(context, AttributePosition::Struct, attributes); let warning_filter = warning_filter(context, &attributes); - context - .env() - .add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); let type_parameters = datatype_type_parameters(context, pty_params); context.push_type_parameters(type_parameters.iter().map(|tp| &tp.name)); let abilities = ability_set(context, "modifier", abilities_vec); @@ -1853,7 +1896,7 @@ fn struct_def_( fields, }; context.pop_alias_scope(None); - context.env().pop_warning_filter_scope(); + context.pop_warning_filter_scope(); (name, sdef) } @@ -1874,7 +1917,7 @@ fn struct_fields( for (idx, (field, pt)) in pfields_vec.into_iter().enumerate() { let t = type_(context, pt); if let Err((field, old_loc)) = field_map.add(field, (idx, t)) { - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::DuplicateItem, ( field.loc(), @@ -1920,9 +1963,7 @@ fn enum_def_( } = penum; let attributes = flatten_attributes(context, AttributePosition::Enum, attributes); let warning_filter = warning_filter(context, &attributes); - context - .env() - .add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); let type_parameters = datatype_type_parameters(context, pty_params); context.push_type_parameters(type_parameters.iter().map(|tp| &tp.name)); let abilities = ability_set(context, "modifier", abilities_vec); @@ -1937,7 +1978,7 @@ fn enum_def_( variants, }; context.pop_alias_scope(None); - context.env().pop_warning_filter_scope(); + context.pop_warning_filter_scope(); (name, edef) } @@ -1949,7 +1990,7 @@ fn enum_variants( ) -> UniqueMap { let mut variants = UniqueMap::new(); if pvariants.is_empty() { - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidEnum, (eloc, "An 'enum' must define at least one variant") )) @@ -1962,7 +2003,7 @@ fn enum_variants( "Duplicate definition for variant '{}' in enum '{}'", vname, ename ); - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::DuplicateItem, (loc, msg), (old_loc.1, "Variant previously defined here") @@ -2000,7 +2041,7 @@ fn variant_fields( for (idx, (field, pt)) in 
pfields_vec.into_iter().enumerate() { let t = type_(context, pt); if let Err((field, old_loc)) = field_map.add(field, (idx, t)) { - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::DuplicateItem, ( field.loc(), @@ -2034,7 +2075,7 @@ fn friend( unique", mident ); - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::DuplicateItem, (friend.loc, msg), (old_friend.loc, "Friend previously declared here"), @@ -2096,9 +2137,7 @@ fn constant_( } = pconstant; let attributes = flatten_attributes(context, AttributePosition::Constant, pattributes); let warning_filter = warning_filter(context, &attributes); - context - .env() - .add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); let signature = type_(context, psignature); let value = *exp(context, Box::new(pvalue)); let constant = E::Constant { @@ -2109,7 +2148,7 @@ fn constant_( signature, value, }; - context.env().pop_warning_filter_scope(); + context.pop_warning_filter_scope(); (name, constant) } @@ -2147,9 +2186,7 @@ fn function_( } = pfunction; let attributes = flatten_attributes(context, AttributePosition::Function, pattributes); let warning_filter = warning_filter(context, &attributes); - context - .env() - .add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); if let (Some(entry_loc), Some(macro_loc)) = (entry, macro_) { let e_msg = format!( "Invalid function declaration. \ @@ -2157,7 +2194,7 @@ fn function_( are fully-expanded inline during compilation" ); let m_msg = format!("Function declared as '{MACRO_MODIFIER}' here"); - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidFunction, (entry_loc, e_msg), (macro_loc, m_msg), @@ -2169,7 +2206,7 @@ fn function_( '{NATIVE_MODIFIER}' functions cannot be '{MACRO_MODIFIER}'", ); let m_msg = format!("Function declared as '{MACRO_MODIFIER}' here"); - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidFunction, (*native_loc, n_msg), (macro_loc, m_msg), @@ -2208,7 +2245,7 @@ fn function_( body, }; context.pop_alias_scope(None); - context.env().pop_warning_filter_scope(); + context.pop_warning_filter_scope(); (name, fdef) } @@ -2267,7 +2304,7 @@ fn ability_set(context: &mut Context, case: &str, abilities_vec: Vec) - for ability in abilities_vec { let loc = ability.loc; if let Err(prev_loc) = set.add(ability) { - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::DuplicateItem, (loc, format!("Duplicate '{}' ability {}", ability, case)), (prev_loc, "Ability previously given here") @@ -2501,9 +2538,7 @@ fn exp(context: &mut Context, pe: Box) -> Box { PE::Name(pn) if pn.value.has_tyargs() => { let msg = "Expected name to be followed by a brace-enclosed list of field expressions \ or a parenthesized list of arguments for a function call"; - context - .env() - .add_diag(diag!(NameResolution::NamePositionMismatch, (loc, msg))); + context.add_diag(diag!(NameResolution::NamePositionMismatch, (loc, msg))); EE::UnresolvedError } PE::Name(pn) => { @@ -2548,11 +2583,8 @@ fn exp(context: &mut Context, pe: Box) -> Box { PE::IfElse(pb, pt, pf_opt) => { let eb = exp(context, pb); let et = exp(context, pt); - let ef = match pf_opt { - None => Box::new(sp(loc, EE::Unit { trailing: false })), - Some(pf) => exp(context, pf), - }; - EE::IfElse(eb, et, ef) + let ef_opt = pf_opt.map(|pf| exp(context, pf)); + EE::IfElse(eb, et, ef_opt) } PE::Match(subject, sp!(aloc, arms)) => EE::Match( exp(context, 
subject), @@ -2603,7 +2635,8 @@ fn exp(context: &mut Context, pe: Box) -> Box { Some(LValue::FieldMutate(edotted)) => EE::FieldMutate(edotted, er), } } - PE::Abort(pe) => EE::Abort(exp(context, pe)), + PE::Abort(None) => EE::Abort(None), + PE::Abort(Some(pe)) => EE::Abort(Some(exp(context, pe))), PE::Return(name_opt, pe_opt) => { let ev = match pe_opt { None => Box::new(sp(loc, EE::Unit { trailing: false })), @@ -2683,7 +2716,7 @@ fn exp(context: &mut Context, pe: Box) -> Box { consider updating your Move edition to '{valid_editions}'" )); diag.add_note(editions::UPGRADE_NOTE); - context.env().add_diag(diag); + context.add_diag(diag); EE::UnresolvedError } else { match exp_dotted(context, Box::new(sp(loc, pdotted_))) { @@ -2775,9 +2808,7 @@ fn exp_cast(context: &mut Context, in_parens: bool, plhs: Box, pty: P::T .check_feature(current_package, FeatureGate::NoParensCast, loc); if supports_feature && ambiguous_cast(&plhs) { let msg = "Potentially ambiguous 'as'. Add parentheses to disambiguate"; - context - .env() - .add_diag(diag!(Syntax::AmbiguousCast, (loc, msg))); + context.add_diag(diag!(Syntax::AmbiguousCast, (loc, msg))); } } EE::Cast(exp(context, plhs), type_(context, pty)) @@ -2807,9 +2838,7 @@ fn maybe_labeled_exp( _ => { let msg = "Invalid label. Labels can only be used on 'while', 'loop', or block '{{}}' \ expressions"; - context - .env() - .add_diag(diag!(Syntax::InvalidLabel, (loc, msg))); + context.add_diag(diag!(Syntax::InvalidLabel, (loc, msg))); E::Exp_::UnresolvedError } }; @@ -2823,7 +2852,7 @@ fn ensure_unique_label( label_opt: Option, ) { if let Some(old_label) = label_opt { - context.env().add_diag(diag!( + context.add_diag(diag!( Syntax::InvalidLabel, (loc, "Multiple labels for a single expression"), (old_label.0.loc, "Label previously given here"), @@ -2869,7 +2898,7 @@ fn move_or_copy_path_(context: &mut Context, case: PathCase, pe: Box) -> if !matches!(&inner.value, E::Exp_::Name(_, _)) { let cmsg = format!("Invalid '{}' of expression", case.case()); let emsg = "Expected a name or path access, e.g. 
'x' or 'e.f'"; - context.env().add_diag(diag!( + context.add_diag(diag!( Syntax::InvalidMoveOrCopy, (cloc, cmsg), (inner.loc, emsg) @@ -2940,7 +2969,7 @@ fn check_ellipsis_usage(context: &mut Context, ellipsis_locs: &[Loc]) { diag.add_secondary_label((*loc, "Ellipsis pattern used again here")); } diag.add_note("An ellipsis pattern can only appear once in a constructor's pattern."); - context.env().add_diag(diag); + context.add_diag(diag); } } @@ -2974,7 +3003,7 @@ fn match_pattern(context: &mut Context, sp!(loc, pat_): P::MatchPattern) -> E::M EM::Variant(_, _) | EM::ModuleAccess(_, _) => Some(name), EM::Name(_) if identifier_okay => Some(name), EM::Name(_) => { - context.env().add_diag(diag!( + context.add_diag(diag!( Syntax::UnexpectedToken, ( name.loc, @@ -2999,7 +3028,7 @@ fn match_pattern(context: &mut Context, sp!(loc, pat_): P::MatchPattern) -> E::M } = context.name_access_chain_to_module_access(Access::Pattern, name_chain)?; let name = head_ctor_okay(context, access, identifier_okay)?; if let Some(loc) = is_macro { - context.env().add_diag(diag!( + context.add_diag(diag!( Syntax::InvalidMacro, (loc, "Macros are not allowed in patterns.") )); @@ -3105,14 +3134,14 @@ fn match_pattern(context: &mut Context, sp!(loc, pat_): P::MatchPattern) -> E::M resolve this constant's name", ); } - context.env().add_diag(diag); + context.add_diag(diag); error_pattern!() } else { if let Some(_tys) = pts_opt { let msg = "Invalid type arguments on a pattern variable"; let mut diag = diag!(Declarations::InvalidName, (name.loc, msg)); diag.add_note("Type arguments cannot appear on pattern variables"); - context.env().add_diag(diag); + context.add_diag(diag); } sp(loc, EP::Binder(mutability(context, loc, mut_), Var(name))) } @@ -3122,7 +3151,7 @@ fn match_pattern(context: &mut Context, sp!(loc, pat_): P::MatchPattern) -> E::M let msg = "'mut' can only be used with variable bindings in patterns"; let nmsg = "Expected a valid 'enum' variant, 'struct', or 'const', not a variable"; - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidName, (mloc, msg), (head_ctor_name.loc, nmsg) @@ -3156,7 +3185,7 @@ fn match_pattern(context: &mut Context, sp!(loc, pat_): P::MatchPattern) -> E::M ), PP::At(x, inner) => { if x.is_underscore() { - context.env().add_diag(diag!( + context.add_diag(diag!( NameResolution::InvalidPattern, (x.loc(), "Can't use '_' as a binder in an '@' pattern") )); @@ -3183,42 +3212,42 @@ pub(super) fn value(context: &mut DefnContext, sp!(loc, pvalue_): P::Value) -> O PV::Num(s) if s.ends_with("u8") => match parse_u8(&s[..s.len() - 2]) { Ok((u, _format)) => EV::U8(u), Err(_) => { - context.env.add_diag(num_too_big_error(loc, "'u8'")); + context.add_diag(num_too_big_error(loc, "'u8'")); return None; } }, PV::Num(s) if s.ends_with("u16") => match parse_u16(&s[..s.len() - 3]) { Ok((u, _format)) => EV::U16(u), Err(_) => { - context.env.add_diag(num_too_big_error(loc, "'u16'")); + context.add_diag(num_too_big_error(loc, "'u16'")); return None; } }, PV::Num(s) if s.ends_with("u32") => match parse_u32(&s[..s.len() - 3]) { Ok((u, _format)) => EV::U32(u), Err(_) => { - context.env.add_diag(num_too_big_error(loc, "'u32'")); + context.add_diag(num_too_big_error(loc, "'u32'")); return None; } }, PV::Num(s) if s.ends_with("u64") => match parse_u64(&s[..s.len() - 3]) { Ok((u, _format)) => EV::U64(u), Err(_) => { - context.env.add_diag(num_too_big_error(loc, "'u64'")); + context.add_diag(num_too_big_error(loc, "'u64'")); return None; } }, PV::Num(s) if s.ends_with("u128") => match 
parse_u128(&s[..s.len() - 4]) { Ok((u, _format)) => EV::U128(u), Err(_) => { - context.env.add_diag(num_too_big_error(loc, "'u128'")); + context.add_diag(num_too_big_error(loc, "'u128'")); return None; } }, PV::Num(s) if s.ends_with("u256") => match parse_u256(&s[..s.len() - 4]) { Ok((u, _format)) => EV::U256(u), Err(_) => { - context.env.add_diag(num_too_big_error(loc, "'u256'")); + context.add_diag(num_too_big_error(loc, "'u256'")); return None; } }, @@ -3226,7 +3255,7 @@ pub(super) fn value(context: &mut DefnContext, sp!(loc, pvalue_): P::Value) -> O PV::Num(s) => match parse_u256(&s) { Ok((u, _format)) => EV::InferredNum(u), Err(_) => { - context.env.add_diag(num_too_big_error( + context.add_diag(num_too_big_error( loc, "the largest possible integer type, 'u256'", )); @@ -3237,14 +3266,14 @@ pub(super) fn value(context: &mut DefnContext, sp!(loc, pvalue_): P::Value) -> O PV::HexString(s) => match hex_string::decode(loc, &s) { Ok(v) => EV::Bytearray(v), Err(e) => { - context.env.add_diag(*e); + context.add_diag(*e); return None; } }, PV::ByteString(s) => match byte_string::decode(loc, &s) { Ok(v) => EV::Bytearray(v), Err(e) => { - context.env.add_diags(e); + context.add_diags(e); return None; } }, @@ -3281,7 +3310,7 @@ fn named_fields( let mut fmap = UniqueMap::new(); for (idx, (field, x)) in xs.into_iter().enumerate() { if let Err((field, old_loc)) = fmap.add(field, (idx, x)) { - context.env().add_diag(diag!( + context.add_diag(diag!( Declarations::DuplicateItem, (loc, format!("Invalid {}", case)), ( @@ -3398,7 +3427,7 @@ fn lvalues(context: &mut Context, e: Box) -> Option { L::FieldMutate(dotted) } PE::Index(_, _) => { - context.env().add_diag(diag!( + context.add_diag(diag!( Syntax::InvalidLValue, ( loc, @@ -3425,14 +3454,14 @@ fn assign(context: &mut Context, sp!(loc, e_): P::Exp) -> Option { "If you are trying to unpack a struct, try adding fields, e.g.'{} {{}}'", name )); - context.env().add_diag(diag); + context.add_diag(diag); None } Some(access_result!(_, _ptys_opt, Some(_))) => { let msg = "Unexpected assignment of name with macro invocation"; let mut diag = diag!(Syntax::InvalidLValue, (loc, msg)); diag.add_note("Macro invocation '!' 
must appear on an invocation"); - context.env().add_diag(diag); + context.add_diag(diag); None } Some(access_result!(sp!(_, name @ M::Name(_)), None, None)) => { @@ -3445,7 +3474,7 @@ fn assign(context: &mut Context, sp!(loc, e_): P::Exp) -> Option { "If you are trying to unpack a struct, try adding fields, e.g.'{} {{}}'", name )); - context.env().add_diag(diag); + context.add_diag(diag); None } Some(access_result!(sp!(loc, M::Variant(_, _)), _tys_opt, _is_macro)) => { @@ -3457,7 +3486,7 @@ fn assign(context: &mut Context, sp!(loc, e_): P::Exp) -> Option { let msg = "Unexpected assignment of variant"; let mut diag = diag!(Syntax::InvalidLValue, (loc, msg)); diag.add_note("If you are trying to unpack an enum variant, use 'match'"); - context.env().add_diag(diag); + context.add_diag(diag); None } else { assert!(context.env().has_errors()); @@ -3507,7 +3536,7 @@ fn assign(context: &mut Context, sp!(loc, e_): P::Exp) -> Option { )) } _ => { - context.env().add_diag(diag!( + context.add_diag(diag!( Syntax::InvalidLValue, ( loc, diff --git a/external-crates/move/crates/move-compiler/src/hlir/ast.rs b/external-crates/move/crates/move-compiler/src/hlir/ast.rs index 36db981f504e8..91db32f3fde74 100644 --- a/external-crates/move/crates/move-compiler/src/hlir/ast.rs +++ b/external-crates/move/crates/move-compiler/src/hlir/ast.rs @@ -3,7 +3,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - diagnostics::WarningFilters, + diagnostics::warning_filters::WarningFilters, expansion::ast::{ ability_modifiers_ast_debug, AbilitySet, Attributes, Friend, ModuleIdent, Mutability, TargetKind, diff --git a/external-crates/move/crates/move-compiler/src/hlir/detect_dead_code.rs b/external-crates/move/crates/move-compiler/src/hlir/detect_dead_code.rs index 00e1f6109a5ee..37134f460b777 100644 --- a/external-crates/move/crates/move-compiler/src/hlir/detect_dead_code.rs +++ b/external-crates/move/crates/move-compiler/src/hlir/detect_dead_code.rs @@ -3,6 +3,10 @@ use crate::{ diag, + diagnostics::{ + warning_filters::{WarningFilters, WarningFiltersScope}, + Diagnostic, Diagnostics, + }, expansion::ast::ModuleIdent, ice, naming::ast::{self as N, BlockLabel}, @@ -188,15 +192,37 @@ impl ControlFlow { } struct Context<'env> { - env: &'env mut CompilationEnv, + env: &'env CompilationEnv, + warning_filters_scope: WarningFiltersScope, // loops: Vec, } impl<'env> Context<'env> { - pub fn new(env: &'env mut CompilationEnv) -> Self { + pub fn new(env: &'env CompilationEnv) -> Self { // let loops = vec![]; // Context { env , loops } - Context { env } + let warning_filters_scope = env.top_level_warning_filter_scope().clone(); + Context { + env, + warning_filters_scope, + } + } + + pub fn add_diag(&self, diag: Diagnostic) { + self.env.add_diag(&self.warning_filters_scope, diag); + } + + #[allow(unused)] + pub fn add_diags(&self, diags: Diagnostics) { + self.env.add_diags(&self.warning_filters_scope, diags); + } + + pub fn push_warning_filter_scope(&mut self, filters: WarningFilters) { + self.warning_filters_scope.push(filters) + } + + pub fn pop_warning_filter_scope(&mut self) { + self.warning_filters_scope.pop() } fn maybe_report_value_error(&mut self, error: &mut ControlFlow) -> bool { @@ -208,8 +234,7 @@ impl<'env> Context<'env> { reported, } if !*reported => { *reported = true; - self.env - .add_diag(diag!(UnusedItem::DeadCode, (*loc, VALUE_UNREACHABLE_MSG))); + self.add_diag(diag!(UnusedItem::DeadCode, (*loc, VALUE_UNREACHABLE_MSG))); true } CF::Divergent { .. 
} | CF::None | CF::Possible => false, @@ -225,8 +250,7 @@ impl<'env> Context<'env> { reported, } if !*reported => { *reported = true; - self.env - .add_diag(diag!(UnusedItem::DeadCode, (*loc, DIVERGENT_MSG))); + self.add_diag(diag!(UnusedItem::DeadCode, (*loc, DIVERGENT_MSG))); true } CF::Divergent { .. } | CF::None | CF::Possible => false, @@ -250,7 +274,7 @@ impl<'env> Context<'env> { if let Some(next_loc) = next_stmt { diag.add_secondary_label((*next_loc, UNREACHABLE_MSG)); } - self.env.add_diag(diag); + self.add_diag(diag); true } CF::Divergent { .. } | CF::None | CF::Possible => false, @@ -271,7 +295,7 @@ impl<'env> Context<'env> { reported, } if !*reported => { *reported = true; - self.env.add_diag(diag!( + self.add_diag(diag!( UnusedItem::TrailingSemi, (tail_exp.exp.loc, SEMI_MSG), (*loc, DIVERGENT_MSG), @@ -344,7 +368,7 @@ fn infinite_loop(loc: Loc) -> ControlFlow { // Entry //************************************************************************************************** -pub fn program(compilation_env: &mut CompilationEnv, prog: &T::Program) { +pub fn program(compilation_env: &CompilationEnv, prog: &T::Program) { let mut context = Context::new(compilation_env); modules(&mut context, &prog.modules); } @@ -356,16 +380,14 @@ fn modules(context: &mut Context, modules: &UniqueMap ControlFlow { // ----------------------------------------------------------------------------------------- // control flow statements // ----------------------------------------------------------------------------------------- - E::IfElse(test, conseq, alt) => do_if( + E::IfElse(test, conseq, alt_opt) => do_if( context, - (eloc, test, conseq, alt), + (eloc, test, conseq, alt_opt.as_deref()), /* tail_pos */ true, tail, |context, flow| context.maybe_report_tail_error(flow), @@ -452,9 +472,7 @@ fn tail(context: &mut Context, e: &T::Exp) -> ControlFlow { |context, flow| context.maybe_report_tail_error(flow), ), E::VariantMatch(..) => { - context - .env - .add_diag(ice!((*eloc, "Found variant match in detect_dead_code"))); + context.add_diag(ice!((*eloc, "Found variant match in detect_dead_code"))); CF::None } @@ -508,7 +526,7 @@ fn tail_block(context: &mut Context, seq: &VecDeque) -> Control None => ControlFlow::None, Some(sp!(_, S::Seq(last))) => tail(context, last), Some(sp!(loc, _)) => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( *loc, "ICE last sequence item should have been an exp in dead code analysis" ))); @@ -547,7 +565,7 @@ fn value(context: &mut Context, e: &T::Exp) -> ControlFlow { // ----------------------------------------------------------------------------------------- E::IfElse(test, conseq, alt) => do_if( context, - (eloc, test, conseq, alt), + (eloc, test, conseq, alt.as_deref()), /* tail_pos */ false, value, |context, flow| context.maybe_report_value_error(flow), @@ -560,9 +578,7 @@ fn value(context: &mut Context, e: &T::Exp) -> ControlFlow { |context, flow| context.maybe_report_value_error(flow), ), E::VariantMatch(_subject, _, _arms) => { - context - .env - .add_diag(ice!((*eloc, "Found variant match in detect_dead_code"))); + context.add_diag(ice!((*eloc, "Found variant match in detect_dead_code"))); CF::None } E::While(..) 
=> statement(context, e), @@ -618,7 +634,7 @@ fn value(context: &mut Context, e: &T::Exp) -> ControlFlow { context.maybe_report_value_error(&mut flow); } T::ExpListItem::Splat(_, _, _) => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( *eloc, "ICE splat exp unsupported by dead code analysis" ))); @@ -686,7 +702,7 @@ fn value_block(context: &mut Context, seq: &VecDeque) -> Contro None => ControlFlow::None, Some(sp!(_, S::Seq(last))) => value(context, last), Some(sp!(loc, _)) => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( *loc, "ICE last sequence item should have been an exp in dead code analysis" ))); @@ -726,7 +742,7 @@ fn statement(context: &mut Context, e: &T::Exp) -> ControlFlow { // about the final, total view of them. E::IfElse(test, conseq, alt) => do_if( context, - (eloc, test, conseq, alt), + (eloc, test, conseq, alt.as_deref()), /* tail_pos */ false, statement, |_, _| false, @@ -739,9 +755,7 @@ fn statement(context: &mut Context, e: &T::Exp) -> ControlFlow { |_, _| false, ), E::VariantMatch(_subject, _, _arms) => { - context - .env - .add_diag(ice!((*eloc, "Found variant match in detect_dead_code"))); + context.add_diag(ice!((*eloc, "Found variant match in detect_dead_code"))); CF::None } E::While(name, test, body) => { @@ -826,9 +840,7 @@ fn statement(context: &mut Context, e: &T::Exp) -> ControlFlow { // odds and ends -- things we need to deal with but that don't do much // ----------------------------------------------------------------------------------------- E::Use(_) => { - context - .env - .add_diag(ice!((*eloc, "ICE found unexpanded use"))); + context.add_diag(ice!((*eloc, "ICE found unexpanded use"))); CF::None } } @@ -910,7 +922,7 @@ fn has_trailing_unit(seq: &VecDeque) -> bool { fn do_if( context: &mut Context, - (loc, test, conseq, alt): (&Loc, &T::Exp, &T::Exp, &T::Exp), + (loc, test, conseq, alt_opt): (&Loc, &T::Exp, &T::Exp, Option<&T::Exp>), tail_pos: bool, arm_recur: F1, arm_error: F2, @@ -926,10 +938,15 @@ where }; let conseq_flow = arm_recur(context, conseq); - let alt_flow = arm_recur(context, alt); + let alt_flow = alt_opt + .map(|alt| arm_recur(context, alt)) + .unwrap_or(CF::None); if tail_pos && matches!(conseq.ty, sp!(_, N::Type_::Unit | N::Type_::Anything)) - && matches!(alt.ty, sp!(_, N::Type_::Unit | N::Type_::Anything)) + && matches!( + alt_opt.map(|alt| &alt.ty), + None | Some(sp!(_, N::Type_::Unit | N::Type_::Anything)) + ) { return CF::None; }; diff --git a/external-crates/move/crates/move-compiler/src/hlir/match_compilation.rs b/external-crates/move/crates/move-compiler/src/hlir/match_compilation.rs index 2ffad708230e8..2783fce778fd9 100644 --- a/external-crates/move/crates/move-compiler/src/hlir/match_compilation.rs +++ b/external-crates/move/crates/move-compiler/src/hlir/match_compilation.rs @@ -724,7 +724,7 @@ fn make_arm_unpack( let Some((queue_entries, unpack)) = arm_variant_unpack(context, None, ploc, m, e, tys, v, fs, entry) else { - context.hlir_context.env.add_diag(ice!(( + context.hlir_context.add_diag(ice!(( ploc, "Did not build an arm unpack for a value variant" ))); @@ -750,7 +750,7 @@ fn make_arm_unpack( let Some((queue_entries, unpack)) = arm_struct_unpack(context, None, ploc, m, s, tys, fs, entry) else { - context.hlir_context.env.add_diag(ice!(( + context.hlir_context.add_diag(ice!(( ploc, "Did not build an arm unpack for a value struct" ))); @@ -1277,7 +1277,7 @@ fn make_if_else(test: T::Exp, conseq: T::Exp, alt: T::Exp, result_ty: Type) -> T result_ty, sp( loc, - T::UnannotatedExp_::IfElse(Box::new(test), 
Box::new(conseq), Box::new(alt)), + T::UnannotatedExp_::IfElse(Box::new(test), Box::new(conseq), Some(Box::new(alt))), ), ) } diff --git a/external-crates/move/crates/move-compiler/src/hlir/translate.rs b/external-crates/move/crates/move-compiler/src/hlir/translate.rs index 26c07e9d20152..1c5441c387c6f 100644 --- a/external-crates/move/crates/move-compiler/src/hlir/translate.rs +++ b/external-crates/move/crates/move-compiler/src/hlir/translate.rs @@ -4,6 +4,10 @@ use crate::{ debug_display, debug_display_verbose, diag, + diagnostics::{ + warning_filters::{WarningFilters, WarningFiltersScope}, + Diagnostic, Diagnostics, + }, editions::{FeatureGate, Flavor}, expansion::ast::{self as E, Fields, ModuleIdent, Mutability, TargetKind}, hlir::{ @@ -127,9 +131,10 @@ pub(super) struct HLIRDebugFlags { } pub(super) struct Context<'env> { - pub env: &'env mut CompilationEnv, + pub env: &'env CompilationEnv, pub info: Arc, pub debug: HLIRDebugFlags, + warning_filters_scope: WarningFiltersScope, current_package: Option, function_locals: UniqueMap, signature: Option, @@ -142,7 +147,7 @@ pub(super) struct Context<'env> { impl<'env> Context<'env> { pub fn new( - env: &'env mut CompilationEnv, + env: &'env CompilationEnv, _pre_compiled_lib_opt: Option>, prog: &T::Program, ) -> Self { @@ -154,8 +159,10 @@ impl<'env> Context<'env> { match_specialization: false, match_work_queue: false, }; + let warning_filters_scope = env.top_level_warning_filter_scope().clone(); Context { env, + warning_filters_scope, info: prog.info.clone(), debug, current_package: None, @@ -168,6 +175,23 @@ impl<'env> Context<'env> { } } + pub fn add_diag(&self, diag: Diagnostic) { + self.env.add_diag(&self.warning_filters_scope, diag); + } + + #[allow(unused)] + pub fn add_diags(&self, diags: Diagnostics) { + self.env.add_diags(&self.warning_filters_scope, diags); + } + + pub fn push_warning_filter_scope(&mut self, filters: WarningFilters) { + self.warning_filters_scope.push(filters) + } + + pub fn pop_warning_filter_scope(&mut self) { + self.warning_filters_scope.pop() + } + pub fn has_empty_locals(&self) -> bool { self.function_locals.is_empty() } @@ -250,7 +274,7 @@ impl<'env> Context<'env> { } impl MatchContext for Context<'_> { - fn env(&mut self) -> &mut CompilationEnv { + fn env(&mut self) -> &CompilationEnv { self.env } @@ -288,7 +312,7 @@ impl MatchContext for Context<'_> { //************************************************************************************************** pub fn program( - compilation_env: &mut CompilationEnv, + compilation_env: &CompilationEnv, pre_compiled_lib: Option>, prog: T::Program, ) -> H::Program { @@ -337,7 +361,7 @@ fn module( constants: tconstants, } = mdef; context.current_package = package_name; - context.env.add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); let structs = tstructs.map(|name, s| struct_def(context, name, s)); let enums = tenums.map(|name, s| enum_def(context, name, s)); @@ -353,7 +377,7 @@ fn module( gen_unused_warnings(context, target_kind, &structs); context.current_package = None; - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); ( module_ident, H::ModuleDefinition { @@ -391,10 +415,10 @@ fn function(context: &mut Context, _name: FunctionName, f: T::Function) -> H::Fu body, } = f; assert!(macro_.is_none(), "ICE macros filtered above"); - context.env.add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); let signature = 
function_signature(context, signature); let body = function_body(context, &signature, _name, body); - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); H::Function { warning_filter, index, @@ -499,7 +523,7 @@ fn constant(context: &mut Context, _name: ConstantName, cdef: T::Constant) -> H: signature: tsignature, value: tvalue, } = cdef; - context.env.add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); let signature = base_type(context, tsignature); let eloc = tvalue.exp.loc; let tseq = { @@ -513,7 +537,7 @@ fn constant(context: &mut Context, _name: ConstantName, cdef: T::Constant) -> H: return_type: H::Type_::base(signature.clone()), }; let (locals, body) = function_body_defined(context, &function_signature, loc, tseq); - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); H::Constant { warning_filter, index, @@ -542,9 +566,9 @@ fn struct_def( type_parameters, fields, } = sdef; - context.env.add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); let fields = struct_fields(context, fields); - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); H::StructDefinition { warning_filter, index, @@ -586,13 +610,13 @@ fn enum_def( type_parameters, variants, } = edef; - context.env.add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); let variants = variants.map(|_, defn| H::VariantDefinition { index: defn.index, loc: defn.loc, fields: variant_fields(context, defn.fields), }); - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); H::EnumDefinition { warning_filter, index, @@ -648,7 +672,7 @@ fn base_type(context: &mut Context, sp!(loc, nb_): N::Type) -> H::BaseType { use N::Type_ as NT; let b_ = match nb_ { NT::Var(_) => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( loc, format!( "ICE type inf. 
var not expanded: {}", @@ -658,7 +682,7 @@ fn base_type(context: &mut Context, sp!(loc, nb_): N::Type) -> H::BaseType { return error_base_type(loc); } NT::Apply(None, _, _) => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( loc, format!("ICE kind not expanded: {}", debug_display_verbose!(nb_)) ))); @@ -669,7 +693,7 @@ fn base_type(context: &mut Context, sp!(loc, nb_): N::Type) -> H::BaseType { NT::UnresolvedError => HB::UnresolvedError, NT::Anything => HB::Unreachable, NT::Ref(_, _) | NT::Unit | NT::Fun(_, _) => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( loc, format!( "ICE base type constraint failed: {}", @@ -716,7 +740,7 @@ fn type_(context: &mut Context, sp!(loc, ty_): N::Type) -> H::Type { let t_ = match ty_ { NT::Unit => HT::Unit, NT::Apply(None, _, _) => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( loc, format!("ICE kind not expanded: {}", debug_display_verbose!(ty_)) ))); @@ -793,11 +817,12 @@ fn tail( // ----------------------------------------------------------------------------------------- // control flow statements // ----------------------------------------------------------------------------------------- - E::IfElse(test, conseq, alt) => { + E::IfElse(test, conseq, alt_opt) => { let cond = value(context, block, Some(&tbool(eloc)), *test); let mut if_block = make_block!(); let conseq_exp = tail(context, &mut if_block, Some(&out_type), *conseq); let mut else_block = make_block!(); + let alt = alt_opt.unwrap_or_else(|| Box::new(typing_unit_exp(eloc))); let alt_exp = tail(context, &mut else_block, Some(&out_type), *alt); let (binders, bound_exp) = make_binders(context, eloc, out_type.clone()); @@ -971,9 +996,7 @@ fn tail( | E::Continue(_) | E::Assign(_, _, _) | E::Mutate(_, _) => { - context - .env - .add_diag(ice!((eloc, "ICE statement mishandled in HLIR lowering"))); + context.add_diag(ice!((eloc, "ICE statement mishandled in HLIR lowering"))); None } @@ -1003,9 +1026,7 @@ fn tail_block( None => None, Some(sp!(_, S::Seq(last))) => tail(context, block, expected_type, *last), Some(sp!(loc, _)) => { - context - .env - .add_diag(ice!((loc, "ICE statement mishandled in HLIR lowering"))); + context.add_diag(ice!((loc, "ICE statement mishandled in HLIR lowering"))); None } } @@ -1067,18 +1088,14 @@ fn value( let [cond_item, code_item]: [TI; 2] = match arguments.exp.value { E::ExpList(arg_list) => arg_list.try_into().unwrap(), _ => { - context - .env - .add_diag(ice!((eloc, "ICE type checking assert failed"))); + context.add_diag(ice!((eloc, "ICE type checking assert failed"))); return error_exp(eloc); } }; let (econd, ecode) = match (cond_item, code_item) { (TI::Single(econd, _), TI::Single(ecode, _)) => (econd, ecode), _ => { - context - .env - .add_diag(ice!((eloc, "ICE type checking assert failed"))); + context.add_diag(ice!((eloc, "ICE type checking assert failed"))); return error_exp(eloc); } }; @@ -1105,18 +1122,14 @@ fn value( let [cond_item, code_item]: [TI; 2] = match arguments.exp.value { E::ExpList(arg_list) => arg_list.try_into().unwrap(), _ => { - context - .env - .add_diag(ice!((eloc, "ICE type checking assert failed"))); + context.add_diag(ice!((eloc, "ICE type checking assert failed"))); return error_exp(eloc); } }; let (econd, ecode) = match (cond_item, code_item) { (TI::Single(econd, _), TI::Single(ecode, _)) => (econd, ecode), _ => { - context - .env - .add_diag(ice!((eloc, "ICE type checking assert failed"))); + context.add_diag(ice!((eloc, "ICE type checking assert failed"))); return error_exp(eloc); } }; @@ -1139,13 +1152,13 
@@ fn value( // ----------------------------------------------------------------------------------------- // control flow statements // ----------------------------------------------------------------------------------------- - E::IfElse(test, conseq, alt) => { + E::IfElse(test, conseq, alt_opt) => { let cond = value(context, block, Some(&tbool(eloc)), *test); let mut if_block = make_block!(); let conseq_exp = value(context, &mut if_block, Some(&out_type), *conseq); let mut else_block = make_block!(); + let alt = alt_opt.unwrap_or_else(|| Box::new(typing_unit_exp(eloc))); let alt_exp = value(context, &mut else_block, Some(&out_type), *alt); - let (binders, bound_exp) = make_binders(context, eloc, out_type.clone()); let arms_unreachable = conseq_exp.is_unreachable() && alt_exp.is_unreachable(); @@ -1513,7 +1526,7 @@ fn value( var, } => var, _ => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( eloc, format!( "ICE invalid bind_exp for single value: {}", @@ -1537,7 +1550,7 @@ fn value( | Some(bt @ sp!(_, BT::U128)) | Some(bt @ sp!(_, BT::U256)) => *bt, _ => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( eloc, format!( "ICE typing failed for cast: {} : {}", @@ -1602,9 +1615,7 @@ fn value( | E::Continue(_) | E::Assign(_, _, _) | E::Mutate(_, _) => { - context - .env - .add_diag(ice!((eloc, "ICE statement mishandled in HLIR lowering"))); + context.add_diag(ice!((eloc, "ICE statement mishandled in HLIR lowering"))); error_exp(eloc) } @@ -1612,7 +1623,7 @@ fn value( // odds and ends -- things we need to deal with but that don't do much // ----------------------------------------------------------------------------------------- E::Use(_) => { - context.env.add_diag(ice!((eloc, "ICE unexpanded use"))); + context.add_diag(ice!((eloc, "ICE unexpanded use"))); error_exp(eloc) } E::UnresolvedError => { @@ -1636,15 +1647,11 @@ fn value_block( match last_exp { Some(sp!(_, S::Seq(last))) => value(context, block, expected_type, *last), Some(sp!(loc, _)) => { - context - .env - .add_diag(ice!((loc, "ICE last sequence item should be an exp"))); + context.add_diag(ice!((loc, "ICE last sequence item should be an exp"))); error_exp(loc) } None => { - context - .env - .add_diag(ice!((seq_loc, "ICE empty sequence in value position"))); + context.add_diag(ice!((seq_loc, "ICE empty sequence in value position"))); error_exp(seq_loc) } } @@ -1809,11 +1816,12 @@ fn statement(context: &mut Context, block: &mut Block, e: T::Exp) { // ----------------------------------------------------------------------------------------- // control flow statements // ----------------------------------------------------------------------------------------- - E::IfElse(test, conseq, alt) => { + E::IfElse(test, conseq, alt_opt) => { let cond = value(context, block, Some(&tbool(eloc)), *test); let mut if_block = make_block!(); statement(context, &mut if_block, *conseq); let mut else_block = make_block!(); + let alt = alt_opt.unwrap_or_else(|| Box::new(typing_unit_exp(eloc))); statement(context, &mut else_block, *alt); block.push_back(sp( eloc, @@ -1978,7 +1986,7 @@ fn statement(context: &mut Context, block: &mut Block, e: T::Exp) { // odds and ends -- things we need to deal with but that don't do much // ----------------------------------------------------------------------------------------- E::Use(_) => { - context.env.add_diag(ice!((eloc, "ICE unexpanded use"))); + context.add_diag(ice!((eloc, "ICE unexpanded use"))); } } } @@ -2043,6 +2051,13 @@ fn tunit(loc: Loc) -> H::Type { sp(loc, H::Type_::Unit) } +fn 
typing_unit_exp(loc: Loc) -> T::Exp { + T::exp( + sp(loc, N::Type_::Unit), + sp(loc, T::UnannotatedExp_::Unit { trailing: false }), + ) +} + fn unit_exp(loc: Loc) -> H::Exp { H::exp( tunit(loc), @@ -2494,7 +2509,7 @@ fn bind_value_in_block( match lvalue { H::LValue_::Var { .. } => (), lv => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( *loc, format!( "ICE tried bind_value for non-var lvalue {}", @@ -2594,9 +2609,7 @@ fn process_value(context: &mut Context, sp!(loc, ev_): E::Value) -> H::Value { use H::Value_ as HV; let v_ = match ev_ { EV::InferredNum(_) => { - context - .env - .add_diag(ice!((loc, "ICE not expanded to value"))); + context.add_diag(ice!((loc, "ICE not expanded to value"))); HV::U64(0) } EV::Address(a) => HV::Address(a.into_addr_bytes()), @@ -2932,7 +2945,7 @@ fn needs_freeze( format!("Expected type: {}", debug_display_verbose!(_expected)) ), ); - context.env.add_diag(diag); + context.add_diag(diag); } Freeze::NotNeeded } @@ -2973,7 +2986,7 @@ fn freeze(context: &mut Context, expected_type: &H::Type, e: H::Exp) -> (Block, "ICE list item has Multple type: {}", debug_display_verbose!(e.ty) ); - context.env.add_diag(ice!((e.ty.loc, msg))); + context.add_diag(ice!((e.ty.loc, msg))); H::SingleType_::base(error_base_type(e.ty.loc)) } }) @@ -3035,9 +3048,7 @@ fn gen_unused_warnings( let is_sui_mode = context.env.package_config(context.current_package).flavor == Flavor::Sui; for (_, sname, sdef) in structs { - context - .env - .add_warning_filter_scope(sdef.warning_filter.clone()); + context.push_warning_filter_scope(sdef.warning_filter.clone()); let has_key = sdef.abilities.has_ability_(Ability_::Key); @@ -3053,13 +3064,11 @@ fn gen_unused_warnings( .is_some_and(|names| names.contains(&f.value())) { let msg = format!("The '{}' field of the '{sname}' type is unused", f.value()); - context - .env - .add_diag(diag!(UnusedItem::StructField, (f.loc(), msg))); + context.add_diag(diag!(UnusedItem::StructField, (f.loc(), msg))); } } } - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); } } diff --git a/external-crates/move/crates/move-compiler/src/linters/abort_constant.rs b/external-crates/move/crates/move-compiler/src/linters/abort_constant.rs index cbdde8085887b..49f8399e8324c 100644 --- a/external-crates/move/crates/move-compiler/src/linters/abort_constant.rs +++ b/external-crates/move/crates/move-compiler/src/linters/abort_constant.rs @@ -6,6 +6,7 @@ use move_ir_types::location::Loc; use move_symbol_pool::Symbol; +use crate::diagnostics::warning_filters::{WarningFilters, WarningFiltersScope}; use crate::linters::StyleCodes; use crate::{ cfgir::{ @@ -13,7 +14,7 @@ use crate::{ visitor::{CFGIRVisitorConstructor, CFGIRVisitorContext}, }, diag, - diagnostics::WarningFilters, + diagnostics::{Diagnostic, Diagnostics}, editions::FeatureGate, hlir::ast as H, shared::CompilationEnv, @@ -23,29 +24,46 @@ pub struct AssertAbortNamedConstants; pub struct Context<'a> { package_name: Option, - env: &'a mut CompilationEnv, + env: &'a CompilationEnv, + warning_filters_scope: WarningFiltersScope, } impl CFGIRVisitorConstructor for AssertAbortNamedConstants { type Context<'a> = Context<'a>; - fn context<'a>(env: &'a mut CompilationEnv, program: &G::Program) -> Self::Context<'a> { + fn context<'a>(env: &'a CompilationEnv, program: &G::Program) -> Self::Context<'a> { let package_name = program .modules .iter() .next() .and_then(|(_, _, mdef)| mdef.package_name); - Context { env, package_name } + let warning_filters_scope = 
env.top_level_warning_filter_scope().clone(); + Context { + env, + warning_filters_scope, + package_name, + } + } +} + +impl Context<'_> { + fn add_diag(&self, diag: Diagnostic) { + self.env.add_diag(&self.warning_filters_scope, diag); + } + + #[allow(unused)] + fn add_diags(&self, diags: Diagnostics) { + self.env.add_diags(&self.warning_filters_scope, diags); } } impl CFGIRVisitorContext for Context<'_> { - fn add_warning_filter_scope(&mut self, filter: WarningFilters) { - self.env.add_warning_filter_scope(filter) + fn push_warning_filter_scope(&mut self, filters: WarningFilters) { + self.warning_filters_scope.push(filters) } fn pop_warning_filter_scope(&mut self) { - self.env.pop_warning_filter_scope() + self.warning_filters_scope.pop() } fn visit_command_custom(&mut self, cmd: &H::Command) -> bool { @@ -76,7 +94,7 @@ impl Context<'_> { diag.add_note("Consider using an error constant with the '#[error]' to allow for a more descriptive error."); } - self.env.add_diag(diag); + self.add_diag(diag); } } } diff --git a/external-crates/move/crates/move-compiler/src/linters/constant_naming.rs b/external-crates/move/crates/move-compiler/src/linters/constant_naming.rs index a5960014b5c29..2fb17efe6fba6 100644 --- a/external-crates/move/crates/move-compiler/src/linters/constant_naming.rs +++ b/external-crates/move/crates/move-compiler/src/linters/constant_naming.rs @@ -6,30 +6,14 @@ //! within a module against this convention. use crate::{ diag, - diagnostics::WarningFilters, expansion::ast::ModuleIdent, linters::StyleCodes, parser::ast::ConstantName, - shared::CompilationEnv, - typing::{ - ast as T, - visitor::{TypingVisitorConstructor, TypingVisitorContext}, - }, + typing::{ast as T, visitor::simple_visitor}, }; -pub struct ConstantNamingVisitor; -pub struct Context<'a> { - env: &'a mut CompilationEnv, -} -impl TypingVisitorConstructor for ConstantNamingVisitor { - type Context<'a> = Context<'a>; - - fn context<'a>(env: &'a mut CompilationEnv, _program: &T::Program) -> Self::Context<'a> { - Context { env } - } -} - -impl TypingVisitorContext for Context<'_> { +simple_visitor!( + ConstantNamingVisitor, fn visit_constant_custom( &mut self, _module: ModuleIdent, @@ -41,19 +25,11 @@ impl TypingVisitorContext for Context<'_> { let uid_msg = format!("'{name}' should be ALL_CAPS. Or for error constants, use PascalCase",); let diagnostic = diag!(StyleCodes::ConstantNaming.diag_info(), (cdef.loc, uid_msg)); - self.env.add_diag(diagnostic); + self.add_diag(diagnostic); } false } - - fn add_warning_filter_scope(&mut self, filter: WarningFilters) { - self.env.add_warning_filter_scope(filter) - } - - fn pop_warning_filter_scope(&mut self) { - self.env.pop_warning_filter_scope() - } -} +); /// Returns `true` if the string is in all caps snake case, including numeric characters. 
fn is_valid_name(name: &str) -> bool { diff --git a/external-crates/move/crates/move-compiler/src/linters/loop_without_exit.rs b/external-crates/move/crates/move-compiler/src/linters/loop_without_exit.rs index 17935658b908c..3a70cccdba7a9 100644 --- a/external-crates/move/crates/move-compiler/src/linters/loop_without_exit.rs +++ b/external-crates/move/crates/move-compiler/src/linters/loop_without_exit.rs @@ -4,36 +4,14 @@ use super::StyleCodes; use crate::{ diag, - diagnostics::WarningFilters, - shared::CompilationEnv, typing::{ ast::{self as T, UnannotatedExp_}, - visitor::{exp_satisfies, TypingVisitorConstructor, TypingVisitorContext}, + visitor::{exp_satisfies, simple_visitor}, }, }; -pub struct LoopWithoutExit; - -pub struct Context<'a> { - env: &'a mut CompilationEnv, -} - -impl TypingVisitorConstructor for LoopWithoutExit { - type Context<'a> = Context<'a>; - - fn context<'a>(env: &'a mut CompilationEnv, _program: &T::Program) -> Self::Context<'a> { - Context { env } - } -} - -impl TypingVisitorContext for Context<'_> { - fn add_warning_filter_scope(&mut self, filter: WarningFilters) { - self.env.add_warning_filter_scope(filter) - } - fn pop_warning_filter_scope(&mut self) { - self.env.pop_warning_filter_scope() - } - +simple_visitor!( + LoopWithoutExit, fn visit_exp_custom(&mut self, exp: &T::Exp) -> bool { // we do not care about `while` since there is another lint that handles reporting // that `while (true)` should be `loop` @@ -57,10 +35,10 @@ impl TypingVisitorContext for Context<'_> { This code will until it errors, e.g. reaching an 'abort' or running out of gas" ) ); - self.env.add_diag(diag); + self.add_diag(diag); false } -} +); fn has_return(e: &T::Exp) -> bool { exp_satisfies(e, |e| matches!(e.exp.value, UnannotatedExp_::Return(_))) diff --git a/external-crates/move/crates/move-compiler/src/linters/meaningless_math_operation.rs b/external-crates/move/crates/move-compiler/src/linters/meaningless_math_operation.rs index de5d816695894..21fc0a9e19bcb 100644 --- a/external-crates/move/crates/move-compiler/src/linters/meaningless_math_operation.rs +++ b/external-crates/move/crates/move-compiler/src/linters/meaningless_math_operation.rs @@ -4,40 +4,17 @@ //! Detects meaningless math operations like `x * 0`, `x << 0`, `x >> 0`, `x * 1`, `x + 0`, `x - 0` //! Aims to reduce code redundancy and improve clarity by flagging operations with no effect. 
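// --- Illustrative sketch (not part of the diff) ----------------------------------------------
// A self-contained model of the classification this lint performs, using plain integers instead
// of the compiler's HLIR expressions. `classify_literal_operand` is a hypothetical helper
// invented for illustration: an operation with a literal operand is flagged when the literal
// leaves the other operand unchanged (`x * 1`, `x + 0`, `x - 0`, `x << 0`, `x >> 0`) or when the
// result cannot depend on it at all (`x * 0`).
#[derive(Debug, PartialEq)]
enum MathLintVerdict {
    Unchanged,  // the operation has no effect and can be removed
    AlwaysZero, // the operation always evaluates to zero
    Fine,
}

fn classify_literal_operand(op: &str, literal: u128) -> MathLintVerdict {
    match (op, literal) {
        ("*", 1) | ("+", 0) | ("-", 0) | ("<<", 0) | (">>", 0) => MathLintVerdict::Unchanged,
        ("*", 0) => MathLintVerdict::AlwaysZero,
        _ => MathLintVerdict::Fine,
    }
}

#[test]
fn classify_literal_operand_examples() {
    assert_eq!(classify_literal_operand("*", 1), MathLintVerdict::Unchanged); // `x * 1`
    assert_eq!(classify_literal_operand("*", 0), MathLintVerdict::AlwaysZero); // `x * 0`
    assert_eq!(classify_literal_operand("+", 7), MathLintVerdict::Fine);
}
// ----------------------------------------------------------------------------------------------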
use crate::{ - cfgir::ast as G, - cfgir::visitor::{CFGIRVisitorConstructor, CFGIRVisitorContext}, + cfgir::visitor::simple_visitor, diag, - diagnostics::WarningFilters, hlir::ast::{self as H, Value_}, linters::StyleCodes, parser::ast::BinOp_, - shared::CompilationEnv, }; use move_core_types::u256::U256; use move_ir_types::location::Loc; -pub struct MeaninglessMathOperation; - -pub struct Context<'a> { - env: &'a mut CompilationEnv, -} - -impl CFGIRVisitorConstructor for MeaninglessMathOperation { - type Context<'a> = Context<'a>; - - fn context<'a>(env: &'a mut CompilationEnv, _program: &G::Program) -> Self::Context<'a> { - Context { env } - } -} - -impl CFGIRVisitorContext for Context<'_> { - fn add_warning_filter_scope(&mut self, filter: WarningFilters) { - self.env.add_warning_filter_scope(filter) - } - fn pop_warning_filter_scope(&mut self) { - self.env.pop_warning_filter_scope() - } - +simple_visitor!( + MeaninglessMathOperation, fn visit_exp_custom(&mut self, exp: &H::Exp) -> bool { let H::UnannotatedExp_::BinopExp(lhs, op, rhs) = &exp.exp.value else { return false; @@ -54,7 +31,7 @@ impl CFGIRVisitorContext for Context<'_> { }; if let Some(meaningless_operand) = is_unchanged { let msg = "This operation has no effect and can be removed"; - self.env.add_diag(diag!( + self.add_diag(diag!( StyleCodes::MeaninglessMath.diag_info(), (exp.exp.loc, msg), (meaningless_operand, "Because of this operand"), @@ -70,7 +47,7 @@ impl CFGIRVisitorContext for Context<'_> { }; if let Some(zero_operand) = is_always_zero { let msg = "This operation is always zero and can be replaced with '0'"; - self.env.add_diag(diag!( + self.add_diag(diag!( StyleCodes::MeaninglessMath.diag_info(), (exp.exp.loc, msg), (zero_operand, "Because of this operand"), @@ -84,7 +61,7 @@ impl CFGIRVisitorContext for Context<'_> { }; if let Some(one_operand) = is_always_one { let msg = "This operation is always one and can be replaced with '1'"; - self.env.add_diag(diag!( + self.add_diag(diag!( StyleCodes::MeaninglessMath.diag_info(), (exp.exp.loc, msg), (one_operand, "Because of this operand"), @@ -95,7 +72,7 @@ impl CFGIRVisitorContext for Context<'_> { false } -} +); fn is_zero(exp: &H::Exp) -> Option { let H::UnannotatedExp_::Value(sp!(loc, value_)) = &exp.exp.value else { diff --git a/external-crates/move/crates/move-compiler/src/linters/mod.rs b/external-crates/move/crates/move-compiler/src/linters/mod.rs index 1aaf95a668c27..6f02e2e2cd4a9 100644 --- a/external-crates/move/crates/move-compiler/src/linters/mod.rs +++ b/external-crates/move/crates/move-compiler/src/linters/mod.rs @@ -6,8 +6,10 @@ use move_symbol_pool::Symbol; use crate::{ cfgir::visitor::CFGIRVisitor, command_line::compiler::Visitor, - diagnostics::codes::WarningFilter, - diagnostics::codes::{custom, DiagnosticInfo, Severity}, + diagnostics::{ + codes::{custom, DiagnosticInfo, Severity}, + warning_filters::WarningFilter, + }, typing::visitor::TypingVisitor, }; @@ -18,6 +20,7 @@ pub mod meaningless_math_operation; pub mod redundant_ref_deref; pub mod self_assignment; pub mod unnecessary_conditional; +pub mod unnecessary_unit; pub mod unnecessary_while_loop; pub mod unneeded_return; @@ -152,7 +155,13 @@ lints!( LinterDiagnosticCategory::Complexity, "redundant_ref_deref", "redundant reference/dereference" - ) + ), + ( + UnnecessaryUnit, + LinterDiagnosticCategory::Style, + "unnecessary_unit", + "unit `()` expression can be removed or simplified" + ), ); pub const ALLOW_ATTR_CATEGORY: &str = "lint"; @@ -189,6 +198,7 @@ pub fn linter_visitors(level: LintLevel) -> 
Vec { unnecessary_conditional::UnnecessaryConditional.visitor(), self_assignment::SelfAssignmentVisitor.visitor(), redundant_ref_deref::RedundantRefDerefVisitor.visitor(), + unnecessary_unit::UnnecessaryUnit.visitor(), ] } } diff --git a/external-crates/move/crates/move-compiler/src/linters/redundant_ref_deref.rs b/external-crates/move/crates/move-compiler/src/linters/redundant_ref_deref.rs index cf1ed98301767..4029661735e0a 100644 --- a/external-crates/move/crates/move-compiler/src/linters/redundant_ref_deref.rs +++ b/external-crates/move/crates/move-compiler/src/linters/redundant_ref_deref.rs @@ -8,41 +8,19 @@ use crate::linters::StyleCodes; use crate::{ diag, - diagnostics::WarningFilters, - shared::CompilationEnv, typing::{ - ast::{self as T, Exp, UnannotatedExp_ as TE}, - visitor::{TypingVisitorConstructor, TypingVisitorContext}, + ast::{Exp, UnannotatedExp_ as TE}, + visitor::simple_visitor, }, }; -pub struct RedundantRefDerefVisitor; - -pub struct Context<'a> { - env: &'a mut CompilationEnv, -} - -impl TypingVisitorConstructor for RedundantRefDerefVisitor { - type Context<'a> = Context<'a>; - - fn context<'a>(env: &'a mut CompilationEnv, _program: &T::Program) -> Self::Context<'a> { - Context { env } - } -} - -impl TypingVisitorContext for Context<'_> { - fn add_warning_filter_scope(&mut self, filter: WarningFilters) { - self.env.add_warning_filter_scope(filter) - } - fn pop_warning_filter_scope(&mut self) { - self.env.pop_warning_filter_scope() - } - +simple_visitor!( + RedundantRefDerefVisitor, fn visit_exp_custom(&mut self, exp: &Exp) -> bool { self.check_redundant_ref_deref(exp); false } -} +); impl Context<'_> { // Check for &* pattern @@ -59,7 +37,7 @@ impl Context<'_> { return; } match &deref_exp.exp.value { - TE::TempBorrow(_, inner) if is_simple_deref_ref_exp(inner) => self.env.add_diag(diag!( + TE::TempBorrow(_, inner) if is_simple_deref_ref_exp(inner) => self.add_diag(diag!( StyleCodes::RedundantRefDeref.diag_info(), ( exp.exp.loc, @@ -67,7 +45,7 @@ impl Context<'_> { Remove this borrow-deref and use the expression directly." ) )), - TE::TempBorrow(_, inner) if all_deref_borrow(inner) => self.env.add_diag(diag!( + TE::TempBorrow(_, inner) if all_deref_borrow(inner) => self.add_diag(diag!( StyleCodes::RedundantRefDeref.diag_info(), ( exp.exp.loc, @@ -75,17 +53,15 @@ impl Context<'_> { Use the inner expression directly." ) )), - TE::Borrow(false, _, _) if exp.exp.loc != deref_exp.exp.loc => { - self.env.add_diag(diag!( - StyleCodes::RedundantRefDeref.diag_info(), - ( - exp.exp.loc, - "Redundant borrow-dereference detected. \ + TE::Borrow(false, _, _) if exp.exp.loc != deref_exp.exp.loc => self.add_diag(diag!( + StyleCodes::RedundantRefDeref.diag_info(), + ( + exp.exp.loc, + "Redundant borrow-dereference detected. \ Use the field access directly." 
- ) - )) - } - TE::Borrow(_, _, _) | TE::BorrowLocal(_, _) => self.env.add_diag(diag!( + ) + )), + TE::Borrow(_, _, _) | TE::BorrowLocal(_, _) => self.add_diag(diag!( StyleCodes::RedundantRefDeref.diag_info(), ( exp.exp.loc, diff --git a/external-crates/move/crates/move-compiler/src/linters/self_assignment.rs b/external-crates/move/crates/move-compiler/src/linters/self_assignment.rs index 808b55ee9ff44..029fdda924026 100644 --- a/external-crates/move/crates/move-compiler/src/linters/self_assignment.rs +++ b/external-crates/move/crates/move-compiler/src/linters/self_assignment.rs @@ -6,40 +6,17 @@ use super::StyleCodes; use crate::{ diag, - diagnostics::WarningFilters, naming::ast::Var, - shared::CompilationEnv, typing::{ ast::{self as T}, - visitor::{TypingVisitorConstructor, TypingVisitorContext}, + visitor::simple_visitor, }, }; use move_ir_types::location::Loc; use move_proc_macros::growing_stack; -pub struct SelfAssignmentVisitor; - -pub struct Context<'a> { - env: &'a mut CompilationEnv, -} - -impl TypingVisitorConstructor for SelfAssignmentVisitor { - type Context<'a> = Context<'a>; - - fn context<'a>(env: &'a mut CompilationEnv, _program: &T::Program) -> Self::Context<'a> { - Context { env } - } -} - -impl TypingVisitorContext for Context<'_> { - fn add_warning_filter_scope(&mut self, filter: WarningFilters) { - self.env.add_warning_filter_scope(filter) - } - - fn pop_warning_filter_scope(&mut self) { - self.env.pop_warning_filter_scope() - } - +simple_visitor!( + SelfAssignmentVisitor, fn visit_exp_custom(&mut self, e: &T::Exp) -> bool { use T::UnannotatedExp_ as E; match &e.exp.value { @@ -49,7 +26,7 @@ impl TypingVisitorContext for Context<'_> { } false } -} +); fn check_mutate(context: &mut Context, loc: Loc, lhs: &T::Exp, rhs: &T::Exp) { #[growing_stack] @@ -202,7 +179,7 @@ fn exp_list_items(e: &T::Exp) -> Vec<&T::Exp> { fn report_self_assignment(context: &mut Context, case: &str, eloc: Loc, lloc: Loc, rloc: Loc) { let msg = format!("Unnecessary self-{case}. 
The {case} is redundant and will not change the value"); - context.env.add_diag(diag!( + context.add_diag(diag!( StyleCodes::SelfAssignment.diag_info(), (eloc, msg), (lloc, "This location"), diff --git a/external-crates/move/crates/move-compiler/src/linters/unnecessary_conditional.rs b/external-crates/move/crates/move-compiler/src/linters/unnecessary_conditional.rs index 490b8eb57f33c..f3b14b3ed2725 100644 --- a/external-crates/move/crates/move-compiler/src/linters/unnecessary_conditional.rs +++ b/external-crates/move/crates/move-compiler/src/linters/unnecessary_conditional.rs @@ -8,37 +8,15 @@ use crate::expansion::ast::Value; use crate::linters::StyleCodes; use crate::{ diag, - diagnostics::WarningFilters, expansion::ast::Value_, - shared::CompilationEnv, typing::{ ast::{self as T, SequenceItem_, UnannotatedExp_}, - visitor::{TypingVisitorConstructor, TypingVisitorContext}, + visitor::simple_visitor, }, }; -pub struct UnnecessaryConditional; - -pub struct Context<'a> { - env: &'a mut CompilationEnv, -} - -impl TypingVisitorConstructor for UnnecessaryConditional { - type Context<'a> = Context<'a>; - - fn context<'a>(env: &'a mut CompilationEnv, _program: &T::Program) -> Self::Context<'a> { - Context { env } - } -} - -impl TypingVisitorContext for Context<'_> { - fn add_warning_filter_scope(&mut self, filter: WarningFilters) { - self.env.add_warning_filter_scope(filter) - } - fn pop_warning_filter_scope(&mut self) { - self.env.pop_warning_filter_scope() - } - +simple_visitor!( + UnnecessaryConditional, fn visit_exp_custom(&mut self, exp: &T::Exp) -> bool { let UnannotatedExp_::IfElse(_, etrue, efalse) = &exp.exp.value else { return false; @@ -46,7 +24,7 @@ impl TypingVisitorContext for Context<'_> { let Some(vtrue) = extract_value(etrue) else { return false; }; - let Some(vfalse) = extract_value(efalse) else { + let Some(vfalse) = efalse.as_ref().and_then(|efalse| extract_value(efalse)) else { return false; }; @@ -58,7 +36,7 @@ impl TypingVisitorContext for Context<'_> { "Detected an unnecessary conditional expression 'if (cond)'. Consider using \ the condition directly, i.e. '{negation}cond'", ); - self.env.add_diag(diag!( + self.add_diag(diag!( StyleCodes::UnnecessaryConditional.diag_info(), (exp.exp.loc, msg) )); @@ -67,7 +45,7 @@ impl TypingVisitorContext for Context<'_> { let msg = "Detected a redundant conditional expression 'if (..) v else v', where each \ branch results in the same value 'v'. Consider using the value directly"; - self.env.add_diag(diag!( + self.add_diag(diag!( StyleCodes::UnnecessaryConditional.diag_info(), (exp.exp.loc, msg), (vtrue.loc, "This value"), @@ -97,7 +75,7 @@ impl TypingVisitorContext for Context<'_> { // } false } -} +); #[growing_stack] fn extract_value(block: &T::Exp) -> Option<&Value> { diff --git a/external-crates/move/crates/move-compiler/src/linters/unnecessary_unit.rs b/external-crates/move/crates/move-compiler/src/linters/unnecessary_unit.rs new file mode 100644 index 0000000000000..0451c47a83de3 --- /dev/null +++ b/external-crates/move/crates/move-compiler/src/linters/unnecessary_unit.rs @@ -0,0 +1,100 @@ +//! Detects an unnecessary unit expression in a block, sequence, if, or else. 
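A note on the unnecessary_conditional hunk above: because the typed AST's IfElse now carries its else branch as an Option (see the naming/ast.rs change later in this diff), the lint fetches the false-branch value with as_ref().and_then rather than assuming the branch exists. Below is a minimal, self-contained sketch of that pattern; Exp, Value, and extract_value here are toy stand-ins for the compiler's types, not its real API. (The body of the new unnecessary_unit lint continues right after this sketch.)

    // Toy stand-ins for the typed AST; only the Option-carrying else branch matters here.
    #[derive(Debug, PartialEq)]
    enum Value {
        Bool(bool),
    }

    #[derive(Debug)]
    enum Exp {
        Value(Value),
        // The else branch is optional, mirroring IfElse(Box<Exp>, Box<Exp>, Option<Box<Exp>>).
        IfElse(Box<Exp>, Box<Exp>, Option<Box<Exp>>),
    }

    // Mirrors extract_value: only a bare value qualifies.
    fn extract_value(e: &Exp) -> Option<&Value> {
        match e {
            Exp::Value(v) => Some(v),
            _ => None,
        }
    }

    // Mirrors the lint's guard: both branches must exist and carry a value.
    fn both_branch_values(e: &Exp) -> Option<(&Value, &Value)> {
        let Exp::IfElse(_, etrue, efalse_opt) = e else { return None };
        let vtrue = extract_value(etrue)?;
        // The new pattern: an absent else simply yields None, so the check bails out.
        let vfalse = efalse_opt.as_ref().and_then(|efalse| extract_value(efalse))?;
        Some((vtrue, vfalse))
    }

    fn main() {
        let with_else = Exp::IfElse(
            Box::new(Exp::Value(Value::Bool(true))),
            Box::new(Exp::Value(Value::Bool(true))),
            Some(Box::new(Exp::Value(Value::Bool(false)))),
        );
        assert!(both_branch_values(&with_else).is_some());

        let without_else = Exp::IfElse(
            Box::new(Exp::Value(Value::Bool(true))),
            Box::new(Exp::Value(Value::Bool(true))),
            None,
        );
        assert!(both_branch_values(&without_else).is_none());
        println!("ok");
    }

Treating a missing else as "no value to compare" keeps the redundant-branch check from firing on an if expression whose else is implicit.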
+ +use crate::{ + diag, ice, + linters::StyleCodes, + typing::{ + ast::{self as T, SequenceItem_, UnannotatedExp_}, + visitor::simple_visitor, + }, +}; +use move_ir_types::location::Loc; + +simple_visitor!( + UnnecessaryUnit, + fn visit_seq_custom(&mut self, loc: Loc, (_, seq_): &T::Sequence) -> bool { + let n = seq_.len(); + match n { + 0 => { + self.add_diag(ice!((loc, "Unexpected empty block without a value"))); + } + 1 => { + // TODO probably too noisy for now, we would need more information about + // blocks were added by the programmer + // self.env.add_diag(diag!( + // StyleCodes::UnnecessaryBlock.diag_info(), + // (e.exp.loc, "Unnecessary block expression '{}')" + // (e.exp.loc, if_msg), + // )); + } + n => { + let last = n - 1; + for (i, stmt) in seq_.iter().enumerate() { + if i != last && is_unit_seq(self, stmt) { + let msg = "Unnecessary unit in sequence '();'. Consider removing"; + self.add_diag(diag!( + StyleCodes::UnnecessaryUnit.diag_info(), + (stmt.loc, msg), + )); + } + } + } + } + false + }, + fn visit_exp_custom(&mut self, e: &T::Exp) -> bool { + use UnannotatedExp_ as TE; + let TE::IfElse(e_cond, e_true, e_false_opt) = &e.exp.value else { + return false; + }; + if is_unit(self, e_true) { + let u_msg = "Unnecessary unit '()'"; + let if_msg = "Consider negating the 'if' condition and simplifying"; + let mut diag = diag!( + StyleCodes::UnnecessaryUnit.diag_info(), + (e_true.exp.loc, u_msg), + (e_cond.exp.loc, if_msg), + ); + diag.add_note("For example 'if (cond) () else e' can be simplified to 'if (!cond) e'"); + self.add_diag(diag); + } + if let Some(e_false) = e_false_opt { + if is_unit(self, e_false) { + let u_msg = "Unnecessary 'else ()'."; + let if_msg = "An 'if' without an 'else' has an implicit 'else ()'. \ + Consider removing the 'else' branch"; + let mut diag = diag!( + StyleCodes::UnnecessaryUnit.diag_info(), + (e_false.exp.loc, u_msg), + (e.exp.loc, if_msg), + ); + diag.add_note( + "For example 'if (cond) e else ()' can be simplified to 'if (cond) e'", + ); + self.add_diag(diag); + } + } + false + } +); + +fn is_unit_seq(context: &mut Context, s: &T::SequenceItem) -> bool { + match &s.value { + SequenceItem_::Seq(e) => is_unit(context, e), + SequenceItem_::Declare(_) | SequenceItem_::Bind(_, _, _) => false, + } +} + +fn is_unit(context: &mut Context, e: &T::Exp) -> bool { + use UnannotatedExp_ as TE; + match &e.exp.value { + TE::Unit { .. } => true, + TE::Annotate(inner, _) => is_unit(context, inner), + TE::Block((_, seq)) if seq.is_empty() => { + context.add_diag(ice!((e.exp.loc, "Unexpected empty block without a value"))); + false + } + TE::Block((_, seq)) if seq.len() == 1 => is_unit_seq(context, &seq[0]), + _ => false, + } +} diff --git a/external-crates/move/crates/move-compiler/src/linters/unnecessary_while_loop.rs b/external-crates/move/crates/move-compiler/src/linters/unnecessary_while_loop.rs index b13449cd9f15b..ce5a0809a2ca9 100644 --- a/external-crates/move/crates/move-compiler/src/linters/unnecessary_while_loop.rs +++ b/external-crates/move/crates/move-compiler/src/linters/unnecessary_while_loop.rs @@ -3,38 +3,16 @@ //! Aims to enhance code readability and adherence to Rust idioms. 
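The new unnecessary_unit lint just above rests on a small classifier: an expression counts as a unit if it is a literal '()', an annotated unit, or a one-item block that itself reduces to a unit, and any such unit in a non-final sequence position is flagged. A hedged, standalone re-sketch of that logic follows, with toy Exp and SequenceItem types; the real lint works over the typed AST and additionally reports an internal error on empty blocks.

    // Toy expression/sequence types standing in for the typed AST used by the lint.
    #[derive(Debug)]
    enum Exp {
        Unit,
        Annotate(Box<Exp>),       // an annotated expression; the annotation is irrelevant here
        Block(Vec<SequenceItem>), // { ...; e }
        Other,                    // anything else
    }

    #[derive(Debug)]
    enum SequenceItem {
        Seq(Box<Exp>), // a bare expression statement
        Declare,       // a let binding, never a unit for this purpose
    }

    // Mirrors is_unit_seq: only a bare expression item can be a unit.
    fn is_unit_seq(item: &SequenceItem) -> bool {
        match item {
            SequenceItem::Seq(e) => is_unit(e),
            SequenceItem::Declare => false,
        }
    }

    // Mirrors is_unit: unit literals, annotated units, and single-item blocks
    // that themselves reduce to a unit. (The real version also ICEs on empty blocks.)
    fn is_unit(e: &Exp) -> bool {
        match e {
            Exp::Unit => true,
            Exp::Annotate(inner) => is_unit(inner),
            Exp::Block(seq) if seq.len() == 1 => is_unit_seq(&seq[0]),
            _ => false,
        }
    }

    // The sequence check from visit_seq_custom: flag '();' anywhere but the final,
    // value-producing position.
    fn unnecessary_units(seq: &[SequenceItem]) -> Vec<usize> {
        let Some(last) = seq.len().checked_sub(1) else { return vec![] };
        seq.iter()
            .enumerate()
            .filter(|(i, item)| *i != last && is_unit_seq(item))
            .map(|(i, _)| i)
            .collect()
    }

    fn main() {
        let nested_unit = Exp::Block(vec![SequenceItem::Seq(Box::new(Exp::Annotate(
            Box::new(Exp::Unit),
        )))]);
        let seq = vec![
            SequenceItem::Seq(Box::new(Exp::Unit)),   // flagged: bare '();'
            SequenceItem::Seq(Box::new(nested_unit)), // flagged: a block reducing to a unit
            SequenceItem::Declare,
            SequenceItem::Seq(Box::new(Exp::Other)),  // final position: the block's value, kept
        ];
        assert_eq!(unnecessary_units(&seq), vec![0, 1]);
        println!("flagged indices: {:?}", unnecessary_units(&seq));
    }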
use crate::{ diag, - diagnostics::WarningFilters, expansion::ast::Value_, linters::StyleCodes, - shared::CompilationEnv, typing::{ ast::{self as T, UnannotatedExp_}, - visitor::{TypingVisitorConstructor, TypingVisitorContext}, + visitor::simple_visitor, }, }; -pub struct WhileTrueToLoop; - -pub struct Context<'a> { - env: &'a mut CompilationEnv, -} - -impl TypingVisitorConstructor for WhileTrueToLoop { - type Context<'a> = Context<'a>; - - fn context<'a>(env: &'a mut CompilationEnv, _program: &T::Program) -> Self::Context<'a> { - Context { env } - } -} - -impl TypingVisitorContext for Context<'_> { - fn add_warning_filter_scope(&mut self, filter: WarningFilters) { - self.env.add_warning_filter_scope(filter) - } - fn pop_warning_filter_scope(&mut self) { - self.env.pop_warning_filter_scope() - } - +simple_visitor!( + WhileTrueToLoop, fn visit_exp_custom(&mut self, exp: &T::Exp) -> bool { let UnannotatedExp_::While(_, cond, _) = &exp.exp.value else { return false; @@ -49,8 +27,8 @@ impl TypingVisitorContext for Context<'_> { "A 'loop' is more useful in these cases. Unlike 'while', 'loop' can have a \ 'break' with a value, e.g. 'let x = loop { break 42 };'", ); - self.env.add_diag(diag); + self.add_diag(diag); false } -} +); diff --git a/external-crates/move/crates/move-compiler/src/linters/unneeded_return.rs b/external-crates/move/crates/move-compiler/src/linters/unneeded_return.rs index 5e974615629b5..2a62c7bf94e28 100644 --- a/external-crates/move/crates/move-compiler/src/linters/unneeded_return.rs +++ b/external-crates/move/crates/move-compiler/src/linters/unneeded_return.rs @@ -6,15 +6,10 @@ use crate::{ diag, - diagnostics::WarningFilters, expansion::ast::ModuleIdent, linters::StyleCodes, parser::ast::FunctionName, - shared::CompilationEnv, - typing::{ - ast as T, - visitor::{TypingVisitorConstructor, TypingVisitorContext}, - }, + typing::{ast as T, visitor::simple_visitor}, }; use move_ir_types::location::Loc; @@ -22,21 +17,8 @@ use move_proc_macros::growing_stack; use std::collections::VecDeque; -pub struct UnneededReturnVisitor; - -pub struct Context<'a> { - env: &'a mut CompilationEnv, -} - -impl TypingVisitorConstructor for UnneededReturnVisitor { - type Context<'a> = Context<'a>; - - fn context<'a>(env: &'a mut CompilationEnv, _program: &T::Program) -> Self::Context<'a> { - Context { env } - } -} - -impl TypingVisitorContext for Context<'_> { +simple_visitor!( + UnneededReturnVisitor, fn visit_function_custom( &mut self, _module: ModuleIdent, @@ -48,15 +30,7 @@ impl TypingVisitorContext for Context<'_> { }; true } - - fn add_warning_filter_scope(&mut self, filter: WarningFilters) { - self.env.add_warning_filter_scope(filter) - } - - fn pop_warning_filter_scope(&mut self) { - self.env.pop_warning_filter_scope() - } -} +); /// Recur down the tail (last) position of the sequence, looking for returns that /// might occur in the function's taul/return position.. 
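Several lints above (unneeded_return, unnecessary_while_loop, self_assignment, and others earlier in this diff) drop their hand-written visitor and Context boilerplate in favor of simple_visitor!. The sketch below is a hand-written approximation of the shape that boilerplate takes after the refactor, not the macro's actual expansion; Env, Diagnostic, and WarningFilters are toy stand-ins, and the filter handling is reduced to a plain stack.

    use std::cell::RefCell;

    // Stand-ins for the compiler's types; only the wiring is of interest.
    struct Diagnostic(String);
    struct WarningFilters; // a set of suppressed lint codes in the real compiler

    #[derive(Default)]
    struct Env {
        // Interior mutability is what lets lints record diagnostics through &Env.
        diags: RefCell<Vec<Diagnostic>>,
    }

    impl Env {
        fn add_diag(&self, _filters: &[WarningFilters], d: Diagnostic) {
            // The real env consults the filter scope before recording; elided here.
            self.diags.borrow_mut().push(d);
        }
    }

    // Roughly what simple_visitor!(UnneededReturnVisitor, fn visit_exp_custom ...)
    // provides for free: the visitor type, a Context holding the env plus a
    // warning-filter scope, and the add_diag/push/pop plumbing.
    struct UnneededReturnVisitor;

    struct Context<'a> {
        env: &'a Env,
        warning_filters_scope: Vec<WarningFilters>,
    }

    impl<'a> Context<'a> {
        fn new(env: &'a Env) -> Self {
            Self { env, warning_filters_scope: Vec::new() }
        }
        fn add_diag(&self, d: Diagnostic) {
            self.env.add_diag(&self.warning_filters_scope, d);
        }
        fn push_warning_filter_scope(&mut self, f: WarningFilters) {
            self.warning_filters_scope.push(f);
        }
        fn pop_warning_filter_scope(&mut self) {
            self.warning_filters_scope.pop();
        }

        // The only part a lint author writes under the macro: the custom visit body,
        // which now calls self.add_diag(..) instead of self.env.add_diag(..).
        fn visit_exp_custom(&mut self, exp_is_trailing_return: bool) -> bool {
            if exp_is_trailing_return {
                self.add_diag(Diagnostic("unneeded trailing 'return'".to_string()));
            }
            false // do not skip children
        }
    }

    fn main() {
        let env = Env::default();
        let mut ctx = Context::new(&env);
        ctx.push_warning_filter_scope(WarningFilters);
        ctx.visit_exp_custom(true);
        ctx.pop_warning_filter_scope();
        let _ = UnneededReturnVisitor;
        for d in env.diags.borrow().iter() {
            println!("{}", d.0);
        }
    }

The practical effect visible throughout this diff is that lint bodies call self.add_diag(..) and the filter-scope bookkeeping no longer has to be restated for every lint.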
@@ -75,9 +49,11 @@ fn tail_block(context: &mut Context, seq: &VecDeque) { #[growing_stack] fn tail(context: &mut Context, exp: &T::Exp) { match &exp.exp.value { - T::UnannotatedExp_::IfElse(_, conseq, alt) => { + T::UnannotatedExp_::IfElse(_, conseq, alt_opt) => { tail(context, conseq); - tail(context, alt); + if let Some(alt) = alt_opt { + tail(context, alt); + } } T::UnannotatedExp_::Match(_, arms) => { for arm in &arms.value { @@ -209,7 +185,7 @@ fn returnable_value(context: &mut Context, exp: &T::Exp) -> bool { } fn report_unneeded_return(context: &mut Context, loc: Loc) { - context.env.add_diag(diag!( + context.add_diag(diag!( StyleCodes::UnneededReturn.diag_info(), ( loc, diff --git a/external-crates/move/crates/move-compiler/src/naming/ast.rs b/external-crates/move/crates/move-compiler/src/naming/ast.rs index 52ef5c5aae880..aca723dc12584 100644 --- a/external-crates/move/crates/move-compiler/src/naming/ast.rs +++ b/external-crates/move/crates/move-compiler/src/naming/ast.rs @@ -3,7 +3,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - diagnostics::WarningFilters, + diagnostics::warning_filters::WarningFilters, expansion::ast::{ ability_constraints_ast_debug, ability_modifiers_ast_debug, AbilitySet, Attributes, DottedUsage, Fields, Friend, ImplicitUseFunCandidate, ModuleIdent, Mutability, TargetKind, @@ -426,7 +426,7 @@ pub enum Exp_ { Builtin(BuiltinFunction, Spanned>), Vector(Loc, Option, Spanned>), - IfElse(Box, Box, Box), + IfElse(Box, Box, Option>), Match(Box, Spanned>), While(BlockLabel, Box, Box), Loop(BlockLabel, Box), @@ -1704,13 +1704,15 @@ impl AstDebug for Exp_ { }); w.write("}"); } - E::IfElse(b, t, f) => { + E::IfElse(b, t, f_opt) => { w.write("if ("); b.ast_debug(w); w.write(") "); t.ast_debug(w); - w.write(" else "); - f.ast_debug(w); + if let Some(f) = f_opt { + w.write(" else "); + f.ast_debug(w); + } } E::Match(subject, arms) => { w.write("match ("); diff --git a/external-crates/move/crates/move-compiler/src/naming/fake_natives.rs b/external-crates/move/crates/move-compiler/src/naming/fake_natives.rs index 814174cb72e6b..56574e6e7408b 100644 --- a/external-crates/move/crates/move-compiler/src/naming/fake_natives.rs +++ b/external-crates/move/crates/move-compiler/src/naming/fake_natives.rs @@ -19,7 +19,7 @@ use move_symbol_pool::symbol; /// verify fake native attribute usage usage pub fn function( - env: &mut CompilationEnv, + env: &CompilationEnv, module: ModuleIdent, function_name: FunctionName, function: &N::Function, @@ -45,7 +45,7 @@ pub fn function( (loc, attr_msg), (function_name.loc(), name_msg), ); - env.add_diag(diag); + env.add_error_diag(diag); } match &function.body.value { N::FunctionBody_::Native => (), @@ -55,7 +55,7 @@ pub fn function( NativeAttribute::BYTECODE_INSTRUCTION ); let diag = diag!(Attributes::InvalidBytecodeInst, (loc, attr_msg)); - env.add_diag(diag); + env.add_error_diag(diag); } } } diff --git a/external-crates/move/crates/move-compiler/src/naming/resolve_use_funs.rs b/external-crates/move/crates/move-compiler/src/naming/resolve_use_funs.rs index 2248154e6fcdf..5262a67858ec5 100644 --- a/external-crates/move/crates/move-compiler/src/naming/resolve_use_funs.rs +++ b/external-crates/move/crates/move-compiler/src/naming/resolve_use_funs.rs @@ -1,6 +1,8 @@ // Copyright (c) The Move Contributors // SPDX-License-Identifier: Apache-2.0 +use crate::diagnostics::warning_filters::{WarningFilters, WarningFiltersScope}; +use crate::diagnostics::{Diagnostic, Diagnostics}; use crate::expansion::ast::{self as E, ModuleIdent}; use 
crate::naming::ast as N; use crate::parser::ast::{FunctionName, Visibility}; @@ -15,30 +17,50 @@ use move_proc_macros::growing_stack; //************************************************************************************************** struct Context<'env, 'info> { - env: &'env mut CompilationEnv, + env: &'env CompilationEnv, info: &'info NamingProgramInfo, + warning_filters_scope: WarningFiltersScope, current_module: ModuleIdent, } impl<'env, 'info> Context<'env, 'info> { fn new( - env: &'env mut CompilationEnv, + env: &'env CompilationEnv, info: &'info NamingProgramInfo, current_module: ModuleIdent, ) -> Self { + let warning_filters_scope = env.top_level_warning_filter_scope().clone(); Self { env, info, + warning_filters_scope, current_module, } } + + pub fn add_diag(&self, diag: Diagnostic) { + self.env.add_diag(&self.warning_filters_scope, diag); + } + + #[allow(unused)] + pub fn add_diags(&self, diags: Diagnostics) { + self.env.add_diags(&self.warning_filters_scope, diags); + } + + pub fn push_warning_filter_scope(&mut self, filters: WarningFilters) { + self.warning_filters_scope.push(filters) + } + + pub fn pop_warning_filter_scope(&mut self) { + self.warning_filters_scope.pop() + } } //************************************************************************************************** // Entry //************************************************************************************************** -pub fn program(env: &mut CompilationEnv, info: &mut NamingProgramInfo, inner: &mut N::Program_) { +pub fn program(env: &CompilationEnv, info: &mut NamingProgramInfo, inner: &mut N::Program_) { let N::Program_ { modules } = inner; for (mident, mdef) in modules.key_cloned_iter_mut() { module(env, info, mident, mdef); @@ -59,15 +81,13 @@ pub fn program(env: &mut CompilationEnv, info: &mut NamingProgramInfo, inner: &m } fn module( - env: &mut CompilationEnv, + env: &CompilationEnv, info: &mut NamingProgramInfo, mident: ModuleIdent, mdef: &mut N::ModuleDefinition, ) { let context = &mut Context::new(env, info, mident); - context - .env - .add_warning_filter_scope(mdef.warning_filter.clone()); + context.push_warning_filter_scope(mdef.warning_filter.clone()); use_funs(context, &mut mdef.use_funs); for (_, _, c) in &mut mdef.constants { constant(context, c); @@ -75,25 +95,21 @@ fn module( for (_, _, f) in &mut mdef.functions { function(context, f); } - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); } fn constant(context: &mut Context, c: &mut N::Constant) { - context - .env - .add_warning_filter_scope(c.warning_filter.clone()); + context.push_warning_filter_scope(c.warning_filter.clone()); exp(context, &mut c.value); - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); } fn function(context: &mut Context, function: &mut N::Function) { - context - .env - .add_warning_filter_scope(function.warning_filter.clone()); + context.push_warning_filter_scope(function.warning_filter.clone()); if let N::FunctionBody_::Defined(seq) = &mut function.body.value { sequence(context, seq) } - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); } //************************************************************************************************** @@ -128,7 +144,7 @@ fn use_funs(context: &mut Context, uf: &mut N::UseFuns) { if let Some(public_loc) = nuf.is_public { let defining_module = match &tn.value { N::TypeName_::Multiple(_) => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( tn.loc, "ICE tuple type should not be reachable from 
use fun" ))); @@ -155,7 +171,7 @@ fn use_funs(context: &mut Context, uf: &mut N::UseFuns) { format!("The type '{tn}' is defined here"), )) } - context.env.add_diag(diag); + context.add_diag(diag); nuf.is_public = None; } } @@ -173,7 +189,7 @@ fn use_funs(context: &mut Context, uf: &mut N::UseFuns) { } None => format!("But '{m}::{f}' takes no arguments"), }; - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidUseFun, (loc, msg), (first_ty_loc, first_tn_msg), @@ -199,9 +215,7 @@ fn use_funs(context: &mut Context, uf: &mut N::UseFuns) { let Some((target_f, tn)) = is_valid_method(context, &target_m, target_f) else { if matches!(ekind, E::ImplicitUseFunKind::UseAlias { used: false }) { let msg = format!("Unused 'use' of alias '{}'. Consider removing it", method); - context - .env - .add_diag(diag!(UnusedItem::Alias, (method.loc, msg),)) + context.add_diag(diag!(UnusedItem::Alias, (method.loc, msg),)) } continue; }; @@ -238,7 +252,7 @@ fn use_funs(context: &mut Context, uf: &mut N::UseFuns) { argument is a type defined in the same module" } }; - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::DuplicateItem, (nuf_loc, msg), (prev, "Previously declared here"), @@ -335,10 +349,12 @@ fn exp(context: &mut Context, sp!(_, e_): &mut N::Exp) { use_fun_color: _, body: e, }) => exp(context, e), - N::Exp_::IfElse(econd, et, ef) => { + N::Exp_::IfElse(econd, et, ef_opt) => { exp(context, econd); exp(context, et); - exp(context, ef); + if let Some(ef) = ef_opt { + exp(context, ef); + } } N::Exp_::Match(esubject, arms) => { exp(context, esubject); diff --git a/external-crates/move/crates/move-compiler/src/naming/syntax_methods.rs b/external-crates/move/crates/move-compiler/src/naming/syntax_methods.rs index d5c19587d6af8..38a46961b5625 100644 --- a/external-crates/move/crates/move-compiler/src/naming/syntax_methods.rs +++ b/external-crates/move/crates/move-compiler/src/naming/syntax_methods.rs @@ -68,7 +68,7 @@ pub(super) fn resolve_syntax_attributes( if let Some(macro_loc) = function.macro_ { let msg = "Syntax attributes may not appear on macro definitions"; let fn_msg = "This function is a macro"; - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidSyntaxMethod, (attr_loc, msg), (macro_loc, fn_msg) @@ -125,7 +125,7 @@ fn prev_syntax_defn_error( kind_string, type_name ); let prev_msg = "This syntax method was previously defined here."; - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidAttribute, (sloc, msg), (prev.loc, prev_msg) @@ -147,7 +147,7 @@ fn attr_param_from_str(loc: Loc, name_str: &str) -> Option /// Resolve the mapping for a function + syntax attribute into a SyntaxMethodKind. 
fn resolve_syntax_method_prekind( - env: &mut CompilationEnv, + env: &CompilationEnv, sp!(loc, attr_): &Attribute, ) -> Option> { match attr_ { @@ -157,7 +157,7 @@ fn resolve_syntax_method_prekind( SyntaxAttribute::SYNTAX, SyntaxAttribute::INDEX ); - env.add_diag(diag!(Declarations::InvalidAttribute, (*loc, msg))); + env.add_error_diag(diag!(Declarations::InvalidAttribute, (*loc, msg))); None } Attribute_::Parameterized(_, inner) => { @@ -169,7 +169,7 @@ fn resolve_syntax_method_prekind( if let Some(prev_kind) = kinds.replace(kind) { let msg = "Repeated syntax method identifier".to_string(); let prev = "Initially defined here".to_string(); - env.add_diag(diag!( + env.add_error_diag(diag!( Declarations::InvalidAttribute, (loc, msg), (prev_kind.loc, prev) @@ -177,7 +177,7 @@ fn resolve_syntax_method_prekind( } } else { let msg = format!("Invalid syntax method identifier '{}'", name); - env.add_diag(diag!(Declarations::InvalidAttribute, (loc, msg))); + env.add_error_diag(diag!(Declarations::InvalidAttribute, (loc, msg))); } } Attribute_::Assigned(n, _) => { @@ -186,7 +186,7 @@ fn resolve_syntax_method_prekind( SyntaxAttribute::SYNTAX, n ); - env.add_diag(diag!(Declarations::InvalidAttribute, (loc, msg))); + env.add_error_diag(diag!(Declarations::InvalidAttribute, (loc, msg))); } Attribute_::Parameterized(n, _) => { let msg = format!( @@ -194,7 +194,7 @@ fn resolve_syntax_method_prekind( SyntaxAttribute::SYNTAX, n ); - env.add_diag(diag!(Declarations::InvalidAttribute, (loc, msg))); + env.add_error_diag(diag!(Declarations::InvalidAttribute, (loc, msg))); } } } @@ -221,7 +221,7 @@ fn determine_valid_kind( SyntaxAttribute::INDEX, ); let ty_msg = "This type is not a reference"; - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidAttribute, (sloc, msg), (subject_type.loc, ty_msg) @@ -231,9 +231,7 @@ fn determine_valid_kind( } SyntaxMethodPrekind_::For => { let msg = "'for' syntax attributes are not currently supported"; - context - .env - .add_diag(diag!(Declarations::InvalidAttribute, (sloc, msg),)); + context.add_diag(diag!(Declarations::InvalidAttribute, (sloc, msg),)); return None; } // SyntaxMethodPrekind_::For => match mut_opt { @@ -243,9 +241,7 @@ fn determine_valid_kind( // }, SyntaxMethodPrekind_::Assign => { let msg = "'assign' syntax attributes are not currently supported"; - context - .env - .add_diag(diag!(Declarations::InvalidAttribute, (sloc, msg),)); + context.add_diag(diag!(Declarations::InvalidAttribute, (sloc, msg),)); return None; } // SyntaxMethodPrekind_::Assign => match mut_opt { // Some((loc, true)) => SK::Assign, @@ -255,7 +251,7 @@ fn determine_valid_kind( // SyntaxAttribute::INDEX, // ); // let ty_msg = "This type is not a reference"; - // context.env.add_diag(diag!( + // context.add_diag(diag!( // Declarations::InvalidAttribute, // (sloc, msg), // (*ty_loc, msg) @@ -287,7 +283,7 @@ fn determine_subject_type_name( let msg = "Invalid type for syntax method definition"; let mut diag = diag!(Declarations::InvalidSyntaxMethod, (*loc, msg)); diag.add_note("Syntax methods may only be defined for single base types"); - context.env.add_diag(diag); + context.add_diag(diag); return None; } N::TypeName_::Builtin(sp!(_, bt_)) => context.env.primitive_definer(*bt_), @@ -296,7 +292,7 @@ fn determine_subject_type_name( if Some(cur_module) == defining_module { Some(type_name.clone()) } else { - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidSyntaxMethod, (*ann_loc, INVALID_MODULE_MSG), (*loc, INVALID_MODULE_TYPE_MSG) @@ -314,7 +310,7 
@@ fn determine_subject_type_name( "But '{}' was declared as a type parameter here", param.user_specified_name ); - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidSyntaxMethod, (*ann_loc, msg), (*loc, tmsg) @@ -329,7 +325,7 @@ fn determine_subject_type_name( let msg = "Invalid type for syntax method definition"; let mut diag = diag!(Declarations::InvalidSyntaxMethod, (*loc, msg)); diag.add_note("Syntax methods may only be defined for single base types"); - context.env.add_diag(diag); + context.add_diag(diag); None } } @@ -349,7 +345,7 @@ fn valid_return_type( let msg = format!("Invalid {} annotation", SyntaxAttribute::SYNTAX); let tmsg = "This syntax method must return an immutable reference to match its subject type"; - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidSyntaxMethod, (*loc, msg), (ty.loc, tmsg), @@ -362,7 +358,7 @@ fn valid_return_type( SyntaxAttribute::SYNTAX ); let tmsg = "This is not an immutable reference"; - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidSyntaxMethod, (*loc, msg), (ty.loc, tmsg), @@ -379,7 +375,7 @@ fn valid_return_type( let msg = format!("Invalid {} annotation", SyntaxAttribute::SYNTAX); let tmsg = "This syntax method must return a mutable reference to match its subject type"; - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidSyntaxMethod, (*loc, msg), (ty.loc, tmsg), @@ -392,7 +388,7 @@ fn valid_return_type( SyntaxAttribute::SYNTAX ); let tmsg = "This is not a mutable reference"; - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidSyntaxMethod, (*loc, msg), (ty.loc, tmsg), @@ -426,7 +422,7 @@ fn valid_index_return_type( SyntaxAttribute::SYNTAX ); let tmsg = "Unit type occurs as the return type for this function"; - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidSyntaxMethod, (*kind_loc, msg), (*tloc, tmsg) @@ -439,7 +435,7 @@ fn valid_index_return_type( SyntaxAttribute::SYNTAX ); let tmsg = "But a function type appears in this return type"; - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidSyntaxMethod, (*kind_loc, msg), (*tloc, tmsg) @@ -466,9 +462,7 @@ fn get_first_type( "Invalid attribute. 
{} is only valid if the function takes at least one parameter", SyntaxAttribute::SYNTAX ); - context - .env - .add_diag(diag!(Declarations::InvalidAttribute, (*attr_loc, msg))); + context.add_diag(diag!(Declarations::InvalidAttribute, (*attr_loc, msg))); None } } diff --git a/external-crates/move/crates/move-compiler/src/naming/translate.rs b/external-crates/move/crates/move-compiler/src/naming/translate.rs index c69876a26eb4a..a4c7f6e08b98f 100644 --- a/external-crates/move/crates/move-compiler/src/naming/translate.rs +++ b/external-crates/move/crates/move-compiler/src/naming/translate.rs @@ -7,7 +7,8 @@ use crate::{ diagnostics::{ self, codes::{self, *}, - Diagnostic, + warning_filters::{WarningFilters, WarningFiltersScope}, + Diagnostic, Diagnostics, }, editions::FeatureGate, expansion::{ @@ -24,7 +25,10 @@ use crate::{ self as P, ConstantName, DatatypeName, Field, FunctionName, VariantName, MACRO_MODIFIER, }, shared::{ - ide::EllipsisMatchEntries, program_info::NamingProgramInfo, unique_map::UniqueMap, *, + ide::{EllipsisMatchEntries, IDEAnnotation, IDEInfo}, + program_info::NamingProgramInfo, + unique_map::UniqueMap, + *, }, FullyCompiledProgram, }; @@ -519,13 +523,19 @@ pub fn build_member_map( // Context //************************************************************************************************** -pub(super) struct Context<'env> { - pub env: &'env mut CompilationEnv, - current_module: Option, +pub(super) struct OuterContext { /// Nothing should ever use this directly, and should instead go through /// `resolve_module_access` because it preserves source location information. module_members: ModuleMembers, + unscoped_types: BTreeMap, +} + +pub(super) struct Context<'outer, 'env> { + pub env: &'env CompilationEnv, + outer: &'outer OuterContext, unscoped_types: Vec>, + warning_filters_scope: WarningFiltersScope, + current_module: ModuleIdent, local_scopes: Vec>, local_count: BTreeMap, used_locals: BTreeSet, @@ -546,7 +556,7 @@ macro_rules! resolve_from_module_access { Some(other) => { let diag = make_invalid_module_member_kind_error($context, &$expected_kind, $loc, &other); - $context.env.add_diag(diag); + $context.add_diag(diag); None } None => { @@ -557,26 +567,43 @@ macro_rules! 
resolve_from_module_access { }}; } -impl<'env> Context<'env> { +impl OuterContext { fn new( - compilation_env: &'env mut CompilationEnv, + compilation_env: &CompilationEnv, pre_compiled_lib: Option>, prog: &E::Program, ) -> Self { use ResolvedType as RT; let module_members = build_member_map(compilation_env, pre_compiled_lib, prog); - let unscoped_types = vec![N::BuiltinTypeName_::all_names() + let unscoped_types = N::BuiltinTypeName_::all_names() .iter() .map(|s| { let b_ = RT::BuiltinType(N::BuiltinTypeName_::resolve(s.as_str()).unwrap()); (*s, b_) }) - .collect()]; + .collect(); Self { - env: compilation_env, - current_module: None, module_members, unscoped_types, + } + } +} + +impl<'outer, 'env> Context<'outer, 'env> { + fn new( + env: &'env CompilationEnv, + outer: &'outer OuterContext, + current_package: Option, + current_module: ModuleIdent, + ) -> Self { + let unscoped_types = vec![outer.unscoped_types.clone()]; + let warning_filters_scope = env.top_level_warning_filter_scope().clone(); + Self { + env, + outer, + unscoped_types, + current_module, + warning_filters_scope, local_scopes: vec![], local_count: BTreeMap::new(), nominal_blocks: vec![], @@ -584,15 +611,42 @@ impl<'env> Context<'env> { used_locals: BTreeSet::new(), used_fun_tparams: BTreeSet::new(), translating_fun: false, - current_package: None, + current_package, } } + pub fn add_diag(&self, diag: Diagnostic) { + self.env.add_diag(&self.warning_filters_scope, diag); + } + + #[allow(unused)] + pub fn add_diags(&self, diags: Diagnostics) { + self.env.add_diags(&self.warning_filters_scope, diags); + } + + #[allow(unused)] + pub fn extend_ide_info(&self, info: IDEInfo) { + self.env.extend_ide_info(&self.warning_filters_scope, info); + } + + pub fn add_ide_annotation(&self, loc: Loc, info: IDEAnnotation) { + self.env + .add_ide_annotation(&self.warning_filters_scope, loc, info); + } + + pub fn push_warning_filter_scope(&mut self, filters: WarningFilters) { + self.warning_filters_scope.push(filters) + } + + pub fn pop_warning_filter_scope(&mut self) { + self.warning_filters_scope.pop() + } + fn valid_module(&mut self, m: &ModuleIdent) -> bool { - let resolved = self.module_members.contains_key(m); + let resolved = self.outer.module_members.contains_key(m); if !resolved { let diag = make_unbound_module_error(self, m.loc, m); - self.env.add_diag(diag); + self.add_diag(diag); } resolved } @@ -606,14 +660,14 @@ impl<'env> Context<'env> { m: &ModuleIdent, n: &Name, ) -> Option { - let Some(members) = self.module_members.get(m) else { - self.env.add_diag(make_unbound_module_error(self, m.loc, m)); + let Some(members) = self.outer.module_members.get(m) else { + self.add_diag(make_unbound_module_error(self, m.loc, m)); return None; }; let result = members.get(&n.value); if result.is_none() { let diag = make_unbound_module_member_error(self, kind, loc, *m, n.value); - self.env.add_diag(diag); + self.env.add_diag(&self.warning_filters_scope, diag); } result.map(|inner| { let mut result = inner.clone(); @@ -690,7 +744,7 @@ impl<'env> Context<'env> { } EN::Name(n) => match self.resolve_unscoped_type(nloc, n) { ResolvedType::ModuleType(mut module_type) => { - module_type.set_name_info(self.current_module.unwrap(), nloc); + module_type.set_name_info(self.current_module, nloc); ResolvedType::ModuleType(module_type) } ty @ (ResolvedType::BuiltinType(_) @@ -725,7 +779,7 @@ impl<'env> Context<'env> { { None => { let diag = make_unbound_local_name_error(self, &ErrorKind::Type, loc, n); - self.env.add_diag(diag); + self.add_diag(diag); 
ResolvedType::Unbound } Some(rn) => rn.clone(), @@ -748,7 +802,7 @@ impl<'env> Context<'env> { Some(c @ ResolvedModuleMember::Constant(_)) => { let diag = make_invalid_module_member_kind_error(self, &EK::Function, mloc, &c); - self.env.add_diag(diag); + self.add_diag(diag); ResolvedCallSubject::Unbound } Some(e @ ResolvedModuleMember::Datatype(ResolvedDatatype::Enum(_))) => { @@ -758,7 +812,7 @@ impl<'env> Context<'env> { "Enums cannot be instantiated directly. \ Instead, you must instantiate a variant.", ); - self.env.add_diag(diag); + self.add_diag(diag); ResolvedCallSubject::Unbound } None => { @@ -776,7 +830,7 @@ impl<'env> Context<'env> { _ => { let diag = make_unbound_local_name_error(self, &EK::Function, n.loc, n.value); - self.env.add_diag(diag); + self.add_diag(diag); return ResolvedCallSubject::Unbound; } }; @@ -815,7 +869,7 @@ impl<'env> Context<'env> { ResolvedCallSubject::Constructor(Box::new(variant)) } Some(ResolvedConstructor::Struct(struct_)) => { - self.env.add_diag(diag!( + self.add_diag(diag!( NameResolution::NamePositionMismatch, (sloc, "Invalid constructor. Expected an enum".to_string()), ( @@ -862,7 +916,7 @@ impl<'env> Context<'env> { _ => { let diag = make_unbound_local_name_error(self, &ErrorKind::Function, n.loc, n); - self.env.add_diag(diag); + self.add_diag(diag); return ResolvedUseFunFunction::Unbound; } }; @@ -872,11 +926,11 @@ impl<'env> Context<'env> { } EA::Name(n) => { let diag = make_unbound_local_name_error(self, &ErrorKind::Function, n.loc, n); - self.env.add_diag(diag); + self.add_diag(diag); ResolvedUseFunFunction::Unbound } EA::Variant(_, _) => { - self.env.add_diag(ice!(( + self.add_diag(ice!(( mloc, "Tried to resolve variant '{}' as a function in current scope" ),)); @@ -924,7 +978,7 @@ impl<'env> Context<'env> { } else { format!("Invalid {}. Expected a struct name", verb) }; - self.env.add_diag(diag!( + self.add_diag(diag!( NameResolution::NamePositionMismatch, (ma.loc, msg), (rtloc, rtmsg) @@ -944,7 +998,7 @@ impl<'env> Context<'env> { "Invalid {verb}. Variant '{variant_name}' is not part of this enum", ); let decl_msg = format!("Enum '{}' is defined here", enum_type.name); - self.env.add_diag(diag!( + self.add_diag(diag!( NameResolution::UnboundVariant, (ma.loc, primary_msg), (enum_type.decl_loc, decl_msg), @@ -960,7 +1014,7 @@ impl<'env> Context<'env> { Some(ResolvedConstructor::Variant(Box::new(variant_info))) } (EN::Name(_) | EN::ModuleAccess(_, _), D::Enum(enum_type)) => { - self.env.add_diag(diag!( + self.add_diag(diag!( NameResolution::NamePositionMismatch, (ma.loc, format!("Invalid {verb}. Expected a struct")), ( @@ -971,7 +1025,7 @@ impl<'env> Context<'env> { None } (EN::Variant(sp!(sloc, _), _), D::Struct(stype)) => { - self.env.add_diag(diag!( + self.add_diag(diag!( NameResolution::NamePositionMismatch, (*sloc, format!("Invalid {verb}. 
Expected an enum")), (stype.decl_loc, format!("But '{}' is an struct", stype.name)) @@ -1003,7 +1057,7 @@ impl<'env> Context<'env> { } } E::ModuleAccess_::Name(name) => { - self.env.add_diag(diag!( + self.add_diag(diag!( NameResolution::UnboundUnscopedName, (mloc, format!("Unbound constant '{}'", name)), )); @@ -1065,7 +1119,7 @@ impl<'env> Context<'env> { } ResolvedModuleMember::Constant(_) => (), }; - self.env.add_diag(diag); + self.add_diag(diag); ResolvedTerm::Unbound } }, @@ -1098,13 +1152,12 @@ impl<'env> Context<'env> { fn resolve_pattern_term(&mut self, sp!(mloc, ma_): E::ModuleAccess) -> ResolvedPatternTerm { match ma_ { E::ModuleAccess_::Name(name) if !is_constant_name(&name.value) => { - self.env - .add_diag(ice!((mloc, "This should have become a binder"))); + self.add_diag(ice!((mloc, "This should have become a binder"))); ResolvedPatternTerm::Unbound } // If we have a name, try to resolve it in our module. E::ModuleAccess_::Name(name) => { - let mut mident = self.current_module.unwrap(); + let mut mident = self.current_module; mident.loc = mloc; let maccess = sp(mloc, E::ModuleAccess_::ModuleAccess(mident, name)); self.resolve_pattern_term(maccess) @@ -1181,7 +1234,7 @@ impl<'env> Context<'env> { match id_opt { None => { let msg = variable_msg(name); - self.env.add_diag(diag!(code, (loc, msg))); + self.add_diag(diag!(code, (loc, msg))); None } Some(id) => { @@ -1202,7 +1255,7 @@ impl<'env> Context<'env> { match id_opt { None => { let msg = format!("Failed to resolve pattern binder {}", name); - self.env.add_diag(ice!((loc, msg))); + self.add_diag(ice!((loc, msg))); None } Some(id) => { @@ -1242,8 +1295,7 @@ impl<'env> Context<'env> { "Invalid usage of '{usage}'. \ '{usage}' can only be used inside a loop body or lambda", ); - self.env - .add_diag(diag!(TypeSafety::InvalidLoopControl, (loc, msg))); + self.add_diag(diag!(TypeSafety::InvalidLoopControl, (loc, msg))); return None; }; if *name_type == NominalBlockType::LambdaLoopCapture { @@ -1281,7 +1333,7 @@ impl<'env> Context<'env> { }; diag.add_secondary_label((loop_label.label.loc, msg)); } - self.env.add_diag(diag); + self.add_diag(diag); return None; } Some(*label) @@ -1337,13 +1389,12 @@ impl<'env> Context<'env> { not 'continue'." } }); - self.env.add_diag(diag); + self.add_diag(diag); None } } else { let msg = format!("Invalid {usage}. 
Unbound label '{name}"); - self.env - .add_diag(diag!(NameResolution::UnboundLabel, (loc, msg))); + self.add_diag(diag!(NameResolution::UnboundLabel, (loc, msg))); None } } @@ -1549,7 +1600,7 @@ fn make_unbound_module_member_error( name: impl std::fmt::Display, ) -> Diagnostic { let expected = expected.as_ref().unwrap_or(&ErrorKind::ModuleMember); - let same_module = context.current_module == Some(mident); + let same_module = context.current_module == mident; let (prefix, postfix) = if same_module { ("", " in current scope".to_string()) } else { @@ -1572,7 +1623,7 @@ fn make_invalid_module_member_kind_error( actual: &ResolvedModuleMember, ) -> Diagnostic { let mident = actual.mident(); - let same_module = context.current_module == Some(mident); + let same_module = context.current_module == mident; let (prefix, postfix) = if same_module { ("", " in current scope".to_string()) } else { @@ -1604,13 +1655,13 @@ fn arity_string(arity: usize) -> &'static str { //************************************************************************************************** pub fn program( - compilation_env: &mut CompilationEnv, + compilation_env: &CompilationEnv, pre_compiled_lib: Option>, prog: E::Program, ) -> N::Program { - let mut context = Context::new(compilation_env, pre_compiled_lib.clone(), &prog); + let outer_context = OuterContext::new(compilation_env, pre_compiled_lib.clone(), &prog); let E::Program { modules: emodules } = prog; - let modules = modules(&mut context, emodules); + let modules = modules(compilation_env, &outer_context, emodules); let mut inner = N::Program_ { modules }; let mut info = NamingProgramInfo::new(pre_compiled_lib, &inner); super::resolve_use_funs::program(compilation_env, &mut info, &mut inner); @@ -1618,18 +1669,19 @@ pub fn program( } fn modules( - context: &mut Context, + env: &CompilationEnv, + outer: &OuterContext, modules: UniqueMap, ) -> UniqueMap { - modules.map(|ident, mdef| module(context, ident, mdef)) + modules.map(|ident, mdef| module(env, outer, ident, mdef)) } fn module( - context: &mut Context, + env: &CompilationEnv, + outer: &OuterContext, ident: ModuleIdent, mdef: E::ModuleDefinition, ) -> N::ModuleDefinition { - context.current_module = Some(ident); let E::ModuleDefinition { loc, warning_filter, @@ -1643,8 +1695,8 @@ fn module( functions: efunctions, constants: econstants, } = mdef; - context.current_package = package_name; - context.env.add_warning_filter_scope(warning_filter.clone()); + let context = &mut Context::new(env, outer, package_name, ident); + context.push_warning_filter_scope(warning_filter.clone()); let mut use_funs = use_funs(context, euse_funs); let mut syntax_methods = N::SyntaxMethods::new(); let friends = efriends.filter_map(|mident, f| friend(context, mident, f)); @@ -1699,8 +1751,7 @@ fn module( if has_macro { mark_all_use_funs_as_used(&mut use_funs); } - context.env.pop_warning_filter_scope(); - context.current_package = None; + context.pop_warning_filter_scope(); N::ModuleDefinition { loc, warning_filter, @@ -1736,7 +1787,7 @@ fn use_funs(context: &mut Context, eufs: E::UseFuns) -> N::UseFuns { let nuf_loc = nuf.loc; if let Err((_, prev)) = methods.add(method, nuf) { let msg = format!("Duplicate 'use fun' for '{}.{}'", tn, method); - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::DuplicateItem, (nuf_loc, msg), (prev, "Previously declared here"), @@ -1774,9 +1825,7 @@ fn explicit_use_fun( } ResolvedUseFunFunction::Builtin(_) => { let msg = "Invalid 'use fun'. 
Cannot use a builtin function as a method"; - context - .env - .add_diag(diag!(Declarations::InvalidUseFun, (loc, msg))); + context.add_diag(diag!(Declarations::InvalidUseFun, (loc, msg))); None } ResolvedUseFunFunction::Unbound => { @@ -1809,7 +1858,7 @@ fn explicit_use_fun( ResolvedType::Hole => { let msg = "Invalid 'use fun'. Cannot associate a method with an inferred type"; let tmsg = "The '_' type is a placeholder for type inference"; - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidUseFun, (loc, msg), (ty_loc, tmsg) @@ -1822,7 +1871,7 @@ fn explicit_use_fun( "But '{}' was declared as a type parameter here", tp.user_specified_name ); - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidUseFun, (loc, msg,), (tloc, tmsg) @@ -1855,7 +1904,7 @@ fn check_use_fun_scope( return true; }; let current_module = context.current_module; - let Err(def_loc_opt) = use_fun_module_defines(context, use_fun_loc, current_module, rtype) + let Err(def_loc_opt) = use_fun_module_defines(context, use_fun_loc, ¤t_module, rtype) else { return true; }; @@ -1874,22 +1923,19 @@ fn check_use_fun_scope( if let Some(def_loc) = def_loc_opt { diag.add_secondary_label((def_loc, "Type defined in another module here")); } - context.env.add_diag(diag); + context.add_diag(diag); false } fn use_fun_module_defines( context: &mut Context, use_fun_loc: &Loc, - specified: Option, + specified: &ModuleIdent, rtype: &ResolvedType, ) -> Result<(), Option> { match rtype { ResolvedType::ModuleType(mtype) => { - if specified - .as_ref() - .is_some_and(|mident| mident == &mtype.mident()) - { + if specified == &mtype.mident() { Ok(()) } else { Err(Some(mtype.decl_loc())) @@ -1897,11 +1943,10 @@ fn use_fun_module_defines( } ResolvedType::BuiltinType(b_) => { let definer_opt = context.env.primitive_definer(*b_); - match (definer_opt, &specified) { - (None, _) => Err(None), - (Some(d), None) => Err(Some(d.loc)), - (Some(d), Some(s)) => { - if d == s { + match definer_opt { + None => Err(None), + Some(d) => { + if d == specified { Ok(()) } else { Err(Some(d.loc)) @@ -1910,7 +1955,7 @@ fn use_fun_module_defines( } } ResolvedType::TParam(_, _) | ResolvedType::Hole | ResolvedType::Unbound => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( *use_fun_loc, "Tried to validate use fun for invalid type" ))); @@ -1943,20 +1988,20 @@ fn mark_all_use_funs_as_used(use_funs: &mut N::UseFuns) { //************************************************************************************************** fn friend(context: &mut Context, mident: ModuleIdent, friend: E::Friend) -> Option { - let current_mident = context.current_module.as_ref().unwrap(); + let current_mident = &context.current_module; if mident.value.address != current_mident.value.address { // NOTE: in alignment with the bytecode verifier, this constraint is a policy decision // rather than a technical requirement. The compiler, VM, and bytecode verifier DO NOT // rely on the assumption that friend modules must reside within the same account address. 
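The naming/translate.rs changes above split the old all-in-one Context into an OuterContext, built once per program with the resolved module members and prelude types, and a short-lived Context created per module with its package, module identity, and a cloned top-level warning-filter scope. A small sketch of that borrowing shape follows, using toy types: ModuleIdent is reduced to a string and a counter stands in for the per-module scratch state.

    use std::collections::BTreeMap;

    // Toy stand-ins; the real OuterContext holds resolved module members and the
    // prelude of unscoped types, and Context adds per-module naming state.
    type ModuleIdent = &'static str;

    struct OuterContext {
        module_members: BTreeMap<ModuleIdent, Vec<&'static str>>,
    }

    struct Context<'outer> {
        outer: &'outer OuterContext,
        current_module: ModuleIdent, // no longer an Option: a Context always has a module
        locals_seen: usize,          // per-module scratch state, reset by construction
    }

    impl<'outer> Context<'outer> {
        fn new(outer: &'outer OuterContext, current_module: ModuleIdent) -> Self {
            Self { outer, current_module, locals_seen: 0 }
        }

        fn valid_module(&self, m: ModuleIdent) -> bool {
            self.outer.module_members.contains_key(m)
        }

        fn is_current(&self, m: ModuleIdent) -> bool {
            // Comparisons like current_module == mident no longer need Some(..).
            self.current_module == m
        }
    }

    fn main() {
        let outer = OuterContext {
            module_members: BTreeMap::from([
                ("0x2::coin", vec!["mint"]),
                ("0x2::bag", vec!["new"]),
            ]),
        };

        // One fresh Context per module, all borrowing the same immutable OuterContext.
        for module in ["0x2::coin", "0x2::bag"] {
            let ctx = Context::new(&outer, module);
            assert!(ctx.valid_module(module));
            assert!(ctx.is_current(module));
            assert!(!ctx.is_current("0x2::other"));
            println!("processed {} (locals seen: {})", module, ctx.locals_seen);
        }
    }

Because each module gets a fresh Context, current_module stops being an Option and the manual reset of per-module state at module exit disappears, as the hunks above show.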
let msg = "Cannot declare modules out of the current address as a friend"; - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidFriendDeclaration, (friend.loc, "Invalid friend declaration"), (mident.loc, msg), )); None } else if &mident == current_mident { - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidFriendDeclaration, (friend.loc, "Invalid friend declaration"), (mident.loc, "Cannot declare the module itself as a friend"), @@ -1998,7 +2043,7 @@ fn function( assert!(context.nominal_block_id == 0); assert!(context.used_fun_tparams.is_empty()); assert!(context.used_locals.is_empty()); - context.env.add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); context.local_scopes = vec![BTreeMap::new()]; context.local_count = BTreeMap::new(); context.translating_fun = true; @@ -2015,9 +2060,7 @@ fn function( if !context.used_fun_tparams.contains(&tparam.id) { let sp!(loc, n) = tparam.user_specified_name; let msg = format!("Unused type parameter '{}'.", n); - context - .env - .add_diag(diag!(UnusedItem::FunTypeParam, (loc, msg))) + context.add_diag(diag!(UnusedItem::FunTypeParam, (loc, msg))) } } } @@ -2042,7 +2085,7 @@ fn function( context.nominal_block_id = 0; context.used_fun_tparams = BTreeSet::new(); context.used_locals = BTreeSet::new(); - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); context.translating_fun = false; f } @@ -2073,14 +2116,14 @@ fn function_signature( ); let mut diag = diag!(NameResolution::InvalidMacroParameter, (mutloc, msg)); diag.add_note(ASSIGN_SYNTAX_IDENTIFIER_NOTE); - context.env.add_diag(diag); + context.add_diag(diag); mut_ = Mutability::Imm; } } if let Err((param, prev_loc)) = declared.add(param, ()) { if !is_underscore { let msg = format!("Duplicate parameter with name '{}'", param); - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::DuplicateItem, (param.loc(), msg), (prev_loc, "Previously declared here"), @@ -2129,10 +2172,10 @@ fn struct_def( type_parameters, fields, } = sdef; - context.env.add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); let type_parameters = datatype_type_parameters(context, type_parameters); let fields = struct_fields(context, fields); - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); N::StructDefinition { warning_filter, index, @@ -2187,10 +2230,10 @@ fn enum_def( type_parameters, variants, } = edef; - context.env.add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); let type_parameters = datatype_type_parameters(context, type_parameters); let variants = enum_variants(context, variants); - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); N::EnumDefinition { warning_filter, index, @@ -2259,7 +2302,7 @@ fn constant(context: &mut Context, _name: ConstantName, econstant: E::Constant) assert!(context.local_scopes.is_empty()); assert!(context.local_count.is_empty()); assert!(context.used_locals.is_empty()); - context.env.add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); context.local_scopes = vec![BTreeMap::new()]; let signature = type_(context, TypeAnnotation::ConstantSignature, esignature); let value = *exp(context, Box::new(evalue)); @@ -2267,7 +2310,7 @@ fn constant(context: &mut Context, _name: ConstantName, econstant: E::Constant) 
context.local_count = BTreeMap::new(); context.used_locals = BTreeSet::new(); context.nominal_block_id = 0; - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); N::Constant { warning_filter, index, @@ -2325,7 +2368,7 @@ fn type_parameter( context.bind_type(name.value, ResolvedType::TParam(loc, tp.clone())); if let Err((name, old_loc)) = unique_tparams.add(name, ()) { let msg = format!("Duplicate type parameter declared with name '{}'", name); - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::DuplicateItem, (loc, msg), (old_loc, "Type parameter previously defined here"), @@ -2392,7 +2435,7 @@ fn type_(context: &mut Context, case: TypeAnnotation, sp!(loc, ety_): E::Type) - if let TypeAnnotation::FunctionSignature = case { diag.add_note("Only 'macro' functions can use '_' in their signatures"); } - context.env.add_diag(diag); + context.add_diag(diag); NT::UnresolvedError } else { // replaced with a type variable during type instantiation @@ -2408,7 +2451,7 @@ fn type_(context: &mut Context, case: TypeAnnotation, sp!(loc, ety_): E::Type) - } RT::TParam(_, tp) => { if !tys.is_empty() { - context.env.add_diag(diag!( + context.add_diag(diag!( NameResolution::NamePositionMismatch, (loc, "Generic type parameters cannot take type arguments"), )); @@ -2483,7 +2526,7 @@ fn check_type_instantiation_arity String>( arity, args_len ); - context.env.add_diag(diag!(diag_code, (loc, msg))); + context.add_diag(diag!(diag_code, (loc, msg))); } while ty_args.len() > arity { @@ -2616,7 +2659,11 @@ fn exp(context: &mut Context, e: Box) -> Box { } } - EE::IfElse(eb, et, ef) => NE::IfElse(exp(context, eb), exp(context, et), exp(context, ef)), + EE::IfElse(eb, et, ef_opt) => NE::IfElse( + exp(context, eb), + exp(context, et), + ef_opt.map(|ef| exp(context, ef)), + ), // EE::Match(esubject, sp!(_aloc, arms)) if arms.is_empty() => { // exp(context, esubject); // for error effect // let msg = "Invalid 'match' form. 
'match' must have at least one arm"; @@ -2720,7 +2767,19 @@ fn exp(context: &mut Context, e: Box) -> Box { NE::Mutate(nel, ner) } - EE::Abort(es) => NE::Abort(exp(context, es)), + EE::Abort(Some(es)) => NE::Abort(exp(context, es)), + EE::Abort(None) => { + context + .env + .check_feature(context.current_package, FeatureGate::CleverAssertions, eloc); + let abort_const_expr = sp( + eloc, + N::Exp_::ErrorConstant { + line_number_loc: eloc, + }, + ); + NE::Abort(Box::new(abort_const_expr)) + } EE::Return(Some(block_name), es) => { let out_rhs = exp(context, es); context @@ -2888,7 +2947,7 @@ fn exp(context: &mut Context, e: Box) -> Box { "ICE compiler should not have parsed this form as a specification" )); diag.add_note(format!("Compiler parsed: {}", debug_display!(e))); - context.env.add_diag(diag); + context.add_diag(diag); NE::UnresolvedError } }; @@ -2917,7 +2976,7 @@ fn dotted(context: &mut Context, edot: E::ExpDotted) -> Option { modified by path operations.\n\ Path operations include 'move', 'copy', '&', '&mut', and field references", ); - context.env.add_diag(diag); + context.add_diag(diag); N::ExpDotted_::Exp(Box::new(sp(ne.loc, N::Exp_::UnresolvedError))) } _ => N::ExpDotted_::Exp(ne), @@ -3004,7 +3063,7 @@ fn check_constructor_form( } else { diag.add_note(named_note!()); } - context.env.add_diag(diag); + context.add_diag(diag); } CF::Parens if stype.field_info.is_positional() => (), CF::Parens => { @@ -3014,7 +3073,7 @@ fn check_constructor_form( (loc, &msg), (stype.decl_loc, defn_loc_error(&name)), ); - context.env.add_diag(diag); + context.add_diag(diag); } CF::Braces if stype.field_info.is_positional() => { let msg = invalid_inst_msg!("struct", POSNL_UPCASE, POSNL); @@ -3023,7 +3082,7 @@ fn check_constructor_form( (loc, &msg), (stype.decl_loc, defn_loc_error(&name)), ); - context.env.add_diag(diag); + context.add_diag(diag); } CF::Braces => (), }, @@ -3049,7 +3108,7 @@ fn check_constructor_form( } else { diag.add_note(named_note!()); } - context.env.add_diag(diag); + context.add_diag(diag); } CF::Parens if vfields.is_empty() => { let msg = invalid_inst_msg!("variant", EMPTY_UPCASE, EMPTY); @@ -3059,7 +3118,7 @@ fn check_constructor_form( (vloc, defn_loc_error(&name)), ); diag.add_note(format!("Remove '()' arguments from this {position}")); - context.env.add_diag(diag); + context.add_diag(diag); } CF::Parens if vfields.is_positional() => (), CF::Parens => { @@ -3070,7 +3129,7 @@ fn check_constructor_form( (vloc, defn_loc_error(&name)), ); diag.add_note(named_note!()); - context.env.add_diag(diag); + context.add_diag(diag); } CF::Braces if vfields.is_empty() => { let msg = invalid_inst_msg!("variant", EMPTY_UPCASE, EMPTY); @@ -3080,7 +3139,7 @@ fn check_constructor_form( (vloc, defn_loc_error(&name)), ); diag.add_note(format!("Remove '{{ }}' arguments from this {position}")); - context.env.add_diag(diag); + context.add_diag(diag); } CF::Braces if vfields.is_positional() => { let msg = invalid_inst_msg!("variant", POSNL_UPCASE, POSNL); @@ -3090,7 +3149,7 @@ fn check_constructor_form( (vloc, defn_loc_error(&name)), ); diag.add_note(posnl_note!()); - context.env.add_diag(diag); + context.add_diag(diag); } CF::Braces => (), } @@ -3209,7 +3268,7 @@ fn unique_pattern_binders( diag.add_secondary_label((*loc, "and repeated here")); } diag.add_note("A pattern variable must be unique, and must appear once in each or-pattern alternative."); - context.env.add_diag(diag); + context.add_diag(diag); } enum OrPosn { @@ -3227,7 +3286,7 @@ fn unique_pattern_binders( let mut diag = 
diag!(NameResolution::InvalidPattern, (var.loc(), primary_msg)); diag.add_secondary_label((other_loc, secondary_msg)); diag.add_note("Both sides of an or-pattern must bind the same variables."); - context.env.add_diag(diag); + context.add_diag(diag); } fn report_mismatched_or_mutability( @@ -3248,7 +3307,7 @@ fn unique_pattern_binders( diag.add_note( "Both sides of an or-pattern must bind the same variables with the same mutability.", ); - context.env.add_diag(diag); + context.add_diag(diag); } type Bindings = BTreeMap>; @@ -3410,7 +3469,7 @@ fn expand_positional_ellipsis( let entries = (0..=missing).map(|_| "_".into()).collect::>(); let info = EllipsisMatchEntries::Positional(entries); let info = ide::IDEAnnotation::EllipsisMatchEntries(Box::new(info)); - context.env.add_ide_annotation(eloc, info); + context.add_ide_annotation(eloc, info); } result } @@ -3447,7 +3506,7 @@ fn expand_named_ellipsis( let entries = fields.iter().map(|field| field.value()).collect::>(); let info = EllipsisMatchEntries::Named(entries); let info = ide::IDEAnnotation::EllipsisMatchEntries(Box::new(info)); - context.env.add_ide_annotation(ellipsis_loc, info); + context.add_ide_annotation(ellipsis_loc, info); } let start_idx = args.len(); @@ -3549,7 +3608,7 @@ fn match_pattern(context: &mut Context, in_pat: Box) -> Box { if etys_opt.is_some() { - context.env.add_diag(diag!( + context.add_diag(diag!( NameResolution::TooManyTypeArguments, (ploc, "Constants in patterns do not take type arguments") )); @@ -3659,9 +3718,7 @@ fn lvalue( ((var.loc, msg), (prev_loc, "Previously assigned here")) } }; - context - .env - .add_diag(diag!(Declarations::DuplicateItem, primary, secondary)); + context.add_diag(diag!(Declarations::DuplicateItem, primary, secondary)); } if v.is_syntax_identifier() { debug_assert!( @@ -3675,7 +3732,7 @@ fn lvalue( ); let mut diag = diag!(TypeSafety::CannotExpandMacro, (loc, msg)); diag.add_note(ASSIGN_SYNTAX_IDENTIFIER_NOTE); - context.env.add_diag(diag); + context.add_diag(diag); return None; } let nv = match case { @@ -3721,7 +3778,7 @@ fn lvalue( stype } Some(ResolvedConstructor::Variant(variant)) => { - context.env.add_diag(diag!( + context.add_diag(diag!( NameResolution::NamePositionMismatch, (tn.loc, format!("Invalid {}. Expected a struct", msg)), ( @@ -3791,7 +3848,7 @@ fn lvalue( "ICE compiler should not have parsed this form as a specification" )); diag.add_note(format!("Compiler parsed: {}", debug_display!(e))); - context.env.add_diag(diag); + context.add_diag(diag); NL::Ignore } }; @@ -3804,9 +3861,7 @@ fn check_mut_underscore(context: &mut Context, mut_: Option) { return; }; let msg = "Invalid 'mut' declaration. 
'mut' is applied to variables and cannot be applied to the '_' pattern"; - context - .env - .add_diag(diag!(NameResolution::InvalidMut, (loc, msg))); + context.add_diag(diag!(NameResolution::InvalidMut, (loc, msg))); } fn bind_list(context: &mut Context, ls: E::LValueList) -> Option { @@ -3903,9 +3958,7 @@ fn resolve_call( match tyargs_opt.as_deref() { Some([ty]) => B::Freeze(Some(ty.clone())), Some(_tys) => { - context - .env - .add_diag(ice!((call_loc, "Builtin tyarg arity failure"))); + context.add_diag(ice!((call_loc, "Builtin tyarg arity failure"))); return N::Exp_::UnresolvedError; } None => B::Freeze(None), @@ -3926,7 +3979,7 @@ fn resolve_call( let mut diag = diag!(Uncategorized::DeprecatedWillBeRemoved, (call_loc, dep_msg),); diag.add_note(help_msg); - context.env.add_diag(diag); + context.add_diag(diag); } exp_types_opt_with_arity_check( context, @@ -4009,7 +4062,7 @@ fn resolve_call( check_is_not_macro(context, is_macro, &var.value.name); let tyargs_opt = types_opt(context, TypeAnnotation::Expression, in_tyargs_opt); if tyargs_opt.is_some() { - context.env.add_diag(diag!( + context.add_diag(diag!( NameResolution::TooManyTypeArguments, ( subject_loc, @@ -4030,7 +4083,7 @@ fn resolve_call( ); let mut diag = diag!(TypeSafety::InvalidCallTarget, (var.loc, msg)); diag.add_note(note); - context.env.add_diag(diag); + context.add_diag(diag); N::Exp_::UnresolvedError } else if var.value.id != 0 { let msg = format!( @@ -4038,9 +4091,7 @@ fn resolve_call( Only lambda-typed syntax parameters may be invoked", var.value.name ); - context - .env - .add_diag(diag!(TypeSafety::InvalidCallTarget, (var.loc, msg))); + context.add_diag(diag!(TypeSafety::InvalidCallTarget, (var.loc, msg))); N::Exp_::UnresolvedError } else { N::Exp_::VarCall(sp(subject_loc, var.value), args) @@ -4061,9 +4112,7 @@ fn check_is_not_macro(context: &mut Context, is_macro: Option, name: &str) macro", name ); - context - .env - .add_diag(diag!(TypeSafety::InvalidCallTarget, (mloc, msg))); + context.add_diag(diag!(TypeSafety::InvalidCallTarget, (mloc, msg))); } } @@ -4073,9 +4122,7 @@ fn report_invalid_macro(context: &mut Context, is_macro: Option, kind: &str "Unexpected macro invocation. {} cannot be invoked as macros", kind ); - context - .env - .add_diag(diag!(NameResolution::PositionalCallMismatch, (mloc, msg))); + context.add_diag(diag!(NameResolution::PositionalCallMismatch, (mloc, msg))); } } @@ -4100,7 +4147,7 @@ fn exp_types_opt_with_arity_check( }; let msg = fmsg(); let targs_msg = format!("Expected {} type argument(s) but got {}", arity, args_len); - context.env.add_diag(diag!( + context.add_diag(diag!( diag_code, (msg_loc, msg), (tyarg_error_loc, targs_msg) @@ -4223,10 +4270,12 @@ fn remove_unused_bindings_exp( | N::Exp_::Loop(_, e) | N::Exp_::Give(_, _, e) | N::Exp_::Annotate(e, _) => remove_unused_bindings_exp(context, used, e), - N::Exp_::IfElse(econd, et, ef) => { + N::Exp_::IfElse(econd, et, ef_opt) => { remove_unused_bindings_exp(context, used, econd); remove_unused_bindings_exp(context, used, et); - remove_unused_bindings_exp(context, used, ef); + if let Some(ef) = ef_opt { + remove_unused_bindings_exp(context, used, ef); + } } N::Exp_::Match(esubject, arms) => { remove_unused_bindings_exp(context, used, esubject); @@ -4372,7 +4421,5 @@ fn report_unused_local(context: &mut Context, sp!(loc, unused_): &N::Var) { let msg = format!( "Unused {kind} '{name}'. 
Consider removing or prefixing with an underscore: '_{name}'", ); - context - .env - .add_diag(diag!(UnusedItem::Variable, (*loc, msg))); + context.add_diag(diag!(UnusedItem::Variable, (*loc, msg))); } diff --git a/external-crates/move/crates/move-compiler/src/parser/ast.rs b/external-crates/move/crates/move-compiler/src/parser/ast.rs index ee3cc68d11b61..c04017cab5d6b 100644 --- a/external-crates/move/crates/move-compiler/src/parser/ast.rs +++ b/external-crates/move/crates/move-compiler/src/parser/ast.rs @@ -609,7 +609,7 @@ pub enum Exp_ { Assign(Box, Box), // abort e - Abort(Box), + Abort(Option>), // return e Return(Option, Option>), // break @@ -2079,8 +2079,11 @@ impl AstDebug for Exp_ { rhs.ast_debug(w); } E::Abort(e) => { - w.write("abort "); - e.ast_debug(w); + w.write("abort"); + if let Some(e) = e { + w.write(" "); + e.ast_debug(w); + } } E::Return(name, e) => { w.write("return"); diff --git a/external-crates/move/crates/move-compiler/src/parser/lexer.rs b/external-crates/move/crates/move-compiler/src/parser/lexer.rs index 610d978e733fb..18271da49805d 100644 --- a/external-crates/move/crates/move-compiler/src/parser/lexer.rs +++ b/external-crates/move/crates/move-compiler/src/parser/lexer.rs @@ -482,10 +482,7 @@ impl<'input> Lexer<'input> { // At the end of parsing, checks whether there are any unmatched documentation comments, // producing errors if so. Otherwise returns a map from file position to associated // documentation. - pub fn check_and_get_doc_comments( - &mut self, - env: &mut CompilationEnv, - ) -> MatchedFileCommentMap { + pub fn check_and_get_doc_comments(&mut self, env: &CompilationEnv) -> MatchedFileCommentMap { let msg = "Documentation comment cannot be matched to a language item"; let diags = self .doc_comments @@ -495,7 +492,8 @@ impl<'input> Lexer<'input> { diag!(Syntax::InvalidDocComment, (loc, msg)) }) .collect(); - env.add_diags(diags); + let warning_filters = env.top_level_warning_filter_scope(); + env.add_diags(warning_filters, diags); std::mem::take(&mut self.matched_doc_comments) } diff --git a/external-crates/move/crates/move-compiler/src/parser/mod.rs b/external-crates/move/crates/move-compiler/src/parser/mod.rs index 5b2e299d813e2..724e7e8012b9f 100644 --- a/external-crates/move/crates/move-compiler/src/parser/mod.rs +++ b/external-crates/move/crates/move-compiler/src/parser/mod.rs @@ -25,7 +25,7 @@ use vfs::VfsPath; /// Parses program's targets and dependencies, both of which are read from different virtual file /// systems (vfs and deps_out_vfs, respectively). 
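The lexer and parser hunks around here all follow from the same CompilationEnv rework visible later in this diff: diagnostics are reported through a shared `&CompilationEnv`, with the warning-filter scope passed explicitly (`env.add_diags(warning_filters, ds)` after fetching `top_level_warning_filter_scope()`), and the diagnostics store sits behind a lock. A minimal sketch of that shape, using simplified stand-in types (`Diagnostic` and `WarningFiltersScope` here are illustrative placeholders, not the compiler's real definitions):

use std::sync::RwLock;

// Simplified stand-ins; the real types carry codes, locations, severities,
// and nested filter sets.
#[derive(Debug)]
struct Diagnostic {
    code: &'static str,
    msg: String,
}

#[derive(Default)]
struct WarningFiltersScope {
    filtered_codes: Vec<&'static str>,
}

impl WarningFiltersScope {
    fn is_filtered(&self, diag: &Diagnostic) -> bool {
        self.filtered_codes.contains(&diag.code)
    }
}

// Diagnostics live behind a RwLock, so passes that only hold a shared
// `&CompilationEnv` can still report; the filter scope is supplied by the caller.
struct CompilationEnv {
    top_level_filters: WarningFiltersScope,
    diags: RwLock<Vec<Diagnostic>>,
}

impl CompilationEnv {
    fn top_level_warning_filter_scope(&self) -> &WarningFiltersScope {
        &self.top_level_filters
    }
    fn add_diag(&self, filters: &WarningFiltersScope, diag: Diagnostic) {
        if !filters.is_filtered(&diag) {
            self.diags.write().unwrap().push(diag);
        }
    }
    fn add_diags(&self, filters: &WarningFiltersScope, diags: Vec<Diagnostic>) {
        for d in diags {
            self.add_diag(filters, d);
        }
    }
}

fn main() {
    let env = CompilationEnv {
        top_level_filters: WarningFiltersScope::default(),
        diags: RwLock::new(Vec::new()),
    };
    // Shape of the call sites in the diff: fetch the top-level scope, then
    // report through a shared reference.
    let filters = env.top_level_warning_filter_scope();
    env.add_diags(
        filters,
        vec![Diagnostic {
            code: "Syntax::InvalidDocComment",
            msg: "Documentation comment cannot be matched to a language item".into(),
        }],
    );
    assert_eq!(env.diags.read().unwrap().len(), 1);
}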
pub(crate) fn parse_program( - compilation_env: &mut CompilationEnv, + compilation_env: &CompilationEnv, named_address_maps: NamedAddressMaps, mut targets: Vec, mut deps: Vec, @@ -113,7 +113,7 @@ fn ensure_targets_deps_dont_intersect( fn parse_file( path: &VfsPath, - compilation_env: &mut CompilationEnv, + compilation_env: &CompilationEnv, files: &mut MappedFiles, package: Option, ) -> anyhow::Result<( @@ -126,8 +126,9 @@ fn parse_file( let file_hash = FileHash::new(&source_buffer); let fname = Symbol::from(path.as_str()); let source_str = Arc::from(source_buffer); + let warning_filters = compilation_env.top_level_warning_filter_scope(); if let Err(ds) = verify_string(file_hash, &source_str) { - compilation_env.add_diags(ds); + compilation_env.add_diags(warning_filters, ds); files.add(file_hash, fname, source_str); return Ok((vec![], MatchedFileCommentMap::new(), file_hash)); } @@ -135,7 +136,7 @@ fn parse_file( { Ok(defs_and_comments) => defs_and_comments, Err(ds) => { - compilation_env.add_diags(ds); + compilation_env.add_diags(warning_filters, ds); (vec![], MatchedFileCommentMap::new()) } }; diff --git a/external-crates/move/crates/move-compiler/src/parser/syntax.rs b/external-crates/move/crates/move-compiler/src/parser/syntax.rs index 7fdf9221d598a..a7d296fd19a01 100644 --- a/external-crates/move/crates/move-compiler/src/parser/syntax.rs +++ b/external-crates/move/crates/move-compiler/src/parser/syntax.rs @@ -22,14 +22,14 @@ use move_symbol_pool::{symbol, Symbol}; struct Context<'env, 'lexer, 'input> { current_package: Option, - env: &'env mut CompilationEnv, + env: &'env CompilationEnv, tokens: &'lexer mut Lexer<'input>, stop_set: TokenSet, } impl<'env, 'lexer, 'input> Context<'env, 'lexer, 'input> { fn new( - env: &'env mut CompilationEnv, + env: &'env CompilationEnv, tokens: &'lexer mut Lexer<'input>, package_name: Option, ) -> Self { @@ -71,8 +71,9 @@ impl<'env, 'lexer, 'input> Context<'env, 'lexer, 'input> { } } - fn add_diag(&mut self, diag: Diagnostic) { - self.env.add_diag(diag); + fn add_diag(&self, diag: Diagnostic) { + let warning_filters = self.env.top_level_warning_filter_scope(); + self.env.add_diag(warning_filters, diag); } } @@ -758,15 +759,13 @@ fn parse_name_access_chain_<'a, F: Fn() -> &'a str>( "Macro invocation are disallowed here. 
Expected {}", item_description() ); - context - .env - .add_diag(diag!(Syntax::InvalidName, (*loc, msg))); + context.add_diag(diag!(Syntax::InvalidName, (*loc, msg))); is_macro = None; } } if let Some(sp!(ty_loc, _)) = tys { if !tyargs_allowed { - context.env.add_diag(diag!( + context.add_diag(diag!( Syntax::InvalidName, ( ty_loc, @@ -845,7 +844,7 @@ fn parse_name_access_chain_<'a, F: Fn() -> &'a str>( parse_macro_opt_and_tyargs_opt(context, tyargs_whitespace_allowed, name.loc); if let Some(loc) = &is_macro { if !macros_allowed { - context.env.add_diag(diag!( + context.add_diag(diag!( Syntax::InvalidName, ( *loc, @@ -857,7 +856,7 @@ fn parse_name_access_chain_<'a, F: Fn() -> &'a str>( } if let Some(sp!(ty_loc, _)) = tys { if !tyargs_allowed { - context.env.add_diag(diag!( + context.add_diag(diag!( Syntax::InvalidName, ( ty_loc, @@ -870,7 +869,7 @@ fn parse_name_access_chain_<'a, F: Fn() -> &'a str>( path.push_path_entry(name, tys, is_macro) .into_iter() - .for_each(|diag| context.env.add_diag(diag)); + .for_each(|diag| context.add_diag(diag)); } Ok(NameAccessChain_::Path(path)) } @@ -1912,8 +1911,13 @@ fn parse_control_exp(context: &mut Context) -> Result<(Exp, bool), Box { context.tokens.advance()?; - let (e, ends_in_block) = parse_exp_or_sequence(context)?; - (Exp_::Abort(Box::new(e)), ends_in_block) + let (e, ends_in_block) = if !at_start_of_exp(context) { + (None, false) + } else { + let (e, ends_in_block) = parse_exp_or_sequence(context)?; + (Some(Box::new(e)), ends_in_block) + }; + (Exp_::Abort(e), ends_in_block) } Tok::Break => { context.tokens.advance()?; @@ -2124,7 +2128,7 @@ fn parse_match_pattern(context: &mut Context) -> Result { if context.at_stop_set() { - context - .env - .add_diag(*unexpected_token_error(context.tokens, "a type name")); + context.add_diag(*unexpected_token_error(context.tokens, "a type name")); Type_::UnresolvedError } else { let tn = if whitespace_sensitive_ty_args { @@ -3502,7 +3504,7 @@ fn check_enum_visibility(visibility: Option, context: &mut Context) let note = "Visibility annotations are required on enum declarations."; let mut err = diag!(Syntax::InvalidModifier, (loc, msg)); err.add_note(note); - context.env.add_diag(err); + context.add_diag(err); } } } @@ -3952,9 +3954,7 @@ fn parse_address_block( addr_name.loc.start() as usize, context.tokens.current_token_loc().end() as usize, ); - context - .env - .add_diag(diag!(Migration::AddressRemove, (loc, "address decl"))); + context.add_diag(diag!(Migration::AddressRemove, (loc, "address decl"))); } context.tokens.advance()?; let mut modules = vec![]; @@ -3969,7 +3969,7 @@ fn parse_address_block( let (module, next_mod_attributes) = parse_module(attributes, context)?; if in_migration_mode { - context.env.add_diag(diag!( + context.add_diag(diag!( Migration::AddressAdd, ( module.name.loc(), @@ -3989,7 +3989,7 @@ fn parse_address_block( } for module in &modules { if matches!(module.definition_mode, ModuleDefinitionMode::Semicolon) { - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidModule, ( module.name.loc(), @@ -4001,9 +4001,7 @@ fn parse_address_block( if in_migration_mode { let loc = context.tokens.current_token_loc(); - context - .env - .add_diag(diag!(Migration::AddressRemove, (loc, "close lbrace"))); + context.add_diag(diag!(Migration::AddressRemove, (loc, "close lbrace"))); } consume_token(context.tokens, context.tokens.peek())?; @@ -4023,7 +4021,7 @@ fn parse_address_block( format!("Replace with '{}::{}'", addr, module.name), )); } - context.env.add_diag(diag); + 
context.add_diag(diag); } Ok(AddressDefinition { @@ -4054,7 +4052,7 @@ fn parse_friend_decl( || "a friend declaration", )?; if friend.value.is_macro().is_some() || friend.value.has_tyargs() { - context.env.add_diag(diag!( + context.add_diag(diag!( Syntax::InvalidName, (friend.loc, "Invalid 'friend' name") )) @@ -4669,7 +4667,7 @@ fn parse_file_def( "Either move each 'module' label and definitions into its own file or \ define each as 'module { contents }'", ); - context.env.add_diag(diag); + context.add_diag(diag); } } defs.push(Definition::Module(module)); @@ -4692,7 +4690,7 @@ fn parse_file_def( /// result as either a pair of FileDefinition and doc comments or some Diagnostics. The `file` name /// is used to identify source locations in error messages. pub fn parse_file_string( - env: &mut CompilationEnv, + env: &CompilationEnv, file_hash: FileHash, input: &str, package: Option, diff --git a/external-crates/move/crates/move-compiler/src/parser/verification_attribute_filter.rs b/external-crates/move/crates/move-compiler/src/parser/verification_attribute_filter.rs index 46c94e75991dd..4e23c1a5003b8 100644 --- a/external-crates/move/crates/move-compiler/src/parser/verification_attribute_filter.rs +++ b/external-crates/move/crates/move-compiler/src/parser/verification_attribute_filter.rs @@ -15,13 +15,13 @@ use crate::{ }; struct Context<'env> { - env: &'env mut CompilationEnv, + env: &'env CompilationEnv, is_source_def: bool, current_package: Option, } impl<'env> Context<'env> { - fn new(env: &'env mut CompilationEnv) -> Self { + fn new(env: &'env CompilationEnv) -> Self { Self { env, is_source_def: false, @@ -56,6 +56,7 @@ impl FilterContext for Context<'_> { // expansion // Ideally we would just have a warning filter scope here // (but again, need expansion for that) + let top_warning_filter_scope = self.env.top_level_warning_filter_scope(); let silence_warning = !self.is_source_def || self.env.package_config(self.current_package).is_dependency; if !silence_warning { @@ -64,8 +65,10 @@ impl FilterContext for Context<'_> { "The '{}' attribute has been deprecated along with specification blocks", VerificationAttribute::VERIFY_ONLY ); - self.env - .add_diag(diag!(Uncategorized::DeprecatedWillBeRemoved, (*loc, msg))); + self.env.add_diag( + top_warning_filter_scope, + diag!(Uncategorized::DeprecatedWillBeRemoved, (*loc, msg)), + ); } } should_remove @@ -79,7 +82,7 @@ impl FilterContext for Context<'_> { // This filters out all AST elements annotated with verify-only annotated from `prog` // if the `verify` flag in `compilation_env` is not set. If the `verify` flag is set, // no filtering is performed. 
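The `parse_control_exp` hunk a little earlier makes the `abort` argument optional (`Exp_::Abort` now holds an `Option<Box<Exp>>`), parsing a trailing expression only when the next token can begin one. A small self-contained sketch of that parsing pattern, with toy token and expression types in place of the real lexer and grammar:

// Toy token and expression types; this only illustrates the control flow.
#[derive(Debug, Clone, PartialEq)]
enum Tok {
    Abort,
    Num(u64),
    Semi,
}

#[derive(Debug, PartialEq)]
enum Exp {
    Abort(Option<Box<Exp>>),
    Num(u64),
}

// Mirrors the diff's check: after consuming `abort`, only parse a trailing
// expression if the next token can actually start one.
fn at_start_of_exp(toks: &[Tok]) -> bool {
    matches!(toks.first(), Some(Tok::Num(_)) | Some(Tok::Abort))
}

fn parse_exp(toks: &[Tok]) -> Option<(Exp, &[Tok])> {
    match toks.first()? {
        Tok::Abort => {
            let rest = &toks[1..];
            if at_start_of_exp(rest) {
                let (arg, rest) = parse_exp(rest)?;
                Some((Exp::Abort(Some(Box::new(arg))), rest))
            } else {
                // No argument: a bare `abort` before `;`, `}`, etc.
                Some((Exp::Abort(None), rest))
            }
        }
        Tok::Num(n) => Some((Exp::Num(*n), &toks[1..])),
        Tok::Semi => None,
    }
}

fn main() {
    // `abort 42;` parses with an argument...
    let toks = [Tok::Abort, Tok::Num(42), Tok::Semi];
    let (e, rest) = parse_exp(&toks).unwrap();
    assert_eq!(e, Exp::Abort(Some(Box::new(Exp::Num(42)))));
    assert_eq!(rest, &toks[2..]);

    // ...while a bare `abort;` now produces `Abort(None)`.
    let bare = [Tok::Abort, Tok::Semi];
    let (e, _) = parse_exp(&bare).unwrap();
    assert_eq!(e, Exp::Abort(None));
}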
-pub fn program(compilation_env: &mut CompilationEnv, prog: P::Program) -> P::Program { +pub fn program(compilation_env: &CompilationEnv, prog: P::Program) -> P::Program { let mut context = Context::new(compilation_env); filter_program(&mut context, prog) } diff --git a/external-crates/move/crates/move-compiler/src/shared/ide.rs b/external-crates/move/crates/move-compiler/src/shared/ide.rs index 6816278d34ed8..895f57da06985 100644 --- a/external-crates/move/crates/move-compiler/src/shared/ide.rs +++ b/external-crates/move/crates/move-compiler/src/shared/ide.rs @@ -16,7 +16,7 @@ use crate::{ unit_test::filter_test_members::UNIT_TEST_POISON_FUN_NAME, }; -use move_command_line_common::address::NumericalAddress; +use move_core_types::parsing::address::NumericalAddress; use move_ir_types::location::Loc; use move_symbol_pool::Symbol; diff --git a/external-crates/move/crates/move-compiler/src/shared/matching.rs b/external-crates/move/crates/move-compiler/src/shared/matching.rs index 26ca069a61b9b..9beb38564ab7b 100644 --- a/external-crates/move/crates/move-compiler/src/shared/matching.rs +++ b/external-crates/move/crates/move-compiler/src/shared/matching.rs @@ -67,7 +67,7 @@ pub struct ArmResult { /// A shared match context trait for use with counterexample generation in Typing and match /// compilation in HLIR lowering. pub trait MatchContext { - fn env(&mut self) -> &mut CompilationEnv; + fn env(&mut self) -> &CompilationEnv; fn env_ref(&self) -> &CompilationEnv; fn new_match_var(&mut self, name: String, loc: Loc) -> N::Var; fn program_info(&self) -> &ProgramInfo; @@ -481,7 +481,7 @@ impl PatternMatrix { // Make a match pattern that only holds guard binders let guard_binders = guard_binders.union_with(&const_binders, |k, _, x| { let msg = "Match compilation made a binder for this during const compilation"; - context.env().add_diag(ice!((k.loc, msg))); + context.env().add_error_diag(ice!((k.loc, msg))); *x }); let pat = apply_pattern_subst(pat, &guard_binders); diff --git a/external-crates/move/crates/move-compiler/src/shared/mod.rs b/external-crates/move/crates/move-compiler/src/shared/mod.rs index a242bcb7565ef..0c0147b8d08de 100644 --- a/external-crates/move/crates/move-compiler/src/shared/mod.rs +++ b/external-crates/move/crates/move-compiler/src/shared/mod.rs @@ -9,8 +9,12 @@ use crate::{ }, command_line as cli, diagnostics::{ - codes::{Category, Declarations, DiagnosticsID, Severity, WarningFilter}, - Diagnostic, Diagnostics, DiagnosticsFormat, WarningFilters, + codes::{DiagnosticsID, Severity}, + warning_filters::{ + FilterName, FilterPrefix, WarningFilter, WarningFilters, WarningFiltersScope, + FILTER_ALL, + }, + Diagnostic, Diagnostics, DiagnosticsFormat, }, editions::{check_feature_or_error, feature_edition_error_msg, Edition, FeatureGate, Flavor}, expansion::ast as E, @@ -33,14 +37,12 @@ use move_ir_types::location::*; use move_symbol_pool::Symbol; use petgraph::{algo::astar as petgraph_astar, graphmap::DiGraphMap}; use std::{ - cell::RefCell, collections::{BTreeMap, BTreeSet}, fmt, hash::Hash, - rc::Rc, sync::{ atomic::{AtomicUsize, Ordering as AtomicOrdering}, - Arc, + Arc, Mutex, OnceLock, RwLock, }, }; use vfs::{VfsError, VfsPath}; @@ -62,7 +64,7 @@ pub use ast_debug::AstDebug; // Numbers //************************************************************************************************** -pub use move_command_line_common::parser::{ +pub use move_core_types::parsing::parser::{ parse_address_number as parse_address, parse_u128, parse_u16, parse_u256, parse_u32, parse_u64, parse_u8, 
NumberFormat, }; @@ -71,7 +73,7 @@ pub use move_command_line_common::parser::{ // Address //************************************************************************************************** -pub use move_command_line_common::address::NumericalAddress; +pub use move_core_types::parsing::address::NumericalAddress; pub fn parse_named_address(s: &str) -> anyhow::Result<(String, NumericalAddress)> { let before_after = s.split('=').collect::>(); @@ -169,28 +171,6 @@ pub fn shortest_cycle<'a, T: Ord + Hash>( // Compilation Env //************************************************************************************************** -pub const FILTER_ALL: &str = "all"; -pub const FILTER_UNUSED: &str = "unused"; -pub const FILTER_MISSING_PHANTOM: &str = "missing_phantom"; -pub const FILTER_UNUSED_USE: &str = "unused_use"; -pub const FILTER_UNUSED_VARIABLE: &str = "unused_variable"; -pub const FILTER_UNUSED_ASSIGNMENT: &str = "unused_assignment"; -pub const FILTER_UNUSED_TRAILING_SEMI: &str = "unused_trailing_semi"; -pub const FILTER_UNUSED_ATTRIBUTE: &str = "unused_attribute"; -pub const FILTER_UNUSED_TYPE_PARAMETER: &str = "unused_type_parameter"; -pub const FILTER_UNUSED_FUNCTION: &str = "unused_function"; -pub const FILTER_UNUSED_STRUCT_FIELD: &str = "unused_field"; -pub const FILTER_UNUSED_CONST: &str = "unused_const"; -pub const FILTER_DEAD_CODE: &str = "dead_code"; -pub const FILTER_UNUSED_LET_MUT: &str = "unused_let_mut"; -pub const FILTER_UNUSED_MUT_REF: &str = "unused_mut_ref"; -pub const FILTER_UNUSED_MUT_PARAM: &str = "unused_mut_parameter"; -pub const FILTER_IMPLICIT_CONST_COPY: &str = "implicit_const_copy"; -pub const FILTER_DUPLICATE_ALIAS: &str = "duplicate_alias"; -pub const FILTER_DEPRECATED: &str = "deprecated_usage"; -pub const FILTER_IDE_PATH_AUTOCOMPLETE: &str = "ide_path_autocomplete"; -pub const FILTER_IDE_DOT_AUTOCOMPLETE: &str = "ide_dot_autocomplete"; - pub type NamedAddressMap = BTreeMap; #[derive(Clone, Copy, Debug, Eq, PartialEq)] @@ -232,17 +212,11 @@ pub struct PackagePaths = Symbol, NamedAddress: Into pub named_address_map: BTreeMap, } -/// None for the default 'allow'. -/// Some(prefix) for a custom set of warnings, e.g. 'allow(lint(_))'. -pub type FilterPrefix = Option; -pub type FilterName = Symbol; - pub struct CompilationEnv { flags: Flags, - // filters warnings when added. - warning_filter: Vec, - diags: Diagnostics, - visitors: Rc, + top_level_warning_filter_scope: &'static WarningFiltersScope, + diags: RwLock, + visitors: Visitors, package_configs: BTreeMap, /// Config for any package not found in `package_configs`, or for inputs without a package. default_config: PackageConfig, @@ -250,27 +224,12 @@ pub struct CompilationEnv { known_filters: BTreeMap>>, /// Maps a diagnostics ID to a known filter name. known_filter_names: BTreeMap, - prim_definers: - BTreeMap, + prim_definers: OnceLock>, // TODO(tzakian): Remove the global counter and use this counter instead // pub counter: u64, mapped_files: MappedFiles, save_hooks: Vec, - pub ide_information: IDEInfo, -} - -macro_rules! 
known_code_filter { - ($name:ident, $category:ident::$code:ident) => { - ( - Symbol::from($name), - BTreeSet::from([WarningFilter::Code { - prefix: None, - category: Category::$category as u8, - code: $category::$code as u8, - name: Some($name), - }]), - ) - }; + ide_information: RwLock, } impl CompilationEnv { @@ -278,66 +237,18 @@ impl CompilationEnv { flags: Flags, mut visitors: Vec, save_hooks: Vec, + warning_filters: Option, package_configs: BTreeMap, default_config: Option, ) -> Self { - use crate::diagnostics::codes::{TypeSafety, UnusedItem, IDE}; visitors.extend([ sui_mode::id_leak::IDLeakVerifier.visitor(), sui_mode::typing::SuiTypeChecks.visitor(), ]); - let mut known_filters_: BTreeMap> = BTreeMap::from([ - ( - FILTER_ALL.into(), - BTreeSet::from([WarningFilter::All(None)]), - ), - ( - FILTER_UNUSED.into(), - BTreeSet::from([WarningFilter::Category { - prefix: None, - category: Category::UnusedItem as u8, - name: Some(FILTER_UNUSED), - }]), - ), - known_code_filter!(FILTER_MISSING_PHANTOM, Declarations::InvalidNonPhantomUse), - known_code_filter!(FILTER_UNUSED_USE, UnusedItem::Alias), - known_code_filter!(FILTER_UNUSED_VARIABLE, UnusedItem::Variable), - known_code_filter!(FILTER_UNUSED_ASSIGNMENT, UnusedItem::Assignment), - known_code_filter!(FILTER_UNUSED_TRAILING_SEMI, UnusedItem::TrailingSemi), - known_code_filter!(FILTER_UNUSED_ATTRIBUTE, UnusedItem::Attribute), - known_code_filter!(FILTER_UNUSED_FUNCTION, UnusedItem::Function), - known_code_filter!(FILTER_UNUSED_STRUCT_FIELD, UnusedItem::StructField), - ( - FILTER_UNUSED_TYPE_PARAMETER.into(), - BTreeSet::from([ - WarningFilter::Code { - prefix: None, - category: Category::UnusedItem as u8, - code: UnusedItem::StructTypeParam as u8, - name: Some(FILTER_UNUSED_TYPE_PARAMETER), - }, - WarningFilter::Code { - prefix: None, - category: Category::UnusedItem as u8, - code: UnusedItem::FunTypeParam as u8, - name: Some(FILTER_UNUSED_TYPE_PARAMETER), - }, - ]), - ), - known_code_filter!(FILTER_UNUSED_CONST, UnusedItem::Constant), - known_code_filter!(FILTER_DEAD_CODE, UnusedItem::DeadCode), - known_code_filter!(FILTER_UNUSED_LET_MUT, UnusedItem::MutModifier), - known_code_filter!(FILTER_UNUSED_MUT_REF, UnusedItem::MutReference), - known_code_filter!(FILTER_UNUSED_MUT_PARAM, UnusedItem::MutParam), - known_code_filter!(FILTER_IMPLICIT_CONST_COPY, TypeSafety::ImplicitConstantCopy), - known_code_filter!(FILTER_DUPLICATE_ALIAS, Declarations::DuplicateAlias), - known_code_filter!(FILTER_DEPRECATED, TypeSafety::DeprecatedUsage), - ]); + let mut known_filters_: BTreeMap> = + WarningFilter::compiler_known_filters(); if flags.ide_mode() { - known_filters_.extend([ - known_code_filter!(FILTER_IDE_PATH_AUTOCOMPLETE, IDE::PathAutocomplete), - known_code_filter!(FILTER_IDE_DOT_AUTOCOMPLETE, IDE::DotAutocomplete), - ]); + known_filters_.extend(WarningFilter::ide_known_filters()); } let known_filters: BTreeMap>> = BTreeMap::from([(None, known_filters_)]); @@ -363,30 +274,32 @@ impl CompilationEnv { }) .collect(); - let warning_filter = if flags.silence_warnings() { + let top_level_warning_filter = if flags.silence_warnings() { let mut f = WarningFilters::new_for_source(); f.add(WarningFilter::All(None)); - vec![f] + Some(f) } else { - vec![] + warning_filters }; + let top_level_warning_filter_scope = + Box::leak(Box::new(WarningFiltersScope::new(top_level_warning_filter))); let mut diags = Diagnostics::new(); if flags.json_errors() { diags.set_format(DiagnosticsFormat::JSON); } Self { flags, - warning_filter, - diags, - visitors: 
Rc::new(Visitors::new(visitors)), + top_level_warning_filter_scope, + diags: RwLock::new(diags), + visitors: Visitors::new(visitors), package_configs, default_config: default_config.unwrap_or_default(), known_filters, known_filter_names, - prim_definers: BTreeMap::new(), + prim_definers: OnceLock::new(), mapped_files: MappedFiles::empty(), save_hooks, - ide_information: IDEInfo::new(), + ide_information: RwLock::new(IDEInfo::new()), } } @@ -403,10 +316,16 @@ impl CompilationEnv { &self.mapped_files } - pub fn add_diag(&mut self, mut diag: Diagnostic) { + pub fn top_level_warning_filter_scope(&self) -> &'static WarningFiltersScope { + self.top_level_warning_filter_scope + } + + pub fn add_diag(&self, warning_filters: &WarningFiltersScope, mut diag: Diagnostic) { if diag.info().severity() <= Severity::NonblockingError && self .diags + .read() + .unwrap() .any_syntax_error_with_primary_loc(diag.primary_loc()) { // do not report multiple diags for the same location (unless they are blocking) to @@ -417,7 +336,7 @@ impl CompilationEnv { return; } - if !self.is_filtered(&diag) { + if !warning_filters.is_filtered(&diag) { // add help to suppress warning, if applicable // TODO do we want a centralized place for tips like this? if diag.info().severity() == Severity::Warning { @@ -434,21 +353,34 @@ impl CompilationEnv { diag = diag.set_severity(Severity::NonblockingError) } } - self.diags.add(diag) - } else if !self.filter_for_dependency() { + self.diags.write().unwrap().add(diag) + } else if !warning_filters.is_filtered_for_dependency() { // unwrap above is safe as the filter has been used (thus it must exist) - self.diags.add_source_filtered(diag) + self.diags.write().unwrap().add_source_filtered(diag) } } - pub fn add_diags(&mut self, diags: Diagnostics) { + pub fn add_diags(&self, warning_filters: &WarningFiltersScope, diags: Diagnostics) { for diag in diags.into_vec() { - self.add_diag(diag) + self.add_diag(warning_filters, diag) + } + } + + /// Aborts if the diagnostic is a warning + pub fn add_error_diag(&self, diag: Diagnostic) { + assert!(diag.info().severity() > Severity::Warning); + self.add_diag(WarningFiltersScope::EMPTY, diag) + } + + /// Aborts if any diagnostic is a warning + pub fn add_error_diags(&self, diags: Diagnostics) { + for diag in diags.into_vec() { + self.add_error_diag(diag) } } pub fn has_warnings_or_errors(&self) -> bool { - !self.diags.is_empty() + !self.diags.read().unwrap().is_empty() } pub fn has_errors(&self) -> bool { @@ -457,63 +389,45 @@ impl CompilationEnv { } pub fn count_diags(&self) -> usize { - self.diags.len() + self.diags.read().unwrap().len() } pub fn count_diags_at_or_above_severity(&self, threshold: Severity) -> usize { - self.diags.count_diags_at_or_above_severity(threshold) + self.diags + .read() + .unwrap() + .count_diags_at_or_above_severity(threshold) } pub fn has_diags_at_or_above_severity(&self, threshold: Severity) -> bool { - self.diags.max_severity_at_or_above_severity(threshold) + self.diags + .read() + .unwrap() + .max_severity_at_or_above_severity(threshold) } - pub fn check_diags_at_or_above_severity( - &mut self, - threshold: Severity, - ) -> Result<(), Diagnostics> { + pub fn check_diags_at_or_above_severity(&self, threshold: Severity) -> Result<(), Diagnostics> { if self.has_diags_at_or_above_severity(threshold) { - Err(std::mem::take(&mut self.diags)) + let diagnostics: &mut Diagnostics = &mut self.diags.write().unwrap(); + Err(std::mem::take(diagnostics)) } else { Ok(()) } } /// Should only be called after compilation is finished - 
pub fn take_final_diags(&mut self) -> Diagnostics { - std::mem::take(&mut self.diags) + pub fn take_final_diags(&self) -> Diagnostics { + let diagnostics: &mut Diagnostics = &mut self.diags.write().unwrap(); + std::mem::take(diagnostics) } /// Should only be called after compilation is finished - pub fn take_final_warning_diags(&mut self) -> Diagnostics { + pub fn take_final_warning_diags(&self) -> Diagnostics { let final_diags = self.take_final_diags(); debug_assert!(final_diags.max_severity_at_or_under_severity(Severity::Warning)); final_diags } - /// Add a new filter for warnings - pub fn add_warning_filter_scope(&mut self, filter: WarningFilters) { - self.warning_filter.push(filter) - } - - pub fn pop_warning_filter_scope(&mut self) { - self.warning_filter.pop().unwrap(); - } - - fn is_filtered(&self, diag: &Diagnostic) -> bool { - self.warning_filter - .iter() - .rev() - .any(|filter| filter.is_filtered(diag)) - } - - fn filter_for_dependency(&self) -> bool { - self.warning_filter - .iter() - .rev() - .any(|filter| filter.for_dependency()) - } - pub fn known_filter_names(&self) -> impl IntoIterator + '_ { self.known_filters.keys().copied() } @@ -573,24 +487,19 @@ impl CompilationEnv { &self.flags } - pub fn visitors(&self) -> Rc { - self.visitors.clone() + pub fn visitors(&self) -> &Visitors { + &self.visitors } // Logs an error if the feature isn't supported. Returns `false` if the feature is not // supported, and `true` otherwise. - pub fn check_feature( - &mut self, - package: Option, - feature: FeatureGate, - loc: Loc, - ) -> bool { + pub fn check_feature(&self, package: Option, feature: FeatureGate, loc: Loc) -> bool { check_feature_or_error(self, self.package_config(package).edition, feature, loc) } // Returns an error string if if the feature isn't supported, or None otherwise. 
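Several of the surrounding hunks swap single-threaded shared state for thread-safe equivalents: `prim_definers` becomes a set-once `OnceLock`, and `SaveHook` moves from `Rc<RefCell<...>>` to `Arc<Mutex<...>>`. A rough sketch of both patterns, with placeholder payload types standing in for the real name-to-module maps and saved ASTs:

use std::collections::BTreeMap;
use std::sync::{Arc, Mutex, OnceLock};

struct Env {
    // Written once after naming/typing, read many times afterwards.
    prim_definers: OnceLock<BTreeMap<&'static str, &'static str>>,
}

impl Env {
    fn set_primitive_type_definers(&self, m: BTreeMap<&'static str, &'static str>) {
        // `set` fails if called twice, matching the write-once contract.
        self.prim_definers.set(m).unwrap();
    }
    fn primitive_definer(&self, t: &str) -> Option<&&'static str> {
        self.prim_definers.get().and_then(|m| m.get(t))
    }
}

// `Rc<RefCell<_>>` is single-threaded; `Arc<Mutex<_>>` gives the same
// shared-ownership + interior-mutability shape, but usable across threads.
#[derive(Clone)]
struct SaveHook(Arc<Mutex<Vec<String>>>);

impl SaveHook {
    fn save(&self, ast: &str) {
        self.0.lock().unwrap().push(ast.to_string());
    }
}

fn main() {
    let env = Env { prim_definers: OnceLock::new() };
    env.set_primitive_type_definers(BTreeMap::from([("u64", "std::u64")]));
    assert_eq!(env.primitive_definer("u64"), Some(&"std::u64"));

    let hook = SaveHook(Arc::new(Mutex::new(Vec::new())));
    let h2 = hook.clone();
    std::thread::spawn(move || h2.save("typing ast"))
        .join()
        .unwrap();
    assert_eq!(hook.0.lock().unwrap().len(), 1);
}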
pub fn feature_edition_error_msg( - &mut self, + &self, feature: FeatureGate, package: Option, ) -> Option { @@ -619,15 +528,12 @@ impl CompilationEnv { ) } - pub fn set_primitive_type_definers( - &mut self, - m: BTreeMap, - ) { - self.prim_definers = m + pub fn set_primitive_type_definers(&self, m: BTreeMap) { + self.prim_definers.set(m).unwrap() } pub fn primitive_definer(&self, t: N::BuiltinTypeName_) -> Option<&E::ModuleIdent> { - self.prim_definers.get(&t) + self.prim_definers.get().and_then(|m| m.get(&t)) } pub fn save_parser_ast(&self, ast: &P::Program) { @@ -678,22 +584,34 @@ impl CompilationEnv { self.flags.ide_mode() } - pub fn extend_ide_info(&mut self, info: IDEInfo) { + pub fn extend_ide_info(&self, warning_filters: &WarningFiltersScope, info: IDEInfo) { if self.flags().ide_test_mode() { for entry in info.annotations.iter() { let diag = entry.clone().into(); - self.add_diag(diag); + self.add_diag(warning_filters, diag); } } - self.ide_information.extend(info); + self.ide_information.write().unwrap().extend(info); } - pub fn add_ide_annotation(&mut self, loc: Loc, info: IDEAnnotation) { + pub fn add_ide_annotation( + &self, + warning_filters: &WarningFiltersScope, + loc: Loc, + info: IDEAnnotation, + ) { if self.flags().ide_test_mode() { let diag = (loc, info.clone()).into(); - self.add_diag(diag); + self.add_diag(warning_filters, diag); } - self.ide_information.add_ide_annotation(loc, info); + self.ide_information + .write() + .unwrap() + .add_ide_annotation(loc, info); + } + + pub fn ide_information(&self) -> std::sync::RwLockReadGuard<'_, IDEInfo> { + self.ide_information.read().unwrap() } } @@ -974,6 +892,7 @@ fn check() {} fn check_all() { check::(); check::<&Visitors>(); + check::<&CompilationEnv>(); } //************************************************************************************************** @@ -981,7 +900,7 @@ fn check_all() { //************************************************************************************************** #[derive(Clone)] -pub struct SaveHook(Rc>); +pub struct SaveHook(Arc>); #[derive(Clone)] pub(crate) struct SavedInfo { @@ -1009,7 +928,7 @@ pub enum SaveFlag { impl SaveHook { pub fn new(flags: impl IntoIterator) -> Self { let flags = flags.into_iter().collect(); - Self(Rc::new(RefCell::new(SavedInfo { + Self(Arc::new(Mutex::new(SavedInfo { flags, parser: None, expansion: None, @@ -1022,56 +941,56 @@ impl SaveHook { } pub(crate) fn save_parser_ast(&self, ast: &P::Program) { - let mut r = RefCell::borrow_mut(&self.0); + let mut r = self.0.lock().unwrap(); if r.parser.is_none() && r.flags.contains(&SaveFlag::Parser) { r.parser = Some(ast.clone()) } } pub(crate) fn save_expansion_ast(&self, ast: &E::Program) { - let mut r = RefCell::borrow_mut(&self.0); + let mut r = self.0.lock().unwrap(); if r.expansion.is_none() && r.flags.contains(&SaveFlag::Expansion) { r.expansion = Some(ast.clone()) } } pub(crate) fn save_naming_ast(&self, ast: &N::Program) { - let mut r = RefCell::borrow_mut(&self.0); + let mut r = self.0.lock().unwrap(); if r.naming.is_none() && r.flags.contains(&SaveFlag::Naming) { r.naming = Some(ast.clone()) } } pub(crate) fn save_typing_ast(&self, ast: &T::Program) { - let mut r = RefCell::borrow_mut(&self.0); + let mut r = self.0.lock().unwrap(); if r.typing.is_none() && r.flags.contains(&SaveFlag::Typing) { r.typing = Some(ast.clone()) } } pub(crate) fn save_typing_info(&self, info: &Arc) { - let mut r = RefCell::borrow_mut(&self.0); + let mut r = self.0.lock().unwrap(); if r.typing_info.is_none() && 
r.flags.contains(&SaveFlag::TypingInfo) { r.typing_info = Some(info.clone()) } } pub(crate) fn save_hlir_ast(&self, ast: &H::Program) { - let mut r = RefCell::borrow_mut(&self.0); + let mut r = self.0.lock().unwrap(); if r.hlir.is_none() && r.flags.contains(&SaveFlag::HLIR) { r.hlir = Some(ast.clone()) } } pub(crate) fn save_cfgir_ast(&self, ast: &G::Program) { - let mut r = RefCell::borrow_mut(&self.0); + let mut r = self.0.lock().unwrap(); if r.cfgir.is_none() && r.flags.contains(&SaveFlag::CFGIR) { r.cfgir = Some(ast.clone()) } } pub fn take_parser_ast(&self) -> P::Program { - let mut r = RefCell::borrow_mut(&self.0); + let mut r = self.0.lock().unwrap(); assert!( r.flags.contains(&SaveFlag::Parser), "Parser AST not saved. Please set the flag when creating the SaveHook" @@ -1080,7 +999,7 @@ impl SaveHook { } pub fn take_expansion_ast(&self) -> E::Program { - let mut r = RefCell::borrow_mut(&self.0); + let mut r = self.0.lock().unwrap(); assert!( r.flags.contains(&SaveFlag::Expansion), "Expansion AST not saved. Please set the flag when creating the SaveHook" @@ -1089,7 +1008,7 @@ impl SaveHook { } pub fn take_naming_ast(&self) -> N::Program { - let mut r = RefCell::borrow_mut(&self.0); + let mut r = self.0.lock().unwrap(); assert!( r.flags.contains(&SaveFlag::Naming), "Naming AST not saved. Please set the flag when creating the SaveHook" @@ -1098,7 +1017,7 @@ impl SaveHook { } pub fn take_typing_ast(&self) -> T::Program { - let mut r = RefCell::borrow_mut(&self.0); + let mut r = self.0.lock().unwrap(); assert!( r.flags.contains(&SaveFlag::Typing), "Typing AST not saved. Please set the flag when creating the SaveHook" @@ -1107,7 +1026,7 @@ impl SaveHook { } pub fn take_typing_info(&self) -> Arc { - let mut r = RefCell::borrow_mut(&self.0); + let mut r = self.0.lock().unwrap(); assert!( r.flags.contains(&SaveFlag::TypingInfo), "Typing info not saved. Please set the flag when creating the SaveHook" @@ -1116,7 +1035,7 @@ impl SaveHook { } pub fn take_hlir_ast(&self) -> H::Program { - let mut r = RefCell::borrow_mut(&self.0); + let mut r = self.0.lock().unwrap(); assert!( r.flags.contains(&SaveFlag::HLIR), "HLIR AST not saved. Please set the flag when creating the SaveHook" @@ -1125,7 +1044,7 @@ impl SaveHook { } pub fn take_cfgir_ast(&self) -> G::Program { - let mut r = RefCell::borrow_mut(&self.0); + let mut r = self.0.lock().unwrap(); assert!( r.flags.contains(&SaveFlag::CFGIR), "CFGIR AST not saved. 
Please set the flag when creating the SaveHook" diff --git a/external-crates/move/crates/move-compiler/src/sui_mode/id_leak.rs b/external-crates/move/crates/move-compiler/src/sui_mode/id_leak.rs index 2a67686ce4ba2..03d7d42a388c6 100644 --- a/external-crates/move/crates/move-compiler/src/sui_mode/id_leak.rs +++ b/external-crates/move/crates/move-compiler/src/sui_mode/id_leak.rs @@ -20,7 +20,7 @@ use crate::{ expansion::ast::{ModuleIdent, TargetKind}, hlir::ast::{self as H, Exp, Label, ModuleCall, SingleType, Type, Type_, Var}, parser::ast::Ability_, - shared::{program_info::TypingProgramInfo, CompilationEnv, Identifier}, + shared::{program_info::TypingProgramInfo, Identifier}, sui_mode::{OBJECT_NEW, TEST_SCENARIO_MODULE_NAME, TS_NEW_OBJECT}, }; use std::collections::BTreeMap; @@ -94,7 +94,6 @@ impl SimpleAbsIntConstructor for IDLeakVerifier { type AI<'a> = IDLeakVerifierAI<'a>; fn new<'a>( - env: &CompilationEnv, context: &'a CFGContext<'a>, cfg: &ImmForwardCFG, _init_state: &mut as SimpleAbsInt>::State, @@ -102,7 +101,7 @@ impl SimpleAbsIntConstructor for IDLeakVerifier { let module = &context.module; let minfo = context.info.module(module); let package_name = minfo.package; - let config = env.package_config(package_name); + let config = context.env.package_config(package_name); if config.flavor != Flavor::Sui { // Skip if not sui return None; diff --git a/external-crates/move/crates/move-compiler/src/sui_mode/info.rs b/external-crates/move/crates/move-compiler/src/sui_mode/info.rs index 2bfaedeafef51..f4068b4c67048 100644 --- a/external-crates/move/crates/move-compiler/src/sui_mode/info.rs +++ b/external-crates/move/crates/move-compiler/src/sui_mode/info.rs @@ -10,6 +10,7 @@ use std::{ }; use crate::{ + diagnostics::warning_filters::WarningFilters, expansion::ast::{Fields, ModuleIdent}, naming::ast as N, parser::ast::{Ability_, DatatypeName, Field}, @@ -271,7 +272,7 @@ fn add_private_transfers( transferred: &'a mut BTreeMap<(ModuleIdent, DatatypeName), TransferKind>, } impl<'a> TypingVisitorContext for TransferVisitor<'a> { - fn add_warning_filter_scope(&mut self, _: crate::diagnostics::WarningFilters) { + fn push_warning_filter_scope(&mut self, _: WarningFilters) { unreachable!("no warning filters in function bodies") } @@ -303,6 +304,6 @@ fn add_private_transfers( let mut visitor = TransferVisitor { transferred }; match &fdef.body.value { T::FunctionBody_::Native | &T::FunctionBody_::Macro => (), - T::FunctionBody_::Defined(seq) => visitor.visit_seq(seq), + T::FunctionBody_::Defined(seq) => visitor.visit_seq(fdef.body.loc, seq), } } diff --git a/external-crates/move/crates/move-compiler/src/sui_mode/linters/coin_field.rs b/external-crates/move/crates/move-compiler/src/sui_mode/linters/coin_field.rs index 2af05f7f6e247..1553e71292c2c 100644 --- a/external-crates/move/crates/move-compiler/src/sui_mode/linters/coin_field.rs +++ b/external-crates/move/crates/move-compiler/src/sui_mode/linters/coin_field.rs @@ -7,12 +7,11 @@ use crate::{ diag, diagnostics::codes::{custom, DiagnosticInfo, Severity}, + expansion::ast::ModuleIdent, naming::ast as N, - shared::CompilationEnv, - typing::{ast as T, visitor::TypingVisitor}, + parser::ast::DatatypeName, + typing::{ast as T, visitor::simple_visitor}, }; -use move_ir_types::location::Loc; -use move_symbol_pool::Symbol; use super::{ LinterDiagnosticCategory, LinterDiagnosticCode, COIN_MOD_NAME, COIN_STRUCT_NAME, @@ -27,40 +26,35 @@ const COIN_FIELD_DIAG: DiagnosticInfo = custom( "sub-optimal 'sui::coin::Coin' field type", ); -pub struct 
CoinFieldVisitor; - -impl TypingVisitor for CoinFieldVisitor { - fn visit(&self, env: &mut CompilationEnv, program: &T::Program) { - for (_, _, mdef) in program.modules.iter() { - if mdef.attributes.is_test_or_test_only() { - continue; - } - env.add_warning_filter_scope(mdef.warning_filter.clone()); - mdef.structs - .iter() - .filter(|(_, _, sdef)| !sdef.attributes.is_test_or_test_only()) - .for_each(|(sloc, sname, sdef)| struct_def(env, *sname, sdef, sloc)); - env.pop_warning_filter_scope(); +simple_visitor!( + CoinFieldVisitor, + fn visit_module_custom(&mut self, _ident: ModuleIdent, mdef: &T::ModuleDefinition) -> bool { + // skip if test only + mdef.attributes.is_test_or_test_only() + }, + // TODO enums + fn visit_struct_custom( + &mut self, + _module: ModuleIdent, + _sname: DatatypeName, + sdef: &N::StructDefinition, + ) -> bool { + if sdef.attributes.is_test_or_test_only() { + return false; } - } -} -fn struct_def(env: &mut CompilationEnv, sname: Symbol, sdef: &N::StructDefinition, sloc: Loc) { - env.add_warning_filter_scope(sdef.warning_filter.clone()); - - if let N::StructFields::Defined(_, sfields) = &sdef.fields { - for (floc, fname, (_, ftype)) in sfields.iter() { - if is_field_coin_type(ftype) { - let msg = format!("The field '{fname}' of '{sname}' has type 'sui::coin::Coin'"); - let uid_msg = "Storing 'sui::balance::Balance' in this field will typically be more space-efficient"; - let d = diag!(COIN_FIELD_DIAG, (sloc, msg), (floc, uid_msg)); - env.add_diag(d); + if let N::StructFields::Defined(_, sfields) = &sdef.fields { + for (_floc, _fname, (_, ftype)) in sfields { + if is_field_coin_type(ftype) { + let msg = "Sub-optimal 'sui::coin::Coin' field type. Using \ + 'sui::balance::Balance' instead will be more space efficient"; + self.add_diag(diag!(COIN_FIELD_DIAG, (ftype.loc, msg))); + } } } + false } - - env.pop_warning_filter_scope(); -} +); fn is_field_coin_type(sp!(_, t): &N::Type) -> bool { use N::Type_ as T; diff --git a/external-crates/move/crates/move-compiler/src/sui_mode/linters/collection_equality.rs b/external-crates/move/crates/move-compiler/src/sui_mode/linters/collection_equality.rs index ab61b891ca6b3..083cdb8b5831a 100644 --- a/external-crates/move/crates/move-compiler/src/sui_mode/linters/collection_equality.rs +++ b/external-crates/move/crates/move-compiler/src/sui_mode/linters/collection_equality.rs @@ -7,17 +7,11 @@ use crate::{ diag, - diagnostics::{ - codes::{custom, DiagnosticInfo, Severity}, - WarningFilters, - }, + diagnostics::codes::{custom, DiagnosticInfo, Severity}, naming::ast as N, parser::ast as P, - shared::{CompilationEnv, Identifier}, - typing::{ - ast as T, - visitor::{TypingVisitorConstructor, TypingVisitorContext}, - }, + shared::Identifier, + typing::{ast as T, visitor::simple_visitor}, }; use super::{ @@ -55,20 +49,8 @@ const COLLECTION_TYPES: &[(&str, &str, &str)] = &[ (SUI_PKG_NAME, VEC_SET_MOD_NAME, VEC_SET_STRUCT_NAME), ]; -pub struct CollectionEqualityVisitor; -pub struct Context<'a> { - env: &'a mut CompilationEnv, -} - -impl TypingVisitorConstructor for CollectionEqualityVisitor { - type Context<'a> = Context<'a>; - - fn context<'a>(env: &'a mut CompilationEnv, _program: &T::Program) -> Self::Context<'a> { - Context { env } - } -} - -impl TypingVisitorContext for Context<'_> { +simple_visitor!( + CollectionEqualityVisitor, fn visit_exp_custom(&mut self, exp: &T::Exp) -> bool { use T::UnannotatedExp_ as E; if let E::BinopExp(_, op, t, _) = &exp.exp.value { @@ -98,18 +80,10 @@ impl TypingVisitorContext for Context<'_> { 
format!("Equality for collections of type '{caddr}::{cmodule}::{cname}' IS NOT a structural check based on content"); let mut d = diag!(COLLECTIONS_EQUALITY_DIAG, (op.loc, msg),); d.add_note(note_msg); - self.env.add_diag(d); + self.add_diag(d); return true; } } false } - - fn add_warning_filter_scope(&mut self, filter: WarningFilters) { - self.env.add_warning_filter_scope(filter) - } - - fn pop_warning_filter_scope(&mut self) { - self.env.pop_warning_filter_scope() - } -} +); diff --git a/external-crates/move/crates/move-compiler/src/sui_mode/linters/custom_state_change.rs b/external-crates/move/crates/move-compiler/src/sui_mode/linters/custom_state_change.rs index 085135f676b0d..f0e542d5a50af 100644 --- a/external-crates/move/crates/move-compiler/src/sui_mode/linters/custom_state_change.rs +++ b/external-crates/move/crates/move-compiler/src/sui_mode/linters/custom_state_change.rs @@ -29,7 +29,7 @@ use crate::{ BaseType_, Label, ModuleCall, SingleType, SingleType_, Type, TypeName_, Type_, Var, }, parser::ast::Ability_, - shared::{CompilationEnv, Identifier}, + shared::Identifier, }; use std::collections::BTreeMap; @@ -87,7 +87,6 @@ impl SimpleAbsIntConstructor for CustomStateChangeVerifier { type AI<'a> = CustomStateChangeVerifierAI; fn new<'a>( - _env: &CompilationEnv, context: &'a CFGContext<'a>, cfg: &ImmForwardCFG, init_state: &mut State, diff --git a/external-crates/move/crates/move-compiler/src/sui_mode/linters/freeze_wrapped.rs b/external-crates/move/crates/move-compiler/src/sui_mode/linters/freeze_wrapped.rs index 717355f78c823..705a09750291d 100644 --- a/external-crates/move/crates/move-compiler/src/sui_mode/linters/freeze_wrapped.rs +++ b/external-crates/move/crates/move-compiler/src/sui_mode/linters/freeze_wrapped.rs @@ -11,7 +11,8 @@ use crate::{ diag, diagnostics::{ codes::{custom, DiagnosticInfo, Severity}, - WarningFilters, + warning_filters::{WarningFilters, WarningFiltersScope}, + Diagnostic, Diagnostics, }, expansion::ast as E, naming::ast as N, @@ -74,7 +75,8 @@ type WrappingFields = pub struct FreezeWrappedVisitor; pub struct Context<'a> { - env: &'a mut CompilationEnv, + env: &'a CompilationEnv, + warning_filters_scope: WarningFiltersScope, program_info: Arc, /// Memoizes information about struct fields wrapping other objects as they are discovered wrapping_fields: WrappingFields, @@ -83,15 +85,28 @@ pub struct Context<'a> { impl TypingVisitorConstructor for FreezeWrappedVisitor { type Context<'a> = Context<'a>; - fn context<'a>(env: &'a mut CompilationEnv, program: &T::Program) -> Self::Context<'a> { + fn context<'a>(env: &'a CompilationEnv, program: &T::Program) -> Self::Context<'a> { + let warning_filters_scope = env.top_level_warning_filter_scope().clone(); Context { env, + warning_filters_scope, program_info: program.info.clone(), wrapping_fields: WrappingFields::new(), } } } +impl Context<'_> { + fn add_diag(&self, diag: Diagnostic) { + self.env.add_diag(&self.warning_filters_scope, diag); + } + + #[allow(unused)] + fn add_diags(&self, diags: Diagnostics) { + self.env.add_diags(&self.warning_filters_scope, diags); + } +} + impl<'a> TypingVisitorContext for Context<'a> { fn visit_module_custom(&mut self, _ident: E::ModuleIdent, mdef: &T::ModuleDefinition) -> bool { // skips if true @@ -128,7 +143,7 @@ impl<'a> TypingVisitorContext for Context<'a> { }; if let Some(wrapping_field_info) = self.find_wrapping_field_loc(mident, sname) { add_diag( - self.env, + self, fun.arguments.exp.loc, sname.value(), wrapping_field_info, @@ -140,12 +155,12 @@ impl<'a> 
TypingVisitorContext for Context<'a> { false } - fn add_warning_filter_scope(&mut self, filter: WarningFilters) { - self.env.add_warning_filter_scope(filter) + fn push_warning_filter_scope(&mut self, filters: WarningFilters) { + self.warning_filters_scope.push(filters) } fn pop_warning_filter_scope(&mut self) { - self.env.pop_warning_filter_scope() + self.warning_filters_scope.pop() } } @@ -233,7 +248,7 @@ impl<'a> Context<'a> { } fn add_diag( - env: &mut CompilationEnv, + context: &mut Context, freeze_arg_loc: Loc, frozen_struct_name: Symbol, info: WrappingFieldInfo, @@ -261,5 +276,5 @@ fn add_diag( if !direct { d.add_secondary_label((wrapped_tloc, "Indirectly wrapped object is of this type")); } - env.add_diag(d); + context.add_diag(d); } diff --git a/external-crates/move/crates/move-compiler/src/sui_mode/linters/freezing_capability.rs b/external-crates/move/crates/move-compiler/src/sui_mode/linters/freezing_capability.rs index 3f77fa2417a19..34e9deb90d126 100644 --- a/external-crates/move/crates/move-compiler/src/sui_mode/linters/freezing_capability.rs +++ b/external-crates/move/crates/move-compiler/src/sui_mode/linters/freezing_capability.rs @@ -7,17 +7,11 @@ use super::{LinterDiagnosticCategory, LinterDiagnosticCode, LINT_WARNING_PREFIX}; use crate::{ diag, - diagnostics::{ - codes::{custom, DiagnosticInfo, Severity}, - WarningFilters, - }, + diagnostics::codes::{custom, DiagnosticInfo, Severity}, naming::ast::TypeName_, - shared::{CompilationEnv, Identifier}, + shared::Identifier, sui_mode::linters::{FREEZE_FUN, PUBLIC_FREEZE_FUN, SUI_PKG_NAME, TRANSFER_MOD_NAME}, - typing::{ - ast as T, core, - visitor::{TypingVisitorConstructor, TypingVisitorContext}, - }, + typing::{ast as T, core, visitor::simple_visitor}, }; use move_ir_types::location::*; use once_cell::sync::Lazy; @@ -36,22 +30,10 @@ const FREEZE_FUNCTIONS: &[(&str, &str, &str)] = &[ (SUI_PKG_NAME, TRANSFER_MOD_NAME, FREEZE_FUN), ]; -pub struct WarnFreezeCapability; - -pub struct Context<'a> { - env: &'a mut CompilationEnv, -} - static REGEX: Lazy = Lazy::new(|| Regex::new(r".*Cap(?:[A-Z0-9_]+|ability|$).*").unwrap()); -impl TypingVisitorConstructor for WarnFreezeCapability { - type Context<'a> = Context<'a>; - fn context<'a>(env: &'a mut CompilationEnv, _program: &T::Program) -> Self::Context<'a> { - Context { env } - } -} - -impl<'a> TypingVisitorContext for Context<'a> { +simple_visitor!( + WarnFreezeCapability, fn visit_module_custom( &mut self, _ident: crate::expansion::ast::ModuleIdent, @@ -59,8 +41,7 @@ impl<'a> TypingVisitorContext for Context<'a> { ) -> bool { // skips if true mdef.attributes.is_test_or_test_only() - } - + }, fn visit_function_custom( &mut self, _module: crate::expansion::ast::ModuleIdent, @@ -69,8 +50,7 @@ impl<'a> TypingVisitorContext for Context<'a> { ) -> bool { // skips if true fdef.attributes.is_test_or_test_only() - } - + }, fn visit_exp_custom(&mut self, exp: &T::Exp) -> bool { if let T::UnannotatedExp_::ModuleCall(fun) = &exp.exp.value { if is_freeze_function(fun) { @@ -79,15 +59,7 @@ impl<'a> TypingVisitorContext for Context<'a> { } false } - - fn add_warning_filter_scope(&mut self, filter: WarningFilters) { - self.env.add_warning_filter_scope(filter) - } - - fn pop_warning_filter_scope(&mut self) { - self.env.pop_warning_filter_scope() - } -} +); fn is_freeze_function(fun: &T::ModuleCall) -> bool { FREEZE_FUNCTIONS.iter().any(|(addr, module, fname)| { @@ -110,7 +82,7 @@ fn check_type_arguments(context: &mut Context, fun: &T::ModuleCall, loc: Loc) { "Freezing a capability might lock out 
critical operations \ or otherwise open access to operations that otherwise should be restricted", ); - context.env.add_diag(diag); + context.add_diag(diag); }; } } diff --git a/external-crates/move/crates/move-compiler/src/sui_mode/linters/missing_key.rs b/external-crates/move/crates/move-compiler/src/sui_mode/linters/missing_key.rs index fae0bb94446db..8289ee38f7cc2 100644 --- a/external-crates/move/crates/move-compiler/src/sui_mode/linters/missing_key.rs +++ b/external-crates/move/crates/move-compiler/src/sui_mode/linters/missing_key.rs @@ -8,17 +8,10 @@ use crate::expansion::ast::ModuleIdent; use crate::parser::ast::DatatypeName; use crate::{ diag, - diagnostics::{ - codes::{custom, DiagnosticInfo, Severity}, - WarningFilters, - }, + diagnostics::codes::{custom, DiagnosticInfo, Severity}, naming::ast::{StructDefinition, StructFields}, parser::ast::Ability_, - shared::CompilationEnv, - typing::{ - ast as T, - visitor::{TypingVisitorConstructor, TypingVisitorContext}, - }, + typing::visitor::simple_visitor, }; const MISSING_KEY_ABILITY_DIAG: DiagnosticInfo = custom( @@ -29,28 +22,8 @@ const MISSING_KEY_ABILITY_DIAG: DiagnosticInfo = custom( "struct with id but missing key ability", ); -pub struct MissingKeyVisitor; - -pub struct Context<'a> { - env: &'a mut CompilationEnv, -} -impl TypingVisitorConstructor for MissingKeyVisitor { - type Context<'a> = Context<'a>; - - fn context<'a>(env: &'a mut CompilationEnv, _program: &T::Program) -> Self::Context<'a> { - Context { env } - } -} - -impl TypingVisitorContext for Context<'_> { - fn add_warning_filter_scope(&mut self, filter: WarningFilters) { - self.env.add_warning_filter_scope(filter) - } - - fn pop_warning_filter_scope(&mut self) { - self.env.pop_warning_filter_scope() - } - +simple_visitor!( + MissingKeyVisitor, fn visit_struct_custom( &mut self, _module: ModuleIdent, @@ -61,11 +34,11 @@ impl TypingVisitorContext for Context<'_> { let uid_msg = "Struct's first field has an 'id' field of type 'sui::object::UID' but is missing the 'key' ability."; let diagnostic = diag!(MISSING_KEY_ABILITY_DIAG, (sdef.loc, uid_msg)); - self.env.add_diag(diagnostic); + self.add_diag(diagnostic); } false } -} +); fn first_field_has_id_field_of_type_uid(sdef: &StructDefinition) -> bool { match &sdef.fields { diff --git a/external-crates/move/crates/move-compiler/src/sui_mode/linters/mod.rs b/external-crates/move/crates/move-compiler/src/sui_mode/linters/mod.rs index d8e21d6bb3167..d308381283f1e 100644 --- a/external-crates/move/crates/move-compiler/src/sui_mode/linters/mod.rs +++ b/external-crates/move/crates/move-compiler/src/sui_mode/linters/mod.rs @@ -4,7 +4,7 @@ use crate::{ cfgir::visitor::AbstractInterpreterVisitor, command_line::compiler::Visitor, - diagnostics::codes::WarningFilter, + diagnostics::warning_filters::WarningFilter, expansion::ast as E, hlir::ast::{BaseType_, SingleType, SingleType_}, linters::{LintLevel, LinterDiagnosticCategory, ALLOW_ATTR_CATEGORY, LINT_WARNING_PREFIX}, diff --git a/external-crates/move/crates/move-compiler/src/sui_mode/linters/public_mut_tx_context.rs b/external-crates/move/crates/move-compiler/src/sui_mode/linters/public_mut_tx_context.rs index b150e5deb8d0f..80c11583af028 100644 --- a/external-crates/move/crates/move-compiler/src/sui_mode/linters/public_mut_tx_context.rs +++ b/external-crates/move/crates/move-compiler/src/sui_mode/linters/public_mut_tx_context.rs @@ -4,23 +4,16 @@ //! Enforces that public functions use `&mut TxContext` instead of `&TxContext` to ensure upgradability. //! 
Detects and reports instances where a non-mutable reference to `TxContext` is used in public function signatures. //! Promotes best practices for future-proofing smart contract code by allowing mutation of the transaction context. -use super::{LinterDiagnosticCategory, LinterDiagnosticCode, LINT_WARNING_PREFIX}; +use super::{LinterDiagnosticCategory, LinterDiagnosticCode, LINT_WARNING_PREFIX}; use crate::{ diag, - diagnostics::{ - codes::{custom, DiagnosticInfo, Severity}, - WarningFilters, - }, + diagnostics::codes::{custom, DiagnosticInfo, Severity}, expansion::ast::{ModuleIdent, Visibility}, naming::ast::Type_, parser::ast::FunctionName, - shared::CompilationEnv, sui_mode::{SUI_ADDR_NAME, TX_CONTEXT_MODULE_NAME, TX_CONTEXT_TYPE_NAME}, - typing::{ - ast as T, - visitor::{TypingVisitorConstructor, TypingVisitorContext}, - }, + typing::{ast as T, visitor::simple_visitor}, }; use move_ir_types::location::Loc; @@ -32,33 +25,12 @@ const REQUIRE_MUTABLE_TX_CONTEXT_DIAG: DiagnosticInfo = custom( "prefer '&mut TxContext' over '&TxContext'", ); -pub struct PreferMutableTxContext; - -pub struct Context<'a> { - env: &'a mut CompilationEnv, -} - -impl TypingVisitorConstructor for PreferMutableTxContext { - type Context<'a> = Context<'a>; - - fn context<'a>(env: &'a mut CompilationEnv, _program: &T::Program) -> Self::Context<'a> { - Context { env } - } -} - -impl TypingVisitorContext for Context<'_> { - fn add_warning_filter_scope(&mut self, filter: WarningFilters) { - self.env.add_warning_filter_scope(filter) - } - fn pop_warning_filter_scope(&mut self) { - self.env.pop_warning_filter_scope() - } - +simple_visitor!( + PreferMutableTxContext, fn visit_module_custom(&mut self, ident: ModuleIdent, _mdef: &T::ModuleDefinition) -> bool { // skip if in 'sui::tx_context' ident.value.is(SUI_ADDR_NAME, TX_CONTEXT_MODULE_NAME) - } - + }, fn visit_function_custom( &mut self, _module: ModuleIdent, @@ -74,15 +46,15 @@ impl TypingVisitorContext for Context<'_> { param_ty_, Type_::Ref(false, t) if t.value.is(SUI_ADDR_NAME, TX_CONTEXT_MODULE_NAME, TX_CONTEXT_TYPE_NAME), ) { - report_non_mutable_tx_context(self.env, *loc); + report_non_mutable_tx_context(self, *loc); } } false } -} +); -fn report_non_mutable_tx_context(env: &mut CompilationEnv, loc: Loc) { +fn report_non_mutable_tx_context(context: &mut Context, loc: Loc) { let msg = format!( "'public' functions should prefer '&mut {0}' over '&{0}' for better upgradability.", TX_CONTEXT_TYPE_NAME @@ -93,5 +65,5 @@ fn report_non_mutable_tx_context(env: &mut CompilationEnv, loc: Loc) { of '&TxContext'. As such, it is recommended to consider using '&mut TxContext' to \ future-proof the function.", ); - env.add_diag(diag); + context.add_diag(diag); } diff --git a/external-crates/move/crates/move-compiler/src/sui_mode/linters/public_random.rs b/external-crates/move/crates/move-compiler/src/sui_mode/linters/public_random.rs index 59cb91685f371..5f7df11f22137 100644 --- a/external-crates/move/crates/move-compiler/src/sui_mode/linters/public_random.rs +++ b/external-crates/move/crates/move-compiler/src/sui_mode/linters/public_random.rs @@ -3,17 +3,15 @@ //! This analysis flags uses of random::Random and random::RandomGenerator in public functions. 
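The linters converted to `simple_visitor!` in this part of the diff share one structural change: the warning-filter scope becomes a stack owned by the visitor context (presumably pushed when entering an annotated item and popped on exit, as the `push_warning_filter_scope`/`pop_warning_filter_scope` hooks suggest) and consulted by a local `add_diag`, instead of being mutated on the shared CompilationEnv. A standalone sketch of that stack discipline, using simplified names in place of the real `WarningFilters`/`WarningFiltersScope`:

#[derive(Clone, Default)]
struct WarningFilters {
    codes: Vec<&'static str>,
}

#[derive(Default)]
struct WarningFiltersScope {
    stack: Vec<WarningFilters>,
}

impl WarningFiltersScope {
    fn push(&mut self, f: WarningFilters) {
        self.stack.push(f)
    }
    fn pop(&mut self) {
        self.stack.pop().expect("unbalanced filter scopes");
    }
    fn is_filtered(&self, code: &'static str) -> bool {
        self.stack.iter().any(|f| f.codes.contains(&code))
    }
}

// Toy visitor context: diagnostics are collected locally here, whereas the
// real contexts forward to the CompilationEnv with the scope attached.
struct Context {
    warning_filters_scope: WarningFiltersScope,
    reported: Vec<String>,
}

impl Context {
    fn add_diag(&mut self, code: &'static str, msg: &str) {
        if !self.warning_filters_scope.is_filtered(code) {
            self.reported.push(format!("{code}: {msg}"));
        }
    }
    fn push_warning_filter_scope(&mut self, f: WarningFilters) {
        self.warning_filters_scope.push(f)
    }
    fn pop_warning_filter_scope(&mut self) {
        self.warning_filters_scope.pop()
    }
}

fn main() {
    let mut ctx = Context {
        warning_filters_scope: WarningFiltersScope::default(),
        reported: Vec::new(),
    };
    ctx.add_diag("lint::public_random", "random::Random in a public function");

    // Inside an allow-style scope the same diagnostic code is filtered out.
    ctx.push_warning_filter_scope(WarningFilters { codes: vec!["lint::public_random"] });
    ctx.add_diag("lint::public_random", "suppressed inside the allow scope");
    ctx.pop_warning_filter_scope();

    assert_eq!(ctx.reported.len(), 1);
}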
-use crate::diagnostics::WarningFilters; use crate::expansion::ast::ModuleIdent; use crate::parser::ast::FunctionName; use crate::sui_mode::SUI_ADDR_NAME; -use crate::typing::visitor::{TypingVisitorConstructor, TypingVisitorContext}; +use crate::typing::visitor::simple_visitor; use crate::{ diag, diagnostics::codes::{custom, DiagnosticInfo, Severity}, expansion::ast::Visibility, naming::ast as N, - shared::CompilationEnv, typing::ast as T, }; @@ -30,33 +28,12 @@ const PUBLIC_RANDOM_DIAG: DiagnosticInfo = custom( "Risky use of 'sui::random'", ); -pub struct PublicRandomVisitor; -pub struct Context<'a> { - env: &'a mut CompilationEnv, -} - -impl TypingVisitorConstructor for PublicRandomVisitor { - type Context<'a> = Context<'a>; - - fn context<'a>(env: &'a mut CompilationEnv, _program: &T::Program) -> Self::Context<'a> { - Context { env } - } -} - -impl TypingVisitorContext for Context<'_> { - fn add_warning_filter_scope(&mut self, filter: WarningFilters) { - self.env.add_warning_filter_scope(filter) - } - - fn pop_warning_filter_scope(&mut self) { - self.env.pop_warning_filter_scope() - } - +simple_visitor!( + PublicRandomVisitor, fn visit_module_custom(&mut self, ident: ModuleIdent, mdef: &T::ModuleDefinition) -> bool { // skips if true mdef.attributes.is_test_or_test_only() || ident.value.address.is(SUI_ADDR_NAME) - } - + }, fn visit_function_custom( &mut self, _module: ModuleIdent, @@ -78,12 +55,12 @@ impl TypingVisitorContext for Context<'_> { SUI_PKG_NAME, RANDOM_MOD_NAME, struct_name); d.add_note(note); d.add_note("Non-public functions are preferred"); - self.env.add_diag(d); + self.add_diag(d); } } true } -} +); fn is_random_or_random_generator(sp!(_, t): &N::Type) -> Option<&str> { use N::Type_ as T; diff --git a/external-crates/move/crates/move-compiler/src/sui_mode/linters/self_transfer.rs b/external-crates/move/crates/move-compiler/src/sui_mode/linters/self_transfer.rs index 84f8ca01f41e7..45ab199583213 100644 --- a/external-crates/move/crates/move-compiler/src/sui_mode/linters/self_transfer.rs +++ b/external-crates/move/crates/move-compiler/src/sui_mode/linters/self_transfer.rs @@ -23,7 +23,6 @@ use crate::{ }, hlir::ast::{Label, ModuleCall, Type, Type_, Var}, parser::ast::Ability_, - shared::CompilationEnv, }; use std::collections::BTreeMap; @@ -80,7 +79,6 @@ impl SimpleAbsIntConstructor for SelfTransferVerifier { type AI<'a> = SelfTransferVerifierAI; fn new<'a>( - _env: &CompilationEnv, context: &'a CFGContext<'a>, cfg: &ImmForwardCFG, _init_state: &mut as SimpleAbsInt>::State, diff --git a/external-crates/move/crates/move-compiler/src/sui_mode/linters/share_owned.rs b/external-crates/move/crates/move-compiler/src/sui_mode/linters/share_owned.rs index a3e44f87a3a2d..86003cd647ca6 100644 --- a/external-crates/move/crates/move-compiler/src/sui_mode/linters/share_owned.rs +++ b/external-crates/move/crates/move-compiler/src/sui_mode/linters/share_owned.rs @@ -32,7 +32,7 @@ use crate::{ parser::ast::{Ability_, DatatypeName}, shared::{ program_info::{DatatypeKind, TypingProgramInfo}, - CompilationEnv, Identifier, + Identifier, }, sui_mode::{ info::{SuiInfo, TransferKind}, @@ -96,7 +96,6 @@ impl SimpleAbsIntConstructor for ShareOwnedVerifier { type AI<'a> = ShareOwnedVerifierAI<'a>; fn new<'a>( - _env: &CompilationEnv, context: &'a CFGContext<'a>, cfg: &ImmForwardCFG, _init_state: &mut as SimpleAbsInt>::State, diff --git a/external-crates/move/crates/move-compiler/src/sui_mode/typing.rs b/external-crates/move/crates/move-compiler/src/sui_mode/typing.rs index 
b895758f2602f..a15563e19050d 100644 --- a/external-crates/move/crates/move-compiler/src/sui_mode/typing.rs +++ b/external-crates/move/crates/move-compiler/src/sui_mode/typing.rs @@ -8,7 +8,10 @@ use move_symbol_pool::Symbol; use crate::{ diag, - diagnostics::{Diagnostic, WarningFilters}, + diagnostics::{ + warning_filters::{WarningFilters, WarningFiltersScope}, + Diagnostic, Diagnostics, + }, editions::Flavor, expansion::ast::{AbilitySet, Fields, ModuleIdent, Mutability, TargetKind, Visibility}, naming::ast::{ @@ -32,7 +35,7 @@ pub struct SuiTypeChecks; impl TypingVisitorConstructor for SuiTypeChecks { type Context<'a> = Context<'a>; - fn context<'a>(env: &'a mut CompilationEnv, program: &T::Program) -> Self::Context<'a> { + fn context<'a>(env: &'a CompilationEnv, program: &T::Program) -> Self::Context<'a> { Context::new(env, program.info.clone()) } } @@ -43,7 +46,8 @@ impl TypingVisitorConstructor for SuiTypeChecks { #[allow(unused)] pub struct Context<'a> { - env: &'a mut CompilationEnv, + env: &'a CompilationEnv, + warning_filters_scope: WarningFiltersScope, info: Arc, sui_transfer_ident: Option, current_module: Option, @@ -53,14 +57,16 @@ pub struct Context<'a> { } impl<'a> Context<'a> { - fn new(env: &'a mut CompilationEnv, info: Arc) -> Self { + fn new(env: &'a CompilationEnv, info: Arc) -> Self { let sui_module_ident = info .modules .key_cloned_iter() .find(|(m, _)| m.value.is(SUI_ADDR_NAME, TRANSFER_MODULE_NAME)) .map(|(m, _)| m); + let warning_filters_scope = env.top_level_warning_filter_scope().clone(); Context { env, + warning_filters_scope, info, sui_transfer_ident: sui_module_ident, current_module: None, @@ -70,6 +76,15 @@ impl<'a> Context<'a> { } } + fn add_diag(&self, diag: Diagnostic) { + self.env.add_diag(&self.warning_filters_scope, diag); + } + + #[allow(unused)] + fn add_diags(&self, diags: Diagnostics) { + self.env.add_diags(&self.warning_filters_scope, diags); + } + fn set_module(&mut self, current_module: ModuleIdent) { self.current_module = Some(current_module); self.otw_name = Some(Symbol::from( @@ -98,12 +113,12 @@ const OTW_NOTE: &str = "One-time witness types are structs with the following re //************************************************************************************************** impl<'a> TypingVisitorContext for Context<'a> { - fn add_warning_filter_scope(&mut self, filter: WarningFilters) { - self.env.add_warning_filter_scope(filter) + fn push_warning_filter_scope(&mut self, filters: WarningFilters) { + self.warning_filters_scope.push(filters) } fn pop_warning_filter_scope(&mut self) { - self.env.pop_warning_filter_scope() + self.warning_filters_scope.pop() } fn visit_module_custom(&mut self, ident: ModuleIdent, mdef: &T::ModuleDefinition) -> bool { @@ -205,9 +220,7 @@ fn struct_def(context: &mut Context, name: DatatypeName, sdef: &N::StructDefinit }; if let Some(loc) = invalid_first_field { // no fields or an invalid 'id' field - context - .env - .add_diag(invalid_object_id_field_diag(key_loc, loc, name)); + context.add_diag(invalid_object_id_field_diag(key_loc, loc, name)); return; }; @@ -223,7 +236,7 @@ fn struct_def(context: &mut Context, name: DatatypeName, sdef: &N::StructDefinit ); let mut diag = invalid_object_id_field_diag(key_loc, *id_field_loc, name); diag.add_secondary_label((id_field_type.loc, actual)); - context.env.add_diag(diag); + context.add_diag(diag); } } @@ -261,7 +274,7 @@ fn enum_def(context: &mut Context, name: DatatypeName, edef: &N::EnumDefinition) let msg = format!("Invalid object '{name}'"); let key_msg = format!("Enums 
cannot have the '{}' ability.", Ability_::Key); let diag = diag!(OBJECT_DECL_DIAG, (name.loc(), msg), (key_loc, key_msg)); - context.env.add_diag(diag); + context.add_diag(diag); }; } @@ -293,7 +306,7 @@ fn function(context: &mut Context, name: FunctionName, fdef: &T::Function) { entry_signature(context, *entry_loc, name, signature); } if let sp!(_, T::FunctionBody_::Defined(seq)) = body { - context.visit_seq(seq) + context.visit_seq(body.loc, seq) } context.in_test = prev_in_test; } @@ -309,17 +322,16 @@ fn init_visibility( entry: Option, ) { match visibility { - Visibility::Public(loc) | Visibility::Friend(loc) | Visibility::Package(loc) => { - context.env.add_diag(diag!( + Visibility::Public(loc) | Visibility::Friend(loc) | Visibility::Package(loc) => context + .add_diag(diag!( INIT_FUN_DIAG, (name.loc(), "Invalid 'init' function declaration"), (loc, "'init' functions must be internal to their module"), - )) - } + )), Visibility::Internal => (), } if let Some(entry) = entry { - context.env.add_diag(diag!( + context.add_diag(diag!( INIT_FUN_DIAG, (name.loc(), "Invalid 'init' function declaration"), (entry, "'init' functions cannot be 'entry' functions"), @@ -335,7 +347,7 @@ fn init_signature(context: &mut Context, name: FunctionName, signature: &Functio } = signature; if !type_parameters.is_empty() { let tp_loc = type_parameters[0].user_specified_name.loc; - context.env.add_diag(diag!( + context.add_diag(diag!( INIT_FUN_DIAG, (name.loc(), "Invalid 'init' function declaration"), (tp_loc, "'init' functions cannot have type parameters"), @@ -346,7 +358,7 @@ fn init_signature(context: &mut Context, name: FunctionName, signature: &Functio "'init' functions must have a return type of {}", error_format_(&Type_::Unit, &Subst::empty()) ); - context.env.add_diag(diag!( + context.add_diag(diag!( INIT_FUN_DIAG, (name.loc(), "Invalid 'init' function declaration"), (return_type.loc, msg), @@ -368,7 +380,7 @@ fn init_signature(context: &mut Context, name: FunctionName, signature: &Functio m = TX_CONTEXT_MODULE_NAME, t = TX_CONTEXT_TYPE_NAME, ); - context.env.add_diag(diag!( + context.add_diag(diag!( INIT_FUN_DIAG, (name.loc(), "Invalid 'init' function declaration"), (last_loc, msg), @@ -397,7 +409,7 @@ fn init_signature(context: &mut Context, name: FunctionName, signature: &Functio (otw_loc, otw_msg), ); diag.add_note(OTW_NOTE); - context.env.add_diag(diag) + context.add_diag(diag) } else if parameters.len() > 1 { // if there is more than one parameter, the first must be the OTW let (_, first_var, first_ty) = parameters.first().unwrap(); @@ -421,7 +433,7 @@ fn init_signature(context: &mut Context, name: FunctionName, signature: &Functio (first_ty.loc, msg) ); diag.add_note(OTW_NOTE); - context.env.add_diag(diag) + context.add_diag(diag) } else if let Some(sdef) = info .module(context.current_module()) .structs @@ -439,7 +451,7 @@ fn init_signature(context: &mut Context, name: FunctionName, signature: &Functio if parameters.len() > 2 { // no init function can take more than 2 parameters (the OTW and the TxContext) let (_, third_var, _) = ¶meters[2]; - context.env.add_diag(diag!( + context.add_diag(diag!( INIT_FUN_DIAG, (name.loc(), "Invalid 'init' function declaration"), ( @@ -474,7 +486,7 @@ fn check_otw_type( let mut valid = true; if let Some(tp) = sdef.type_parameters.first() { let msg = "One-time witness types cannot have type parameters"; - context.env.add_diag(otw_diag(diag!( + context.add_diag(otw_diag(diag!( OTW_DECL_DIAG, (name.loc(), "Invalid one-time witness declaration"), 
(tp.param.user_specified_name.loc, msg), @@ -496,7 +508,7 @@ fn check_otw_type( (loc, format!("Found more than one field. {msg_base}")) } }; - context.env.add_diag(otw_diag(diag!( + context.add_diag(otw_diag(diag!( OTW_DECL_DIAG, (name.loc(), "Invalid one-time witness declaration"), (invalid_loc, invalid_msg), @@ -527,7 +539,7 @@ fn check_otw_type( "One-time witness types can only have the have the '{}' ability", Ability_::Drop ); - context.env.add_diag(otw_diag(diag!( + context.add_diag(otw_diag(diag!( OTW_DECL_DIAG, (name.loc(), "Invalid one-time witness declaration"), (loc, msg), @@ -691,7 +703,7 @@ fn entry_param_ty( .to_owned() }; let emsg = format!("'{name}' was declared 'entry' here"); - context.env.add_diag(diag!( + context.add_diag(diag!( ENTRY_FUN_SIGNATURE_DIAG, (param.loc, pmsg), (param_ty.loc, tmsg), @@ -843,7 +855,7 @@ fn entry_return( Type_::Ref(_, _) => { let fmsg = format!("Invalid return type for entry function '{}'", name); let tmsg = "Expected a non-reference type"; - context.env.add_diag(diag!( + context.add_diag(diag!( ENTRY_FUN_SIGNATURE_DIAG, (entry_loc, fmsg), (*tloc, tmsg) @@ -917,7 +929,7 @@ fn invalid_entry_return_ty<'a>( declared_abilities, ty_args, ); - context.env.add_diag(diag) + context.add_diag(diag) } //************************************************************************************************** @@ -941,7 +953,7 @@ fn exp(context: &mut Context, e: &T::Exp) { consider extracting the logic into a new function and \ calling that instead.", ); - context.env.add_diag(diag) + context.add_diag(diag) } if module.value.is(SUI_ADDR_NAME, EVENT_MODULE_NAME) && name.value() == EVENT_FUNCTION_NAME @@ -965,7 +977,7 @@ fn exp(context: &mut Context, e: &T::Exp) { cannot be created manually, but are passed as an argument 'init'"; let mut diag = diag!(OTW_USAGE_DIAG, (e.exp.loc, msg)); diag.add_note(OTW_NOTE); - context.env.add_diag(diag) + context.add_diag(diag) } } _ => (), @@ -1005,7 +1017,7 @@ fn check_event_emit(context: &mut Context, loc: Loc, mcall: &ModuleCall) { "The type {} is not declared in the current module", error_format(first_ty, &Subst::empty()), ); - context.env.add_diag(diag!( + context.add_diag(diag!( EVENT_EMIT_CALL_DIAG, (loc, msg), (first_ty.loc, ty_msg) @@ -1083,6 +1095,6 @@ fn check_private_transfer(context: &mut Context, loc: Loc, mcall: &ModuleCall) { ); diag.add_secondary_label((store_loc, store_msg)) } - context.env.add_diag(diag) + context.add_diag(diag) } } diff --git a/external-crates/move/crates/move-compiler/src/to_bytecode/context.rs b/external-crates/move/crates/move-compiler/src/to_bytecode/context.rs index 627c30fa3049c..cd99815152253 100644 --- a/external-crates/move/crates/move-compiler/src/to_bytecode/context.rs +++ b/external-crates/move/crates/move-compiler/src/to_bytecode/context.rs @@ -27,7 +27,7 @@ pub type DatatypeDeclarations = /// Compilation context for a single compilation unit (module). 
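// --- Editorial note: illustration only, not part of the diff ---------------
// The recurring pattern in this diff: passes that used to take
// `&mut CompilationEnv` now take `&CompilationEnv`, and each pass-local
// Context carries its own `WarningFiltersScope` (cloned from the env's
// top-level scope) through which diagnostics are routed. A minimal sketch,
// mirroring the `sui_mode/typing.rs` and `typing/core.rs` hunks (it assumes
// `CompilationEnv::add_diag(&WarningFiltersScope, Diagnostic)` collects
// diagnostics behind a shared reference, e.g. via interior mutability):
//
//     pub struct Context<'env> {
//         env: &'env CompilationEnv,
//         warning_filters_scope: WarningFiltersScope,
//     }
//
//     impl<'env> Context<'env> {
//         fn new(env: &'env CompilationEnv) -> Self {
//             let warning_filters_scope = env.top_level_warning_filter_scope().clone();
//             Context { env, warning_filters_scope }
//         }
//
//         // filtering happens against the local scope, so `&self` suffices
//         fn add_diag(&self, diag: Diagnostic) {
//             self.env.add_diag(&self.warning_filters_scope, diag);
//         }
//
//         // #[allow(..)]-style filters are pushed/popped locally, not on the env
//         fn push_warning_filter_scope(&mut self, filters: WarningFilters) {
//             self.warning_filters_scope.push(filters)
//         }
//         fn pop_warning_filter_scope(&mut self) {
//             self.warning_filters_scope.pop()
//         }
//     }
//
// Passes that hold no filter scope of their own (dependency ordering,
// recursive-datatype checks, bytecode generation) call
// `env.add_error_diag(..)` instead, as the later hunks show.
// ----------------------------------------------------------------------------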
/// Contains all of the dependencies actually used in the module pub struct Context<'a> { - pub env: &'a mut CompilationEnv, + pub env: &'a CompilationEnv, current_package: Option, current_module: Option<&'a ModuleIdent>, seen_datatypes: BTreeSet<(ModuleIdent, DatatypeName)>, @@ -36,7 +36,7 @@ pub struct Context<'a> { impl<'a> Context<'a> { pub fn new( - env: &'a mut CompilationEnv, + env: &'a CompilationEnv, current_package: Option, current_module: Option<&'a ModuleIdent>, ) -> Self { diff --git a/external-crates/move/crates/move-compiler/src/to_bytecode/translate.rs b/external-crates/move/crates/move-compiler/src/to_bytecode/translate.rs index bb1ba8ab8727b..d024c40bf6b03 100644 --- a/external-crates/move/crates/move-compiler/src/to_bytecode/translate.rs +++ b/external-crates/move/crates/move-compiler/src/to_bytecode/translate.rs @@ -38,7 +38,7 @@ type CollectedInfos = UniqueMap; type CollectedInfo = (Vec<(Mutability, Var, H::SingleType)>, Attributes); fn extract_decls( - compilation_env: &mut CompilationEnv, + compilation_env: &CompilationEnv, pre_compiled_lib: Option>, prog: &G::Program, ) -> ( @@ -127,7 +127,7 @@ fn extract_decls( //************************************************************************************************** pub fn program( - compilation_env: &mut CompilationEnv, + compilation_env: &CompilationEnv, pre_compiled_lib: Option>, prog: G::Program, ) -> Vec { @@ -153,7 +153,7 @@ pub fn program( } fn module( - compilation_env: &mut CompilationEnv, + compilation_env: &CompilationEnv, ident: ModuleIdent, mdef: G::ModuleDefinition, dependency_orderings: &HashMap, @@ -227,7 +227,7 @@ fn module( match move_ir_to_bytecode::compiler::compile_module(ir_module, deps) { Ok(res) => res, Err(e) => { - compilation_env.add_diag(diag!( + compilation_env.add_error_diag(diag!( Bug::BytecodeGeneration, (ident_loc, format!("IR ERROR: {}", e)) )); diff --git a/external-crates/move/crates/move-compiler/src/typing/ast.rs b/external-crates/move/crates/move-compiler/src/typing/ast.rs index d847ba757f9ca..5bc529e4b73eb 100644 --- a/external-crates/move/crates/move-compiler/src/typing/ast.rs +++ b/external-crates/move/crates/move-compiler/src/typing/ast.rs @@ -3,23 +3,19 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - debug_display, - diagnostics::WarningFilters, + diagnostics::warning_filters::WarningFilters, expansion::ast::{ Address, Attributes, Fields, Friend, ModuleIdent, Mutability, TargetKind, Value, Visibility, }, - ice, naming::ast::{ BlockLabel, EnumDefinition, FunctionSignature, Neighbor, StructDefinition, SyntaxMethods, - Type, TypeName_, Type_, UseFuns, Var, + Type, Type_, UseFuns, Var, }, parser::ast::{ BinOp, ConstantName, DatatypeName, Field, FunctionName, UnaryOp, VariantName, ENTRY_MODIFIER, MACRO_MODIFIER, NATIVE_MODIFIER, }, - shared::{ - ast_debug::*, program_info::TypingProgramInfo, unique_map::UniqueMap, CompilationEnv, Name, - }, + shared::{ast_debug::*, program_info::TypingProgramInfo, unique_map::UniqueMap, Name}, }; use move_ir_types::location::*; use move_symbol_pool::Symbol; @@ -191,7 +187,7 @@ pub enum UnannotatedExp_ { Builtin(Box, Box), Vector(Loc, usize, Box, Box), - IfElse(Box, Box, Box), + IfElse(Box, Box, Option>), Match(Box, Spanned>), VariantMatch( Box, @@ -361,20 +357,6 @@ pub fn single_item(e: Exp) -> ExpListItem { ExpListItem::Single(e, ty) } -pub fn splat_item(env: &mut CompilationEnv, splat_loc: Loc, e: Exp) -> ExpListItem { - let ss = match &e.ty { - sp!(_, Type_::Unit) => vec![], - sp!(_, Type_::Apply(_, sp!(_, TypeName_::Multiple(_)), ss)) 
=> ss.clone(), - _ => { - let mut diag = ice!((splat_loc, "ICE called `splat_item` on a non-list type")); - diag.add_note(format!("Expression: {}", debug_display!(e))); - env.add_diag(diag); - vec![] - } - }; - ExpListItem::Splat(splat_loc, e, ss) -} - pub fn pat(ty: Type, pat: UnannotatedPat) -> MatchPattern { MatchPattern { ty, pat } } @@ -683,13 +665,15 @@ impl AstDebug for UnannotatedExp_ { }); w.write("}"); } - E::IfElse(b, t, f) => { + E::IfElse(b, t, f_opt) => { w.write("if ("); b.ast_debug(w); w.write(") "); t.ast_debug(w); - w.write(" else "); - f.ast_debug(w); + if let Some(f) = f_opt { + w.write(" else "); + f.ast_debug(w); + } } E::Match(esubject, arms) => { w.write("match ("); diff --git a/external-crates/move/crates/move-compiler/src/typing/core.rs b/external-crates/move/crates/move-compiler/src/typing/core.rs index 8c99ff9e66074..67f52ac8c3b67 100644 --- a/external-crates/move/crates/move-compiler/src/typing/core.rs +++ b/external-crates/move/crates/move-compiler/src/typing/core.rs @@ -6,7 +6,8 @@ use crate::{ debug_display, diag, diagnostics::{ codes::{NameResolution, TypeSafety}, - Diagnostic, + warning_filters::{WarningFilters, WarningFiltersScope}, + Diagnostic, Diagnostics, }, editions::FeatureGate, expansion::ast::{AbilitySet, ModuleIdent, ModuleIdent_, Mutability, Visibility}, @@ -91,7 +92,8 @@ pub(super) struct TypingDebugFlags { pub struct Context<'env> { pub modules: NamingProgramInfo, macros: UniqueMap>, - pub env: &'env mut CompilationEnv, + pub env: &'env CompilationEnv, + warning_filters_scope: WarningFiltersScope, pub(super) debug: TypingDebugFlags, deprecations: Deprecations, @@ -179,7 +181,7 @@ impl UseFunsScope { impl<'env> Context<'env> { pub fn new( - env: &'env mut CompilationEnv, + env: &'env CompilationEnv, _pre_compiled_lib: Option>, info: NamingProgramInfo, ) -> Self { @@ -191,6 +193,7 @@ impl<'env> Context<'env> { function_translation: false, type_elaboration: false, }; + let warning_filters_scope = env.top_level_warning_filter_scope().clone(); Context { use_funs: vec![global_use_funs], subst: Subst::empty(), @@ -206,6 +209,7 @@ impl<'env> Context<'env> { macros: UniqueMap::new(), named_block_map: BTreeMap::new(), env, + warning_filters_scope, debug, next_match_var_id: 0, new_friends: BTreeSet::new(), @@ -217,6 +221,31 @@ impl<'env> Context<'env> { } } + pub fn add_diag(&self, diag: Diagnostic) { + self.env.add_diag(&self.warning_filters_scope, diag); + } + + pub fn add_diags(&self, diags: Diagnostics) { + self.env.add_diags(&self.warning_filters_scope, diags); + } + + pub fn extend_ide_info(&self, info: IDEInfo) { + self.env.extend_ide_info(&self.warning_filters_scope, info); + } + + pub fn add_ide_annotation(&self, loc: Loc, info: IDEAnnotation) { + self.env + .add_ide_annotation(&self.warning_filters_scope, loc, info); + } + + pub fn push_warning_filter_scope(&mut self, filters: WarningFilters) { + self.warning_filters_scope.push(filters) + } + + pub fn pop_warning_filter_scope(&mut self) { + self.warning_filters_scope.pop() + } + pub fn set_macros( &mut self, macros: UniqueMap>, @@ -266,7 +295,7 @@ impl<'env> Context<'env> { let (target_m, target_f) = &use_fun.target_function; let msg = format!("{case} method alias '{tn}.{method}' for '{target_m}::{target_f}'"); - self.env.add_diag(diag!( + self.add_diag(diag!( Declarations::DuplicateAlias, (use_fun.loc, msg), (prev_loc, "The same alias was previously declared here") @@ -306,18 +335,18 @@ impl<'env> Context<'env> { UseFunKind::Explicit => { let msg = format!("Unused 'use fun' of '{tn}.{method}'. 
Consider removing it"); - self.env.add_diag(diag!(UnusedItem::Alias, (*loc, msg))) + self.add_diag(diag!(UnusedItem::Alias, (*loc, msg))) } UseFunKind::UseAlias => { let msg = format!("Unused 'use' of alias '{method}'. Consider removing it"); - self.env.add_diag(diag!(UnusedItem::Alias, (*loc, msg))) + self.add_diag(diag!(UnusedItem::Alias, (*loc, msg))) } UseFunKind::FunctionDeclaration => { let diag = ice!(( *loc, "ICE fun declaration 'use' funs should never be added to 'use' funs" )); - self.env.add_diag(diag); + self.add_diag(diag); } } } @@ -411,7 +440,7 @@ impl<'env> Context<'env> { }; diag.add_secondary_label((*prev_loc, msg)); } - self.env.add_diag(diag); + self.add_diag(diag); false } else { self.macro_expansion @@ -433,7 +462,7 @@ impl<'env> Context<'env> { loc, "ICE macro expansion stack should have a call when leaving a macro expansion" )); - self.env.add_diag(diag); + self.add_diag(diag); return false; } }; @@ -471,7 +500,7 @@ impl<'env> Context<'env> { loc, "ICE macro expansion stack should have a lambda when leaving a lambda", )); - self.env.add_diag(diag); + self.add_diag(diag); } } } @@ -507,8 +536,7 @@ impl<'env> Context<'env> { self.lambda_expansion = vec![]; if !self.ide_info.is_empty() { - self.env - .add_diag(ice!((loc, "IDE info should be cleared after each item"))); + self.add_diag(ice!((loc, "IDE info should be cleared after each item"))); self.ide_info = IDEInfo::new(); } } @@ -575,15 +603,14 @@ impl<'env> Context<'env> { pub fn declare_local(&mut self, _: Mutability, var: Var, ty: Type) { if let Err((_, prev_loc)) = self.locals.add(var, ty) { let msg = format!("ICE duplicate {var:?}. Should have been made unique in naming"); - self.env - .add_diag(ice!((var.loc, msg), (prev_loc, "Previously declared here"))); + self.add_diag(ice!((var.loc, msg), (prev_loc, "Previously declared here"))); } } pub fn get_local_type(&mut self, var: &Var) -> Type { if !self.locals.contains_key(var) { let msg = format!("ICE unbound {var:?}. 
Should have failed in naming"); - self.env.add_diag(ice!((var.loc, msg))); + self.add_diag(ice!((var.loc, msg))); return self.error_type(var.loc); } @@ -659,7 +686,8 @@ impl<'env> Context<'env> { if deprecation.location == AttributePosition::Module && in_same_module { return; } - deprecation.emit_deprecation_warning(self.env, name, method_opt); + let diags = deprecation.deprecation_warnings(name, method_opt); + self.add_diags(diags); } } @@ -847,7 +875,7 @@ impl<'env> Context<'env> { } impl MatchContext for Context<'_> { - fn env(&mut self) -> &mut CompilationEnv { + fn env(&mut self) -> &CompilationEnv { self.env } @@ -1102,7 +1130,7 @@ fn debug_abilities_info(context: &mut Context, ty: &Type) -> (Option, Abili loc, "ICE did not call unfold_type before debug_abiliites_info" )); - context.env.add_diag(diag); + context.add_diag(diag); (None, AbilitySet::all(loc), vec![]) } T::UnresolvedError | T::Anything => (None, AbilitySet::all(loc), vec![]), @@ -1238,7 +1266,7 @@ pub fn make_struct_field_type( N::StructFields::Native(nloc) => { let nloc = *nloc; let msg = format!("Unbound field '{}' for native struct '{}::{}'", field, m, n); - context.env.add_diag(diag!( + context.add_diag(diag!( NameResolution::UnboundField, (loc, msg), (nloc, "Struct declared 'native' here") @@ -1249,7 +1277,7 @@ pub fn make_struct_field_type( }; match fields_map.get(field).cloned() { None => { - context.env.add_diag(diag!( + context.add_diag(diag!( NameResolution::UnboundField, (loc, format!("Unbound field '{}' in '{}::{}'", field, m, n)), )); @@ -1364,7 +1392,7 @@ pub fn make_constant_type( let msg = format!("Invalid access of '{}::{}'", m, c); let internal_msg = "Constants are internal to their module, and cannot can be accessed \ outside of their module"; - context.env.add_diag(diag!( + context.add_diag(diag!( TypeSafety::Visibility, (loc, msg), (defined_loc, internal_msg) @@ -1396,7 +1424,7 @@ pub fn make_method_call_type( loc, format!("ICE method on tuple type {}", debug_display!(tn)) )); - context.env.add_diag(diag); + context.add_diag(diag); return None; } TypeName_::Builtin(sp!(_, bt_)) => context.env.primitive_definer(*bt_), @@ -1433,7 +1461,7 @@ pub fn make_method_call_type( No known method '{method}' on type '{lhs_ty_str}'" ); let fmsg = format!("The function '{m}::{method}' exists, {arg_msg}"); - context.env.add_diag(diag!( + context.add_diag(diag!( TypeSafety::InvalidMethodCall, (loc, msg), (first_ty_loc, fmsg) @@ -1451,7 +1479,7 @@ pub fn make_method_call_type( }; let fmsg = format!("No local 'use fun' alias was found for '{lhs_ty_str}.{method}'{decl_msg}"); - context.env.add_diag(diag!( + context.add_diag(diag!( TypeSafety::InvalidMethodCall, (loc, msg), (method.loc, fmsg) @@ -1739,7 +1767,7 @@ fn report_visibility_error_( diag.add_secondary_label((call.invocation, "While expanding this macro")); } _ => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( call_loc, "Error when dealing with macro visibilities" ))); @@ -1752,7 +1780,7 @@ fn report_visibility_error_( "Visibility inside of expanded macros is resolved in the scope of the caller.", ); } - context.env.add_diag(diag); + context.add_diag(diag); } pub fn check_call_arity S>( @@ -1777,7 +1805,7 @@ pub fn check_call_arity S>( arity, given_len ); - context.env.add_diag(diag!( + context.add_diag(diag!( code, (loc, cmsg), (argloc, format!("Found {} argument(s) here", given_len)), @@ -1873,7 +1901,7 @@ fn solve_ability_constraint( format!("'{}' constraint declared here", constraint), )); } - context.env.add_diag(diag) + context.add_diag(diag) } } 
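// --- Editorial note: illustration only, not part of the diff ---------------
// Side-effecting helpers are also converted to return `Diagnostics` so the
// caller can report them through its own warning-filter scope (see the
// deprecation call site above, and `typing/deprecation_warnings.rs` later in
// this diff). A sketch of that pattern, with hypothetical helper names:
//
//     // before: the helper needed `&mut CompilationEnv` to emit directly
//     fn emit_warning(env: &mut CompilationEnv, loc: Loc, msg: String) {
//         env.add_diag(diag!(TypeSafety::DeprecatedUsage, (loc, msg)));
//     }
//
//     // after: the helper is pure; the caller decides how/whether to report
//     fn warning(loc: Loc, msg: String) -> Diagnostics {
//         Diagnostics::from(vec![diag!(TypeSafety::DeprecatedUsage, (loc, msg))])
//     }
//
//     // caller, inside a Context method:
//     //     let diags = warning(loc, msg);
//     //     self.add_diags(diags);   // filtered by the caller's WarningFiltersScope
// ----------------------------------------------------------------------------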
@@ -1973,7 +2001,7 @@ fn solve_builtin_type_constraint( } _ => { let tmsg = mk_tmsg(); - context.env.add_diag(diag!( + context.add_diag(diag!( TypeSafety::BuiltinOperation, (loc, format!("Invalid argument to '{}'", op)), (tloc, tmsg) @@ -1991,7 +2019,7 @@ fn solve_base_type_constraint(context: &mut Context, loc: Loc, msg: String, ty: Unit | Ref(_, _) | Apply(_, sp!(_, Multiple(_)), _) => { let tystr = error_format(ty, &context.subst); let tmsg = format!("Expected a single non-reference type, but found: {}", tystr); - context.env.add_diag(diag!( + context.add_diag(diag!( TypeSafety::ExpectedBaseType, (loc, msg), (tyloc, tmsg) @@ -2012,7 +2040,7 @@ fn solve_single_type_constraint(context: &mut Context, loc: Loc, msg: String, ty "Expected a single type, but found expression list type: {}", error_format(ty, &context.subst) ); - context.env.add_diag(diag!( + context.add_diag(diag!( TypeSafety::ExpectedSingleType, (loc, msg), (tyloc, tmsg) @@ -2363,7 +2391,7 @@ fn check_type_argument_arity String>( arity, args_len ); - context.env.add_diag(diag!(code, (loc, msg))); + context.add_diag(diag!(code, (loc, msg))); } while ty_args.len() > arity { diff --git a/external-crates/move/crates/move-compiler/src/typing/dependency_ordering.rs b/external-crates/move/crates/move-compiler/src/typing/dependency_ordering.rs index be058f0afac57..0db5e1b0f4a2b 100644 --- a/external-crates/move/crates/move-compiler/src/typing/dependency_ordering.rs +++ b/external-crates/move/crates/move-compiler/src/typing/dependency_ordering.rs @@ -20,7 +20,7 @@ use std::collections::{BTreeMap, BTreeSet}; //************************************************************************************************** pub fn program( - compilation_env: &mut CompilationEnv, + compilation_env: &CompilationEnv, modules: &mut UniqueMap, ) { let imm_modules = &modules; @@ -38,7 +38,7 @@ pub fn program( Err(cycle_node) => { let cycle_ident = *cycle_node.node_id(); let error = cycle_error(&module_neighbors, cycle_ident); - compilation_env.add_diag(error); + compilation_env.add_error_diag(error); } Ok(ordered_ids) => { for (order, mident) in ordered_ids.iter().rev().enumerate() { @@ -63,7 +63,7 @@ enum DepType { } struct Context<'a, 'env> { - env: &'env mut CompilationEnv, + env: &'env CompilationEnv, modules: &'a UniqueMap, // A union of uses and friends for modules (used for cyclyc dependency checking) // - if A uses B, add edge A -> B @@ -79,7 +79,7 @@ struct Context<'a, 'env> { impl<'a, 'env> Context<'a, 'env> { fn new( - env: &'env mut CompilationEnv, + env: &'env CompilationEnv, modules: &'a UniqueMap, ) -> Self { Context { @@ -372,7 +372,7 @@ fn lvalue(context: &mut Context, sp!(loc, lv_): &T::LValue) { } } L::BorrowUnpackVariant(..) | L::UnpackVariant(..) => { - context.env.add_diag(ice!(( + context.env.add_error_diag(ice!(( *loc, "variant unpacking shouldn't occur before match expansion" ))); @@ -402,10 +402,12 @@ fn exp(context: &mut Context, e: &T::Exp) { type_(context, ty); exp(context, e); } - E::IfElse(e1, e2, e3) => { + E::IfElse(e1, e2, e3_opt) => { exp(context, e1); exp(context, e2); - exp(context, e3); + if let Some(e3) = e3_opt { + exp(context, e3); + } } E::Match(esubject, arms) => { exp(context, esubject); @@ -418,7 +420,7 @@ fn exp(context: &mut Context, e: &T::Exp) { } } E::VariantMatch(..) 
=> { - context.env.add_diag(ice!(( + context.env.add_error_diag(ice!(( e.exp.loc, "shouldn't find variant match before HLIR lowering" ))); diff --git a/external-crates/move/crates/move-compiler/src/typing/deprecation_warnings.rs b/external-crates/move/crates/move-compiler/src/typing/deprecation_warnings.rs index 9ba6765d66dc3..e4df3a5fe4aec 100644 --- a/external-crates/move/crates/move-compiler/src/typing/deprecation_warnings.rs +++ b/external-crates/move/crates/move-compiler/src/typing/deprecation_warnings.rs @@ -3,6 +3,7 @@ use crate::{ diag, + diagnostics::Diagnostics, expansion::ast::{self as E, ModuleIdent}, ice, shared::{ @@ -43,7 +44,7 @@ pub struct Deprecations { impl Deprecations { /// Index the modules and their members for deprecation attributes and register each /// deprecation attribute for use later on. - pub fn new(env: &mut CompilationEnv, info: &NamingProgramInfo) -> Self { + pub fn new(env: &CompilationEnv, info: &NamingProgramInfo) -> Self { let mut deprecated_members = HashMap::new(); for (mident, module_info) in info.modules.key_cloned_iter() { @@ -120,12 +121,7 @@ impl Deprecations { impl Deprecation { /// Emit a warning for the deprecation of a module member. - pub fn emit_deprecation_warning( - &self, - env: &mut CompilationEnv, - member_name: Name, - method_opt: Option, - ) { + pub fn deprecation_warnings(&self, member_name: Name, method_opt: Option) -> Diagnostics { let mident_string = self.module_ident.to_string(); let location_string = match (self.location, method_opt) { (AttributePosition::Module, None) => { @@ -159,7 +155,10 @@ impl Deprecation { let location = method_opt.map_or(member_name.loc, |method| method.loc); - env.add_diag(diag!(TypeSafety::DeprecatedUsage, (location, message))); + Diagnostics::from(vec![diag!( + TypeSafety::DeprecatedUsage, + (location, message) + )]) } } @@ -168,7 +167,7 @@ impl Deprecation { // #[deprecated] attributes (malformed, or multiple on the member), add an error diagnostic to // `env` and return None. fn deprecations( - env: &mut CompilationEnv, + env: &CompilationEnv, attr_position: AttributePosition, attrs: &E::Attributes, source_location: Loc, @@ -184,7 +183,7 @@ fn deprecations( } if deprecations.len() != 1 { - env.add_diag(ice!(( + env.add_error_diag(ice!(( source_location, "ICE: verified that there is at at least one deprecation attribute above, \ and expansion should have failed if there were multiple deprecation attributes." @@ -196,7 +195,7 @@ fn deprecations( .last() .expect("Verified deprecations is not empty above"); - let mut make_invalid_deprecation_diag = || { + let make_invalid_deprecation_diag = || { let mut diag = diag!( Attributes::InvalidUsage, ( @@ -209,7 +208,7 @@ fn deprecations( DeprecationAttribute.name() ); diag.add_note(note); - env.add_diag(diag); + env.add_error_diag(diag); None }; diff --git a/external-crates/move/crates/move-compiler/src/typing/expand.rs b/external-crates/move/crates/move-compiler/src/typing/expand.rs index 9f36764277e55..bae757038b6c7 100644 --- a/external-crates/move/crates/move-compiler/src/typing/expand.rs +++ b/external-crates/move/crates/move-compiler/src/typing/expand.rs @@ -69,14 +69,14 @@ pub fn type_(context: &mut Context, ty: &mut Type) { ty.loc, "ICE unfold_type_base failed to expand type inf. var" )); - context.env.add_diag(diag); + context.env.add_error_diag(diag); sp(loc, UnresolvedError) } sp!(loc, Anything) => { let msg = "Could not infer this type. 
Try adding an annotation"; context .env - .add_diag(diag!(TypeSafety::UninferredType, (ty.loc, msg))); + .add_error_diag(diag!(TypeSafety::UninferredType, (ty.loc, msg))); sp(loc, UnresolvedError) } sp!(loc, Fun(_, _)) if !context.in_macro_function => { @@ -96,7 +96,7 @@ pub fn type_(context: &mut Context, ty: &mut Type) { ty.loc, format!("ICE expanding pre-expanded type {}", debug_display!(aty)) )); - context.env.add_diag(diag); + context.env.add_error_diag(diag); *ty = sp(ty.loc, UnresolvedError) } Apply(None, _, _) => { @@ -108,7 +108,7 @@ pub fn type_(context: &mut Context, ty: &mut Type) { } _ => { let diag = ice!((ty.loc, "ICE type-apply switched to non-apply")); - context.env.add_diag(diag); + context.env.add_error_diag(diag); *ty = sp(ty.loc, UnresolvedError) } } @@ -134,7 +134,7 @@ fn unexpected_lambda_type(context: &mut Context, loc: Loc) { Lambdas can only be used with 'macro' functions, as parameters or direct arguments"; context .env - .add_diag(diag!(TypeSafety::UnexpectedFunctionType, (loc, msg))); + .add_error_diag(diag!(TypeSafety::UnexpectedFunctionType, (loc, msg))); } } @@ -234,10 +234,12 @@ pub fn exp(context: &mut Context, e: &mut T::Exp) { exp(context, args); } - E::IfElse(eb, et, ef) => { + E::IfElse(eb, et, ef_opt) => { exp(context, eb); exp(context, et); - exp(context, ef); + if let Some(ef) = ef_opt { + exp(context, ef) + } } E::Match(esubject, arms) => { exp(context, esubject); @@ -246,7 +248,7 @@ pub fn exp(context: &mut Context, e: &mut T::Exp) { } } E::VariantMatch(subject, _, arms) => { - context.env.add_diag(ice!(( + context.env.add_error_diag(ice!(( e.exp.loc, "shouldn't find variant match before match compilation" ))); @@ -355,7 +357,7 @@ fn inferred_numerical_value( "Annotating the literal might help inference: '{value}{type}'", type=fix_bt, ); - context.env.add_diag(diag!( + context.env.add_error_diag(diag!( TypeSafety::InvalidNum, (eloc, "Invalid numerical literal"), (ty.loc, msg), diff --git a/external-crates/move/crates/move-compiler/src/typing/infinite_instantiations.rs b/external-crates/move/crates/move-compiler/src/typing/infinite_instantiations.rs index 03e98d4e657cb..0e9745d4b4f1f 100644 --- a/external-crates/move/crates/move-compiler/src/typing/infinite_instantiations.rs +++ b/external-crates/move/crates/move-compiler/src/typing/infinite_instantiations.rs @@ -140,7 +140,7 @@ impl<'a> Context<'a> { //************************************************************************************************** pub fn modules( - compilation_env: &mut CompilationEnv, + compilation_env: &CompilationEnv, modules: &UniqueMap, ) { let tparams = modules @@ -171,7 +171,7 @@ macro_rules! 
scc_edges { } fn module<'a>( - compilation_env: &mut CompilationEnv, + compilation_env: &CompilationEnv, tparams: &'a BTreeMap>>, mname: ModuleIdent, module: &T::ModuleDefinition, @@ -188,7 +188,7 @@ fn module<'a>( petgraph_scc(&graph) .into_iter() .filter(|scc| scc_edges!(&graph, scc).any(|(_, e, _)| e == Edge::Nested)) - .for_each(|scc| compilation_env.add_diag(cycle_error(context, &graph, scc))) + .for_each(|scc| compilation_env.add_error_diag(cycle_error(context, &graph, scc))) } //************************************************************************************************** @@ -239,10 +239,12 @@ fn exp(context: &mut Context, e: &T::Exp) { exp(context, &call.arguments) } - E::IfElse(eb, et, ef) => { + E::IfElse(eb, et, ef_opt) => { exp(context, eb); exp(context, et); - exp(context, ef); + if let Some(ef) = ef_opt { + exp(context, ef) + } } E::Match(esubject, arms) => { exp(context, esubject); diff --git a/external-crates/move/crates/move-compiler/src/typing/macro_expand.rs b/external-crates/move/crates/move-compiler/src/typing/macro_expand.rs index db7dcb2a7fb35..58fb3c512d551 100644 --- a/external-crates/move/crates/move-compiler/src/typing/macro_expand.rs +++ b/external-crates/move/crates/move-compiler/src/typing/macro_expand.rs @@ -64,7 +64,7 @@ pub(crate) fn call( let reloc_clever_errors = match &context.macro_expansion[0] { core::MacroExpansion::Call(call) => call.invocation, core::MacroExpansion::Argument { .. } => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( call_loc, "ICE top level macro scope should never be an argument" ))); @@ -92,7 +92,7 @@ pub(crate) fn call( return None; } Err(Some(diag)) => { - context.env.add_diag(*diag); + context.add_diag(*diag); return None; } }; @@ -288,9 +288,7 @@ fn bind_lambda( "Unable to bind lambda to parameter '{}'. The lambda must be passed directly", param.name ); - context - .env - .add_diag(diag!(TypeSafety::CannotExpandMacro, (arg.loc, msg))); + context.add_diag(diag!(TypeSafety::CannotExpandMacro, (arg.loc, msg))); None } } @@ -551,10 +549,12 @@ fn recolor_exp(ctx: &mut Recolor, sp!(_, e_): &mut N::Exp) { recolor_lvalues(ctx, lvalues); recolor_exp(ctx, e) } - N::Exp_::IfElse(econd, et, ef) => { + N::Exp_::IfElse(econd, et, ef_opt) => { recolor_exp(ctx, econd); recolor_exp(ctx, et); - recolor_exp(ctx, ef); + if let Some(ef) = ef_opt { + recolor_exp(ctx, ef); + } } N::Exp_::Match(subject, arms) => { recolor_exp(ctx, subject); @@ -745,9 +745,7 @@ fn report_unused_argument(context: &mut core::Context, loc: EvalStrategy { + N::Exp_::IfElse(econd, et, ef_opt) => { exp(context, econd); exp(context, et); - exp(context, ef); + if let Some(ef) = ef_opt { + exp(context, ef) + } } N::Exp_::Match(subject, arms) => { macro_rules! 
take_and_mut_replace { @@ -1061,7 +1061,6 @@ fn exp(context: &mut Context, sp!(eloc, e_): &mut N::Exp) { if context.core.env.ide_mode() { context .core - .env .add_ide_annotation(*eloc, IDEAnnotation::ExpandedLambda); } *e_ = block; @@ -1100,7 +1099,7 @@ fn exp(context: &mut Context, sp!(eloc, e_): &mut N::Exp) { N::Exp_::VarCall(sp!(_, v_), _) if context.by_name_args.contains_key(v_) => { context.mark_used(v_); let (arg, _expected_ty) = context.by_name_args.get(v_).unwrap(); - context.core.env.add_diag(diag!( + context.core.add_diag(diag!( TypeSafety::CannotExpandMacro, (*eloc, "Cannot call non-lambda argument"), (arg.loc, "Expected a lambda argument") diff --git a/external-crates/move/crates/move-compiler/src/typing/match_analysis.rs b/external-crates/move/crates/move-compiler/src/typing/match_analysis.rs index 5c3fcc78e31d1..7bbf7b29032da 100644 --- a/external-crates/move/crates/move-compiler/src/typing/match_analysis.rs +++ b/external-crates/move/crates/move-compiler/src/typing/match_analysis.rs @@ -3,6 +3,7 @@ use crate::{ diag, + diagnostics::warning_filters::WarningFilters, expansion::ast::{ModuleIdent, Value_}, ice, naming::ast::BuiltinTypeName_, @@ -70,12 +71,12 @@ impl TypingMutVisitorContext for MatchCompiler<'_, '_> { } } - fn add_warning_filter_scope(&mut self, filter: crate::diagnostics::WarningFilters) { - self.context.env.add_warning_filter_scope(filter); + fn push_warning_filter_scope(&mut self, filter: WarningFilters) { + self.context.push_warning_filter_scope(filter); } fn pop_warning_filter_scope(&mut self) { - self.context.env.pop_warning_filter_scope(); + self.context.pop_warning_filter_scope(); } } @@ -564,7 +565,7 @@ fn find_counterexample_impl( } else { // An error case: no entry on the fringe but no if !context.env.has_errors() { - context.env.add_diag(ice!(( + context.add_diag(ice!(( matrix.loc, "Non-empty matrix with non errors but no type" ))); @@ -593,7 +594,7 @@ fn find_counterexample_impl( if has_guards { diag.add_note("Match arms with guards are not considered for coverage."); } - context.env.add_diag(diag); + context.add_diag(diag); true } else { false @@ -622,9 +623,7 @@ fn ide_report_missing_arms(context: &mut Context, loc: Loc, matrix: &PatternMatr if !unused.is_empty() { let arms = unused.into_iter().map(PS::Value).collect::>(); let info = MissingMatchArmsInfo { arms }; - context - .env - .add_ide_annotation(loc, IDEAnnotation::MissingMatchArms(Box::new(info))); + context.add_ide_annotation(loc, IDEAnnotation::MissingMatchArms(Box::new(info))); } } @@ -635,9 +634,7 @@ fn ide_report_missing_arms(context: &mut Context, loc: Loc, matrix: &PatternMatr let info = MissingMatchArmsInfo { arms: vec![PS::Wildcard], }; - context - .env - .add_ide_annotation(loc, IDEAnnotation::MissingMatchArms(Box::new(info))); + context.add_ide_annotation(loc, IDEAnnotation::MissingMatchArms(Box::new(info))); } } @@ -657,7 +654,7 @@ fn ide_report_missing_arms(context: &mut Context, loc: Loc, matrix: &PatternMatr // If the matrix _is_ empty, we suggest adding an unpack. 
let is_positional = context.modules.struct_is_positional(&mident, &name); let Some(fields) = context.modules.struct_fields(&mident, &name) else { - context.env.add_diag(ice!(( + context.add_diag(ice!(( loc, "Tried to look up fields for this struct and found none" ))); @@ -684,9 +681,7 @@ fn ide_report_missing_arms(context: &mut Context, loc: Loc, matrix: &PatternMatr let info = MissingMatchArmsInfo { arms: vec![suggestion], }; - context - .env - .add_ide_annotation(loc, IDEAnnotation::MissingMatchArms(Box::new(info))); + context.add_ide_annotation(loc, IDEAnnotation::MissingMatchArms(Box::new(info))); } else { // If there's a default arm, no suggestion is necessary. if matrix.has_default_arm() { @@ -722,7 +717,7 @@ fn ide_report_missing_arms(context: &mut Context, loc: Loc, matrix: &PatternMatr .modules .enum_variant_fields(&mident, &name, &variant) else { - context.env.add_diag(ice!(( + context.add_diag(ice!(( loc, "Tried to look up fields for this enum and found none" ))); @@ -752,14 +747,12 @@ fn ide_report_missing_arms(context: &mut Context, loc: Loc, matrix: &PatternMatr arms.push(suggestion); } let info = MissingMatchArmsInfo { arms }; - context - .env - .add_ide_annotation(loc, IDEAnnotation::MissingMatchArms(Box::new(info))); + context.add_ide_annotation(loc, IDEAnnotation::MissingMatchArms(Box::new(info))); } } let Some(ty) = matrix.tys.first() else { - context.env.add_diag(ice!(( + context.add_diag(ice!(( loc, "Pattern matrix with no types handed to IDE function" ))); @@ -778,7 +771,7 @@ fn ide_report_missing_arms(context: &mut Context, loc: Loc, matrix: &PatternMatr } else { if !context.env.has_errors() { // It's unclear how we got here, so report an ICE and suggest a wildcard. - context.env.add_diag(ice!(( + context.add_diag(ice!(( loc, format!( "Found non-matchable type {} as match subject", @@ -790,9 +783,7 @@ fn ide_report_missing_arms(context: &mut Context, loc: Loc, matrix: &PatternMatr let info = MissingMatchArmsInfo { arms: vec![PS::Wildcard], }; - context - .env - .add_ide_annotation(loc, IDEAnnotation::MissingMatchArms(Box::new(info))); + context.add_ide_annotation(loc, IDEAnnotation::MissingMatchArms(Box::new(info))); } } } diff --git a/external-crates/move/crates/move-compiler/src/typing/match_compilation.rs b/external-crates/move/crates/move-compiler/src/typing/match_compilation.rs index 347376fe9eda9..bb80f3f350b3f 100644 --- a/external-crates/move/crates/move-compiler/src/typing/match_compilation.rs +++ b/external-crates/move/crates/move-compiler/src/typing/match_compilation.rs @@ -61,8 +61,8 @@ impl TypingVisitorContext for MatchCompiler<'_, '_> { } } - fn add_warning_filter_scope(&mut self, filter: crate::diagnostics::WarningFilters) { - self.context.env.add_warning_filter_scope(filter); + fn push_warning_filter_scope(&mut self, filter: crate::diagnostics::WarningFilters) { + self.context.env.push_warning_filter_scope(filter); } fn pop_warning_filter_scope(&mut self) { diff --git a/external-crates/move/crates/move-compiler/src/typing/recursive_datatypes.rs b/external-crates/move/crates/move-compiler/src/typing/recursive_datatypes.rs index 58a9053120255..11f053a366236 100644 --- a/external-crates/move/crates/move-compiler/src/typing/recursive_datatypes.rs +++ b/external-crates/move/crates/move-compiler/src/typing/recursive_datatypes.rs @@ -54,7 +54,7 @@ impl Context { //************************************************************************************************** pub fn modules( - compilation_env: &mut CompilationEnv, + compilation_env: &CompilationEnv, 
modules: &UniqueMap, ) { modules @@ -62,7 +62,7 @@ pub fn modules( .for_each(|(mname, m)| module(compilation_env, mname, m)) } -fn module(compilation_env: &mut CompilationEnv, mname: ModuleIdent, module: &T::ModuleDefinition) { +fn module(compilation_env: &CompilationEnv, mname: ModuleIdent, module: &T::ModuleDefinition) { let context = &mut Context::new(mname); module .structs @@ -79,7 +79,7 @@ fn module(compilation_env: &mut CompilationEnv, mname: ModuleIdent, module: &T:: petgraph_scc(&graph) .into_iter() .filter(|scc| scc.len() > 1 || graph.contains_edge(scc[0], scc[0])) - .for_each(|scc| compilation_env.add_diag(cycle_error(context, &graph, scc[0]))) + .for_each(|scc| compilation_env.add_error_diag(cycle_error(context, &graph, scc[0]))) } fn struct_def(context: &mut Context, sname: DatatypeName, sdef: &N::StructDefinition) { diff --git a/external-crates/move/crates/move-compiler/src/typing/syntax_methods.rs b/external-crates/move/crates/move-compiler/src/typing/syntax_methods.rs index d1ba995249756..6e8b839c068de 100644 --- a/external-crates/move/crates/move-compiler/src/typing/syntax_methods.rs +++ b/external-crates/move/crates/move-compiler/src/typing/syntax_methods.rs @@ -73,7 +73,7 @@ fn validate_index_syntax_methods( diag.add_note( "Index operations on the same type must take the name number of type arguments", ); - context.env.add_diag(diag); + context.add_diag(diag); return false; } @@ -92,7 +92,7 @@ fn validate_index_syntax_methods( (index_mut.loc, index_mut_msg), ); diag.add_note("Index operations on the same type must take the name number of parameters"); - context.env.add_diag(diag); + context.add_diag(diag); return false; } @@ -121,7 +121,7 @@ fn validate_index_syntax_methods( diag.add_note( "Index operations on use the same abilities for their type parameters", ); - context.env.add_diag(diag); + context.add_diag(diag); valid = false; } } @@ -142,7 +142,7 @@ fn validate_index_syntax_methods( diag.add_note( "Index operations on use the same abilities for their type parameters", ); - context.env.add_diag(diag); + context.add_diag(diag); valid = false; } } @@ -200,7 +200,7 @@ fn validate_index_syntax_methods( let N::Type_::Ref(false, inner) = core::ready_tvars(&subst, subject_ref_type.clone()).value else { - context.env.add_diag(ice!(( + context.add_diag(ice!(( index_finfo.signature.return_type.loc, "This index function got to type verification with an invalid type" ))); @@ -228,7 +228,7 @@ fn validate_index_syntax_methods( diag.add_note( "These functions must take the same subject type, differing only by mutability", ); - context.env.add_diag(diag); + context.add_diag(diag); valid = false; } } else { @@ -262,7 +262,7 @@ fn validate_index_syntax_methods( &mut_finfo.signature.type_parameters, ); diag.add_note("Index operation non-subject parameter types must match exactly"); - context.env.add_diag(diag); + context.add_diag(diag); valid = false; } } @@ -282,7 +282,7 @@ fn validate_index_syntax_methods( let index_msg = format!("This index function returns type {}", ty_str(index_type)); let N::Type_::Ref(false, inner) = core::ready_tvars(&subst, index_ty.return_.clone()).value else { - context.env.add_diag(ice!(( + context.add_diag(ice!(( index_finfo.signature.return_type.loc, "This index function got to type verification with an invalid type" ))); @@ -308,7 +308,7 @@ fn validate_index_syntax_methods( &mut_finfo.signature.type_parameters, ); diag.add_note("These functions must return the same type, differing only by mutability"); - context.env.add_diag(diag); + 
context.add_diag(diag); valid = false; } diff --git a/external-crates/move/crates/move-compiler/src/typing/translate.rs b/external-crates/move/crates/move-compiler/src/typing/translate.rs index f5e374f2a1817..ba75d91516ec4 100644 --- a/external-crates/move/crates/move-compiler/src/typing/translate.rs +++ b/external-crates/move/crates/move-compiler/src/typing/translate.rs @@ -53,7 +53,7 @@ use std::{ //************************************************************************************************** pub fn program( - compilation_env: &mut CompilationEnv, + compilation_env: &CompilationEnv, pre_compiled_lib: Option>, prog: N::Program, ) -> T::Program { @@ -226,7 +226,7 @@ fn module( } = mdef; context.current_module = Some(ident); context.current_package = package_name; - context.env.add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); context.add_use_funs_scope(use_funs); structs .iter_mut() @@ -238,7 +238,7 @@ fn module( assert!(context.constraints.is_empty()); context.current_package = None; let use_funs = context.pop_use_funs_scope(); - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); let typed_module = T::ModuleDefinition { loc, warning_filter, @@ -270,7 +270,7 @@ fn finalize_ide_info(context: &mut Context) { for (_loc, ann) in info.iter_mut() { expand::ide_annotation(context, ann); } - context.env.extend_ide_info(info); + context.extend_ide_info(info); } //************************************************************************************************** @@ -289,7 +289,7 @@ fn function(context: &mut Context, name: FunctionName, f: N::Function) -> T::Fun mut signature, body: n_body, } = f; - context.env.add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); assert!(context.constraints.is_empty()); context.reset_for_module_item(name.loc()); context.current_function = Some(name); @@ -310,7 +310,7 @@ fn function(context: &mut Context, name: FunctionName, f: N::Function) -> T::Fun finalize_ide_info(context); context.current_function = None; context.in_macro_function = false; - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); T::Function { warning_filter, index, @@ -394,7 +394,7 @@ fn constant(context: &mut Context, name: ConstantName, nconstant: N::Constant) - signature, value: nvalue, } = nconstant; - context.env.add_warning_filter_scope(warning_filter.clone()); + context.push_warning_filter_scope(warning_filter.clone()); process_attributes(context, &attributes); @@ -426,7 +426,7 @@ fn constant(context: &mut Context, name: ConstantName, nconstant: N::Constant) - if context.env.ide_mode() { finalize_ide_info(context); } - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); T::Constant { warning_filter, @@ -503,9 +503,7 @@ mod check_valid_constant { core::error_format(ty, &Subst::empty()), format_comma(tys), ); - context - .env - .add_diag(diag!(code, (sloc, fmsg()), (loc, tmsg))) + context.add_diag(diag!(code, (sloc, fmsg()), (loc, tmsg))) } pub fn exp(context: &mut Context, e: &T::Exp) { @@ -572,10 +570,12 @@ mod check_valid_constant { s = format!("'{}' is", b); &s } - E::IfElse(eb, et, ef) => { + E::IfElse(eb, et, ef_opt) => { exp(context, eb); exp(context, et); - exp(context, ef); + if let Some(ef) = ef_opt { + exp(context, ef) + } "'if' expressions are" } E::Match(esubject, sp!(_, arms)) => { @@ -589,7 +589,7 @@ mod check_valid_constant { "'match' expressions are" } E::VariantMatch(_subject, _, 
_arms) => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( *loc, "shouldn't find variant match before match compilation" ))); @@ -642,7 +642,7 @@ mod check_valid_constant { "Enum variants are" } }; - context.env.add_diag(diag!( + context.add_diag(diag!( TypeSafety::UnsupportedConstant, (*loc, format!("{} not supported in constants", error_case)) )); @@ -689,9 +689,7 @@ mod check_valid_constant { } }; let msg = format!("{} are not supported in constants", error_case); - context - .env - .add_diag(diag!(TypeSafety::UnsupportedConstant, (*loc, msg),)) + context.add_diag(diag!(TypeSafety::UnsupportedConstant, (*loc, msg),)) } } @@ -702,9 +700,7 @@ mod check_valid_constant { fn struct_def(context: &mut Context, sloc: Loc, s: &mut N::StructDefinition) { assert!(context.constraints.is_empty()); context.reset_for_module_item(sloc); - context - .env - .add_warning_filter_scope(s.warning_filter.clone()); + context.push_warning_filter_scope(s.warning_filter.clone()); let field_map = match &mut s.fields { N::StructFields::Native(_) => return, @@ -747,15 +743,13 @@ fn struct_def(context: &mut Context, sloc: Loc, s: &mut N::StructDefinition) { expand::type_(context, &mut idx_ty.1); } check_type_params_usage(context, &s.type_parameters, field_map); - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); } fn enum_def(context: &mut Context, enum_: &mut N::EnumDefinition) { assert!(context.constraints.is_empty()); - context - .env - .add_warning_filter_scope(enum_.warning_filter.clone()); + context.push_warning_filter_scope(enum_.warning_filter.clone()); let enum_abilities = &enum_.abilities; let enum_type_params = &enum_.type_parameters; @@ -768,7 +762,7 @@ fn enum_def(context: &mut Context, enum_: &mut N::EnumDefinition) { } check_variant_type_params_usage(context, enum_type_params, field_types); - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); } fn variant_def( @@ -1029,7 +1023,7 @@ fn invalid_phantom_use_error( } }; let decl_msg = format!("'{}' declared here as phantom", ¶m.user_specified_name); - context.env.add_diag(diag!( + context.add_diag(diag!( Declarations::InvalidPhantomUse, (ty_loc, msg), (param.user_specified_name.loc, decl_msg), @@ -1048,9 +1042,7 @@ fn check_non_phantom_param_usage( "Unused type parameter '{}'. 
Consider declaring it as phantom", name ); - context - .env - .add_diag(diag!(UnusedItem::StructTypeParam, (name.loc, msg))) + context.add_diag(diag!(UnusedItem::StructTypeParam, (name.loc, msg))) } Some(false) => { let msg = format!( @@ -1058,9 +1050,7 @@ fn check_non_phantom_param_usage( adding a phantom declaration here", name ); - context - .env - .add_diag(diag!(Declarations::InvalidNonPhantomUse, (name.loc, msg))) + context.add_diag(diag!(Declarations::InvalidNonPhantomUse, (name.loc, msg))) } Some(true) => {} } @@ -1246,7 +1236,7 @@ fn subtype_impl T>( Err(e) => { context.subst = subst; let diag = typing_error(context, /* from_subtype */ true, loc, msg, e); - context.env.add_diag(diag); + context.add_diag(diag); Err(rhs) } Ok((next_subst, ty)) => { @@ -1296,7 +1286,7 @@ fn join_opt T>( Err(e) => { context.subst = subst; let diag = typing_error(context, /* from_subtype */ false, loc, msg, e); - context.env.add_diag(diag); + context.add_diag(diag); None } Ok((next_subst, ty)) => { @@ -1348,7 +1338,7 @@ fn invariant_impl T>( Err(e) => { context.subst = subst; let diag = typing_error(context, /* from_subtype */ false, loc, msg, e); - context.env.add_diag(diag); + context.add_diag(diag); Err(rhs) } Ok((next_subst, ty)) => { @@ -1575,7 +1565,7 @@ fn exp(context: &mut Context, ne: Box) -> Box { vector_pack(context, eloc, vec_loc, ty_opt, argloc, args_) } - NE::IfElse(nb, nt, nf) => { + NE::IfElse(nb, nt, nf_opt) => { let eb = exp(context, nb); let bloc = eb.exp.loc; subtype( @@ -1586,15 +1576,24 @@ fn exp(context: &mut Context, ne: Box) -> Box { Type_::bool(bloc), ); let et = exp(context, nt); - let ef = exp(context, nf); - let ty = join( - context, - eloc, - || "Incompatible branches", - et.ty.clone(), - ef.ty.clone(), - ); - (ty, TE::IfElse(eb, et, ef)) + let ef_opt = nf_opt.map(|nf| exp(context, nf)); + let ty = match &ef_opt { + Some(ef) => join( + context, + eloc, + || "Incompatible branches", + et.ty.clone(), + ef.ty.clone(), + ), + None => { + let ty = sp(eloc, Type_::Unit); + let msg = + "Invalid 'if'. 
The body of an 'if' without an 'else' must have type '()'"; + subtype(context, eloc, || msg, et.ty.clone(), ty.clone()); + ty + } + }; + (ty, TE::IfElse(eb, et, ef_opt)) } NE::Match(nsubject, sp!(aloc, narms_)) => { let esubject = exp(context, nsubject); @@ -1612,7 +1611,7 @@ fn exp(context: &mut Context, ne: Box) -> Box { } }; let result_type = core::make_tvar(context, aloc); - let earms = match_arms(context, &subject_type, &result_type, narms_, &ref_mut); + let earms = match_arms(context, &esubject.ty, &result_type, narms_, &ref_mut); (result_type, TE::Match(esubject, sp(aloc, earms))) } NE::While(name, nb, nloop) => { @@ -1670,9 +1669,7 @@ fn exp(context: &mut Context, ne: Box) -> Box { .check_feature(context.current_package, FeatureGate::Lambda, eloc) { let msg = "Lambdas can only be used directly as arguments to 'macro' functions"; - context - .env - .add_diag(diag!(TypeSafety::UnexpectedLambda, (eloc, msg))) + context.add_diag(diag!(TypeSafety::UnexpectedLambda, (eloc, msg))) } (context.error_type(eloc), TE::UnresolvedError) } @@ -2032,9 +2029,7 @@ fn binop( } Range | Implies | Iff => { - context - .env - .add_diag(ice!((loc, "ICE unexpect specification operator"))); + context.add_diag(ice!((loc, "ICE unexpect specification operator"))); (context.error_type(loc), context.error_type(loc)) } }; @@ -2312,9 +2307,7 @@ fn match_pattern_( matched in the module in which they are declared", &m, &struct_, ); - context - .env - .add_diag(diag!(TypeSafety::Visibility, (loc, msg))); + context.add_diag(diag!(TypeSafety::Visibility, (loc, msg))); } let bt = rtype!(bt); let pat_ = if field_error { @@ -2797,7 +2790,7 @@ fn resolve_field(context: &mut Context, loc: Loc, ty: Type, field: &Field) -> Ty match core::ready_tvars(&context.subst, ty) { sp!(_, UnresolvedError) => context.error_type(loc), sp!(tloc, Anything) => { - context.env.add_diag(diag!( + context.add_diag(diag!( TypeSafety::UninferredType, (loc, msg()), (tloc, UNINFERRED_MSG), @@ -2805,7 +2798,7 @@ fn resolve_field(context: &mut Context, loc: Loc, ty: Type, field: &Field) -> Ty context.error_type(loc) } sp!(tloc, Var(i)) if !context.subst.is_num_var(i) => { - context.env.add_diag(diag!( + context.add_diag(diag!( TypeSafety::UninferredType, (loc, msg()), (tloc, UNINFERRED_MSG), @@ -2818,9 +2811,7 @@ fn resolve_field(context: &mut Context, loc: Loc, ty: Type, field: &Field) -> Ty "Invalid access of field '{field}' on the struct '{m}::{n}'. 
The field '{field}' can only \ be accessed within the module '{m}' since it defines '{n}'" ); - context - .env - .add_diag(diag!(TypeSafety::Visibility, (loc, msg))); + context.add_diag(diag!(TypeSafety::Visibility, (loc, msg))); } match context.datatype_kind(&m, &n) { DatatypeKind::Struct => { @@ -2832,9 +2823,7 @@ fn resolve_field(context: &mut Context, loc: Loc, ty: Type, field: &Field) -> Ty structs, not enums", field, &m, &n ); - context - .env - .add_diag(diag!(TypeSafety::ExpectedSpecificType, (loc, msg))); + context.add_diag(diag!(TypeSafety::ExpectedSpecificType, (loc, msg))); context.error_type(loc) } } @@ -2844,7 +2833,7 @@ fn resolve_field(context: &mut Context, loc: Loc, ty: Type, field: &Field) -> Ty "Expected a struct type in the current module but got: {}", core::error_format(&t, &context.subst) ); - context.env.add_diag(diag!( + context.add_diag(diag!( TypeSafety::ExpectedSpecificType, (loc, msg()), (t.loc, smsg), @@ -2872,7 +2861,7 @@ fn add_struct_field_types( constructed/deconstructed, and their fields cannot be dirctly accessed", verb, m, n ); - context.env.add_diag(diag!( + context.add_diag(diag!( TypeSafety::InvalidNativeUsage, (loc, msg), (nloc, "Struct declared 'native' here") @@ -2883,15 +2872,13 @@ fn add_struct_field_types( for (_, f_, _) in &fields_ty { if fields.get_(f_).is_none() { let msg = format!("Missing {} for field '{}' in '{}::{}'", verb, f_, m, n); - context - .env - .add_diag(diag!(TypeSafety::TooFewArguments, (loc, msg))) + context.add_diag(diag!(TypeSafety::TooFewArguments, (loc, msg))) } } fields.map(|f, (idx, x)| { let fty = match fields_ty.remove(&f) { None => { - context.env.add_diag(diag!( + context.add_diag(diag!( NameResolution::UnboundField, (loc, format!("Unbound field '{}' in '{}::{}'", &f, m, n)) )); @@ -2936,15 +2923,13 @@ fn add_variant_field_types( "Missing {} for field '{}' in '{}::{}::{}'", verb, f_, m, n, v ); - context - .env - .add_diag(diag!(TypeSafety::TooFewArguments, (loc, msg))) + context.add_diag(diag!(TypeSafety::TooFewArguments, (loc, msg))) } } fields.map(|f, (idx, x)| { let fty = match fields_ty.remove(&f) { None => { - context.env.add_diag(diag!( + context.add_diag(diag!( NameResolution::UnboundField, ( loc, @@ -2981,7 +2966,7 @@ fn find_index_funs(context: &mut Context, loc: Loc, ty: &Type) -> Option None, sp!(tloc, T::Anything) => { - context.env.add_diag(diag!( + context.add_diag(diag!( TypeSafety::UninferredType, (loc, msg()), (*tloc, UNINFERRED_MSG), @@ -2989,7 +2974,7 @@ fn find_index_funs(context: &mut Context, loc: Loc, ty: &Type) -> Option { - context.env.add_diag(diag!( + context.add_diag(diag!( TypeSafety::UninferredType, (loc, msg()), (*tloc, UNINFERRED_MSG), @@ -2999,9 +2984,7 @@ fn find_index_funs(context: &mut Context, loc: Loc, ty: &Type) -> Option { let index_opt = core::find_index_funs(context, type_name); if index_opt.is_none() { - context - .env - .add_diag(diag!(Declarations::MissingSyntaxMethod, (loc, msg()),)); + context.add_diag(diag!(Declarations::MissingSyntaxMethod, (loc, msg()),)); } index_opt } @@ -3010,7 +2993,7 @@ fn find_index_funs(context: &mut Context, loc: Loc, ty: &Type) -> Option *base, ty @ sp!(_, Type_::UnresolvedError) => ty, _ => { - context - .env - .add_diag(ice!((dloc, "Index should have failed in naming"))); + context.add_diag(ice!((dloc, "Index should have failed in naming"))); sp(dloc, Type_::UnresolvedError) } }; @@ -3231,9 +3210,7 @@ fn process_exp_dotted( inner } N::ExpDotted_::DotAutocomplete(_loc, ndot) => { - context - .env - .add_diag(ice!((dloc, "Found a dot 
autocomplete where unsupported"))); + context.add_diag(ice!((dloc, "Found a dot autocomplete where unsupported"))); // Keep going after the ICE. process_exp_dotted_inner(context, constraint_verb, *ndot) } @@ -3331,7 +3308,7 @@ fn resolve_exp_dotted( }, ), TE::Constant(_, _) if edotted.accessors.is_empty() => { - context.env.add_diag(diag!( + context.add_diag(diag!( TypeSafety::InvalidMoveOp, (loc, "Invalid 'move'. Cannot 'move' constants") )); @@ -3339,7 +3316,7 @@ fn resolve_exp_dotted( } TE::UnresolvedError => make_exp(edotted.base.ty, TE::UnresolvedError), _ if edotted.accessors.is_empty() => { - context.env.add_diag(diag!( + context.add_diag(diag!( TypeSafety::InvalidMoveOp, (loc, "Invalid 'move'. Expected a variable or path.") )); @@ -3355,9 +3332,7 @@ fn resolve_exp_dotted( borrow_exp_dotted(context, error_loc, false, edotted); let msg = "Invalid 'move'. 'move' works only with \ variables, e.g. 'move x'. 'move' on a path access is not supported"; - context - .env - .add_diag(diag!(TypeSafety::InvalidMoveOp, (loc, msg))); + context.add_diag(diag!(TypeSafety::InvalidMoveOp, (loc, msg))); make_error(context) } else { make_error(context) @@ -3386,9 +3361,7 @@ fn resolve_exp_dotted( TE::UnresolvedError => make_exp(edotted.base.ty, TE::UnresolvedError), _ => { let msg = "Invalid 'copy'. Expected a variable or path.".to_owned(); - context - .env - .add_diag(diag!(TypeSafety::InvalidCopyOp, (loc, msg))); + context.add_diag(diag!(TypeSafety::InvalidCopyOp, (loc, msg))); make_error(context) } } @@ -3473,7 +3446,7 @@ fn borrow_exp_dotted( }; // lhs is immutable and current borrow is mutable if !cur_mut && expected_mut { - context.env.add_diag(diag!( + context.add_diag(diag!( ReferenceSafety::RefTrans, (loc, "Invalid mutable borrow from an immutable reference"), (tyloc, "Immutable because of this position"), @@ -3539,7 +3512,6 @@ fn borrow_exp_dotted( } else { let msg = "Could not find a mutable index 'syntax' method"; context - .env .add_diag(diag!(Declarations::MissingSyntaxMethod, (index_loc, msg),)); exp = make_error_exp(context, index_loc); break; @@ -3548,9 +3520,7 @@ fn borrow_exp_dotted( index.target_function } else { let msg = "Could not find an immutable index 'syntax' method"; - context - .env - .add_diag(diag!(Declarations::MissingSyntaxMethod, (index_loc, msg),)); + context.add_diag(diag!(Declarations::MissingSyntaxMethod, (index_loc, msg),)); exp = make_error_exp(context, index_loc); break; }; @@ -3566,7 +3536,7 @@ fn borrow_exp_dotted( core::error_format(&ret_ty, &context.subst), core::error_format(&mut_type, &context.subst) ); - context.env.add_diag(ice!((loc, msg))); + context.add_diag(ice!((loc, msg))); exp = make_error_exp(context, index_loc); break; } @@ -3608,7 +3578,7 @@ fn exp_dotted_to_owned( } } } else { - context.env.add_diag(ice!(( + context.add_diag(ice!(( ed.loc, "Attempted to make a dotted path with no dots" ))); @@ -3616,15 +3586,11 @@ fn exp_dotted_to_owned( }; let case = match usage { DottedUsage::Move(_) => { - context - .env - .add_diag(ice!((ed.loc, "Invalid dotted usage 'move' in to_owned"))); + context.add_diag(ice!((ed.loc, "Invalid dotted usage 'move' in to_owned"))); return make_error_exp(context, ed.loc); } DottedUsage::Borrow(_) => { - context - .env - .add_diag(ice!((ed.loc, "Invalid dotted usage 'borrow' in to_owned"))); + context.add_diag(ice!((ed.loc, "Invalid dotted usage 'borrow' in to_owned"))); return make_error_exp(context, ed.loc); } DottedUsage::Use => "implicit copy", @@ -3715,9 +3681,7 @@ fn warn_on_constant_borrow(context: &mut Context, 
loc: Loc, e: &T::Exp) { if matches!(&e.exp.value, TE::Constant(_, _)) { let msg = "This access will make a new copy of the constant. \ Consider binding the value to a variable first to make this copy explicit"; - context - .env - .add_diag(diag!(TypeSafety::ImplicitConstantCopy, (loc, msg))) + context.add_diag(diag!(TypeSafety::ImplicitConstantCopy, (loc, msg))) } } @@ -3864,7 +3828,7 @@ fn type_to_type_name_( return None; } Ty::Ref(_, _) | Ty::Var(_) => { - context.env.add_diag(ice!(( + context.add_diag(ice!(( loc, "Typing did not unfold type before resolving type name" ))); @@ -3873,7 +3837,7 @@ fn type_to_type_name_( Ty::Apply(_, _, _) => unreachable!(), }; if report_error { - context.env.add_diag(diag!( + context.add_diag(diag!( TypeSafety::InvalidMethodCall, (loc, format!("Invalid {error_msg}")), (ty.loc, msg), @@ -4011,7 +3975,7 @@ fn annotated_error_const(context: &mut Context, e: &mut T::Exp, abort_or_assert_ the '#[error]' attribute is added to them." .to_string(), ); - context.env.add_diag(err); + context.add_diag(err); e.ty = context.error_type(e.ty.loc); e.exp = sp(e.exp.loc, T::UnannotatedExp_::UnresolvedError); @@ -4261,7 +4225,7 @@ fn check_call_target( } else { "Normal (non-'macro') function is declared here" }; - context.env.add_diag(diag!( + context.add_diag(diag!( TypeSafety::InvalidCallTarget, (macro_call_loc, call_msg), (decl_loc, decl_msg), @@ -4485,7 +4449,7 @@ fn expand_macro( { None => { if !(context.env.has_errors() || context.env.ide_mode()) { - context.env.add_diag(ice!(( + context.add_diag(ice!(( call_loc, "No macro found, but name resolution passed." ))); @@ -4614,24 +4578,18 @@ fn unused_module_members(context: &mut Context, mident: &ModuleIdent_, mdef: &T: } let is_sui_mode = context.env.package_config(mdef.package_name).flavor == Flavor::Sui; - context - .env - .add_warning_filter_scope(mdef.warning_filter.clone()); + context.push_warning_filter_scope(mdef.warning_filter.clone()); for (loc, name, c) in &mdef.constants { - context - .env - .add_warning_filter_scope(c.warning_filter.clone()); + context.push_warning_filter_scope(c.warning_filter.clone()); let members = context.used_module_members.get(mident); if members.is_none() || !members.unwrap().contains(name) { let msg = format!("The constant '{name}' is never used. Consider removing it."); - context - .env - .add_diag(diag!(UnusedItem::Constant, (loc, msg))) + context.add_diag(diag!(UnusedItem::Constant, (loc, msg))) } - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); } for (loc, name, fun) in &mdef.functions { @@ -4647,9 +4605,7 @@ fn unused_module_members(context: &mut Context, mident: &ModuleIdent_, mdef: &T: // a Sui-specific filter to avoid signaling that the init function is unused continue; } - context - .env - .add_warning_filter_scope(fun.warning_filter.clone()); + context.push_warning_filter_scope(fun.warning_filter.clone()); let members = context.used_module_members.get(mident); if fun.entry.is_none() @@ -4662,12 +4618,10 @@ fn unused_module_members(context: &mut Context, mident: &ModuleIdent_, mdef: &T: "The non-'public', non-'entry' function '{name}' is never called. \ Consider removing it." 
); - context - .env - .add_diag(diag!(UnusedItem::Function, (loc, msg))) + context.add_diag(diag!(UnusedItem::Function, (loc, msg))) } - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); } - context.env.pop_warning_filter_scope(); + context.pop_warning_filter_scope(); } diff --git a/external-crates/move/crates/move-compiler/src/typing/visitor.rs b/external-crates/move/crates/move-compiler/src/typing/visitor.rs index 5e5aa490931d7..1628d165150af 100644 --- a/external-crates/move/crates/move-compiler/src/typing/visitor.rs +++ b/external-crates/move/crates/move-compiler/src/typing/visitor.rs @@ -3,7 +3,7 @@ use crate::{ command_line::compiler::Visitor, - diagnostics::WarningFilters, + diagnostics::warning_filters::WarningFilters, expansion::ast::ModuleIdent, naming::ast as N, parser::ast::{ConstantName, DatatypeName, FunctionName, VariantName}, @@ -17,7 +17,7 @@ use move_proc_macros::growing_stack; pub type TypingVisitorObj = Box; pub trait TypingVisitor: Send + Sync { - fn visit(&self, env: &mut CompilationEnv, program: &T::Program); + fn visit(&self, env: &CompilationEnv, program: &T::Program); fn visitor(self) -> Visitor where @@ -30,9 +30,9 @@ pub trait TypingVisitor: Send + Sync { pub trait TypingVisitorConstructor: Send + Sync { type Context<'a>: Sized + TypingVisitorContext; - fn context<'a>(env: &'a mut CompilationEnv, program: &T::Program) -> Self::Context<'a>; + fn context<'a>(env: &'a CompilationEnv, program: &T::Program) -> Self::Context<'a>; - fn visit(env: &mut CompilationEnv, program: &T::Program) { + fn visit(env: &CompilationEnv, program: &T::Program) { let mut context = Self::context(env, program); context.visit(program); } @@ -44,7 +44,7 @@ pub enum LValueKind { } pub trait TypingVisitorContext { - fn add_warning_filter_scope(&mut self, filter: WarningFilters); + fn push_warning_filter_scope(&mut self, filters: WarningFilters); fn pop_warning_filter_scope(&mut self); /// Indicates if types should be visited during the traversal of other forms (struct and enum @@ -75,7 +75,7 @@ pub trait TypingVisitorContext { } fn visit_module(&mut self, ident: ModuleIdent, mdef: &T::ModuleDefinition) { - self.add_warning_filter_scope(mdef.warning_filter.clone()); + self.push_warning_filter_scope(mdef.warning_filter.clone()); if self.visit_module_custom(ident, mdef) { self.pop_warning_filter_scope(); return; @@ -116,7 +116,7 @@ pub trait TypingVisitorContext { struct_name: DatatypeName, sdef: &N::StructDefinition, ) { - self.add_warning_filter_scope(sdef.warning_filter.clone()); + self.push_warning_filter_scope(sdef.warning_filter.clone()); if self.visit_struct_custom(module, struct_name, sdef) { self.pop_warning_filter_scope(); return; @@ -149,7 +149,7 @@ pub trait TypingVisitorContext { enum_name: DatatypeName, edef: &N::EnumDefinition, ) { - self.add_warning_filter_scope(edef.warning_filter.clone()); + self.push_warning_filter_scope(edef.warning_filter.clone()); if self.visit_enum_custom(module, enum_name, edef) { self.pop_warning_filter_scope(); return; @@ -192,6 +192,8 @@ pub trait TypingVisitorContext { } } + // TODO field visitor + fn visit_constant_custom( &mut self, _module: ModuleIdent, @@ -207,7 +209,7 @@ pub trait TypingVisitorContext { constant_name: ConstantName, cdef: &T::Constant, ) { - self.add_warning_filter_scope(cdef.warning_filter.clone()); + self.push_warning_filter_scope(cdef.warning_filter.clone()); if self.visit_constant_custom(module, constant_name, cdef) { self.pop_warning_filter_scope(); return; @@ -231,7 +233,7 @@ pub trait 
TypingVisitorContext { function_name: FunctionName, fdef: &T::Function, ) { - self.add_warning_filter_scope(fdef.warning_filter.clone()); + self.push_warning_filter_scope(fdef.warning_filter.clone()); if self.visit_function_custom(module, function_name, fdef) { self.pop_warning_filter_scope(); return; @@ -245,7 +247,7 @@ pub trait TypingVisitorContext { self.visit_type(None, &fdef.signature.return_type); } if let T::FunctionBody_::Defined(seq) = &fdef.body.value { - self.visit_seq(seq); + self.visit_seq(fdef.body.loc, seq); } self.pop_warning_filter_scope(); } @@ -291,11 +293,19 @@ pub trait TypingVisitorContext { // -- SEQUENCES AND EXPRESSIONS -- - fn visit_seq(&mut self, (use_funs, seq): &T::Sequence) { + /// Custom visit for a sequence. It will skip `visit_seq` if `visit_seq_custom` returns true. + fn visit_seq_custom(&mut self, _loc: Loc, _seq: &T::Sequence) -> bool { + false + } + + fn visit_seq(&mut self, loc: Loc, seq @ (use_funs, seq_): &T::Sequence) { + if self.visit_seq_custom(loc, seq) { + return; + } if Self::VISIT_USE_FUNS { self.visit_use_funs(use_funs); } - for s in seq { + for s in seq_ { self.visit_seq_item(s); } } @@ -431,10 +441,12 @@ pub trait TypingVisitorContext { } self.visit_exp(e); } - E::IfElse(e1, e2, e3) => { + E::IfElse(e1, e2, e3_opt) => { self.visit_exp(e1); self.visit_exp(e2); - self.visit_exp(e3); + if let Some(e3) = e3_opt { + self.visit_exp(e3); + } } E::Match(esubject, arms) => { self.visit_exp(esubject); @@ -456,8 +468,8 @@ pub trait TypingVisitorContext { self.visit_exp(e2); } E::Loop { body, .. } => self.visit_exp(body), - E::NamedBlock(_, seq) => self.visit_seq(seq), - E::Block(seq) => self.visit_seq(seq), + E::NamedBlock(_, seq) => self.visit_seq(exp.exp.loc, seq), + E::Block(seq) => self.visit_seq(exp.exp.loc, seq), E::Assign(lvalues, ty_ann, e) => { // visit the RHS first to better match control flow self.visit_exp(e); @@ -555,32 +567,86 @@ impl From for TypingVisitorObj { } impl TypingVisitor for V { - fn visit(&self, env: &mut CompilationEnv, program: &T::Program) { + fn visit(&self, env: &CompilationEnv, program: &T::Program) { Self::visit(env, program) } } +macro_rules! 
simple_visitor { + ($visitor:ident, $($overrides:item),*) => { + pub struct $visitor; + + pub struct Context<'a> { + env: &'a crate::shared::CompilationEnv, + warning_filters_scope: crate::diagnostics::warning_filters::WarningFiltersScope, + } + + impl crate::typing::visitor::TypingVisitorConstructor for $visitor { + type Context<'a> = Context<'a>; + + fn context<'a>( + env: &'a crate::shared::CompilationEnv, + _program: &crate::typing::ast::Program, + ) -> Self::Context<'a> { + let warning_filters_scope = env.top_level_warning_filter_scope().clone(); + Context { + env, + warning_filters_scope, + } + } + } + + impl Context<'_> { + #[allow(unused)] + fn add_diag(&self, diag: crate::diagnostics::Diagnostic) { + self.env.add_diag(&self.warning_filters_scope, diag); + } + + #[allow(unused)] + fn add_diags(&self, diags: crate::diagnostics::Diagnostics) { + self.env.add_diags(&self.warning_filters_scope, diags); + } + } + + impl crate::typing::visitor::TypingVisitorContext for Context<'_> { + fn push_warning_filter_scope( + &mut self, + filters: crate::diagnostics::warning_filters::WarningFilters, + ) { + self.warning_filters_scope.push(filters) + } + + fn pop_warning_filter_scope(&mut self) { + self.warning_filters_scope.pop() + } + + $($overrides)* + } + } +} +pub(crate) use simple_visitor; + //************************************************************************************************** // Mut Vistor //************************************************************************************************** pub trait TypingMutVisitor: Send + Sync { - fn visit(&self, env: &mut CompilationEnv, program: &mut T::Program); + fn visit(&self, env: &CompilationEnv, program: &mut T::Program); } pub trait TypingMutVisitorConstructor: Send + Sync { type Context<'a>: Sized + TypingMutVisitorContext; - fn context<'a>(env: &'a mut CompilationEnv, program: &T::Program) -> Self::Context<'a>; + fn context<'a>(env: &'a CompilationEnv, program: &T::Program) -> Self::Context<'a>; - fn visit(env: &mut CompilationEnv, program: &mut T::Program) { + fn visit(env: &CompilationEnv, program: &mut T::Program) { let mut context = Self::context(env, program); context.visit(program); } } pub trait TypingMutVisitorContext { - fn add_warning_filter_scope(&mut self, filter: WarningFilters); + fn push_warning_filter_scope(&mut self, filter: WarningFilters); fn pop_warning_filter_scope(&mut self); /// Indicates if types should be visited during the traversal of other forms (struct and enum @@ -615,7 +681,7 @@ pub trait TypingMutVisitorContext { } fn visit_module(&mut self, ident: ModuleIdent, mdef: &mut T::ModuleDefinition) { - self.add_warning_filter_scope(mdef.warning_filter.clone()); + self.push_warning_filter_scope(mdef.warning_filter.clone()); if self.visit_module_custom(ident, mdef) { self.pop_warning_filter_scope(); return; @@ -656,7 +722,7 @@ pub trait TypingMutVisitorContext { struct_name: DatatypeName, sdef: &mut N::StructDefinition, ) { - self.add_warning_filter_scope(sdef.warning_filter.clone()); + self.push_warning_filter_scope(sdef.warning_filter.clone()); if self.visit_struct_custom(module, struct_name, sdef) { self.pop_warning_filter_scope(); return; @@ -689,7 +755,7 @@ pub trait TypingMutVisitorContext { enum_name: DatatypeName, edef: &mut N::EnumDefinition, ) { - self.add_warning_filter_scope(edef.warning_filter.clone()); + self.push_warning_filter_scope(edef.warning_filter.clone()); if self.visit_enum_custom(module, enum_name, edef) { self.pop_warning_filter_scope(); return; @@ -747,7 +813,7 @@ pub trait 
TypingMutVisitorContext { constant_name: ConstantName, cdef: &mut T::Constant, ) { - self.add_warning_filter_scope(cdef.warning_filter.clone()); + self.push_warning_filter_scope(cdef.warning_filter.clone()); if self.visit_constant_custom(module, constant_name, cdef) { self.pop_warning_filter_scope(); return; @@ -771,7 +837,7 @@ pub trait TypingMutVisitorContext { function_name: FunctionName, fdef: &mut T::Function, ) { - self.add_warning_filter_scope(fdef.warning_filter.clone()); + self.push_warning_filter_scope(fdef.warning_filter.clone()); if self.visit_function_custom(module, function_name, fdef) { self.pop_warning_filter_scope(); return; @@ -973,10 +1039,12 @@ pub trait TypingMutVisitorContext { } self.visit_exp(e); } - E::IfElse(e1, e2, e3) => { + E::IfElse(e1, e2, e3_opt) => { self.visit_exp(e1); self.visit_exp(e2); - self.visit_exp(e3); + if let Some(e3) = e3_opt { + self.visit_exp(e3); + } } E::Match(esubject, arms) => { self.visit_exp(esubject); @@ -1092,7 +1160,7 @@ pub trait TypingMutVisitorContext { } impl TypingMutVisitor for V { - fn visit(&self, env: &mut CompilationEnv, program: &mut T::Program) { + fn visit(&self, env: &CompilationEnv, program: &mut T::Program) { Self::visit(env, program) } } @@ -1158,8 +1226,10 @@ where E::While(_, e1, e2) | E::Mutate(e1, e2) | E::BinopExp(e1, _, _, e2) => { exp_satisfies_(e1, p) || exp_satisfies_(e2, p) } - E::IfElse(e1, e2, e3) => { - exp_satisfies_(e1, p) || exp_satisfies_(e2, p) || exp_satisfies_(e3, p) + E::IfElse(e1, e2, e3_opt) => { + exp_satisfies_(e1, p) + || exp_satisfies_(e2, p) + || e3_opt.iter().any(|e3| exp_satisfies_(e3, p)) } E::ModuleCall(c) => exp_satisfies_(&c.arguments, p), E::Match(esubject, arms) => { diff --git a/external-crates/move/crates/move-compiler/src/unit_test/filter_test_members.rs b/external-crates/move/crates/move-compiler/src/unit_test/filter_test_members.rs index 225b4f91628b5..2fb22b66b4b30 100644 --- a/external-crates/move/crates/move-compiler/src/unit_test/filter_test_members.rs +++ b/external-crates/move/crates/move-compiler/src/unit_test/filter_test_members.rs @@ -18,13 +18,13 @@ use crate::{ use std::sync::Arc; struct Context<'env> { - env: &'env mut CompilationEnv, + env: &'env CompilationEnv, is_source_def: bool, current_package: Option, } impl<'env> Context<'env> { - fn new(env: &'env mut CompilationEnv) -> Self { + fn new(env: &'env CompilationEnv) -> Self { Self { env, is_source_def: false, @@ -92,7 +92,7 @@ pub const UNIT_TEST_POISON_FUN_NAME: Symbol = symbol!("unit_test_poison"); // in `compilation_env` is not set. If the test flag is set, no filtering is performed, and instead // a test plan is created for use by the testing framework. pub fn program( - compilation_env: &mut CompilationEnv, + compilation_env: &CompilationEnv, pre_compiled_lib: Option>, prog: P::Program, ) -> P::Program { @@ -127,7 +127,7 @@ fn has_unit_test_module(prog: &P::Program) -> bool { } fn check_has_unit_test_module( - compilation_env: &mut CompilationEnv, + compilation_env: &CompilationEnv, pre_compiled_lib: Option>, prog: &P::Program, ) -> bool { @@ -145,7 +145,7 @@ fn check_has_unit_test_module( P::Definition::Module(P::ModuleDefinition { name, .. }) => name.0.loc, P::Definition::Address(P::AddressDefinition { loc, .. 
}) => *loc, }; - compilation_env.add_diag(diag!( + compilation_env.add_error_diag(diag!( Attributes::InvalidTest, ( loc, diff --git a/external-crates/move/crates/move-compiler/src/unit_test/plan_builder.rs b/external-crates/move/crates/move-compiler/src/unit_test/plan_builder.rs index 7aab1a163d71d..30f14b7f7a891 100644 --- a/external-crates/move/crates/move-compiler/src/unit_test/plan_builder.rs +++ b/external-crates/move/crates/move-compiler/src/unit_test/plan_builder.rs @@ -5,6 +5,10 @@ use crate::{ cfgir::ast as G, diag, + diagnostics::{ + warning_filters::{WarningFilters, WarningFiltersScope}, + Diagnostic, Diagnostics, + }, expansion::ast::{ self as E, Address, Attribute, AttributeValue, Attributes, ModuleAccess_, ModuleIdent, ModuleIdent_, @@ -33,12 +37,13 @@ use move_symbol_pool::Symbol; use std::collections::BTreeMap; struct Context<'env> { - env: &'env mut CompilationEnv, + env: &'env CompilationEnv, + warning_filters_scope: WarningFiltersScope, constants: UniqueMap, Attributes)>>, } impl<'env> Context<'env> { - fn new(compilation_env: &'env mut CompilationEnv, prog: &G::Program) -> Self { + fn new(compilation_env: &'env CompilationEnv, prog: &G::Program) -> Self { let constants = prog.modules.ref_map(|_mident, module| { module.constants.ref_map(|_name, constant| { let v_opt = constant.value.as_ref().and_then(|v| match v { @@ -48,12 +53,31 @@ impl<'env> Context<'env> { (constant.loc, v_opt, constant.attributes.clone()) }) }); + let warning_filters_scope = compilation_env.top_level_warning_filter_scope().clone(); Self { env: compilation_env, + warning_filters_scope, constants, } } + pub fn add_diag(&self, diag: Diagnostic) { + self.env.add_diag(&self.warning_filters_scope, diag); + } + + #[allow(unused)] + pub fn add_diags(&self, diags: Diagnostics) { + self.env.add_diags(&self.warning_filters_scope, diags); + } + + pub fn push_warning_filter_scope(&mut self, filters: WarningFilters) { + self.warning_filters_scope.push(filters) + } + + pub fn pop_warning_filter_scope(&mut self) { + self.warning_filters_scope.pop() + } + fn resolve_address(&self, addr: &Address) -> NumericalAddress { (*addr).into_addr_bytes() } @@ -72,7 +96,7 @@ impl<'env> Context<'env> { // Constructs a test plan for each module in `prog`. This also validates the structure of the // attributes as the test plan is constructed. 
pub fn construct_test_plan( - compilation_env: &mut CompilationEnv, + compilation_env: &CompilationEnv, package_filter: Option, prog: &G::Program, ) -> Option> { @@ -85,7 +109,15 @@ pub fn construct_test_plan( prog.modules .key_cloned_iter() .flat_map(|(module_ident, module_def)| { - construct_module_test_plan(&mut context, package_filter, module_ident, module_def) + context.push_warning_filter_scope(module_def.warning_filter.clone()); + let plan = construct_module_test_plan( + &mut context, + package_filter, + module_ident, + module_def, + ); + context.pop_warning_filter_scope(); + plan }) .collect(), ) @@ -104,8 +136,11 @@ fn construct_module_test_plan( .functions .iter() .filter_map(|(loc, fn_name, func)| { - build_test_info(context, loc, fn_name, func) - .map(|test_case| (fn_name.to_string(), test_case)) + context.push_warning_filter_scope(func.warning_filter.clone()); + let info = build_test_info(context, loc, fn_name, func) + .map(|test_case| (fn_name.to_string(), test_case)); + context.pop_warning_filter_scope(); + info }) .collect(); @@ -143,7 +178,7 @@ fn build_test_info<'func>( let fn_msg = "Only functions defined as a test with #[test] can also have an \ #[expected_failure] attribute"; let abort_msg = "Attributed as #[expected_failure] here"; - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::InvalidUsage, (fn_loc, fn_msg), (abort_attribute.loc, abort_msg), @@ -154,7 +189,7 @@ fn build_test_info<'func>( (Some(test_attribute), Some(random_test_attribute)) => { let msg = "Function annotated as both #[test] and #[random_test]. You need to declare \ it as either one or the other"; - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::InvalidUsage, (random_test_attribute.loc, msg), (test_attribute.loc, PREVIOUSLY_ANNOTATED_MSG), @@ -170,7 +205,7 @@ fn build_test_info<'func>( if let Some(test_only_attribute) = test_only_attribute_opt { let msg = "Function annotated as both #[test] and #[test_only]. You need to declare \ it as either one or the other"; - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::InvalidUsage, (test_only_attribute.loc, msg), (test_attribute.loc, PREVIOUSLY_ANNOTATED_MSG), @@ -205,7 +240,7 @@ fn build_test_info<'func>( "Supported builti-in types are: bool, u8, u16, u32, u64, \ u128, u256, address, and vector where T is a built-in type", ); - context.env.add_diag(diag); + context.add_diag(diag); return None; } }; @@ -214,7 +249,7 @@ fn build_test_info<'func>( None => { let missing_param_msg = "Missing test parameter assignment in test. Expected a \ parameter to be assigned in this attribute"; - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::InvalidTest, (test_attribute.loc, missing_param_msg), (vloc, "Corresponding to this parameter"), @@ -227,7 +262,7 @@ fn build_test_info<'func>( if is_random_test && arguments.is_empty() { let msg = "No parameters to generate for random test. 
A #[random_test] function must \ have at least one parameter to generate."; - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::InvalidTest, (test_attribute.loc, msg), (fn_loc, IN_THIS_TEST_MSG), @@ -266,7 +301,7 @@ fn parse_test_attribute( match test_attribute { EA::Name(_) | EA::Parameterized(_, _) if depth > 0 => { - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::InvalidTest, (*aloc, "Unexpected nested attribute in test declaration"), )); @@ -281,7 +316,7 @@ fn parse_test_attribute( } EA::Assigned(nm, attr_value) => { if depth != 1 { - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::InvalidTest, (*aloc, "Unexpected nested attribute in test declaration"), )); @@ -291,7 +326,7 @@ fn parse_test_attribute( let value = match convert_attribute_value_to_move_value(context, attr_value) { Some(move_value) => move_value, None => { - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::InvalidValue, (*assign_loc, "Unsupported attribute value"), (*aloc, "Assigned in this attribute"), @@ -338,7 +373,7 @@ fn parse_failure_attribute( let invalid_assignment_msg = "Invalid expected failure code assignment"; let expected_msg = "Expect an #[expected_failure(...)] attribute for error specification"; - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::InvalidValue, (assign_loc, invalid_assignment_msg), (*aloc, expected_msg), @@ -369,9 +404,7 @@ fn parse_failure_attribute( expected_failure_kind_vec.len(), TestingAttribute::expected_failure_cases().to_vec().join(", ") ); - context - .env - .add_diag(diag!(Attributes::InvalidValue, (*aloc, invalid_attr_msg))); + context.add_diag(diag!(Attributes::InvalidValue, (*aloc, invalid_attr_msg))); return None; } let (expected_failure_kind, (attr_loc, attr)) = @@ -400,7 +433,7 @@ fn parse_failure_attribute( attribute.", TestingAttribute::ERROR_LOCATION ); - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::ValueWarning, (attr_loc, BAD_ABORT_VALUE_WARNING), (value_loc, tip) @@ -500,7 +533,7 @@ fn parse_failure_attribute( ); let no_code = format!("No status code associated with value '{move_error_type}'"); - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::InvalidValue, (value_name_loc, bad_value), (major_value_loc, no_code) @@ -541,9 +574,7 @@ fn parse_failure_attribute( "Unused attribute for {}", TestingAttribute::ExpectedFailure.name() ); - context - .env - .add_diag(diag!(UnusedItem::Attribute, (loc, msg))); + context.add_diag(diag!(UnusedItem::Attribute, (loc, msg))); } Some(ExpectedFailure::ExpectedWithError(ExpectedMoveError( status_code, @@ -571,7 +602,7 @@ fn check_attribute_unassigned( "Expected no assigned value, e.g. '{}', for expected failure attribute", kind ); - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::InvalidValue, (attr_loc, "Unsupported attribute in this location"), (loc, msg) @@ -598,7 +629,7 @@ fn get_assigned_attribute( "Expected assigned value, e.g. '{}=...', for expected failure attribute", kind ); - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::InvalidValue, (attr_loc, "Unsupported attribute in this location"), (loc, msg) @@ -615,7 +646,7 @@ fn convert_location(context: &mut Context, attr_loc: Loc, attr: Attribute) -> Op match value { sp!(vloc, EAV::Module(module)) => convert_module_id(context, vloc, &module), sp!(vloc, _) => { - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::InvalidValue, (loc, INVALID_VALUE), (vloc, "Expected a module identifier, e.g. 
'std::vector'") @@ -645,7 +676,7 @@ fn convert_constant_value_u64_constant_or_value( let modules_constants = context.constants().get(module).unwrap(); let constant = match modules_constants.get_(&member.value) { None => { - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::InvalidValue, (vloc, INVALID_VALUE), ( @@ -667,7 +698,7 @@ fn convert_constant_value_u64_constant_or_value( "Constant '{module}::{member}' has a non-u64 value. \ Only 'u64' values are permitted" ); - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::InvalidValue, (vloc, INVALID_VALUE), (*cloc, msg), @@ -680,7 +711,7 @@ fn convert_constant_value_u64_constant_or_value( fn convert_module_id(context: &mut Context, vloc: Loc, module: &ModuleIdent) -> Option { if !context.constants.contains_key(module) { - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::InvalidValue, (vloc, INVALID_VALUE), (module.loc, format!("Unbound module '{module}'")), @@ -693,7 +724,7 @@ fn convert_module_id(context: &mut Context, vloc: Loc, module: &ModuleIdent) -> value: sp!(_, a), .. } => a.into_inner(), Address::NamedUnassigned(addr) => { - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::InvalidValue, (vloc, INVALID_VALUE), (*mloc, format!("Unbound address '{addr}'")), @@ -722,7 +753,7 @@ fn convert_attribute_value_u64( | sp!(vloc, EAV::Value(sp!(_, EV::U32(_)))) | sp!(vloc, EAV::Value(sp!(_, EV::U128(_)))) | sp!(vloc, EAV::Value(sp!(_, EV::U256(_)))) => { - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::InvalidValue, (loc, INVALID_VALUE), (*vloc, "Annotated non-u64 literals are not permitted"), @@ -730,7 +761,7 @@ fn convert_attribute_value_u64( None } sp!(vloc, _) => { - context.env.add_diag(diag!( + context.add_diag(diag!( Attributes::InvalidValue, (loc, INVALID_VALUE), (*vloc, "Unsupported value in this assignment"), @@ -765,9 +796,7 @@ fn check_location( "Expected '{}' following '{attr}'", TestingAttribute::ERROR_LOCATION ); - context - .env - .add_diag(diag!(Attributes::InvalidUsage, (loc, msg))); + context.add_diag(diag!(Attributes::InvalidUsage, (loc, msg))); } location } diff --git a/external-crates/move/crates/move-compiler/tests/linter/false_negative_unnecessary_conditional.move b/external-crates/move/crates/move-compiler/tests/linter/false_negative_unnecessary_conditional.move index 4e676fa7ca518..a70e993da5b7e 100644 --- a/external-crates/move/crates/move-compiler/tests/linter/false_negative_unnecessary_conditional.move +++ b/external-crates/move/crates/move-compiler/tests/linter/false_negative_unnecessary_conditional.move @@ -1,12 +1,14 @@ module a::m { // These very simply could be rewritten but we are overly conservative when it comes to blocks public fun t0(condition: bool) { - if (condition) { (); true } else false; - if (condition) b"" else { (); (); vector[] }; + if (condition) { foo(); true } else false; + if (condition) b"" else { foo(); foo(); vector[] }; } // we don't do this check after constant folding public fun t1(condition: bool) { if (condition) 1 + 1 else 2; } + + fun foo() {} } diff --git a/external-crates/move/crates/move-compiler/tests/linter/suppress_unnecessary_unit.move b/external-crates/move/crates/move-compiler/tests/linter/suppress_unnecessary_unit.move new file mode 100644 index 0000000000000..da9533c8d5890 --- /dev/null +++ b/external-crates/move/crates/move-compiler/tests/linter/suppress_unnecessary_unit.move @@ -0,0 +1,12 @@ +// suppress unnecessary_unit lint +module a::m { + + #[allow(lint(unnecessary_unit))] + public 
fun test_empty_else(x: bool): bool { + if (x) { x = true; } else {}; + if (!x) () else { test_empty_else(x); }; + { (); }; + (); + x + } +} diff --git a/external-crates/move/crates/move-compiler/tests/linter/true_negative_unnecessary_unit.move b/external-crates/move/crates/move-compiler/tests/linter/true_negative_unnecessary_unit.move new file mode 100644 index 0000000000000..dac10008319ab --- /dev/null +++ b/external-crates/move/crates/move-compiler/tests/linter/true_negative_unnecessary_unit.move @@ -0,0 +1,12 @@ +// tests unnecessary units. These caeses are not errors and should not be reported +module a::unnecessary_unit { + public fun t_if_without_else(cond: bool): u64 { + let x = 0; + if (cond) x = 1; + x + } + + public fun t() { + () // unit here is okay + } +} diff --git a/external-crates/move/crates/move-compiler/tests/linter/true_positive_unnecessary_unit.exp b/external-crates/move/crates/move-compiler/tests/linter/true_positive_unnecessary_unit.exp new file mode 100644 index 0000000000000..3b81304356f04 --- /dev/null +++ b/external-crates/move/crates/move-compiler/tests/linter/true_positive_unnecessary_unit.exp @@ -0,0 +1,151 @@ +warning[Lint W04010]: unit `()` expression can be removed or simplified + ┌─ tests/linter/true_positive_unnecessary_unit.move:6:16 + │ +6 │ if (b) () else { x = 1 }; + │ - ^^ Unnecessary unit '()' + │ │ + │ Consider negating the 'if' condition and simplifying + │ + = For example 'if (cond) () else e' can be simplified to 'if (!cond) e' + = This warning can be suppressed with '#[allow(lint(unnecessary_unit))]' applied to the 'module' or module member ('const', 'fun', or 'struct') + +warning[Lint W04010]: unit `()` expression can be removed or simplified + ┌─ tests/linter/true_positive_unnecessary_unit.move:8:16 + │ +8 │ if (b) {} else { x = 1 }; + │ - ^^ Unnecessary unit '()' + │ │ + │ Consider negating the 'if' condition and simplifying + │ + = For example 'if (cond) () else e' can be simplified to 'if (!cond) e' + = This warning can be suppressed with '#[allow(lint(unnecessary_unit))]' applied to the 'module' or module member ('const', 'fun', or 'struct') + +warning[Lint W04010]: unit `()` expression can be removed or simplified + ┌─ tests/linter/true_positive_unnecessary_unit.move:10:16 + │ +10 │ if (b) { () } else { x = 1 }; + │ - ^^^^^^ Unnecessary unit '()' + │ │ + │ Consider negating the 'if' condition and simplifying + │ + = For example 'if (cond) () else e' can be simplified to 'if (!cond) e' + = This warning can be suppressed with '#[allow(lint(unnecessary_unit))]' applied to the 'module' or module member ('const', 'fun', or 'struct') + +warning[Lint W04010]: unit `()` expression can be removed or simplified + ┌─ tests/linter/true_positive_unnecessary_unit.move:12:16 + │ +12 │ if (b) { + │ - Consider negating the 'if' condition and simplifying + │ ╭────────────────^ +13 │ │ // new line and comment does not suppress it +14 │ │ } else { x = 1 }; + │ ╰─────────^ Unnecessary unit '()' + │ + = For example 'if (cond) () else e' can be simplified to 'if (!cond) e' + = This warning can be suppressed with '#[allow(lint(unnecessary_unit))]' applied to the 'module' or module member ('const', 'fun', or 'struct') + +warning[Lint W04010]: unit `()` expression can be removed or simplified + ┌─ tests/linter/true_positive_unnecessary_unit.move:21:31 + │ +21 │ if (b) { x = 1 } else (); + │ ----------------------^^ + │ │ │ + │ │ Unnecessary 'else ()'. + │ An 'if' without an 'else' has an implicit 'else ()'. 
Consider removing the 'else' branch + │ + = For example 'if (cond) e else ()' can be simplified to 'if (cond) e' + = This warning can be suppressed with '#[allow(lint(unnecessary_unit))]' applied to the 'module' or module member ('const', 'fun', or 'struct') + +warning[Lint W04010]: unit `()` expression can be removed or simplified + ┌─ tests/linter/true_positive_unnecessary_unit.move:23:31 + │ +23 │ if (b) { x = 1 } else {}; + │ ----------------------^^ + │ │ │ + │ │ Unnecessary 'else ()'. + │ An 'if' without an 'else' has an implicit 'else ()'. Consider removing the 'else' branch + │ + = For example 'if (cond) e else ()' can be simplified to 'if (cond) e' + = This warning can be suppressed with '#[allow(lint(unnecessary_unit))]' applied to the 'module' or module member ('const', 'fun', or 'struct') + +warning[Lint W04010]: unit `()` expression can be removed or simplified + ┌─ tests/linter/true_positive_unnecessary_unit.move:25:31 + │ +25 │ if (b) { x = 1 } else { () }; + │ ----------------------^^^^^^ + │ │ │ + │ │ Unnecessary 'else ()'. + │ An 'if' without an 'else' has an implicit 'else ()'. Consider removing the 'else' branch + │ + = For example 'if (cond) e else ()' can be simplified to 'if (cond) e' + = This warning can be suppressed with '#[allow(lint(unnecessary_unit))]' applied to the 'module' or module member ('const', 'fun', or 'struct') + +warning[Lint W04010]: unit `()` expression can be removed or simplified + ┌─ tests/linter/true_positive_unnecessary_unit.move:27:31 + │ +27 │ â•­ if (b) { x = 1 } else { + │ ╭─────────────────────────────────^ +28 │ │ │ // new line and comment does not suppress it +29 │ │ │ }; + │ ╰─│─────────^ Unnecessary 'else ()'. + │ ╰─────────' An 'if' without an 'else' has an implicit 'else ()'. Consider removing the 'else' branch + │ + = For example 'if (cond) e else ()' can be simplified to 'if (cond) e' + = This warning can be suppressed with '#[allow(lint(unnecessary_unit))]' applied to the 'module' or module member ('const', 'fun', or 'struct') + +warning[Lint W04010]: unit `()` expression can be removed or simplified + ┌─ tests/linter/true_positive_unnecessary_unit.move:34:9 + │ +34 │ (); + │ ^^ Unnecessary unit in sequence '();'. Consider removing + │ + = This warning can be suppressed with '#[allow(lint(unnecessary_unit))]' applied to the 'module' or module member ('const', 'fun', or 'struct') + +warning[Lint W04010]: unit `()` expression can be removed or simplified + ┌─ tests/linter/true_positive_unnecessary_unit.move:37:18 + │ +37 │ if (b) { (); () } else { x = 1 }; // doesn't trigger if/else case + │ ^^ Unnecessary unit in sequence '();'. Consider removing + │ + = This warning can be suppressed with '#[allow(lint(unnecessary_unit))]' applied to the 'module' or module member ('const', 'fun', or 'struct') + +warning[Lint W04010]: unit `()` expression can be removed or simplified + ┌─ tests/linter/true_positive_unnecessary_unit.move:39:33 + │ +39 │ if (b) { x = 1 } else { (); (); () }; // doesn't trigger if/else case + │ ^^ Unnecessary unit in sequence '();'. Consider removing + │ + = This warning can be suppressed with '#[allow(lint(unnecessary_unit))]' applied to the 'module' or module member ('const', 'fun', or 'struct') + +warning[Lint W04010]: unit `()` expression can be removed or simplified + ┌─ tests/linter/true_positive_unnecessary_unit.move:39:37 + │ +39 │ if (b) { x = 1 } else { (); (); () }; // doesn't trigger if/else case + │ ^^ Unnecessary unit in sequence '();'. 
Consider removing + │ + = This warning can be suppressed with '#[allow(lint(unnecessary_unit))]' applied to the 'module' or module member ('const', 'fun', or 'struct') + +warning[Lint W04010]: unit `()` expression can be removed or simplified + ┌─ tests/linter/true_positive_unnecessary_unit.move:41:9 + │ +41 │ {}; + │ ^^ Unnecessary unit in sequence '();'. Consider removing + │ + = This warning can be suppressed with '#[allow(lint(unnecessary_unit))]' applied to the 'module' or module member ('const', 'fun', or 'struct') + +warning[Lint W04010]: unit `()` expression can be removed or simplified + ┌─ tests/linter/true_positive_unnecessary_unit.move:42:9 + │ +42 │ { () }; // inner isn't an error but the outer is + │ ^^^^^^ Unnecessary unit in sequence '();'. Consider removing + │ + = This warning can be suppressed with '#[allow(lint(unnecessary_unit))]' applied to the 'module' or module member ('const', 'fun', or 'struct') + +warning[Lint W04010]: unit `()` expression can be removed or simplified + ┌─ tests/linter/true_positive_unnecessary_unit.move:43:11 + │ +43 │ { (); }; // inner is an error but outer isn't + │ ^^ Unnecessary unit in sequence '();'. Consider removing + │ + = This warning can be suppressed with '#[allow(lint(unnecessary_unit))]' applied to the 'module' or module member ('const', 'fun', or 'struct') + diff --git a/external-crates/move/crates/move-compiler/tests/linter/true_positive_unnecessary_unit.move b/external-crates/move/crates/move-compiler/tests/linter/true_positive_unnecessary_unit.move new file mode 100644 index 0000000000000..3cd380ffbd1ce --- /dev/null +++ b/external-crates/move/crates/move-compiler/tests/linter/true_positive_unnecessary_unit.move @@ -0,0 +1,52 @@ +// tests unnecessary units in if, else, and block +module a::unnecessary_unit { + public fun t_if(b: bool) { + let x = 0; + x; + if (b) () else { x = 1 }; + x; + if (b) {} else { x = 1 }; + x; + if (b) { () } else { x = 1 }; + x; + if (b) { + // new line and comment does not suppress it + } else { x = 1 }; + x; + } + + public fun t_else(b: bool) { + let x = 0; + x; + if (b) { x = 1 } else (); + x; + if (b) { x = 1 } else {}; + x; + if (b) { x = 1 } else { () }; + x; + if (b) { x = 1 } else { + // new line and comment does not suppress it + }; + x; + } + + public fun t_block(b: bool) { + (); + let x = 0; + x; + if (b) { (); () } else { x = 1 }; // doesn't trigger if/else case + x; + if (b) { x = 1 } else { (); (); () }; // doesn't trigger if/else case + x; + {}; + { () }; // inner isn't an error but the outer is + { (); }; // inner is an error but outer isn't + () + } + + // public fun t_if_else_if(b: bool, c: bool) { + // let x = 0; + // x; + // if (b) { x = 1 } else if (c) {}; + // } +} diff --git a/external-crates/move/crates/move-compiler/tests/move_2024/matching/inferred_int_complex.move b/external-crates/move/crates/move-compiler/tests/move_2024/matching/inferred_int_complex.move new file mode 100644 index 0000000000000..e27a87cda2d14 --- /dev/null +++ b/external-crates/move/crates/move-compiler/tests/move_2024/matching/inferred_int_complex.move @@ -0,0 +1,14 @@ +module a::m; + +fun t0() { + let x = 2 + 5; + match (x) { _ => {} } +} + +fun t1() { + match ({ 2 + 3 + 4}) { _ => {} } +} + +fun t2() { + match ({ let x = 2 + 3; x + 4}) { _ => {} } +} diff --git a/external-crates/move/crates/move-compiler/tests/move_2024/matching/inferred_int_mut_ref_type.move b/external-crates/move/crates/move-compiler/tests/move_2024/matching/inferred_int_mut_ref_type.move new file mode 100644 index 
0000000000000..7294975f79172 --- /dev/null +++ b/external-crates/move/crates/move-compiler/tests/move_2024/matching/inferred_int_mut_ref_type.move @@ -0,0 +1,5 @@ +module a::m; + +fun t() { + match (&mut 10) { _ => {} } +} diff --git a/external-crates/move/crates/move-compiler/tests/move_2024/matching/inferred_int_ref_type.move b/external-crates/move/crates/move-compiler/tests/move_2024/matching/inferred_int_ref_type.move new file mode 100644 index 0000000000000..a3a0eca9c2b9c --- /dev/null +++ b/external-crates/move/crates/move-compiler/tests/move_2024/matching/inferred_int_ref_type.move @@ -0,0 +1,5 @@ +module a::m; + +fun t() { + match (&10) { _ => {} } +} diff --git a/external-crates/move/crates/move-compiler/tests/move_2024/matching/inferred_int_subject.move b/external-crates/move/crates/move-compiler/tests/move_2024/matching/inferred_int_subject.move new file mode 100644 index 0000000000000..015242e02bc2f --- /dev/null +++ b/external-crates/move/crates/move-compiler/tests/move_2024/matching/inferred_int_subject.move @@ -0,0 +1,5 @@ +module a::m; + +fun t() { + match (10) { _ => {} } +} diff --git a/external-crates/move/crates/move-compiler/tests/move_2024/parser/labeled_control_exp_associativity_else_after_if_block.exp b/external-crates/move/crates/move-compiler/tests/move_2024/parser/labeled_control_exp_associativity_else_after_if_block.exp index a155e9d748798..8d8a57abe2613 100644 --- a/external-crates/move/crates/move-compiler/tests/move_2024/parser/labeled_control_exp_associativity_else_after_if_block.exp +++ b/external-crates/move/crates/move-compiler/tests/move_2024/parser/labeled_control_exp_associativity_else_after_if_block.exp @@ -4,9 +4,27 @@ error[E04007]: incompatible types 13 │ if (cond) 'a: { s1 }.f else s2.f │ ^^^^^^^^^^^^^^^^^^^^ │ │ │ - │ │ Found: '0x42::M::S'. It is not compatible with the other type. - │ Incompatible branches - │ Found: '()'. It is not compatible with the other type. + │ │ Given: '0x42::M::S' + │ Invalid 'if'. The body of an 'if' without an 'else' must have type '()' + │ Expected: '()' + +error[E04005]: expected a single type + ┌─ tests/move_2024/parser/labeled_control_exp_associativity_else_after_if_block.move:13:9 + │ +13 │ if (cond) 'a: { s1 }.f else s2.f + │ ^^^^^^^^^^^^^^^^^^^^ + │ │ + │ Invalid dot access + │ Expected a single type, but found expression list type: '()' + +error[E04009]: expected specific type + ┌─ tests/move_2024/parser/labeled_control_exp_associativity_else_after_if_block.move:13:9 + │ +13 │ if (cond) 'a: { s1 }.f else s2.f + │ ^^^^^^^^^^^^^^^^^^^^^^ + │ │ + │ Unbound field 'f' + │ Expected a struct type in the current module but got: '()' error[E01002]: unexpected token ┌─ tests/move_2024/parser/labeled_control_exp_associativity_else_after_if_block.move:13:32 diff --git a/external-crates/move/crates/move-compiler/tests/move_2024/parser/labeled_control_exp_associativity_typing_invalid.exp b/external-crates/move/crates/move-compiler/tests/move_2024/parser/labeled_control_exp_associativity_typing_invalid.exp index c89ea5aa9dae2..c97e100134719 100644 --- a/external-crates/move/crates/move-compiler/tests/move_2024/parser/labeled_control_exp_associativity_typing_invalid.exp +++ b/external-crates/move/crates/move-compiler/tests/move_2024/parser/labeled_control_exp_associativity_typing_invalid.exp @@ -2,13 +2,13 @@ error[E04007]: incompatible types ┌─ tests/move_2024/parser/labeled_control_exp_associativity_typing_invalid.move:12:9 │ 7 │ fun bar(): u64 { 0 } - │ --- Found: 'u64'. It is not compatible with the other type. 
+ │ --- Given: 'u64' · 12 │ if (cond) bar() + 1; │ ^^^^^^^^^^^^^^^^^^^ │ │ - │ Incompatible branches - │ Found: '()'. It is not compatible with the other type. + │ Invalid 'if'. The body of an 'if' without an 'else' must have type '()' + │ Expected: '()' error[E04003]: built-in operation not supported ┌─ tests/move_2024/parser/labeled_control_exp_associativity_typing_invalid.move:15:9 diff --git a/external-crates/move/crates/move-compiler/tests/move_2024/parsing/clever_errors_raw_abort.move b/external-crates/move/crates/move-compiler/tests/move_2024/parsing/clever_errors_raw_abort.move new file mode 100644 index 0000000000000..b12ba9f501c91 --- /dev/null +++ b/external-crates/move/crates/move-compiler/tests/move_2024/parsing/clever_errors_raw_abort.move @@ -0,0 +1,40 @@ +#[allow(dead_code)] +module 0x42::a; + +fun f() { + abort +} + +fun f1(): u64 { + abort; + 1 + 1 +} + +fun f2(): u64 { + 1 + 2; + abort; + 1 + 1 +} + +fun f3(): u64 { + 1 + abort; + 1 + 1 +} + +fun f4(): u64 { + abort abort; + 1 + 1 +} + +#[allow(unused_trailing_semi)] +fun f5() { + abort; +} + +fun f6() { + assert!(abort); +} + +fun f7(v: u64) { + if (v > 100) abort +} diff --git a/external-crates/move/crates/move-compiler/tests/move_2024/parsing/expr_abort_missing_value.move b/external-crates/move/crates/move-compiler/tests/move_2024/parsing/expr_abort_missing_value.move new file mode 100644 index 0000000000000..3edcf78e1e932 --- /dev/null +++ b/external-crates/move/crates/move-compiler/tests/move_2024/parsing/expr_abort_missing_value.move @@ -0,0 +1,5 @@ +module 0x42::M { + fun f(v: u64) { + if (v > 100) abort + } +} diff --git a/external-crates/move/crates/move-compiler/tests/move_check/parser/control_exp_associativity_else_after_if_block.exp b/external-crates/move/crates/move-compiler/tests/move_check/parser/control_exp_associativity_else_after_if_block.exp index f8e6e2bde401f..f77e9f1d5e7f7 100644 --- a/external-crates/move/crates/move-compiler/tests/move_check/parser/control_exp_associativity_else_after_if_block.exp +++ b/external-crates/move/crates/move-compiler/tests/move_check/parser/control_exp_associativity_else_after_if_block.exp @@ -2,13 +2,31 @@ error[E04007]: incompatible types ┌─ tests/move_check/parser/control_exp_associativity_else_after_if_block.move:13:9 │ 7 │ fun t(cond: bool, s1: S, s2: S) { - │ - Found: '0x42::M::S'. It is not compatible with the other type. + │ - Given: '0x42::M::S' · 13 │ if (cond) { s1 }.f else s2.f │ ^^^^^^^^^^^^^^^^ │ │ - │ Incompatible branches - │ Found: '()'. It is not compatible with the other type. + │ Invalid 'if'. 
The body of an 'if' without an 'else' must have type '()' + │ Expected: '()' + +error[E04005]: expected a single type + ┌─ tests/move_check/parser/control_exp_associativity_else_after_if_block.move:13:9 + │ +13 │ if (cond) { s1 }.f else s2.f + │ ^^^^^^^^^^^^^^^^ + │ │ + │ Invalid dot access + │ Expected a single type, but found expression list type: '()' + +error[E04009]: expected specific type + ┌─ tests/move_check/parser/control_exp_associativity_else_after_if_block.move:13:9 + │ +13 │ if (cond) { s1 }.f else s2.f + │ ^^^^^^^^^^^^^^^^^^ + │ │ + │ Unbound field 'f' + │ Expected a struct type in the current module but got: '()' error[E01002]: unexpected token ┌─ tests/move_check/parser/control_exp_associativity_else_after_if_block.move:13:28 diff --git a/external-crates/move/crates/move-compiler/tests/move_check/parser/control_exp_associativity_typing_invalid.exp b/external-crates/move/crates/move-compiler/tests/move_check/parser/control_exp_associativity_typing_invalid.exp index 789d0a379b1a2..be79cfda6a95f 100644 --- a/external-crates/move/crates/move-compiler/tests/move_check/parser/control_exp_associativity_typing_invalid.exp +++ b/external-crates/move/crates/move-compiler/tests/move_check/parser/control_exp_associativity_typing_invalid.exp @@ -2,13 +2,13 @@ error[E04007]: incompatible types ┌─ tests/move_check/parser/control_exp_associativity_typing_invalid.move:12:9 │ 7 │ fun bar(): u64 { 0 } - │ --- Found: 'u64'. It is not compatible with the other type. + │ --- Given: 'u64' · 12 │ if (cond) bar() + 1; │ ^^^^^^^^^^^^^^^^^^^ │ │ - │ Incompatible branches - │ Found: '()'. It is not compatible with the other type. + │ Invalid 'if'. The body of an 'if' without an 'else' must have type '()' + │ Expected: '()' error[E04003]: built-in operation not supported ┌─ tests/move_check/parser/control_exp_associativity_typing_invalid.move:15:9 diff --git a/external-crates/move/crates/move-compiler/tests/move_check/parser/expr_abort_missing_value.exp b/external-crates/move/crates/move-compiler/tests/move_check/parser/expr_abort_missing_value.exp index 802e95f26007c..9fd456e62887b 100644 --- a/external-crates/move/crates/move-compiler/tests/move_check/parser/expr_abort_missing_value.exp +++ b/external-crates/move/crates/move-compiler/tests/move_check/parser/expr_abort_missing_value.exp @@ -1,9 +1,8 @@ -error[E01002]: unexpected token - ┌─ tests/move_check/parser/expr_abort_missing_value.move:5:5 +error[E13001]: feature is not supported in specified edition + ┌─ tests/move_check/parser/expr_abort_missing_value.move:4:22 │ -5 │ } - │ ^ - │ │ - │ Unexpected '}' - │ Expected an expression term +4 │ if (v > 100) abort + │ ^^^^^ Clever `assert!`, `abort`, and `#[error]` are not supported by current edition 'legacy', only '2024.alpha' and '2024.beta' support this feature + │ + = You can update the edition in the 'Move.toml', or via command line flag if invoking the compiler directly. 
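A recurring pattern in this change (the typing pass, the `simple_visitor!` macro, and `plan_builder.rs` above) is that passes no longer take `&mut CompilationEnv`: each pass holds a shared `&CompilationEnv` plus its own `WarningFiltersScope`, pushes and pops `#[allow(...)]` filters as it enters and leaves items, and reports diagnostics through that scope. The sketch below condenses that pattern for illustration only; the `Context` shown is hypothetical, and the calls and import paths mirror the ones introduced in the diff above:

    use crate::{
        diagnostics::{
            warning_filters::{WarningFilters, WarningFiltersScope},
            Diagnostic,
        },
        shared::CompilationEnv,
    };

    /// Hypothetical pass-local context mirroring the pattern adopted in this change.
    struct Context<'env> {
        env: &'env CompilationEnv,
        warning_filters_scope: WarningFiltersScope,
    }

    impl<'env> Context<'env> {
        fn new(env: &'env CompilationEnv) -> Self {
            // Start from the compilation-wide filter scope; no `&mut` access to the env is needed.
            let warning_filters_scope = env.top_level_warning_filter_scope().clone();
            Self { env, warning_filters_scope }
        }

        /// Diagnostics are checked against the pass-local filter scope before being recorded.
        fn add_diag(&self, diag: Diagnostic) {
            self.env.add_diag(&self.warning_filters_scope, diag);
        }

        /// Entering a module or member pushes its warning filters; leaving it pops them.
        fn push_warning_filter_scope(&mut self, filters: WarningFilters) {
            self.warning_filters_scope.push(filters)
        }

        fn pop_warning_filter_scope(&mut self) {
            self.warning_filters_scope.pop()
        }
    }
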
diff --git a/external-crates/move/crates/move-compiler/tests/move_check/parser/expr_abort_missing_value.move b/external-crates/move/crates/move-compiler/tests/move_check/parser/expr_abort_missing_value.move index 2b53c3c6bbe93..b167cba1efde3 100644 --- a/external-crates/move/crates/move-compiler/tests/move_check/parser/expr_abort_missing_value.move +++ b/external-crates/move/crates/move-compiler/tests/move_check/parser/expr_abort_missing_value.move @@ -1,6 +1,6 @@ module 0x42::M { - fun f(_v: u64) { - // Aborts always require a value + fun f(v: u64) { + // Aborts always require a value if not in Move 2024 if (v > 100) abort } } diff --git a/external-crates/move/crates/move-compiler/tests/move_check/translated_ir_tests/move/commands/abort_negative_stack_size.exp b/external-crates/move/crates/move-compiler/tests/move_check/translated_ir_tests/move/commands/abort_negative_stack_size.exp deleted file mode 100644 index 95782c0d701ec..0000000000000 --- a/external-crates/move/crates/move-compiler/tests/move_check/translated_ir_tests/move/commands/abort_negative_stack_size.exp +++ /dev/null @@ -1,9 +0,0 @@ -error[E01002]: unexpected token - ┌─ tests/move_check/translated_ir_tests/move/commands/abort_negative_stack_size.move:6:1 - │ -6 │ } - │ ^ - │ │ - │ Unexpected '}' - │ Expected an expression term - diff --git a/external-crates/move/crates/move-compiler/tests/move_check/translated_ir_tests/move/commands/abort_negative_stack_size.move b/external-crates/move/crates/move-compiler/tests/move_check/translated_ir_tests/move/commands/abort_negative_stack_size.move deleted file mode 100644 index 5ae2e1b36d63a..0000000000000 --- a/external-crates/move/crates/move-compiler/tests/move_check/translated_ir_tests/move/commands/abort_negative_stack_size.move +++ /dev/null @@ -1,7 +0,0 @@ -// check: NEGATIVE_STACK_SIZE_WITHIN_BLOCK -module 0x42::m { - -fun main() { - abort -} -} diff --git a/external-crates/move/crates/move-compiler/tests/move_check/typing/if_no_else.exp b/external-crates/move/crates/move-compiler/tests/move_check/typing/if_no_else.exp new file mode 100644 index 0000000000000..56264bc013239 --- /dev/null +++ b/external-crates/move/crates/move-compiler/tests/move_check/typing/if_no_else.exp @@ -0,0 +1,34 @@ +error[E04007]: incompatible types + ┌─ tests/move_check/typing/if_no_else.move:4:9 + │ +4 │ if (cond) 0; + │ ^^^^^^^^^^^ + │ │ │ + │ │ Given: integer + │ Invalid 'if'. The body of an 'if' without an 'else' must have type '()' + │ Expected: '()' + +error[E04007]: incompatible types + ┌─ tests/move_check/typing/if_no_else.move:5:9 + │ + 5 │ if (cond) foo(); + │ ^^^^^^^^^^^^^^^ + │ │ + │ Invalid 'if'. The body of an 'if' without an 'else' must have type '()' + │ Expected: '()' + · +13 │ fun foo(): u64 { 0 } + │ --- Given: 'u64' + +error[E04007]: incompatible types + ┌─ tests/move_check/typing/if_no_else.move:6:9 + │ + 6 │ â•­ â•­ if (cond) { + 7 │ │ │ let x = 0; + 8 │ │ │ let y = 1; + │ │ │ - Given: integer + 9 │ │ │ x * y +10 │ │ │ } + │ ╰─│─────────^ Invalid 'if'. 
The body of an 'if' without an 'else' must have type '()' + │ ╰─────────' Expected: '()' + diff --git a/external-crates/move/crates/move-compiler/tests/move_check/typing/if_no_else.move b/external-crates/move/crates/move-compiler/tests/move_check/typing/if_no_else.move new file mode 100644 index 0000000000000..245ac3a26b03e --- /dev/null +++ b/external-crates/move/crates/move-compiler/tests/move_check/typing/if_no_else.move @@ -0,0 +1,14 @@ + +module a::m { + fun t(cond: bool) { + if (cond) 0; + if (cond) foo(); + if (cond) { + let x = 0; + let y = 1; + x * y + } + } + + fun foo(): u64 { 0 } +} diff --git a/external-crates/move/crates/move-compiler/tests/move_check_testsuite.rs b/external-crates/move/crates/move-compiler/tests/move_check_testsuite.rs index 502e5421645bb..1f9066f2a5505 100644 --- a/external-crates/move/crates/move-compiler/tests/move_check_testsuite.rs +++ b/external-crates/move/crates/move-compiler/tests/move_check_testsuite.rs @@ -10,6 +10,7 @@ use move_command_line_common::{ }; use move_compiler::{ command_line::compiler::move_check_for_errors, + diagnostics::warning_filters::WarningFilters, diagnostics::*, editions::{Edition, Flavor}, linters::{self, LintLevel}, diff --git a/external-crates/move/crates/move-compiler/tests/sui_mode/linter/coin_field.exp b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/coin_field.exp index cd2e3896f492e..f28726716acaf 100644 --- a/external-crates/move/crates/move-compiler/tests/sui_mode/linter/coin_field.exp +++ b/external-crates/move/crates/move-compiler/tests/sui_mode/linter/coin_field.exp @@ -1,22 +1,16 @@ warning[Lint W99003]: sub-optimal 'sui::coin::Coin' field type - ┌─ tests/sui_mode/linter/coin_field.move:11:12 + ┌─ tests/sui_mode/linter/coin_field.move:13:12 │ -11 │ struct S2 has key, store { - │ ^^ The field 'c' of 'S2' has type 'sui::coin::Coin' -12 │ id: UID, 13 │ c: Coin, - │ - Storing 'sui::balance::Balance' in this field will typically be more space-efficient + │ ^^^^^^^^ Sub-optimal 'sui::coin::Coin' field type. Using 'sui::balance::Balance' instead will be more space efficient │ = This warning can be suppressed with '#[allow(lint(coin_field))]' applied to the 'module' or module member ('const', 'fun', or 'struct') warning[Lint W99003]: sub-optimal 'sui::coin::Coin' field type - ┌─ tests/sui_mode/linter/coin_field.move:25:12 + ┌─ tests/sui_mode/linter/coin_field.move:27:12 │ -25 │ struct S2 has key, store { - │ ^^ The field 'c' of 'S2' has type 'sui::coin::Coin' -26 │ id: UID, 27 │ c: Balance, - │ - Storing 'sui::balance::Balance' in this field will typically be more space-efficient + │ ^^^^^^^^^^^ Sub-optimal 'sui::coin::Coin' field type. Using 'sui::balance::Balance' instead will be more space efficient │ = This warning can be suppressed with '#[allow(lint(coin_field))]' applied to the 'module' or module member ('const', 'fun', or 'struct') diff --git a/external-crates/move/crates/move-core-types/src/annotated_extractor.rs b/external-crates/move/crates/move-core-types/src/annotated_extractor.rs new file mode 100644 index 0000000000000..72c9a1bc7717d --- /dev/null +++ b/external-crates/move/crates/move-core-types/src/annotated_extractor.rs @@ -0,0 +1,334 @@ +// Copyright (c) The Move Contributors +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + account_address::AccountAddress, annotated_value as A, annotated_visitor as AV, + language_storage::TypeTag, +}; + +/// Elements are components of paths that select values from the sub-structure of other values. 
+/// They are split into two categories: +/// +/// - Selectors, which recurse into the sub-structure. +/// - Filters, which check properties of the value at that position in the sub-structure. +#[derive(Debug, Clone)] +pub enum Element<'e> { + // Selectors + /// Select a named field, assuming the value in question is a struct or an enum variant. + Field(&'e str), + + /// Select a positional element. This can be the element of a vector, or it can be a positional + /// field in an enum or a struct. + Index(u64), + + // Filters + /// Confirm that the current value has a certain type. + Type(&'e TypeTag), + + /// Confirm that the current value is an enum and its variant has this name. Note that to + /// filter on both the enum type and the variant name, the path must contain the Type first, + /// and then the Variant. Otherwise the type filter will be assumed + Variant(&'e str), +} + +/// An Extractor is an [`AV::Visitor`] that deserializes a sub-structure of the value. The +/// sub-structure is found at the end of a path of [`Element`]s which select fields from structs, +/// indices from vectors, and variants from enums. Deserialization is delegated to another visitor, +/// of type `V`, with the Extractor returning `Option`: +/// +/// - `Some(v)` if the given path exists in the value, or +/// - `None` if the path did not exist, +/// - Or an error if the underlying visitor failed for some reason. +/// +/// At every stage, the path can optionally start with an [`Element::Type`], which restricts the +/// type of the top-level value being deserialized. From there, the elements expected are driven by +/// the layout being deserialized: +/// +/// - When deserializing a vector, the next element must be an [`Element::Index`] which selects the +/// offset into the vector that the extractor recurses into. +/// - When deserializing a struct, the next element may be an [`Element::Field`] which selects the +/// field of the struct that the extractor recurses into by name, or an [`Element::Index`] which +/// selects the field by its offset. +/// - When deserializing a variant, the next elements may optionally be an [`Element::Variant`] +/// which expects a particular variant of the enum, followed by either an [`Element::Field`] or +/// an [`Element::Index`], similar to a struct. 
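A rough usage sketch (not part of this change): assuming `MyU64Visitor` is some visitor implementing `annotated_visitor::Visitor` whose `Value` is `u64`, and using the purely illustrative type `0x2::coin::Coin<0x2::sui::SUI>`, a caller could pull a nested field out of serialized bytes like so:

    use std::str::FromStr;

    use move_core_types::{
        annotated_extractor::{Element, Extractor},
        annotated_value as A,
        language_storage::TypeTag,
    };

    // `MyU64Visitor` is hypothetical: stands in for any inner visitor that
    // implements `annotated_visitor::Visitor` and materializes a `u64`.
    fn balance_value(
        bytes: &[u8],
        layout: &A::MoveTypeLayout,
        inner: &mut MyU64Visitor,
    ) -> anyhow::Result<Option<u64>> {
        // Filter on the root type, then select `balance`, then its `value` field.
        let coin = TypeTag::from_str("0x2::coin::Coin<0x2::sui::SUI>")?;
        let path = vec![
            Element::Type(&coin),
            Element::Field("balance"),
            Element::Field("value"),
        ];
        // `Ok(None)` means the path does not exist in the value;
        // `Ok(Some(v))` carries the inner visitor's result.
        Extractor::deserialize_value(bytes, layout, inner, path)
    }
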
+pub struct Extractor<'p, 'v, V> { + inner: &'v mut V, + path: &'p [Element<'p>], +} + +impl<'p, 'v, 'b, 'l, V: AV::Visitor<'b, 'l>> Extractor<'p, 'v, V> +where + V::Error: std::error::Error + Send + Sync + 'static, +{ + pub fn new(inner: &'v mut V, path: &'p [Element<'p>]) -> Self { + Self { inner, path } + } + + pub fn deserialize_value( + bytes: &'b [u8], + layout: &'l A::MoveTypeLayout, + inner: &'v mut V, + path: Vec>, + ) -> anyhow::Result> { + let mut extractor = Extractor::new(inner, &path); + A::MoveValue::visit_deserialize(bytes, layout, &mut extractor) + } + + pub fn deserialize_struct( + bytes: &'b [u8], + layout: &'l A::MoveStructLayout, + inner: &'v mut V, + path: Vec>, + ) -> anyhow::Result> { + let mut extractor = Extractor::new(inner, &path); + A::MoveStruct::visit_deserialize(bytes, layout, &mut extractor) + } +} + +impl<'p, 'v, 'b, 'l, V: AV::Visitor<'b, 'l>> AV::Visitor<'b, 'l> for Extractor<'p, 'v, V> { + type Value = Option; + type Error = V::Error; + + fn visit_u8( + &mut self, + driver: &AV::ValueDriver<'_, 'b, 'l>, + value: u8, + ) -> Result { + Ok(match self.path { + [] | [Element::Type(&TypeTag::U8)] => Some(self.inner.visit_u8(driver, value)?), + _ => None, + }) + } + + fn visit_u16( + &mut self, + driver: &AV::ValueDriver<'_, 'b, 'l>, + value: u16, + ) -> Result { + Ok(match self.path { + [] | [Element::Type(&TypeTag::U16)] => Some(self.inner.visit_u16(driver, value)?), + _ => None, + }) + } + + fn visit_u32( + &mut self, + driver: &AV::ValueDriver<'_, 'b, 'l>, + value: u32, + ) -> Result { + Ok(match self.path { + [] | [Element::Type(&TypeTag::U32)] => Some(self.inner.visit_u32(driver, value)?), + _ => None, + }) + } + + fn visit_u64( + &mut self, + driver: &AV::ValueDriver<'_, 'b, 'l>, + value: u64, + ) -> Result { + Ok(match self.path { + [] | [Element::Type(&TypeTag::U64)] => Some(self.inner.visit_u64(driver, value)?), + _ => None, + }) + } + + fn visit_u128( + &mut self, + driver: &AV::ValueDriver<'_, 'b, 'l>, + value: u128, + ) -> Result { + Ok(match self.path { + [] | [Element::Type(&TypeTag::U128)] => Some(self.inner.visit_u128(driver, value)?), + _ => None, + }) + } + + fn visit_u256( + &mut self, + driver: &AV::ValueDriver<'_, 'b, 'l>, + value: crate::u256::U256, + ) -> Result { + Ok(match self.path { + [] | [Element::Type(&TypeTag::U256)] => Some(self.inner.visit_u256(driver, value)?), + _ => None, + }) + } + + fn visit_bool( + &mut self, + driver: &AV::ValueDriver<'_, 'b, 'l>, + value: bool, + ) -> Result { + Ok(match self.path { + [] | [Element::Type(&TypeTag::Bool)] => Some(self.inner.visit_bool(driver, value)?), + _ => None, + }) + } + + fn visit_address( + &mut self, + driver: &AV::ValueDriver<'_, 'b, 'l>, + value: AccountAddress, + ) -> Result { + Ok(match self.path { + [] | [Element::Type(&TypeTag::Address)] => { + Some(self.inner.visit_address(driver, value)?) + } + _ => None, + }) + } + + fn visit_signer( + &mut self, + driver: &AV::ValueDriver<'_, 'b, 'l>, + value: AccountAddress, + ) -> Result { + Ok(match self.path { + [] | [Element::Type(&TypeTag::Signer)] => Some(self.inner.visit_signer(driver, value)?), + _ => None, + }) + } + + fn visit_vector( + &mut self, + driver: &mut AV::VecDriver<'_, 'b, 'l>, + ) -> Result { + use Element as E; + use TypeTag as T; + + // If there is a type element, check that it is a vector type with the correct element + // type, and remove it from the path. + let path = if let [E::Type(t), path @ ..] 
= self.path { + if !matches!(t, T::Vector(t) if driver.element_layout().is_type(t)) { + return Ok(None); + } + path + } else { + self.path + }; + + // If there are no further path elements, we can delegate to the inner visitor. + let [index, path @ ..] = path else { + return Ok(Some(self.inner.visit_vector(driver)?)); + }; + + // Visiting a vector, the next part of the path must be an index -- anything else is + // guaranteed to fail. + let E::Index(i) = index else { + return Ok(None); + }; + + // Skip all the elements before the index, and then recurse. + while driver.off() < *i && driver.skip_element()? {} + Ok(driver + .next_element(&mut Extractor { + inner: self.inner, + path, + })? + .flatten()) + } + + fn visit_struct( + &mut self, + driver: &mut AV::StructDriver<'_, 'b, 'l>, + ) -> Result { + use Element as E; + use TypeTag as T; + + // If there is a type element, check that it is a struct type with the correct struct tag, + // and remove it from the path. + let path = if let [E::Type(t), path @ ..] = self.path { + if !matches!(t, T::Struct(t) if driver.struct_layout().is_type(t)) { + return Ok(None); + } + path + } else { + self.path + }; + + // If there are no further path elements, we can delegate to the inner visitor. + let [field, path @ ..] = path else { + return Ok(Some(self.inner.visit_struct(driver)?)); + }; + + match field { + // Skip over mismatched fields by name. + E::Field(f) => { + while matches!(driver.peek_field(), Some(l) if l.name.as_str() != *f) { + driver.skip_field()?; + } + } + + // Skip over fields by offset. + E::Index(i) => while driver.off() < *i && driver.skip_field()?.is_some() {}, + + // Any other element is invalid in this position. + _ => return Ok(None), + } + + Ok(driver + .next_field(&mut Extractor { + inner: self.inner, + path, + })? + .and_then(|(_, v)| v)) + } + + fn visit_variant( + &mut self, + driver: &mut AV::VariantDriver<'_, 'b, 'l>, + ) -> Result { + use Element as E; + use TypeTag as T; + + // If there is a type element, check that it is a struct type with the correct struct tag, + // and remove it from the path. + let path = if let [E::Type(t), path @ ..] = self.path { + if !matches!(t, T::Struct(t) if driver.enum_layout().is_type(t)) { + return Ok(None); + } + path + } else { + self.path + }; + + // If there is a variant element, check that it matches and remove it from the path. + let path = if let [E::Variant(v), path @ ..] = path { + if driver.variant_name().as_str() != *v { + return Ok(None); + } + path + } else { + path + }; + + // If there are no further path elements, we can delegate to the inner visitor. + let [field, path @ ..] = path else { + return Ok(Some(self.inner.visit_variant(driver)?)); + }; + + match field { + // Skip over mismatched fields by name. + E::Field(f) => { + while matches!(driver.peek_field(), Some(l) if l.name.as_str() != *f) { + driver.skip_field()?; + } + } + + // Skip over fields by offset. + E::Index(i) => while driver.off() < *i && driver.skip_field()?.is_some() {}, + + // Any other element is invalid in this position. + _ => return Ok(None), + } + + Ok(driver + .next_field(&mut Extractor { + inner: self.inner, + path, + })? 
+ .and_then(|(_, v)| v)) + } +} diff --git a/external-crates/move/crates/move-core-types/src/annotated_value.rs b/external-crates/move/crates/move-core-types/src/annotated_value.rs index f5a31cc571941..22718e716f9b1 100644 --- a/external-crates/move/crates/move-core-types/src/annotated_value.rs +++ b/external-crates/move/crates/move-core-types/src/annotated_value.rs @@ -142,6 +142,43 @@ pub enum MoveTypeLayout { Enum(Box), } +impl MoveStructLayout { + /// Returns `true` if and only if the layout is for `type_`. + pub fn is_type(&self, type_: &StructTag) -> bool { + self.type_ == *type_ + } +} + +impl MoveEnumLayout { + /// Returns `true` if and only if the layout is for `type_`. + pub fn is_type(&self, type_: &StructTag) -> bool { + self.type_ == *type_ + } +} + +impl MoveTypeLayout { + /// Returns `true` if and only if the layout is for `type_`. + pub fn is_type(&self, type_: &TypeTag) -> bool { + use MoveTypeLayout as L; + use TypeTag as T; + + match self { + L::Bool => matches!(type_, T::Bool), + L::U8 => matches!(type_, T::U8), + L::U16 => matches!(type_, T::U16), + L::U32 => matches!(type_, T::U32), + L::U64 => matches!(type_, T::U64), + L::U128 => matches!(type_, T::U128), + L::U256 => matches!(type_, T::U256), + L::Address => matches!(type_, T::Address), + L::Signer => matches!(type_, T::Signer), + L::Vector(l) => matches!(type_, T::Vector(t) if l.is_type(t)), + L::Struct(l) => matches!(type_, T::Struct(t) if l.is_type(t)), + L::Enum(l) => matches!(type_, T::Struct(t) if l.is_type(t)), + } + } +} + impl MoveValue { /// TODO (annotated-visitor): Port legacy uses of this method to `BoundedVisitor`. pub fn simple_deserialize(blob: &[u8], ty: &MoveTypeLayout) -> AResult { diff --git a/external-crates/move/crates/move-core-types/src/annotated_visitor.rs b/external-crates/move/crates/move-core-types/src/annotated_visitor.rs index 9160d4565dc27..6e87e8405e61d 100644 --- a/external-crates/move/crates/move-core-types/src/annotated_visitor.rs +++ b/external-crates/move/crates/move-core-types/src/annotated_visitor.rs @@ -333,7 +333,7 @@ pub struct VecDriver<'c, 'b, 'l> { pub struct StructDriver<'c, 'b, 'l> { inner: ValueDriver<'c, 'b, 'l>, layout: &'l MoveStructLayout, - off: usize, + off: u64, } /// Exposes information about a variant being visited (its layout, details about the next field to @@ -345,7 +345,7 @@ pub struct VariantDriver<'c, 'b, 'l> { tag: u16, variant_name: &'l IdentStr, variant_layout: &'l [MoveFieldLayout], - off: usize, + off: u64, } #[derive(thiserror::Error, Debug)] @@ -461,7 +461,12 @@ impl<'c, 'b, 'l> VecDriver<'c, 'b, 'l> { self.layout } - /// The number of elements in this vector + /// The number of elements in this vector that have been visited so far. + pub fn off(&self) -> u64 { + self.off + } + + /// The number of elements in this vector. pub fn len(&self) -> u64 { self.len } @@ -532,9 +537,14 @@ impl<'c, 'b, 'l> StructDriver<'c, 'b, 'l> { self.layout } + /// The number of fields in this struct that have been visited so far. + pub fn off(&self) -> u64 { + self.off + } + /// The layout of the next field to be visited (if there is one), or `None` otherwise. pub fn peek_field(&self) -> Option<&'l MoveFieldLayout> { - self.layout.fields.get(self.off) + self.layout.fields.get(self.off as usize) } /// Visit the next field in the struct. The driver accepts a visitor to use for this field, @@ -624,9 +634,14 @@ impl<'c, 'b, 'l> VariantDriver<'c, 'b, 'l> { self.variant_name } + /// The number of elements in this vector that have been visited so far. 
+ pub fn off(&self) -> u64 { + self.off + } + /// The layout of the next field to be visited (if there is one), or `None` otherwise. pub fn peek_field(&self) -> Option<&'l MoveFieldLayout> { - self.variant_layout.get(self.off) + self.variant_layout.get(self.off as usize) } /// Visit the next field in the variant. The driver accepts a visitor to use for this field, diff --git a/external-crates/move/crates/move-core-types/src/language_storage.rs b/external-crates/move/crates/move-core-types/src/language_storage.rs index 64f314cfe49ef..bbf597fc5d326 100644 --- a/external-crates/move/crates/move-core-types/src/language_storage.rs +++ b/external-crates/move/crates/move-core-types/src/language_storage.rs @@ -6,7 +6,7 @@ use crate::{ account_address::AccountAddress, gas_algebra::{AbstractMemorySize, BOX_ABSTRACT_SIZE, ENUM_BASE_ABSTRACT_SIZE}, identifier::{IdentStr, Identifier}, - parser::{parse_struct_tag, parse_type_tag}, + parsing::types::{ParsedModuleId, ParsedStructType, ParsedType}, }; use move_proc_macros::test_variant_order; use once_cell::sync::Lazy; @@ -137,7 +137,7 @@ impl FromStr for TypeTag { type Err = anyhow::Error; fn from_str(s: &str) -> Result { - parse_type_tag(s) + ParsedType::parse(s)?.into_type_tag(&|_| None) } } @@ -252,7 +252,7 @@ impl FromStr for StructTag { type Err = anyhow::Error; fn from_str(s: &str) -> Result { - parse_struct_tag(s) + ParsedStructType::parse(s)?.into_struct_tag(&|_| None) } } @@ -327,6 +327,13 @@ impl Display for ModuleId { } } +impl FromStr for ModuleId { + type Err = anyhow::Error; + fn from_str(s: &str) -> Result { + ParsedModuleId::parse(s)?.into_module_id(&|_| None) + } +} + impl ModuleId { pub fn short_str_lossless(&self) -> String { format!("0x{}::{}", self.address.short_str_lossless(), self.name) diff --git a/external-crates/move/crates/move-core-types/src/lib.rs b/external-crates/move/crates/move-core-types/src/lib.rs index b43bbdca21714..ed3921df2dfdc 100644 --- a/external-crates/move/crates/move-core-types/src/lib.rs +++ b/external-crates/move/crates/move-core-types/src/lib.rs @@ -8,6 +8,7 @@ use std::fmt; pub mod abi; pub mod account_address; +pub mod annotated_extractor; pub mod annotated_value; pub mod annotated_visitor; pub mod effects; @@ -17,7 +18,7 @@ pub mod identifier; pub mod language_storage; pub mod metadata; pub mod move_resource; -pub mod parser; +pub mod parsing; #[cfg(any(test, feature = "fuzzing"))] pub mod proptest_types; pub mod resolver; diff --git a/external-crates/move/crates/move-core-types/src/parser.rs b/external-crates/move/crates/move-core-types/src/parser.rs deleted file mode 100644 index 99af0f2dce1b5..0000000000000 --- a/external-crates/move/crates/move-core-types/src/parser.rs +++ /dev/null @@ -1,632 +0,0 @@ -// Copyright (c) The Diem Core Contributors -// Copyright (c) The Move Contributors -// SPDX-License-Identifier: Apache-2.0 - -use crate::{ - account_address::AccountAddress, - identifier::{self, Identifier}, - language_storage::{StructTag, TypeTag}, - transaction_argument::TransactionArgument, -}; -use anyhow::{bail, format_err, Result}; -use std::iter::Peekable; - -#[derive(Eq, PartialEq, Debug)] -enum Token { - U8Type, - U16Type, - U32Type, - U64Type, - U128Type, - U256Type, - BoolType, - AddressType, - VectorType, - SignerType, - Whitespace(String), - Name(String), - Address(String), - U8(String), - U16(String), - U32(String), - U64(String), - U128(String), - U256(String), - - Bytes(String), - True, - False, - ColonColon, - Lt, - Gt, - Comma, - EOF, -} - -impl Token { - fn is_whitespace(&self) -> 
bool { - matches!(self, Self::Whitespace(_)) - } -} - -fn token_as_name(tok: Token) -> Result { - use Token::*; - Ok(match tok { - U8Type => "u8".to_string(), - U16Type => "u16".to_string(), - U32Type => "u32".to_string(), - U64Type => "u64".to_string(), - U128Type => "u128".to_string(), - U256Type => "u256".to_string(), - BoolType => "bool".to_string(), - AddressType => "address".to_string(), - VectorType => "vector".to_string(), - True => "true".to_string(), - False => "false".to_string(), - SignerType => "signer".to_string(), - Name(s) => s, - Whitespace(_) | Address(_) | U8(_) | U16(_) | U32(_) | U64(_) | U128(_) | U256(_) - | Bytes(_) | ColonColon | Lt | Gt | Comma | EOF => { - bail!("Invalid token. Expected a name but got {:?}", tok) - } - }) -} - -fn name_token(s: String) -> Token { - match s.as_str() { - "u8" => Token::U8Type, - "u16" => Token::U16Type, - "u32" => Token::U32Type, - "u64" => Token::U64Type, - "u128" => Token::U128Type, - "u256" => Token::U256Type, - "bool" => Token::BoolType, - "address" => Token::AddressType, - "vector" => Token::VectorType, - "true" => Token::True, - "false" => Token::False, - "signer" => Token::SignerType, - _ => Token::Name(s), - } -} - -fn next_number(initial: char, mut it: impl Iterator) -> Result<(Token, usize)> { - let mut num = String::new(); - num.push(initial); - loop { - match it.next() { - Some(c) if c.is_ascii_digit() || c == '_' => num.push(c), - Some(c) if c.is_alphanumeric() => { - let mut suffix = String::new(); - suffix.push(c); - loop { - match it.next() { - Some(c) if c.is_ascii_alphanumeric() => suffix.push(c), - _ => { - let len = num.len() + suffix.len(); - let tok = match suffix.as_str() { - "u8" => Token::U8(num), - "u16" => Token::U16(num), - "u32" => Token::U32(num), - "u64" => Token::U64(num), - "u128" => Token::U128(num), - "u256" => Token::U256(num), - _ => bail!("invalid suffix"), - }; - return Ok((tok, len)); - } - } - } - } - _ => { - let len = num.len(); - return Ok((Token::U64(num), len)); - } - } - } -} - -#[allow(clippy::many_single_char_names)] -fn next_token(s: &str) -> Result> { - let mut it = s.chars().peekable(); - match it.next() { - None => Ok(None), - Some(c) => Ok(Some(match c { - '<' => (Token::Lt, 1), - '>' => (Token::Gt, 1), - ',' => (Token::Comma, 1), - ':' => match it.next() { - Some(':') => (Token::ColonColon, 2), - _ => bail!("unrecognized token"), - }, - '0' if it.peek() == Some(&'x') || it.peek() == Some(&'X') => { - it.next().unwrap(); - match it.next() { - Some(c) if c.is_ascii_hexdigit() => { - let mut r = String::new(); - r.push('0'); - r.push('x'); - r.push(c); - for c in it { - if c.is_ascii_hexdigit() { - r.push(c); - } else { - break; - } - } - let len = r.len(); - (Token::Address(r), len) - } - _ => bail!("unrecognized token"), - } - } - c if c.is_ascii_digit() => next_number(c, it)?, - 'b' if it.peek() == Some(&'"') => { - it.next().unwrap(); - let mut r = String::new(); - loop { - match it.next() { - Some('"') => break, - Some(c) if c.is_ascii() => r.push(c), - _ => bail!("unrecognized token"), - } - } - let len = r.len() + 3; - (Token::Bytes(hex::encode(r)), len) - } - 'x' if it.peek() == Some(&'"') => { - it.next().unwrap(); - let mut r = String::new(); - loop { - match it.next() { - Some('"') => break, - Some(c) if c.is_ascii_hexdigit() => r.push(c), - _ => bail!("unrecognized token"), - } - } - let len = r.len() + 3; - (Token::Bytes(r), len) - } - c if c.is_ascii_whitespace() => { - let mut r = String::new(); - r.push(c); - for c in it { - if c.is_ascii_whitespace() { - 
r.push(c); - } else { - break; - } - } - let len = r.len(); - (Token::Whitespace(r), len) - } - c if c.is_ascii_alphabetic() => { - let mut r = String::new(); - r.push(c); - for c in it { - if identifier::is_valid_identifier_char(c) { - r.push(c); - } else { - break; - } - } - let len = r.len(); - (name_token(r), len) - } - _ => bail!("unrecognized token"), - })), - } -} - -fn tokenize(mut s: &str) -> Result> { - let mut v = vec![]; - while let Some((tok, n)) = next_token(s)? { - v.push(tok); - s = &s[n..]; - } - Ok(v) -} - -struct Parser> { - it: Peekable, -} - -impl> Parser { - fn new>(v: T) -> Self { - Self { - it: v.into_iter().peekable(), - } - } - - fn next(&mut self) -> Result { - match self.it.next() { - Some(tok) => Ok(tok), - None => bail!("out of tokens, this should not happen"), - } - } - - fn peek(&mut self) -> Option<&Token> { - self.it.peek() - } - - fn consume(&mut self, tok: Token) -> Result<()> { - let t = self.next()?; - if t != tok { - bail!("expected token {:?}, got {:?}", tok, t) - } - Ok(()) - } - - fn parse_comma_list( - &mut self, - parse_list_item: F, - end_token: Token, - allow_trailing_comma: bool, - ) -> Result> - where - F: Fn(&mut Self) -> Result, - R: std::fmt::Debug, - { - let mut v = vec![]; - if !(self.peek() == Some(&end_token)) { - loop { - v.push(parse_list_item(self)?); - if self.peek() == Some(&end_token) { - break; - } - self.consume(Token::Comma)?; - if self.peek() == Some(&end_token) && allow_trailing_comma { - break; - } - } - } - Ok(v) - } - - fn parse_type_tag(&mut self) -> Result { - Ok(match self.next()? { - Token::U8Type => TypeTag::U8, - Token::U16Type => TypeTag::U16, - Token::U32Type => TypeTag::U32, - Token::U64Type => TypeTag::U64, - Token::U128Type => TypeTag::U128, - Token::U256Type => TypeTag::U256, - Token::BoolType => TypeTag::Bool, - Token::AddressType => TypeTag::Address, - Token::SignerType => TypeTag::Signer, - Token::VectorType => { - self.consume(Token::Lt)?; - let ty = self.parse_type_tag()?; - self.consume(Token::Gt)?; - TypeTag::Vector(Box::new(ty)) - } - Token::Address(addr) => { - self.consume(Token::ColonColon)?; - let module = self.next().and_then(token_as_name)?; - self.consume(Token::ColonColon)?; - let name = self.next().and_then(token_as_name)?; - let ty_args = if self.peek() == Some(&Token::Lt) { - self.next()?; - let ty_args = - self.parse_comma_list(|parser| parser.parse_type_tag(), Token::Gt, true)?; - self.consume(Token::Gt)?; - ty_args - } else { - vec![] - }; - TypeTag::Struct(Box::new(StructTag { - address: AccountAddress::from_hex_literal(&addr)?, - module: Identifier::new(module)?, - name: Identifier::new(name)?, - type_params: ty_args, - })) - } - tok => bail!("unexpected token {:?}, expected type tag", tok), - }) - } - - fn parse_transaction_argument(&mut self) -> Result { - Ok(match self.next()? { - Token::U8(s) => TransactionArgument::U8(s.replace('_', "").parse()?), - Token::U16(s) => TransactionArgument::U16(s.replace('_', "").parse()?), - Token::U32(s) => TransactionArgument::U32(s.replace('_', "").parse()?), - Token::U64(s) => TransactionArgument::U64(s.replace('_', "").parse()?), - Token::U128(s) => TransactionArgument::U128(s.replace('_', "").parse()?), - Token::U256(s) => TransactionArgument::U256(s.replace('_', "").parse()?), - Token::True => TransactionArgument::Bool(true), - Token::False => TransactionArgument::Bool(false), - Token::Address(addr) => { - TransactionArgument::Address(AccountAddress::from_hex_literal(&addr)?) 
- } - Token::Bytes(s) => TransactionArgument::U8Vector(hex::decode(s)?), - tok => bail!("unexpected token {:?}, expected transaction argument", tok), - }) - } -} - -fn parse(s: &str, f: F) -> Result -where - F: Fn(&mut Parser>) -> Result, -{ - let mut tokens: Vec<_> = tokenize(s)? - .into_iter() - .filter(|tok| !tok.is_whitespace()) - .collect(); - tokens.push(Token::EOF); - let mut parser = Parser::new(tokens); - let res = f(&mut parser)?; - parser.consume(Token::EOF)?; - Ok(res) -} - -pub fn parse_type_tag(s: &str) -> Result { - parse(s, |parser| parser.parse_type_tag()) -} - -pub fn parse_transaction_argument(s: &str) -> Result { - parse(s, |parser| parser.parse_transaction_argument()) -} - -pub fn parse_struct_tag(s: &str) -> Result { - let type_tag = parse(s, |parser| parser.parse_type_tag()) - .map_err(|e| format_err!("invalid struct tag: {}, {}", s, e))?; - if let TypeTag::Struct(struct_tag) = type_tag { - Ok(*struct_tag) - } else { - bail!("invalid struct tag: {}", s) - } -} - -#[cfg(test)] -mod tests { - use std::str::FromStr; - - use crate::{ - account_address::AccountAddress, - parser::{parse_struct_tag, parse_transaction_argument, parse_type_tag}, - transaction_argument::TransactionArgument, - u256, - }; - - #[allow(clippy::unreadable_literal)] - #[test] - fn tests_parse_transaction_argument_positive() { - use TransactionArgument as T; - - for (s, expected) in &[ - (" 0u8", T::U8(0)), - ("0u8", T::U8(0)), - ("255u8", T::U8(255)), - ("0", T::U64(0)), - ("0123", T::U64(123)), - ("0u64", T::U64(0)), - ("18446744073709551615", T::U64(18446744073709551615)), - ("18446744073709551615u64", T::U64(18446744073709551615)), - ("0u128", T::U128(0)), - ("1_0u8", T::U8(1_0)), - ("10_u8", T::U8(10)), - ("10___u8", T::U8(10)), - ("1_000u64", T::U64(1_000)), - ("1_000", T::U64(1_000)), - ("1_0_0_0u64", T::U64(1_000)), - ("1_000_000u128", T::U128(1_000_000)), - ( - "340282366920938463463374607431768211455u128", - T::U128(340282366920938463463374607431768211455), - ), - (" 0u16", T::U16(0)), - ("0u16", T::U16(0)), - ("532u16", T::U16(532)), - ("65535u16", T::U16(65535)), - ("0u32", T::U32(0)), - ("01239498u32", T::U32(1239498)), - ("35366u32", T::U32(35366)), - ("4294967295u32", T::U32(4294967295)), - ("0u256", T::U256(u256::U256::from(0u8))), - ("1_0u16", T::U16(1_0)), - ("10_u16", T::U16(10)), - ("10___u16", T::U16(10)), - ("1_000u32", T::U32(1_000)), - ("1_0_00u32", T::U32(1_000)), - ("1_0_0_0u32", T::U32(1_000)), - ("1_000_000u256", T::U256(u256::U256::from(1_000_000u64))), - ( - "1_000_000_000u256", - T::U256(u256::U256::from(1_000_000_000u128)), - ), - ( - "3402823669209384634633746074317682114551234u256", - T::U256( - u256::U256::from_str("3402823669209384634633746074317682114551234").unwrap(), - ), - ), - ("true", T::Bool(true)), - ("false", T::Bool(false)), - ( - "0x0", - T::Address(AccountAddress::from_hex_literal("0x0").unwrap()), - ), - ( - "0x54afa3526", - T::Address(AccountAddress::from_hex_literal("0x54afa3526").unwrap()), - ), - ( - "0X54afa3526", - T::Address(AccountAddress::from_hex_literal("0x54afa3526").unwrap()), - ), - ("x\"7fff\"", T::U8Vector(vec![0x7f, 0xff])), - ("x\"\"", T::U8Vector(vec![])), - ("x\"00\"", T::U8Vector(vec![0x00])), - ("x\"deadbeef\"", T::U8Vector(vec![0xde, 0xad, 0xbe, 0xef])), - ] { - assert_eq!(&parse_transaction_argument(s).unwrap(), expected) - } - } - - #[test] - fn tests_parse_transaction_argument_negative() { - /// Test cases for the parser that should always fail. 
- const PARSE_VALUE_NEGATIVE_TEST_CASES: &[&str] = &[ - "-3", - "0u42", - "0u645", - "0u64x", - "0u6 4", - "0u", - "_10", - "_10_u8", - "_10__u8", - "_1014__u32", - "10_u8__", - "_", - "__", - "__4", - "_u8", - "5_bool", - "256u8", - "18446744073709551616u64", - "340282366920938463463374607431768211456u128", - "340282366920938463463374607431768211456340282366920938463463374607431768211456340282366920938463463374607431768211456u256", - "0xg", - "0x00g0", - "0x", - "0x_", - "", - "@@", - "()", - "x\"ffff", - "x\"a \"", - "x\" \"", - "x\"0g\"", - "x\"0\"", - "garbage", - "true3", - "3false", - "3 false", - "", - ]; - - for s in PARSE_VALUE_NEGATIVE_TEST_CASES { - assert!( - parse_transaction_argument(s).is_err(), - "test case unexpectedly succeeded: {}", - s - ) - } - } - - #[test] - fn test_type_tag() { - for s in &[ - "u64", - "bool", - "vector", - "vector>", - "vector", - "vector>", - "vector", - "vector>", - "vector", - "vector>", - "vector", - "vector>", - "signer", - "0x1::M::S", - "0x2::M::S_", - "0x3::M_::S", - "0x4::M_::S_", - "0x00000000004::M::S", - "0x1::M::S", - "0x1::M::S", - "0x1::M::S", - "0x1::M::S", - "0x1::M::S<0x2::P::Q>", - "vector<0x1::M::S>", - "vector<0x1::M_::S_>", - "vector>", - "0x1::M::S>", - "0x1::M::S>", - "0x1::M::S>", - "0x1::M::S>", - "0x1::M::S>", - "0x1::M::S>", - ] { - assert!(parse_type_tag(s).is_ok(), "Failed to parse tag {}", s); - } - } - - #[test] - fn test_parse_valid_struct_tag() { - let valid = vec![ - "0x1::Diem::Diem", - "0x1::Diem_Type::Diem", - "0x1::Diem_::Diem", - "0x1::X_123::X32_", - "0x1::Diem::Diem_Type", - "0x1::Diem::Diem<0x1::XDX::XDX>", - "0x1::Diem::Diem<0x1::XDX::XDX_Type>", - "0x1::Diem::Diem", - "0x1::Diem::Diem", - "0x1::Diem::Diem", - "0x1::Diem::Diem", - "0x1::Diem::Diem", - "0x1::Diem::Diem", - "0x1::Diem::Diem", - "0x1::Diem::Diem
", - "0x1::Diem::Diem", - "0x1::Diem::Diem>", - "0x1::Diem::Diem", - "0x1::Diem::Diem", - "0x1::Diem::Diem", - "0x1::Diem::Diem", - "0x1::Diem::Diem", - "0x1::Diem::Diem", - "0x1::Diem::Diem", - "0x1::Diem::Diem,address,signer>", - "0x1::Diem::Diem>>", - "0x1::Diem::Diem<0x1::Diem::Struct, 0x1::Diem::Diem>>>>", - ]; - for text in valid { - let st = parse_struct_tag(text).expect("valid StructTag"); - assert_eq!( - st.to_string().replace(' ', ""), - text.replace(' ', ""), - "text: {:?}, StructTag: {:?}", - text, - st - ); - } - } - - #[test] - fn test_parse_struct_tag_with_type_names() { - let names = vec![ - "address", "vector", "u128", "u256", "u64", "u32", "u16", "u8", "bool", "signer", - ]; - - let mut tests = vec![]; - for name in &names { - for name_type in &names { - tests.push(format!("0x1::{name}::{name_type}")) - } - } - - let mut instantiations = vec![]; - for ty in &tests { - for other_ty in &tests { - instantiations.push(format!("{ty}<{other_ty}>")) - } - } - - for text in tests.iter().chain(instantiations.iter()) { - let st = parse_struct_tag(text).expect("valid StructTag"); - assert_eq!( - st.to_string().replace(' ', ""), - text.replace(' ', ""), - "text: {:?}, StructTag: {:?}", - text, - st - ); - } - } -} diff --git a/external-crates/move/crates/move-command-line-common/src/address.rs b/external-crates/move/crates/move-core-types/src/parsing/address.rs similarity index 93% rename from external-crates/move/crates/move-command-line-common/src/address.rs rename to external-crates/move/crates/move-core-types/src/parsing/address.rs index 0e63a23b8d85d..44ff9810fc613 100644 --- a/external-crates/move/crates/move-command-line-common/src/address.rs +++ b/external-crates/move/crates/move-core-types/src/parsing/address.rs @@ -1,10 +1,10 @@ // Copyright (c) The Move Contributors // SPDX-License-Identifier: Apache-2.0 -use crate::parser::{parse_address_number, NumberFormat}; +use crate::account_address::AccountAddress; +use crate::parsing::parser::{parse_address_number, NumberFormat}; +use crate::u256::U256; use anyhow::anyhow; -use move_core_types::account_address::AccountAddress; -use num_bigint::BigUint; use std::{fmt, hash::Hash}; // Parsed Address, either a name or a numerical address @@ -62,10 +62,7 @@ impl NumericalAddress { pub fn parse_str(s: &str) -> Result { match parse_address_number(s) { - Some((n, format)) => Ok(NumericalAddress { - bytes: AccountAddress::new(n), - format, - }), + Some((n, format)) => Ok(NumericalAddress { bytes: n, format }), None => // TODO the kind of error is in an unstable nightly API // But currently the only way this should fail is if the number is too long @@ -90,7 +87,7 @@ impl fmt::Display for NumericalAddress { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self.format { NumberFormat::Decimal => { - let n = BigUint::from_bytes_be(self.bytes.as_ref()); + let n = U256::from_be_bytes(&self.bytes); write!(f, "{}", n) } NumberFormat::Hex => write!(f, "{:#X}", self), diff --git a/external-crates/move/crates/move-core-types/src/parsing/mod.rs b/external-crates/move/crates/move-core-types/src/parsing/mod.rs new file mode 100644 index 0000000000000..46c51e639f0a6 --- /dev/null +++ b/external-crates/move/crates/move-core-types/src/parsing/mod.rs @@ -0,0 +1,10 @@ +// Copyright (c) The Diem Core Contributors +// Copyright (c) The Move Contributors +// SPDX-License-Identifier: Apache-2.0 + +#![forbid(unsafe_code)] + +pub mod address; +pub mod parser; +pub mod types; +pub mod values; diff --git 
a/external-crates/move/crates/move-command-line-common/src/parser.rs b/external-crates/move/crates/move-core-types/src/parsing/parser.rs similarity index 60% rename from external-crates/move/crates/move-command-line-common/src/parser.rs rename to external-crates/move/crates/move-core-types/src/parsing/parser.rs index accd1d1a94653..eba50aef13801 100644 --- a/external-crates/move/crates/move-command-line-common/src/parser.rs +++ b/external-crates/move/crates/move-core-types/src/parsing/parser.rs @@ -1,21 +1,22 @@ // Copyright (c) The Move Contributors // SPDX-License-Identifier: Apache-2.0 -use crate::{ +use crate::parsing::{ address::{NumericalAddress, ParsedAddress}, types::{ParsedFqName, ParsedModuleId, ParsedStructType, ParsedType, TypeToken}, values::{ParsableValue, ParsedValue, ValueToken}, }; -use anyhow::{anyhow, bail, Result}; -use move_core_types::{ +use crate::{ account_address::AccountAddress, u256::{U256FromStrError, U256}, }; -use num_bigint::BigUint; +use anyhow::{anyhow, bail, Result}; use std::{fmt::Display, iter::Peekable, num::ParseIntError}; const MAX_TYPE_DEPTH: u64 = 128; const MAX_TYPE_NODE_COUNT: u64 = 256; +// See: https://stackoverflow.com/questions/43787672/the-max-number-of-digits-in-an-int-based-on-number-of-bits +const U256_MAX_DECIMAL_DIGITS: usize = 241 * AccountAddress::LENGTH / 100 + 1; pub trait Token: Display + Copy + Eq { fn is_whitespace(&self) -> bool; @@ -76,7 +77,7 @@ impl ParsedValue { } } -fn parse<'a, Tok: Token, R>( +pub(crate) fn parse<'a, Tok: Token, R>( s: &'a str, f: impl FnOnce(&mut Parser<'a, Tok, std::vec::IntoIter<(Tok, &'a str)>>) -> Result, ) -> Result { @@ -139,8 +140,12 @@ impl<'a, Tok: Token, I: Iterator> Parser<'a, Tok, I> { break; } self.advance(delim)?; - if is_end(self.peek_tok()) && allow_trailing_delim { - break; + if is_end(self.peek_tok()) { + if allow_trailing_delim { + break; + } else { + bail!("Invalid type list: trailing delimiter '{}'", delim) + } } } Ok(v) @@ -225,6 +230,9 @@ impl<'a, I: Iterator> Parser<'a, TypeToken, I> { true, )?; self.advance(TypeToken::Gt)?; + if type_args.is_empty() { + bail!("expected at least one type argument") + } type_args } _ => vec![], @@ -440,306 +448,23 @@ pub fn parse_u256(s: &str) -> Result<(U256, NumberFormat), U256FromStrError> { } // Parse an address from a decimal or hex encoding -pub fn parse_address_number(s: &str) -> Option<([u8; AccountAddress::LENGTH], NumberFormat)> { +pub fn parse_address_number(s: &str) -> Option<(AccountAddress, NumberFormat)> { let (txt, base) = determine_num_text_and_base(s); - let parsed = BigUint::parse_bytes( - txt.as_bytes(), + let txt = txt.replace('_', ""); + let max_len = match base { + NumberFormat::Hex => AccountAddress::LENGTH * 2, + NumberFormat::Decimal => U256_MAX_DECIMAL_DIGITS, + }; + if txt.len() > max_len { + return None; + } + let parsed = U256::from_str_radix( + &txt, match base { NumberFormat::Hex => 16, NumberFormat::Decimal => 10, }, - )?; - let bytes = parsed.to_bytes_be(); - if bytes.len() > AccountAddress::LENGTH { - return None; - } - let mut result = [0u8; AccountAddress::LENGTH]; - result[(AccountAddress::LENGTH - bytes.len())..].clone_from_slice(&bytes); - Some((result, base)) -} - -#[cfg(test)] -mod tests { - use crate::{ - address::{NumericalAddress, ParsedAddress}, - types::{ParsedStructType, ParsedType}, - values::ParsedValue, - }; - use move_core_types::{account_address::AccountAddress, identifier::Identifier, u256::U256}; - use proptest::prelude::*; - use proptest::proptest; - - #[allow(clippy::unreadable_literal)] - 
#[test] - fn tests_parse_value_positive() { - use ParsedValue as V; - let cases: &[(&str, V)] = &[ - (" 0u8", V::U8(0)), - ("0u8", V::U8(0)), - ("0xF_Fu8", V::U8(255)), - ("0xF__FF__Eu16", V::U16(u16::MAX - 1)), - ("0xFFF_FF__FF_Cu32", V::U32(u32::MAX - 3)), - ("255u8", V::U8(255)), - ("255u256", V::U256(U256::from(255u64))), - ("0", V::InferredNum(U256::from(0u64))), - ("0123", V::InferredNum(U256::from(123u64))), - ("0xFF", V::InferredNum(U256::from(0xFFu64))), - ("0xF_F", V::InferredNum(U256::from(0xFFu64))), - ("0xFF__", V::InferredNum(U256::from(0xFFu64))), - ( - "0x12_34__ABCD_FF", - V::InferredNum(U256::from(0x1234ABCDFFu64)), - ), - ("0u64", V::U64(0)), - ("0x0u64", V::U64(0)), - ( - "18446744073709551615", - V::InferredNum(U256::from(18446744073709551615u128)), - ), - ("18446744073709551615u64", V::U64(18446744073709551615)), - ("0u128", V::U128(0)), - ("1_0u8", V::U8(1_0)), - ("10_u8", V::U8(10)), - ("1_000u64", V::U64(1_000)), - ("1_000", V::InferredNum(U256::from(1_000u32))), - ("1_0_0_0u64", V::U64(1_000)), - ("1_000_000u128", V::U128(1_000_000)), - ( - "340282366920938463463374607431768211455u128", - V::U128(340282366920938463463374607431768211455), - ), - ("true", V::Bool(true)), - ("false", V::Bool(false)), - ( - "@0x0", - V::Address(ParsedAddress::Numerical(NumericalAddress::new( - AccountAddress::from_hex_literal("0x0") - .unwrap() - .into_bytes(), - crate::parser::NumberFormat::Hex, - ))), - ), - ( - "@0", - V::Address(ParsedAddress::Numerical(NumericalAddress::new( - AccountAddress::from_hex_literal("0x0") - .unwrap() - .into_bytes(), - crate::parser::NumberFormat::Hex, - ))), - ), - ( - "@0x54afa3526", - V::Address(ParsedAddress::Numerical(NumericalAddress::new( - AccountAddress::from_hex_literal("0x54afa3526") - .unwrap() - .into_bytes(), - crate::parser::NumberFormat::Hex, - ))), - ), - ( - "b\"hello\"", - V::Vector("hello".as_bytes().iter().copied().map(V::U8).collect()), - ), - ("x\"7fff\"", V::Vector(vec![V::U8(0x7f), V::U8(0xff)])), - ("x\"\"", V::Vector(vec![])), - ("x\"00\"", V::Vector(vec![V::U8(0x00)])), - ( - "x\"deadbeef\"", - V::Vector(vec![V::U8(0xde), V::U8(0xad), V::U8(0xbe), V::U8(0xef)]), - ), - ]; - - for (s, expected) in cases { - assert_eq!(&ParsedValue::parse(s).unwrap(), expected) - } - } - - #[test] - fn tests_parse_value_negative() { - /// Test cases for the parser that should always fail. 
- const PARSE_VALUE_NEGATIVE_TEST_CASES: &[&str] = &[ - "-3", - "0u42", - "0u645", - "0u64x", - "0u6 4", - "0u", - "_10", - "_10_u8", - "_10__u8", - "10_u8__", - "0xFF_u8_", - "0xF_u8__", - "0x_F_u8__", - "_", - "__", - "__4", - "_u8", - "5_bool", - "256u8", - "4294967296u32", - "65536u16", - "18446744073709551616u64", - "340282366920938463463374607431768211456u128", - "340282366920938463463374607431768211456340282366920938463463374607431768211456340282366920938463463374607431768211456340282366920938463463374607431768211456u256", - "0xg", - "0x00g0", - "0x", - "0x_", - "", - "@@", - "()", - "x\"ffff", - "x\"a \"", - "x\" \"", - "x\"0g\"", - "x\"0\"", - "garbage", - "true3", - "3false", - "3 false", - "", - "0XFF", - "0X0", - ]; - - for s in PARSE_VALUE_NEGATIVE_TEST_CASES { - assert!( - ParsedValue::<()>::parse(s).is_err(), - "Unexpectedly succeeded in parsing: {}", - s - ) - } - } - - #[test] - fn test_parse_type_negative() { - for s in &[ - "_", - "_::_::_", - "0x1::_", - "0x1::__::_", - "0x1::_::__", - "0x1::_::foo", - "0x1::foo::_", - "0x1::_::_", - "0x1::bar::foo<0x1::_::foo>", - ] { - assert!( - ParsedType::parse(s).is_err(), - "Parsed type {s} but should have failed" - ); - } - } - - #[test] - fn test_parse_struct_negative() { - for s in &[ - "_", - "_::_::_", - "0x1::_", - "0x1::__::_", - "0x1::_::__", - "0x1::_::foo", - "0x1::foo::_", - "0x1::_::_", - "0x1::bar::foo<0x1::_::foo>", - ] { - assert!( - ParsedStructType::parse(s).is_err(), - "Parsed type {s} but should have failed" - ); - } - } - - #[test] - fn test_type_type() { - for s in &[ - "u64", - "bool", - "vector", - "vector>", - "address", - "signer", - "0x1::M::S", - "0x2::M::S_", - "0x3::M_::S", - "0x4::M_::S_", - "0x00000000004::M::S", - "0x1::M::S", - "0x1::M::S<0x2::P::Q>", - "vector<0x1::M::S>", - "vector<0x1::M_::S_>", - "vector>", - "0x1::M::S>", - "0x1::_bar::_BAR", - "0x1::__::__", - "0x1::_bar::_BAR<0x2::_____::______fooo______>", - "0x1::__::__<0x2::_____::______fooo______, 0xff::Bar____::_______foo>", - ] { - assert!(ParsedType::parse(s).is_ok(), "Failed to parse type {}", s); - } - } - - #[test] - fn test_parse_valid_struct_type() { - let valid = vec![ - "0x1::Foo::Foo", - "0x1::Foo_Type::Foo", - "0x1::Foo_::Foo", - "0x1::X_123::X32_", - "0x1::Foo::Foo_Type", - "0x1::Foo::Foo<0x1::ABC::ABC>", - "0x1::Foo::Foo<0x1::ABC::ABC_Type>", - "0x1::Foo::Foo", - "0x1::Foo::Foo", - "0x1::Foo::Foo", - "0x1::Foo::Foo", - "0x1::Foo::Foo", - "0x1::Foo::Foo", - "0x1::Foo::Foo", - "0x1::Foo::Foo
", - "0x1::Foo::Foo", - "0x1::Foo::Foo>", - "0x1::Foo::Foo", - "0x1::Foo::Foo", - "0x1::Foo::Foo", - "0x1::Foo::Foo,address,signer>", - "0x1::Foo::Foo>>", - "0x1::Foo::Foo<0x1::Foo::Struct, 0x1::Foo::Foo>>>>", - "0x1::_bar::_BAR", - "0x1::__::__", - "0x1::_bar::_BAR<0x2::_____::______fooo______>", - "0x1::__::__<0x2::_____::______fooo______, 0xff::Bar____::_______foo>", - ]; - for s in valid { - assert!( - ParsedStructType::parse(s).is_ok(), - "Failed to parse struct {}", - s - ); - } - } - - fn struct_type_gen() -> impl Strategy { - ( - any::(), - any::(), - any::(), - ) - .prop_map(|(address, module, name)| format!("0x{}::{}::{}", address, module, name)) - } - - proptest! { - #[test] - fn test_parse_valid_struct_type_proptest(s in struct_type_gen()) { - prop_assert!(ParsedStructType::parse(&s).is_ok()); - } - - #[test] - fn test_parse_valid_type_struct_only_proptest(s in struct_type_gen()) { - prop_assert!(ParsedStructType::parse(&s).is_ok()); - } - } + ) + .ok()?; + Some((AccountAddress::new(parsed.to_be_bytes()), base)) } diff --git a/external-crates/move/crates/move-command-line-common/src/types.rs b/external-crates/move/crates/move-core-types/src/parsing/types.rs similarity index 94% rename from external-crates/move/crates/move-command-line-common/src/types.rs rename to external-crates/move/crates/move-core-types/src/parsing/types.rs index 442e0ed691629..acdb789d93156 100644 --- a/external-crates/move/crates/move-command-line-common/src/types.rs +++ b/external-crates/move/crates/move-core-types/src/parsing/types.rs @@ -3,14 +3,14 @@ use std::fmt::{self, Display}; -use anyhow::bail; -use move_core_types::{ +use crate::{ account_address::AccountAddress, identifier::{self, Identifier}, language_storage::{ModuleId, StructTag, TypeTag}, }; +use anyhow::bail; -use crate::{address::ParsedAddress, parser::Token}; +use crate::parsing::{address::ParsedAddress, parser::Token}; #[derive(Eq, PartialEq, Debug, Clone, Copy)] pub enum TypeToken { @@ -91,10 +91,10 @@ impl Token for TypeToken { Some(':') => (Self::ColonColon, 2), _ => bail!("unrecognized token: {}", s), }, - '0' if matches!(chars.peek(), Some('x') | Some('X')) => { + '0' if matches!(chars.peek(), Some('x')) => { chars.next().unwrap(); match chars.next() { - Some(c) if c.is_ascii_hexdigit() || c == '_' => { + Some(c) if c.is_ascii_hexdigit() => { // 0x + c + remaining let len = 3 + chars .take_while(|q| char::is_ascii_hexdigit(q) || *q == '_') @@ -106,7 +106,9 @@ impl Token for TypeToken { } c if c.is_ascii_digit() => { // c + remaining - let len = 1 + chars.take_while(char::is_ascii_digit).count(); + let len = 1 + chars + .take_while(|c| c.is_ascii_digit() || *c == '_') + .count(); (Self::AddressIdent, len) } c if c.is_ascii_whitespace() => { diff --git a/external-crates/move/crates/move-command-line-common/src/values.rs b/external-crates/move/crates/move-core-types/src/parsing/values.rs similarity index 99% rename from external-crates/move/crates/move-command-line-common/src/values.rs rename to external-crates/move/crates/move-core-types/src/parsing/values.rs index 03bf0a80ad9f4..951dafe04c11a 100644 --- a/external-crates/move/crates/move-command-line-common/src/values.rs +++ b/external-crates/move/crates/move-core-types/src/parsing/values.rs @@ -1,16 +1,16 @@ // Copyright (c) The Move Contributors // SPDX-License-Identifier: Apache-2.0 -use crate::{ +use crate::parsing::{ address::ParsedAddress, parser::{Parser, Token}, }; -use anyhow::bail; -use move_core_types::{ +use crate::{ account_address::AccountAddress, identifier, 
runtime_value::{MoveStruct, MoveValue}, }; +use anyhow::bail; use std::fmt::{self, Display}; #[derive(Eq, PartialEq, Debug, Clone, Copy)] @@ -39,13 +39,13 @@ pub enum ValueToken { #[derive(Eq, PartialEq, Debug, Clone)] pub enum ParsedValue { Address(ParsedAddress), - InferredNum(move_core_types::u256::U256), + InferredNum(crate::u256::U256), U8(u8), U16(u16), U32(u32), U64(u64), U128(u128), - U256(move_core_types::u256::U256), + U256(crate::u256::U256), Bool(bool), Vector(Vec>), Struct(Vec>), diff --git a/external-crates/move/crates/move-core-types/src/u256.rs b/external-crates/move/crates/move-core-types/src/u256.rs index d47245857df5c..10657683d6eaf 100644 --- a/external-crates/move/crates/move-core-types/src/u256.rs +++ b/external-crates/move/crates/move-core-types/src/u256.rs @@ -308,6 +308,11 @@ impl U256 { Self(PrimitiveU256::from_little_endian(slice)) } + /// U256 from 32 big endian bytes + pub fn from_be_bytes(slice: &[u8; U256_NUM_BYTES]) -> Self { + Self(PrimitiveU256::from_big_endian(slice)) + } + /// U256 to 32 little endian bytes pub fn to_le_bytes(self) -> [u8; U256_NUM_BYTES] { let mut bytes = [0u8; U256_NUM_BYTES]; @@ -315,6 +320,13 @@ impl U256 { bytes } + /// U256 to 32 big endian bytes + pub fn to_be_bytes(self) -> [u8; U256_NUM_BYTES] { + let mut bytes = [0u8; U256_NUM_BYTES]; + self.0.to_big_endian(&mut bytes); + bytes + } + /// Leading zeros of the number pub fn leading_zeros(&self) -> u32 { self.0.leading_zeros() diff --git a/external-crates/move/crates/move-core-types/src/unit_tests/extractor_test.rs b/external-crates/move/crates/move-core-types/src/unit_tests/extractor_test.rs new file mode 100644 index 0000000000000..9d93abf734207 --- /dev/null +++ b/external-crates/move/crates/move-core-types/src/unit_tests/extractor_test.rs @@ -0,0 +1,852 @@ +// Copyright (c) The Move Contributors +// SPDX-License-Identifier: Apache-2.0 + +use std::str::FromStr; + +use crate::{ + account_address::AccountAddress, + annotated_extractor::{Element as E, Extractor}, + annotated_value::{MoveTypeLayout, MoveValue}, + language_storage::TypeTag, + unit_tests::visitor_test::{ + enum_layout_, serialize, struct_layout_, struct_value_, variant_value_, PrintVisitor, + }, +}; + +#[test] +fn struct_() { + let expect = r#" +[0] struct 0x0::foo::Bar { + a: u8, + b: u16, + c: u32, + d: u64, + e: u128, + f: u256, + g: bool, + h: address, + i: signer, + j: vector, + k: struct 0x0::foo::Baz { + l: u8, + }, + m: enum 0x0::foo::Qux { + n { + o: u8, + }, + }, + p: vector, +} +[1] 1: u8 +[1] 2: u16 +[1] 3: u32 +[1] 4: u64 +[1] 5: u128 +[1] 6: u256 +[1] true: bool +[1] 0000000000000000000000000000000000000000000000000000000000000000: address +[1] 0000000000000000000000000000000000000000000000000000000000000000: signer +[1] vector +[2] 7: u8 +[2] 8: u8 +[2] 9: u8 +[1] struct 0x0::foo::Baz { + l: u8, +} +[2] 10: u8 +[1] enum 0x0::foo::Qux { + n { + o: u8, + }, +} +[2] 11: u8 +[1] vector +[2] struct 0x0::foo::Quy { + q: u8, + r: bool, +} +[3] 12: u8 +[3] true: bool +[2] struct 0x0::foo::Quy { + q: u8, + r: bool, +} +[3] 13: u8 +[3] false: bool + "#; + + for path in enumerate_paths(vec![C::Opt(E::Type(&type_("0x0::foo::Bar")))]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_a() { + let expect = r#" +[0] 1: u8 + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("a"), E::Index(0)]), + C::Opt(E::Type(&type_("u8"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_b() { + let expect = r#" +[0] 
2: u16 + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("b"), E::Index(1)]), + C::Opt(E::Type(&type_("u16"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_c() { + let expect = r#" +[0] 3: u32 + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("c"), E::Index(2)]), + C::Opt(E::Type(&type_("u32"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_d() { + let expect = r#" +[0] 4: u64 + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("d"), E::Index(3)]), + C::Opt(E::Type(&type_("u64"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_e() { + let expect = r#" +[0] 5: u128 + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("e"), E::Index(4)]), + C::Opt(E::Type(&type_("u128"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_f() { + let expect = r#" +[0] 6: u256 + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("f"), E::Index(5)]), + C::Opt(E::Type(&type_("u256"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_g() { + let expect = r#" +[0] true: bool + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("g"), E::Index(6)]), + C::Opt(E::Type(&type_("bool"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_h() { + let expect = r#" +[0] 0000000000000000000000000000000000000000000000000000000000000000: address + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("h"), E::Index(7)]), + C::Opt(E::Type(&type_("address"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_i() { + let expect = r#" +[0] 0000000000000000000000000000000000000000000000000000000000000000: signer + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("i"), E::Index(8)]), + C::Opt(E::Type(&type_("signer"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_j() { + let expect = r#" +[0] vector +[1] 7: u8 +[1] 8: u8 +[1] 9: u8 + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("j"), E::Index(9)]), + C::Opt(E::Type(&type_("vector"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_j_0() { + let expect = r#" +[0] 7: u8 + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("j"), E::Index(9)]), + C::Opt(E::Type(&type_("vector"))), + C::Req(vec![E::Index(0)]), + C::Opt(E::Type(&type_("u8"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_j_1() { + let expect = r#" +[0] 8: u8 + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("j"), E::Index(9)]), + C::Opt(E::Type(&type_("vector"))), + C::Req(vec![E::Index(1)]), + C::Opt(E::Type(&type_("u8"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_j_2() { + let expect = r#" +[0] 9: u8 + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("j"), 
E::Index(9)]), + C::Opt(E::Type(&type_("vector"))), + C::Req(vec![E::Index(2)]), + C::Opt(E::Type(&type_("u8"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_k() { + let expect = r#" +[0] struct 0x0::foo::Baz { + l: u8, +} +[1] 10: u8 + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("k"), E::Index(10)]), + C::Opt(E::Type(&type_("0x0::foo::Baz"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_k_l() { + let expect = r#" +[0] 10: u8 + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("k"), E::Index(10)]), + C::Opt(E::Type(&type_("0x0::foo::Baz"))), + C::Req(vec![E::Field("l"), E::Index(0)]), + C::Opt(E::Type(&type_("u8"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_m() { + let expect = r#" +[0] enum 0x0::foo::Qux { + n { + o: u8, + }, +} +[1] 11: u8 + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("m"), E::Index(11)]), + C::Opt(E::Type(&type_("0x0::foo::Qux"))), + C::Opt(E::Variant("n")), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_m_o() { + let expect = r#" +[0] 11: u8 + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("m"), E::Index(11)]), + C::Opt(E::Type(&type_("0x0::foo::Qux"))), + C::Opt(E::Variant("n")), + C::Req(vec![E::Field("o"), E::Index(0)]), + C::Opt(E::Type(&type_("u8"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_p() { + let expect = r#" +[0] vector +[1] struct 0x0::foo::Quy { + q: u8, + r: bool, +} +[2] 12: u8 +[2] true: bool +[1] struct 0x0::foo::Quy { + q: u8, + r: bool, +} +[2] 13: u8 +[2] false: bool + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("p"), E::Index(12)]), + C::Opt(E::Type(&type_("vector<0x0::foo::Quy>"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_p_0() { + let expect = r#" +[0] struct 0x0::foo::Quy { + q: u8, + r: bool, +} +[1] 12: u8 +[1] true: bool + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("p"), E::Index(12)]), + C::Opt(E::Type(&type_("vector<0x0::foo::Quy>"))), + C::Req(vec![E::Index(0)]), + C::Opt(E::Type(&type_("0x0::foo::Quy"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_p_0_q() { + let expect = r#" +[0] 12: u8 + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("p"), E::Index(12)]), + C::Opt(E::Type(&type_("vector<0x0::foo::Quy>"))), + C::Req(vec![E::Index(0)]), + C::Opt(E::Type(&type_("0x0::foo::Quy"))), + C::Req(vec![E::Field("q"), E::Index(0)]), + C::Opt(E::Type(&type_("u8"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_p_0_r() { + let expect = r#" +[0] true: bool + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("p"), E::Index(12)]), + C::Opt(E::Type(&type_("vector<0x0::foo::Quy>"))), + C::Req(vec![E::Index(0)]), + C::Opt(E::Type(&type_("0x0::foo::Quy"))), + C::Req(vec![E::Field("r"), E::Index(1)]), + C::Opt(E::Type(&type_("bool"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_p_1() { + let expect = r#" +[0] struct 0x0::foo::Quy { + q: 
u8, + r: bool, +} +[1] 13: u8 +[1] false: bool + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("p"), E::Index(12)]), + C::Opt(E::Type(&type_("vector<0x0::foo::Quy>"))), + C::Req(vec![E::Index(1)]), + C::Opt(E::Type(&type_("0x0::foo::Quy"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_p_1_q() { + let expect = r#" +[0] 13: u8 + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("p"), E::Index(12)]), + C::Opt(E::Type(&type_("vector<0x0::foo::Quy>"))), + C::Req(vec![E::Index(1)]), + C::Opt(E::Type(&type_("0x0::foo::Quy"))), + C::Req(vec![E::Field("q"), E::Index(0)]), + C::Opt(E::Type(&type_("u8"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn struct_p_1_r() { + let expect = r#" +[0] false: bool + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Bar"))), + C::Req(vec![E::Field("p"), E::Index(12)]), + C::Opt(E::Type(&type_("vector<0x0::foo::Quy>"))), + C::Req(vec![E::Index(1)]), + C::Opt(E::Type(&type_("0x0::foo::Quy"))), + C::Req(vec![E::Field("r"), E::Index(1)]), + C::Opt(E::Type(&type_("bool"))), + ]) { + assert_path(test_struct(), path, expect); + } +} + +#[test] +fn vector_() { + let expect = r#" +[0] vector +[1] struct 0x0::foo::Quy { + q: u8, + r: bool, +} +[2] 12: u8 +[2] true: bool +[1] struct 0x0::foo::Quy { + q: u8, + r: bool, +} +[2] 13: u8 +[2] false: bool + "#; + + for path in enumerate_paths(vec![C::Opt(E::Type(&type_("vector<0x0::foo::Quy>")))]) { + assert_path(test_vector(), path, expect); + } +} + +#[test] +fn vector_0() { + let expect = r#" +[0] struct 0x0::foo::Quy { + q: u8, + r: bool, +} +[1] 12: u8 +[1] true: bool + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("vector<0x0::foo::Quy>"))), + C::Req(vec![E::Index(0)]), + C::Opt(E::Type(&type_("0x0::foo::Quy"))), + ]) { + assert_path(test_vector(), path, expect); + } +} + +#[test] +fn vector_1() { + let expect = r#" +[0] struct 0x0::foo::Quy { + q: u8, + r: bool, +} +[1] 13: u8 +[1] false: bool + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("vector<0x0::foo::Quy>"))), + C::Req(vec![E::Index(1)]), + C::Opt(E::Type(&type_("0x0::foo::Quy"))), + ]) { + assert_path(test_vector(), path, expect); + } +} + +#[test] +fn enum_() { + let expect = r#" +[0] enum 0x0::foo::Qux { + n { + o: u8, + }, +} +[1] 11: u8 + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Qux"))), + C::Opt(E::Variant("n")), + ]) { + assert_path(test_enum(), path, expect); + } +} + +#[test] +fn enum_o() { + let expect = r#" +[0] 11: u8 + "#; + + for path in enumerate_paths(vec![ + C::Opt(E::Type(&type_("0x0::foo::Qux"))), + C::Opt(E::Variant("n")), + C::Req(vec![E::Field("o"), E::Index(0)]), + C::Opt(E::Type(&type_("u8"))), + ]) { + assert_path(test_enum(), path, expect); + } +} + +#[test] +fn field_not_found() { + for path in [ + vec![E::Field("z")], + // Trying to access a field on a primitive + vec![E::Field("a"), E::Field("z")], + // Nested field doesn't exist + vec![E::Field("k"), E::Field("z")], + // Nested field on an enum (that doesn't exist) + vec![E::Field("m"), E::Field("z")], + // Trying to access a field on a vector + vec![E::Field("p"), E::Field("z")], + // Nested field on a struct in a vector + vec![E::Field("p"), E::Index(0), E::Field("z")], + ] { + assert_no_path(test_struct(), path); + } +} + +#[test] +fn index_out_of_bounds() { + for path in [ + // Positional access of 
field, out of bounds + vec![E::Index(1000)], + // Trying to access index on a primitive + vec![E::Field("a"), E::Index(1000)], + // Out of bounds on primitive vector + vec![E::Field("j"), E::Index(1000)], + // Out of bounds field on nested struct + vec![E::Field("k"), E::Index(1000)], + // Out of bounds field on nested enum + vec![E::Field("m"), E::Index(1000)], + // Out of bounds field on struct vector + vec![E::Field("p"), E::Index(1000)], + // Out of bounds field on struct in vector + vec![E::Field("p"), E::Index(0), E::Index(1000)], + ] { + assert_no_path(test_struct(), path); + } +} + +#[test] +fn type_mismatch() { + for path in [ + // Wrong root type + vec![E::Type(&type_("0x0::foo::Baz"))], + // Wrong primitive type + vec![E::Field("a"), E::Type(&type_("u16"))], + // Wrong nested struct + vec![E::Field("k"), E::Type(&type_("0x0::foo::Bar"))], + // Wrong type with further nesting + vec![ + E::Field("k"), + E::Type(&type_("0x0::foo::Bar")), + E::Field("l"), + ], + // Wrong primitive vector + vec![E::Field("j"), E::Type(&type_("vector"))], + vec![E::Field("j"), E::Type(&type_("u8"))], + // Wrong enum type + vec![E::Field("m"), E::Type(&type_("0x0::foo::Bar"))], + // Wrong type nested inside enum + vec![E::Field("m"), E::Field("o"), E::Type(&type_("u16"))], + ] { + assert_no_path(test_struct(), path); + } +} + +#[test] +fn variant_not_found() { + assert_no_path(test_enum(), vec![E::Variant("z")]); + assert_no_path(test_struct(), vec![E::Field("m"), E::Variant("z")]); +} + +/// Components are used to generate paths. Each component offers a number of options for the +/// element that goes in the same position in the generated path. +enum C<'p> { + /// This element is optional -- paths are geneated with and without this element at the + /// component's position. + Opt(E<'p>), + + /// This element is required, and is picked from the provided list. + Req(Vec>), +} + +/// Generate a list of paths as a cartesian product of the provided components. 
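// For example, the component list
//
//     vec![C::Opt(a), C::Req(vec![b, c])]
//
// expands to four paths: [b], [c], [a, b], [a, c]. Each generated path takes exactly
// one choice from every required component and either includes or omits each optional
// element, which is why the tests above can assert the same expectation for every
// spelling of a path (with or without its type and variant annotations).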
+fn enumerate_paths(components: Vec>) -> Vec>> { + let mut paths = vec![vec![]]; + + for component in components { + let mut new_paths = vec![]; + + for path in paths { + match &component { + C::Opt(element) => { + new_paths.push(path.clone()); + let mut path = path.clone(); + path.push(element.clone()); + new_paths.push(path); + } + C::Req(elements) => { + new_paths.extend(elements.iter().map(|e| { + let mut path = path.clone(); + path.push(e.clone()); + path + })); + } + } + } + + paths = new_paths; + } + + paths +} + +fn assert_path((value, layout): (MoveValue, MoveTypeLayout), path: Vec>, expect: &str) { + let bytes = serialize(value); + let mut printer = PrintVisitor::default(); + + assert!( + Extractor::deserialize_value(&bytes, &layout, &mut printer, path.clone()) + .unwrap() + .is_some(), + "Failed to extract value {path:?}", + ); + + assert_eq!( + printer.output.trim(), + expect.trim(), + "Failed to match value at {path:?}" + ); +} + +fn assert_no_path((value, layout): (MoveValue, MoveTypeLayout), path: Vec>) { + let bytes = serialize(value); + let mut printer = PrintVisitor::default(); + + assert!( + Extractor::deserialize_value(&bytes, &layout, &mut printer, path.clone()) + .unwrap() + .is_none(), + "Expected not to find something at {path:?}", + ); + + assert!( + printer.output.is_empty(), + "Expected not to delegate to the inner visitor for {path:?}" + ); +} + +fn type_(t: &str) -> TypeTag { + TypeTag::from_str(t).unwrap() +} + +fn test_struct() -> (MoveValue, MoveTypeLayout) { + use MoveTypeLayout as T; + use MoveValue as V; + + let (vector, vector_layout) = test_vector(); + let (variant, enum_layout) = test_enum(); + + let value = struct_value_( + "0x0::foo::Bar", + vec![ + ("a", V::U8(1)), + ("b", V::U16(2)), + ("c", V::U32(3)), + ("d", V::U64(4)), + ("e", V::U128(5)), + ("f", V::U256(6u32.into())), + ("g", V::Bool(true)), + ("h", V::Address(AccountAddress::ZERO)), + ("i", V::Signer(AccountAddress::ZERO)), + ("j", V::Vector(vec![V::U8(7), V::U8(8), V::U8(9)])), + ("k", struct_value_("0x0::foo::Baz", vec![("l", V::U8(10))])), + ("m", variant), + ("p", vector), + ], + ); + + let layout = struct_layout_( + "0x0::foo::Bar", + vec![ + ("a", T::U8), + ("b", T::U16), + ("c", T::U32), + ("d", T::U64), + ("e", T::U128), + ("f", T::U256), + ("g", T::Bool), + ("h", T::Address), + ("i", T::Signer), + ("j", T::Vector(Box::new(T::U8))), + ("k", struct_layout_("0x0::foo::Baz", vec![("l", T::U8)])), + ("m", enum_layout), + ("p", vector_layout), + ], + ); + + (value, layout) +} + +fn test_enum() -> (MoveValue, MoveTypeLayout) { + use MoveTypeLayout as T; + use MoveValue as V; + + let value = variant_value_("0x0::foo::Qux", "n", 0, vec![("o", V::U8(11))]); + let layout = enum_layout_("0x0::foo::Qux", vec![("n", vec![("o", T::U8)])]); + + (value, layout) +} + +fn test_vector() -> (MoveValue, MoveTypeLayout) { + use MoveTypeLayout as T; + use MoveValue as V; + + let value = V::Vector(vec![ + struct_value_( + "0x0::foo::Quy", + vec![("q", V::U8(12)), ("r", V::Bool(true))], + ), + struct_value_( + "0x0::foo::Quy", + vec![("q", V::U8(13)), ("r", V::Bool(false))], + ), + ]); + + let layout = T::Vector(Box::new(struct_layout_( + "0x0::foo::Quy", + vec![("q", T::U8), ("r", T::Bool)], + ))); + + (value, layout) +} diff --git a/external-crates/move/crates/move-core-types/src/unit_tests/mod.rs b/external-crates/move/crates/move-core-types/src/unit_tests/mod.rs index 6788ebac27bde..64e54f71b241d 100644 --- a/external-crates/move/crates/move-core-types/src/unit_tests/mod.rs +++ 
b/external-crates/move/crates/move-core-types/src/unit_tests/mod.rs @@ -2,7 +2,9 @@ // Copyright (c) The Move Contributors // SPDX-License-Identifier: Apache-2.0 +mod extractor_test; mod identifier_test; mod language_storage_test; +mod parsing_test; mod value_test; mod visitor_test; diff --git a/external-crates/move/crates/move-core-types/src/unit_tests/parsing_test.rs b/external-crates/move/crates/move-core-types/src/unit_tests/parsing_test.rs new file mode 100644 index 0000000000000..2d69de0be29c6 --- /dev/null +++ b/external-crates/move/crates/move-core-types/src/unit_tests/parsing_test.rs @@ -0,0 +1,678 @@ +use crate::{ + account_address::AccountAddress, + identifier::Identifier, + language_storage::{ModuleId, StructTag, TypeTag}, + parsing::{ + address::{NumericalAddress, ParsedAddress}, + parser::parse, + types::{ParsedFqName, ParsedType, TypeToken}, + values::ParsedValue, + }, + u256::U256, +}; +use anyhow::bail; +use num::BigUint; +use proptest::{prelude::*, proptest}; +use std::str::FromStr; + +const VALID_ADDRS: &[&str] = &[ + "0x0", + "0x1", + "1", + "123", + "0x123", + "0x1234567890abcdef", + "100_00_00", + "0x0_0_0_0", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0_00000_0000000000000000000000000000000000000000000000000_000000000", + "000000000000000000000000000000000000000000000000000000000000000000000000000000", + "00_0000000000000000000000000000000000000000000000000000000_00000000000000000_0000", +]; + +const INVALID_ADDRS: &[&str] = &[ + "_x", + "0x", + "_0x0", + "_0", + "0x_", + "0x_00", + "+0x0", + "+0", + "0xg", + "0x0g", + "0X0", + "_0x0", + "_0x0_", + "_0", + "_0_", + "_00_", + "_0_0_", + "0x_00", + "0x00000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000_0000000", + "0x_0_00000_0000000000000000000000000000000000000000000000000_000000000", + "0000000000000000000000000000000000000000000000000000000000000000000000000000000", + "000_0000000000000000000000000000000000000000000000000000000_00000000000000000_0000", +]; + +#[allow(clippy::unreadable_literal)] +#[test] +fn tests_parse_value_positive() { + use ParsedValue as V; + let cases: &[(&str, V)] = &[ + (" 0u8", V::U8(0)), + ("0u8", V::U8(0)), + ("0xF_Fu8", V::U8(255)), + ("0xF__FF__Eu16", V::U16(u16::MAX - 1)), + ("0xFFF_FF__FF_Cu32", V::U32(u32::MAX - 3)), + ("255u8", V::U8(255)), + ("255u256", V::U256(U256::from(255u64))), + ("0", V::InferredNum(U256::from(0u64))), + ("0123", V::InferredNum(U256::from(123u64))), + ("0xFF", V::InferredNum(U256::from(0xFFu64))), + ("0xF_F", V::InferredNum(U256::from(0xFFu64))), + ("0xFF__", V::InferredNum(U256::from(0xFFu64))), + ( + "0x12_34__ABCD_FF", + V::InferredNum(U256::from(0x1234ABCDFFu64)), + ), + ("0u64", V::U64(0)), + ("0x0u64", V::U64(0)), + ( + "18446744073709551615", + V::InferredNum(U256::from(18446744073709551615u128)), + ), + ("18446744073709551615u64", V::U64(18446744073709551615)), + ("0u128", V::U128(0)), + ("1_0u8", V::U8(1_0)), + ("10_u8", V::U8(10)), + ("1_000u64", V::U64(1_000)), + ("1_000", V::InferredNum(U256::from(1_000u32))), + ("1_0_0_0u64", V::U64(1_000)), + ("1_000_000u128", V::U128(1_000_000)), + ( + "340282366920938463463374607431768211455u128", + V::U128(340282366920938463463374607431768211455), + ), + ("true", V::Bool(true)), + ("false", V::Bool(false)), + ( + "@0x0", + V::Address(ParsedAddress::Numerical(NumericalAddress::new( + AccountAddress::from_hex_literal("0x0") + .unwrap() + .into_bytes(), + crate::parsing::parser::NumberFormat::Hex, + 
))), + ), + ( + "@0", + V::Address(ParsedAddress::Numerical(NumericalAddress::new( + AccountAddress::from_hex_literal("0x0") + .unwrap() + .into_bytes(), + crate::parsing::parser::NumberFormat::Hex, + ))), + ), + ( + "@0x54afa3526", + V::Address(ParsedAddress::Numerical(NumericalAddress::new( + AccountAddress::from_hex_literal("0x54afa3526") + .unwrap() + .into_bytes(), + crate::parsing::parser::NumberFormat::Hex, + ))), + ), + ( + "b\"hello\"", + V::Vector("hello".as_bytes().iter().copied().map(V::U8).collect()), + ), + ("x\"7fff\"", V::Vector(vec![V::U8(0x7f), V::U8(0xff)])), + ("x\"\"", V::Vector(vec![])), + ("x\"00\"", V::Vector(vec![V::U8(0x00)])), + ( + "x\"deadbeef\"", + V::Vector(vec![V::U8(0xde), V::U8(0xad), V::U8(0xbe), V::U8(0xef)]), + ), + ]; + + for (s, expected) in cases { + assert_eq!(&ParsedValue::parse(s).unwrap(), expected) + } +} + +#[test] +fn tests_parse_value_negative() { + /// Test cases for the parser that should always fail. + const PARSE_VALUE_NEGATIVE_TEST_CASES: &[&str] = &[ + "-3", + "0u42", + "0u645", + "0u64x", + "0u6 4", + "0u", + "_10", + "_10_u8", + "_10__u8", + "10_u8__", + "0xFF_u8_", + "0xF_u8__", + "0x_F_u8__", + "_", + "__", + "__4", + "_u8", + "5_bool", + "256u8", + "4294967296u32", + "65536u16", + "18446744073709551616u64", + "340282366920938463463374607431768211456u128", + "340282366920938463463374607431768211456340282366920938463463374607431768211456340282366920938463463374607431768211456340282366920938463463374607431768211456u256", + "0xg", + "0x00g0", + "0x", + "0x_", + "", + "@@", + "()", + "x\"ffff", + "x\"a \"", + "x\" \"", + "x\"0g\"", + "x\"0\"", + "garbage", + "true3", + "3false", + "3 false", + "", + "0XFF", + "0X0", + ]; + + for s in PARSE_VALUE_NEGATIVE_TEST_CASES { + assert!( + ParsedValue::<()>::parse(s).is_err(), + "Unexpectedly succeeded in parsing: {}", + s + ) + } +} + +#[test] +fn test_parse_struct_negative() { + for s in &[ + "_", + "_::_::_", + "0x1::_", + "0x1::__::_", + "0x1::_::__", + "0x1::_::foo", + "0x1::foo::_", + "0x1::_::_", + "0x1::bar::foo<0x1::_::foo>", + "0x1::bar::bar::foo", + "0x1::Foo::Foo<", + "0x1::Foo::Foo<0x1::ABC::ABC", + "0x1::Foo::Foo<0x1::ABC::ABC::>", + "0x1::Foo::Foo<0x1::ABC::ABC::A>", + "0x1::Foo::Foo<>", + "0x1::Foo::Foo<,>", + "0x1::Foo::Foo<,", + "0x1::Foo::Foo,>", + "0x1::Foo::Foo>", + "0x1::Foo::Foo,", + "_0x0_0::a::a", + "_0x_00::a::a", + "_0_0::a::a", + ] { + assert!( + TypeTag::from_str(s).is_err(), + "Parsed type {s} but should have failed" + ); + } +} + +#[test] +fn test_type_type() { + for s in &[ + "u8", + "u16", + "u32", + "u64", + "u128", + "u256", + "bool", + "vector", + "vector>", + "address", + "signer", + "0x1::M::S", + "0x2::M::S_", + "0x3::M_::S", + "0x4::M_::S_", + "0x00000000004::M::S", + "0x1::M::S", + "0x1::M::S<0x2::P::Q>", + "vector<0x1::M::S>", + "vector<0x1::M_::S_>", + "vector>", + "0x1::M::S>", + "0x1::_bar::_BAR", + "0x1::__::__", + "0x1::_bar::_BAR<0x2::_____::______fooo______>", + "0x1::__::__<0x2::_____::______fooo______, 0xff::Bar____::_______foo>", + "0x0_0::a::a", + "0_0::a::a", + ] { + assert!(TypeTag::from_str(s).is_ok(), "Failed to parse type {}", s); + } + + for valid_addr in VALID_ADDRS { + assert!( + TypeTag::from_str(&format!("{valid_addr}::a::a")).is_ok(), + "Failed to parse type {}::a::a", + valid_addr + ); + } + + for invalid_addr in INVALID_ADDRS { + assert!( + TypeTag::from_str(&format!("{invalid_addr}::a::a")).is_err(), + "Parse type {}::a::a but should have failed", + invalid_addr + ); + } +} + +#[test] +fn test_parse_valid_struct_type() { + let 
valid = vec![ + "0x1::Foo::Foo", + "0x1::Foo_Type::Foo", + "0x1::Foo_::Foo", + "0x1::X_123::X32_", + "0x1::Foo::Foo_Type", + "0x1::Foo::Foo<0x1::ABC::ABC>", + "0x1::Foo::Foo<0x1::ABC::ABC_Type>", + "0x1::Foo::Foo", + "0x1::Foo::Foo", + "0x1::Foo::Foo", + "0x1::Foo::Foo", + "0x1::Foo::Foo", + "0x1::Foo::Foo", + "0x1::Foo::Foo", + "0x1::Foo::Foo
", + "0x1::Foo::Foo", + "0x1::Foo::Foo>", + "0x1::Foo::Foo", + "0x1::Foo::Foo", + "0x1::Foo::Foo", + "0x1::Foo::Foo,address,signer>", + "0x1::Foo::Foo>>", + "0x1::Foo::Foo<0x1::Foo::Struct, 0x1::Foo::Foo>>>>", + "0x1::_bar::_BAR", + "0x1::__::__", + "0x1::_bar::_BAR<0x2::_____::______fooo______>", + "0x1::__::__<0x2::_____::______fooo______, 0xff::Bar____::_______foo>", + ]; + for s in valid { + assert!( + StructTag::from_str(s).is_ok(), + "Failed to parse struct {}", + s + ); + } +} + +#[test] +fn test_parse_type_list() { + let valid_with_trails = &[ + "", + "", + ",>", + ]; + let valid_no_trails = &[ + "", + "", + ">", + ]; + let invalid = &[ + "<>", + "<,>", + "", + "<,u64>", + "<,u64,>", + ",", + "", + "<", + "<<", + "><", + ">,<", + ">,", + ",>", + ",,", + ">>", + "", + "u64,>", + "u64, u64,>", + "u64, u64,", + "u64, u64", + "u64 u64", + "", + "", + "u64 u64,", + "", + ",", + ",,>", + ]; + + for t in valid_no_trails.iter().chain(valid_with_trails.iter()) { + assert!(parse_type_tags(t, true).is_ok()); + } + + for t in valid_no_trails { + assert!(parse_type_tags(t, false).is_ok()); + } + + for t in valid_with_trails { + assert!(parse_type_tags(t, false).is_err()); + } + + for t in invalid { + assert!(parse_type_tags(t, true).is_err(), "parsed type {}", t); + assert!(parse_type_tags(t, false).is_err(), "parsed type {}", t); + } +} + +fn struct_type_gen0() -> impl Strategy { + ( + any::(), + any::(), + any::(), + ) + .prop_map(|(address, module, name)| format!("0x{}::{}::{}", address, module, name)) +} + +fn struct_type_gen1() -> impl Strategy { + (any::(), any::(), any::()) + .prop_map(|(address, module, name)| format!("{}::{}::{}", address, module, name)) +} + +fn module_id_gen0() -> impl Strategy { + (any::(), any::()) + .prop_map(|(address, module)| format!("0x{address}::{module}")) +} + +fn module_id_gen1() -> impl Strategy { + (any::(), any::()) + .prop_map(|(address, module)| format!("{address}::{module}")) +} + +fn fq_id_gen0() -> impl Strategy { + ( + any::(), + any::(), + any::(), + ) + .prop_map(|(address, module, name)| format!("0x{address}::{module}::{name}")) +} + +fn fq_id_gen1() -> impl Strategy { + (any::(), any::(), any::()) + .prop_map(|(address, module, name)| format!("{address}::{module}::{name}")) +} + +fn parse_type_tags(s: &str, allow_trailing_delim: bool) -> anyhow::Result> { + parse(s, |parser| { + parser.advance(TypeToken::Lt)?; + let parsed = parser.parse_list( + |parser| parser.parse_type(), + TypeToken::Comma, + TypeToken::Gt, + allow_trailing_delim, + )?; + parser.advance(TypeToken::Gt)?; + if parsed.is_empty() { + bail!("expected at least one type argument") + } + Ok(parsed) + }) +} + +#[test] +fn address_parsing() { + for valid_addr in VALID_ADDRS { + assert!( + ParsedAddress::parse(valid_addr).is_ok(), + "parsed address {}", + valid_addr + ); + } + + for invalid_addr in INVALID_ADDRS { + assert!(ParsedAddress::parse(invalid_addr).is_err()); + } +} + +proptest! 
{ + #[test] + fn parse_type_tag_list(t in struct_type_gen0(), args in proptest::collection::vec(struct_type_gen0(), 1..=100)) { + let s_no_trail = format!("<{}>", args.join(",")); + let s_with_trail = format!("<{},>", args.join(",")); + let s_no_trail_no_trail = parse_type_tags(&s_no_trail, false); + let s_no_trail_allow_trail = parse_type_tags(&s_no_trail, true); + let s_with_trail_no_trail = parse_type_tags(&s_with_trail, false); + let s_with_trail_allow_trail = parse_type_tags(&s_with_trail, true); + prop_assert!(s_no_trail_no_trail.is_ok()); + prop_assert!(s_no_trail_allow_trail.is_ok()); + prop_assert!(s_with_trail_no_trail.is_err()); + prop_assert!(s_with_trail_allow_trail.is_ok()); + let t_with_trail = format!("{t}{s_no_trail}"); + let t_no_trail = format!("{t}{s_with_trail}"); + let t_with_trail = TypeTag::from_str(&t_with_trail); + let t_no_trail = TypeTag::from_str(&t_no_trail); + prop_assert!(t_with_trail.is_ok()); + prop_assert!(t_no_trail.is_ok()); + prop_assert_eq!(t_with_trail.unwrap(), t_no_trail.unwrap()); + } + + #[test] + fn test_parse_valid_struct_type_proptest0(s in struct_type_gen0(), x in r#"(::foo)[^a-zA-Z0-9_\s]+"#) { + prop_assert!(StructTag::from_str(&s).is_ok()); + prop_assert!(TypeTag::from_str(&s).is_ok()); + prop_assert!(ParsedFqName::parse(&s).is_ok()); + prop_assert!(ModuleId::from_str(&s).is_err()); + prop_assert!(ParsedAddress::parse(&s).is_err()); + + // Add remainder string + let s = s + &x; + prop_assert!(StructTag::from_str(&s).is_err()); + prop_assert!(TypeTag::from_str(&s).is_err()); + prop_assert!(ParsedFqName::parse(&s).is_err()); + prop_assert!(ModuleId::from_str(&s).is_err()); + prop_assert!(ParsedAddress::parse(&s).is_err()); + + } + + #[test] + fn test_parse_valid_struct_type_proptest1(s in struct_type_gen1(), x in r#"(::foo)[^a-zA-Z0-9_\s]+"#) { + prop_assert!(StructTag::from_str(&s).is_ok()); + prop_assert!(TypeTag::from_str(&s).is_ok()); + prop_assert!(ParsedFqName::parse(&s).is_ok()); + prop_assert!(ModuleId::from_str(&s).is_err()); + prop_assert!(ParsedAddress::parse(&s).is_err()); + // add remainder string + let s = s + &x; + prop_assert!(StructTag::from_str(&s).is_err()); + prop_assert!(TypeTag::from_str(&s).is_err()); + prop_assert!(ParsedFqName::parse(&s).is_err()); + prop_assert!(ModuleId::from_str(&s).is_err()); + prop_assert!(ParsedAddress::parse(&s).is_err()); + } + + #[test] + fn test_parse_valid_module_id_proptest0(s in module_id_gen0(), x in r#"[^a-zA-Z0-9_\s]+"#) { + prop_assert!(ModuleId::from_str(&s).is_ok()); + prop_assert!(StructTag::from_str(&s).is_err()); + prop_assert!(TypeTag::from_str(&s).is_err()); + prop_assert!(ParsedFqName::parse(&s).is_err()); + prop_assert!(ParsedAddress::parse(&s).is_err()); + // add remainder string + let s = s + &x; + prop_assert!(ModuleId::from_str(&s).is_err()); + prop_assert!(StructTag::from_str(&s).is_err()); + prop_assert!(TypeTag::from_str(&s).is_err()); + prop_assert!(ParsedFqName::parse(&s).is_err()); + prop_assert!(ParsedAddress::parse(&s).is_err()); + } + + #[test] + fn test_parse_valid_module_id_proptest1(s in module_id_gen1(), x in r#"[^a-zA-Z0-9_\s]+"#) { + prop_assert!(ModuleId::from_str(&s).is_ok()); + prop_assert!(StructTag::from_str(&s).is_err()); + prop_assert!(TypeTag::from_str(&s).is_err()); + prop_assert!(ParsedFqName::parse(&s).is_err()); + prop_assert!(ParsedAddress::parse(&s).is_err()); + // add remainder String + let s = s + &x; + prop_assert!(ModuleId::from_str(&s).is_err()); + prop_assert!(StructTag::from_str(&s).is_err()); + 
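        // The remainder `x` is drawn from `[^a-zA-Z0-9_\s]+`, so it cannot form part of a
        // valid identifier or address. Every parser is therefore expected to reject the
        // extended string; this catches implementations that accept a valid prefix and
        // silently ignore trailing input.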
prop_assert!(TypeTag::from_str(&s).is_err()); + prop_assert!(ParsedFqName::parse(&s).is_err()); + prop_assert!(ParsedAddress::parse(&s).is_err()); + + } + + #[test] + fn test_parse_valid_fq_id_proptest0(s in fq_id_gen0(), x in r#"[^a-zA-Z0-9_\s]+"#) { + prop_assert!(ParsedFqName::parse(&s).is_ok()); + prop_assert!(StructTag::from_str(&s).is_ok()); + prop_assert!(TypeTag::from_str(&s).is_ok()); + prop_assert!(ModuleId::from_str(&s).is_err()); + prop_assert!(ParsedAddress::parse(&s).is_err()); + // add remainder string + let s = s + &x; + prop_assert!(ParsedFqName::parse(&s).is_err()); + prop_assert!(StructTag::from_str(&s).is_err()); + prop_assert!(TypeTag::from_str(&s).is_err()); + prop_assert!(ModuleId::from_str(&s).is_err()); + prop_assert!(ParsedAddress::parse(&s).is_err()); + } + + #[test] + fn test_parse_valid_fq_id_proptest1(s in fq_id_gen1(), x in r#"[^a-zA-Z0-9_\s]+"#) { + prop_assert!(ParsedFqName::parse(&s).is_ok()); + prop_assert!(StructTag::from_str(&s).is_ok()); + prop_assert!(TypeTag::from_str(&s).is_ok()); + prop_assert!(ModuleId::from_str(&s).is_err()); + prop_assert!(ParsedAddress::parse(&s).is_err()); + let s = s + &x; + prop_assert!(ParsedFqName::parse(&s).is_err()); + prop_assert!(StructTag::from_str(&s).is_err()); + prop_assert!(TypeTag::from_str(&s).is_err()); + prop_assert!(ModuleId::from_str(&s).is_err()); + prop_assert!(ParsedAddress::parse(&s).is_err()); + } + + #[test] + fn test_parse_valid_numeric_address(s in "[0-9]{64}", x in r#"[^a-zA-Z0-9_\s]+"#) { + prop_assert!(AccountAddress::from_str(&s).is_ok()); + prop_assert!(ParsedAddress::parse(&s).is_ok()); + prop_assert!(ParsedFqName::parse(&s).is_err()); + prop_assert!(ModuleId::from_str(&s).is_err()); + prop_assert!(StructTag::from_str(&s).is_err()); + prop_assert!(TypeTag::from_str(&s).is_err()); + // add remainder string + let s = s + &x; + prop_assert!(AccountAddress::from_str(&s).is_err()); + prop_assert!(ParsedAddress::parse(&s).is_err()); + prop_assert!(ParsedFqName::parse(&s).is_err()); + prop_assert!(ModuleId::from_str(&s).is_err()); + prop_assert!(StructTag::from_str(&s).is_err()); + prop_assert!(TypeTag::from_str(&s).is_err()); + } + + #[test] + fn decimal_parse_parity(s in "[0-9]{64}") { + let bigint_parsed = { + let bytes = BigUint::parse_bytes(s.as_bytes(), 10).unwrap().to_bytes_be(); + let mut result = [0u8; AccountAddress::LENGTH]; + result[(AccountAddress::LENGTH - bytes.len())..].clone_from_slice(&bytes); + result + }; + let u256_parsed = U256::from_str(&s).unwrap(); + prop_assert_eq!(bigint_parsed, u256_parsed.to_be_bytes(), "Parsed addresses do not match: {}", s); + } + + #[test] + fn hex_parse_parity(s in "0x[0-9a-fA-F]{1,64}") { + let bigint_parsed = { + let bytes = BigUint::parse_bytes(s[2..].as_bytes(), 16).unwrap().to_bytes_be(); + let mut result = [0u8; AccountAddress::LENGTH]; + result[(AccountAddress::LENGTH - bytes.len())..].clone_from_slice(&bytes); + result + }; + let addr_parsed = AccountAddress::from_hex_literal(&s).unwrap().into_bytes(); + let u256_parsed = AccountAddress::new(U256::from_str_radix(&s[2..], 16).unwrap().to_be_bytes()).into_bytes(); + prop_assert_eq!(bigint_parsed, addr_parsed, "Parsed addresses do not match: {}", s); + prop_assert_eq!(addr_parsed, u256_parsed, "Parsed addresses do not match: {}", s); + } + + #[test] + fn test_parse_different_length_numeric_addresses(s in "[0-9]{1,63}", x in r#"[^a-zA-Z0-9_\s]+"#) { + prop_assert!(AccountAddress::from_str(&s).is_err()); + prop_assert!(ParsedAddress::parse(&s).is_ok()); + 
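        // A decimal numeral shorter than 64 digits is rejected by `AccountAddress::from_str`
        // (contrast with the fixed 64-digit case in `test_parse_valid_numeric_address`
        // above), but the more lenient `ParsedAddress` parser still accepts it as a
        // numerical address.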
prop_assert!(ParsedFqName::parse(&s).is_err()); + prop_assert!(ModuleId::from_str(&s).is_err()); + prop_assert!(StructTag::from_str(&s).is_err()); + prop_assert!(TypeTag::from_str(&s).is_err()); + // add remainder string + let s = s + &x; + prop_assert!(AccountAddress::from_str(&s).is_err()); + prop_assert!(ParsedAddress::parse(&s).is_err()); + prop_assert!(ParsedFqName::parse(&s).is_err()); + prop_assert!(ModuleId::from_str(&s).is_err()); + prop_assert!(StructTag::from_str(&s).is_err()); + prop_assert!(TypeTag::from_str(&s).is_err()); + } + + #[test] + fn test_parse_valid_hex_address(s in "0x[0-9a-fA-F]{64}", x in r#"[^a-zA-Z0-9_\s]+"#) { + prop_assert!(AccountAddress::from_str(&s).is_ok()); + prop_assert!(ParsedAddress::parse(&s).is_ok()); + prop_assert!(ParsedFqName::parse(&s).is_err()); + prop_assert!(ModuleId::from_str(&s).is_err()); + prop_assert!(StructTag::from_str(&s).is_err()); + prop_assert!(TypeTag::from_str(&s).is_err()); + // add remainder string + let s = s + &x; + prop_assert!(AccountAddress::from_str(&s).is_err()); + prop_assert!(ParsedAddress::parse(&s).is_err()); + prop_assert!(ParsedFqName::parse(&s).is_err()); + prop_assert!(ModuleId::from_str(&s).is_err()); + prop_assert!(StructTag::from_str(&s).is_err()); + prop_assert!(TypeTag::from_str(&s).is_err()); + } + + #[test] + fn test_parse_invalid_hex_address(s in "[0-9]{63}[a-fA-F]{1}", x in r#"[^a-zA-Z0-9_\s]+"#) { + prop_assert!(AccountAddress::from_str(&s).is_ok()); + prop_assert!(ParsedAddress::parse(&s).is_err()); + prop_assert!(ParsedFqName::parse(&s).is_err()); + prop_assert!(ModuleId::from_str(&s).is_err()); + prop_assert!(StructTag::from_str(&s).is_err()); + prop_assert!(TypeTag::from_str(&s).is_err()); + // add remainder string + let s = s + &x; + prop_assert!(AccountAddress::from_str(&s).is_err()); + prop_assert!(ParsedAddress::parse(&s).is_err()); + prop_assert!(ParsedFqName::parse(&s).is_err()); + prop_assert!(ModuleId::from_str(&s).is_err()); + prop_assert!(StructTag::from_str(&s).is_err()); + prop_assert!(TypeTag::from_str(&s).is_err()); + } +} diff --git a/external-crates/move/crates/move-core-types/src/unit_tests/visitor_test.rs b/external-crates/move/crates/move-core-types/src/unit_tests/visitor_test.rs index 7d0c06b8696af..3d64ecb1d598e 100644 --- a/external-crates/move/crates/move-core-types/src/unit_tests/visitor_test.rs +++ b/external-crates/move/crates/move-core-types/src/unit_tests/visitor_test.rs @@ -19,125 +19,284 @@ use crate::{ VARIANT_COUNT_MAX, }; -#[test] -fn traversal() { - use MoveTypeLayout as T; - use MoveValue as V; +#[derive(Default)] +pub(crate) struct CountingTraversal(usize); - #[derive(Default)] - struct CountingTraversal(usize); +#[derive(Default)] +pub(crate) struct PrintVisitor { + depth: usize, + pub output: String, +} - impl<'b, 'l> Traversal<'b, 'l> for CountingTraversal { - type Error = annotated_visitor::Error; +impl<'b, 'l> Traversal<'b, 'l> for CountingTraversal { + type Error = annotated_visitor::Error; - fn traverse_u8( - &mut self, - _driver: &ValueDriver<'_, 'b, 'l>, - _value: u8, - ) -> Result<(), Self::Error> { - self.0 += 1; - Ok(()) - } + fn traverse_u8( + &mut self, + _driver: &ValueDriver<'_, 'b, 'l>, + _value: u8, + ) -> Result<(), Self::Error> { + self.0 += 1; + Ok(()) + } - fn traverse_u16( - &mut self, - _driver: &ValueDriver<'_, 'b, 'l>, - _value: u16, - ) -> Result<(), Self::Error> { - self.0 += 1; - Ok(()) - } + fn traverse_u16( + &mut self, + _driver: &ValueDriver<'_, 'b, 'l>, + _value: u16, + ) -> Result<(), Self::Error> { + self.0 += 1; + Ok(()) + } - 
fn traverse_u32( - &mut self, - _driver: &ValueDriver<'_, 'b, 'l>, - _value: u32, - ) -> Result<(), Self::Error> { - self.0 += 1; - Ok(()) - } + fn traverse_u32( + &mut self, + _driver: &ValueDriver<'_, 'b, 'l>, + _value: u32, + ) -> Result<(), Self::Error> { + self.0 += 1; + Ok(()) + } - fn traverse_u64( - &mut self, - _driver: &ValueDriver<'_, 'b, 'l>, - _value: u64, - ) -> Result<(), Self::Error> { - self.0 += 1; - Ok(()) - } + fn traverse_u64( + &mut self, + _driver: &ValueDriver<'_, 'b, 'l>, + _value: u64, + ) -> Result<(), Self::Error> { + self.0 += 1; + Ok(()) + } - fn traverse_u128( - &mut self, - _driver: &ValueDriver<'_, 'b, 'l>, - _value: u128, - ) -> Result<(), Self::Error> { - self.0 += 1; - Ok(()) - } + fn traverse_u128( + &mut self, + _driver: &ValueDriver<'_, 'b, 'l>, + _value: u128, + ) -> Result<(), Self::Error> { + self.0 += 1; + Ok(()) + } - fn traverse_u256( - &mut self, - _driver: &ValueDriver<'_, 'b, 'l>, - _value: U256, - ) -> Result<(), Self::Error> { - self.0 += 1; - Ok(()) - } + fn traverse_u256( + &mut self, + _driver: &ValueDriver<'_, 'b, 'l>, + _value: U256, + ) -> Result<(), Self::Error> { + self.0 += 1; + Ok(()) + } - fn traverse_bool( - &mut self, - _driver: &ValueDriver<'_, 'b, 'l>, - _value: bool, - ) -> Result<(), Self::Error> { - self.0 += 1; - Ok(()) - } + fn traverse_bool( + &mut self, + _driver: &ValueDriver<'_, 'b, 'l>, + _value: bool, + ) -> Result<(), Self::Error> { + self.0 += 1; + Ok(()) + } - fn traverse_address( - &mut self, - _driver: &ValueDriver<'_, 'b, 'l>, - _value: AccountAddress, - ) -> Result<(), Self::Error> { - self.0 += 1; - Ok(()) - } + fn traverse_address( + &mut self, + _driver: &ValueDriver<'_, 'b, 'l>, + _value: AccountAddress, + ) -> Result<(), Self::Error> { + self.0 += 1; + Ok(()) + } - fn traverse_signer( - &mut self, - _driver: &ValueDriver<'_, 'b, 'l>, - _value: AccountAddress, - ) -> Result<(), Self::Error> { - self.0 += 1; - Ok(()) - } + fn traverse_signer( + &mut self, + _driver: &ValueDriver<'_, 'b, 'l>, + _value: AccountAddress, + ) -> Result<(), Self::Error> { + self.0 += 1; + Ok(()) + } - fn traverse_vector( - &mut self, - driver: &mut VecDriver<'_, 'b, 'l>, - ) -> Result<(), Self::Error> { - self.0 += 1; - while driver.next_element(self)?.is_some() {} - Ok(()) + fn traverse_vector(&mut self, driver: &mut VecDriver<'_, 'b, 'l>) -> Result<(), Self::Error> { + self.0 += 1; + while driver.next_element(self)?.is_some() {} + Ok(()) + } + + fn traverse_struct( + &mut self, + driver: &mut StructDriver<'_, 'b, 'l>, + ) -> Result<(), Self::Error> { + self.0 += 1; + while driver.next_field(self)?.is_some() {} + Ok(()) + } + + fn traverse_variant( + &mut self, + driver: &mut VariantDriver<'_, 'b, 'l>, + ) -> Result<(), Self::Error> { + self.0 += 1; + while driver.next_field(self)?.is_some() {} + Ok(()) + } +} + +impl<'b, 'l> Visitor<'b, 'l> for PrintVisitor { + type Value = MoveValue; + type Error = annotated_visitor::Error; + + fn visit_u8( + &mut self, + _driver: &ValueDriver<'_, 'b, 'l>, + value: u8, + ) -> Result { + write!(self.output, "\n[{}] {value}: u8", self.depth).unwrap(); + Ok(MoveValue::U8(value)) + } + + fn visit_u16( + &mut self, + _driver: &ValueDriver<'_, 'b, 'l>, + value: u16, + ) -> Result { + write!(self.output, "\n[{}] {value}: u16", self.depth).unwrap(); + Ok(MoveValue::U16(value)) + } + + fn visit_u32( + &mut self, + _driver: &ValueDriver<'_, 'b, 'l>, + value: u32, + ) -> Result { + write!(self.output, "\n[{}] {value}: u32", self.depth).unwrap(); + Ok(MoveValue::U32(value)) + } + + fn visit_u64( + &mut 
self, + _driver: &ValueDriver<'_, 'b, 'l>, + value: u64, + ) -> Result { + write!(self.output, "\n[{}] {value}: u64", self.depth).unwrap(); + Ok(MoveValue::U64(value)) + } + + fn visit_u128( + &mut self, + _driver: &ValueDriver<'_, 'b, 'l>, + value: u128, + ) -> Result { + write!(self.output, "\n[{}] {value}: u128", self.depth).unwrap(); + Ok(MoveValue::U128(value)) + } + + fn visit_u256( + &mut self, + _driver: &ValueDriver<'_, 'b, 'l>, + value: U256, + ) -> Result { + write!(self.output, "\n[{}] {value}: u256", self.depth).unwrap(); + Ok(MoveValue::U256(value)) + } + + fn visit_bool( + &mut self, + _driver: &ValueDriver<'_, 'b, 'l>, + value: bool, + ) -> Result { + write!(self.output, "\n[{}] {value}: bool", self.depth).unwrap(); + Ok(MoveValue::Bool(value)) + } + + fn visit_address( + &mut self, + _driver: &ValueDriver<'_, 'b, 'l>, + value: AccountAddress, + ) -> Result { + write!(self.output, "\n[{}] {value}: address", self.depth).unwrap(); + Ok(MoveValue::Address(value)) + } + + fn visit_signer( + &mut self, + _driver: &ValueDriver<'_, 'b, 'l>, + value: AccountAddress, + ) -> Result { + write!(self.output, "\n[{}] {value}: signer", self.depth).unwrap(); + Ok(MoveValue::Signer(value)) + } + + fn visit_vector( + &mut self, + driver: &mut VecDriver<'_, 'b, 'l>, + ) -> Result { + let layout = driver.element_layout(); + write!(self.output, "\n[{}] vector<{layout:#}>", self.depth).unwrap(); + + let mut elems = vec![]; + let mut elem_visitor = Self { + depth: self.depth + 1, + output: std::mem::take(&mut self.output), + }; + + while let Some(elem) = driver.next_element(&mut elem_visitor)? { + elems.push(elem) } - fn traverse_struct( - &mut self, - driver: &mut StructDriver<'_, 'b, 'l>, - ) -> Result<(), Self::Error> { - self.0 += 1; - while driver.next_field(self)?.is_some() {} - Ok(()) + self.output = elem_visitor.output; + Ok(MoveValue::Vector(elems)) + } + + fn visit_struct( + &mut self, + driver: &mut StructDriver<'_, 'b, 'l>, + ) -> Result { + let layout = driver.struct_layout(); + write!(self.output, "\n[{}] {layout:#}", self.depth).unwrap(); + + let mut fields = vec![]; + let mut field_visitor = Self { + depth: self.depth + 1, + output: std::mem::take(&mut self.output), + }; + + while let Some((field, value)) = driver.next_field(&mut field_visitor)? { + fields.push((field.name.clone(), value)); } - fn traverse_variant( - &mut self, - driver: &mut VariantDriver<'_, 'b, 'l>, - ) -> Result<(), Self::Error> { - self.0 += 1; - while driver.next_field(self)?.is_some() {} - Ok(()) + self.output = field_visitor.output; + let type_ = driver.struct_layout().type_.clone(); + Ok(MoveValue::Struct(MoveStruct { type_, fields })) + } + + fn visit_variant( + &mut self, + driver: &mut VariantDriver<'_, 'b, 'l>, + ) -> Result { + let layout = driver.enum_layout(); + write!(self.output, "\n[{}] {layout:#}", self.depth).unwrap(); + + let mut fields = vec![]; + let mut field_visitor = Self { + depth: self.depth + 1, + output: std::mem::take(&mut self.output), + }; + + while let Some((field, value)) = driver.next_field(&mut field_visitor)? 
{ + fields.push((field.name.clone(), value)); } + + self.output = field_visitor.output; + let type_ = driver.enum_layout().type_.clone(); + Ok(MoveValue::Variant(MoveVariant { + type_, + variant_name: driver.variant_name().to_owned(), + tag: driver.tag(), + fields, + })) } +} + +#[test] +fn traversal() { + use MoveTypeLayout as T; + use MoveValue as V; let type_layout = struct_layout_( "0x0::foo::Bar", @@ -334,168 +493,6 @@ fn nested_datatype_visit() { use MoveTypeLayout as T; use MoveValue as V; - #[derive(Default)] - struct PrintVisitor { - depth: usize, - output: String, - } - - impl<'b, 'l> Visitor<'b, 'l> for PrintVisitor { - type Value = MoveValue; - type Error = annotated_visitor::Error; - - fn visit_u8( - &mut self, - _driver: &ValueDriver<'_, 'b, 'l>, - value: u8, - ) -> Result { - write!(self.output, "\n[{}] {value}: u8", self.depth).unwrap(); - Ok(V::U8(value)) - } - - fn visit_u16( - &mut self, - _driver: &ValueDriver<'_, 'b, 'l>, - value: u16, - ) -> Result { - write!(self.output, "\n[{}] {value}: u16", self.depth).unwrap(); - Ok(V::U16(value)) - } - - fn visit_u32( - &mut self, - _driver: &ValueDriver<'_, 'b, 'l>, - value: u32, - ) -> Result { - write!(self.output, "\n[{}] {value}: u32", self.depth).unwrap(); - Ok(V::U32(value)) - } - - fn visit_u64( - &mut self, - _driver: &ValueDriver<'_, 'b, 'l>, - value: u64, - ) -> Result { - write!(self.output, "\n[{}] {value}: u64", self.depth).unwrap(); - Ok(V::U64(value)) - } - - fn visit_u128( - &mut self, - _driver: &ValueDriver<'_, 'b, 'l>, - value: u128, - ) -> Result { - write!(self.output, "\n[{}] {value}: u128", self.depth).unwrap(); - Ok(V::U128(value)) - } - - fn visit_u256( - &mut self, - _driver: &ValueDriver<'_, 'b, 'l>, - value: U256, - ) -> Result { - write!(self.output, "\n[{}] {value}: u256", self.depth).unwrap(); - Ok(V::U256(value)) - } - - fn visit_bool( - &mut self, - _driver: &ValueDriver<'_, 'b, 'l>, - value: bool, - ) -> Result { - write!(self.output, "\n[{}] {value}: bool", self.depth).unwrap(); - Ok(V::Bool(value)) - } - - fn visit_address( - &mut self, - _driver: &ValueDriver<'_, 'b, 'l>, - value: AccountAddress, - ) -> Result { - write!(self.output, "\n[{}] {value}: address", self.depth).unwrap(); - Ok(V::Address(value)) - } - - fn visit_signer( - &mut self, - _driver: &ValueDriver<'_, 'b, 'l>, - value: AccountAddress, - ) -> Result { - write!(self.output, "\n[{}] {value}: signer", self.depth).unwrap(); - Ok(V::Signer(value)) - } - - fn visit_vector( - &mut self, - driver: &mut VecDriver<'_, 'b, 'l>, - ) -> Result { - let layout = driver.element_layout(); - write!(self.output, "\n[{}] vector<{layout:#}>", self.depth).unwrap(); - - let mut elems = vec![]; - let mut elem_visitor = Self { - depth: self.depth + 1, - output: std::mem::take(&mut self.output), - }; - - while let Some(elem) = driver.next_element(&mut elem_visitor)? { - elems.push(elem) - } - - self.output = elem_visitor.output; - Ok(V::Vector(elems)) - } - - fn visit_struct( - &mut self, - driver: &mut StructDriver<'_, 'b, 'l>, - ) -> Result { - let layout = driver.struct_layout(); - write!(self.output, "\n[{}] {layout:#}", self.depth).unwrap(); - - let mut fields = vec![]; - let mut field_visitor = Self { - depth: self.depth + 1, - output: std::mem::take(&mut self.output), - }; - - while let Some((field, value)) = driver.next_field(&mut field_visitor)? 
{ - fields.push((field.name.clone(), value)); - } - - self.output = field_visitor.output; - let type_ = driver.struct_layout().type_.clone(); - Ok(V::Struct(MoveStruct { type_, fields })) - } - - fn visit_variant( - &mut self, - driver: &mut VariantDriver<'_, 'b, 'l>, - ) -> Result { - let layout = driver.enum_layout(); - write!(self.output, "\n[{}] {layout:#}", self.depth).unwrap(); - - let mut fields = vec![]; - let mut field_visitor = Self { - depth: self.depth + 1, - output: std::mem::take(&mut self.output), - }; - - while let Some((field, value)) = driver.next_field(&mut field_visitor)? { - fields.push((field.name.clone(), value)); - } - - self.output = field_visitor.output; - let type_ = driver.enum_layout().type_.clone(); - Ok(V::Variant(MoveVariant { - type_, - variant_name: driver.variant_name().to_owned(), - tag: driver.tag(), - fields, - })) - } - } - let type_layout = struct_layout_( "0x0::foo::Bar", vec![ @@ -1058,7 +1055,7 @@ fn byte_offset_test() { } /// Create a struct value for test purposes. -fn struct_value_(rep: &str, fields: Vec<(&str, MoveValue)>) -> MoveValue { +pub(crate) fn struct_value_(rep: &str, fields: Vec<(&str, MoveValue)>) -> MoveValue { let type_ = StructTag::from_str(rep).unwrap(); let fields = fields .into_iter() @@ -1069,7 +1066,7 @@ fn struct_value_(rep: &str, fields: Vec<(&str, MoveValue)>) -> MoveValue { } /// Create a struct layout for test purposes. -fn struct_layout_(rep: &str, fields: Vec<(&str, MoveTypeLayout)>) -> MoveTypeLayout { +pub(crate) fn struct_layout_(rep: &str, fields: Vec<(&str, MoveTypeLayout)>) -> MoveTypeLayout { let type_ = StructTag::from_str(rep).unwrap(); let fields = fields .into_iter() @@ -1083,7 +1080,12 @@ fn struct_layout_(rep: &str, fields: Vec<(&str, MoveTypeLayout)>) -> MoveTypeLay } /// Create a variant value for test purposes. -fn variant_value_(rep: &str, name: &str, tag: u16, fields: Vec<(&str, MoveValue)>) -> MoveValue { +pub(crate) fn variant_value_( + rep: &str, + name: &str, + tag: u16, + fields: Vec<(&str, MoveValue)>, +) -> MoveValue { let type_ = StructTag::from_str(rep).unwrap(); let fields = fields .into_iter() @@ -1099,7 +1101,10 @@ fn variant_value_(rep: &str, name: &str, tag: u16, fields: Vec<(&str, MoveValue) } /// Create an enum layout for test purposes. -fn enum_layout_(rep: &str, variants: Vec<(&str, Vec<(&str, MoveTypeLayout)>)>) -> MoveTypeLayout { +pub(crate) fn enum_layout_( + rep: &str, + variants: Vec<(&str, Vec<(&str, MoveTypeLayout)>)>, +) -> MoveTypeLayout { let type_ = StructTag::from_str(rep).unwrap(); let variants = variants .into_iter() @@ -1117,6 +1122,6 @@ fn enum_layout_(rep: &str, variants: Vec<(&str, Vec<(&str, MoveTypeLayout)>)>) - } /// BCS encode Move value. 
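// With the helpers in this file now `pub(crate)`, sibling tests such as the new
// extractor_test can reuse them. A minimal sketch of that reuse, assuming the
// `Extractor` and path-element API exercised in extractor_test above:
//
//     let (value, layout) = test_struct();
//     let bytes = serialize(value);
//     let mut printer = PrintVisitor::default();
//     let found = Extractor::deserialize_value(&bytes, &layout, &mut printer, vec![E::Field("a")])
//         .unwrap()
//         .is_some();
//     assert!(found && printer.output.contains(": u8"));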
-fn serialize(value: MoveValue) -> Vec { +pub(crate) fn serialize(value: MoveValue) -> Vec { value.clone().undecorate().simple_serialize().unwrap() } diff --git a/external-crates/move/crates/move-model/src/lib.rs b/external-crates/move/crates/move-model/src/lib.rs index a81e9e5557e42..58b9b7cb5888a 100644 --- a/external-crates/move/crates/move-model/src/lib.rs +++ b/external-crates/move/crates/move-model/src/lib.rs @@ -23,7 +23,7 @@ use move_binary_format::file_format::{ use move_compiler::{ self, compiled_unit::{self, AnnotatedCompiledUnit}, - diagnostics::{Diagnostics, WarningFilters}, + diagnostics::{warning_filters::WarningFilters, Diagnostics}, expansion::ast::{self as E, ModuleIdent, ModuleIdent_, TargetKind}, parser::ast as P, shared::{parse_named_address, unique_map::UniqueMap, NumericalAddress, PackagePaths}, diff --git a/external-crates/move/crates/move-model/src/model.rs b/external-crates/move/crates/move-model/src/model.rs index f80c4c15b9ba1..40d0391f2a456 100644 --- a/external-crates/move/crates/move-model/src/model.rs +++ b/external-crates/move/crates/move-model/src/model.rs @@ -47,7 +47,8 @@ use move_binary_format::{ CompiledModule, }; use move_bytecode_source_map::{mapping::SourceMapping, source_map::SourceMap}; -use move_command_line_common::{address::NumericalAddress, files::FileHash}; +use move_command_line_common::files::FileHash; +use move_core_types::parsing::address::NumericalAddress; use move_core_types::{ account_address::AccountAddress, identifier::{IdentStr, Identifier}, diff --git a/external-crates/move/crates/move-model/tests/testsuite.rs b/external-crates/move/crates/move-model/tests/testsuite.rs index bf66d7215179a..965080f519ca1 100644 --- a/external-crates/move/crates/move-model/tests/testsuite.rs +++ b/external-crates/move/crates/move-model/tests/testsuite.rs @@ -5,7 +5,7 @@ use codespan_reporting::{diagnostic::Severity, term::termcolor::Buffer}; use move_binary_format::file_format::{FunctionDefinitionIndex, StructDefinitionIndex}; use move_command_line_common::testing::EXP_EXT; -use move_compiler::{diagnostics::WarningFilters, shared::PackagePaths}; +use move_compiler::{diagnostics::warning_filters::WarningFilters, shared::PackagePaths}; use move_model::{run_bytecode_model_builder, run_model_builder}; use move_prover_test_utils::baseline_test::verify_or_update_baseline; use std::path::Path; diff --git a/external-crates/move/crates/move-package/src/resolution/resolution_graph.rs b/external-crates/move/crates/move-package/src/resolution/resolution_graph.rs index d6aa0ed8b2f4f..1d45ef772f0e2 100644 --- a/external-crates/move/crates/move-package/src/resolution/resolution_graph.rs +++ b/external-crates/move/crates/move-package/src/resolution/resolution_graph.rs @@ -8,7 +8,7 @@ use move_command_line_common::files::{ }; use move_compiler::command_line::DEFAULT_OUTPUT_DIR; use move_compiler::editions::Edition; -use move_compiler::{diagnostics::WarningFilters, shared::PackageConfig}; +use move_compiler::{diagnostics::warning_filters::WarningFilters, shared::PackageConfig}; use move_core_types::account_address::AccountAddress; use move_symbol_pool::Symbol; use std::fs::File; diff --git a/external-crates/move/crates/move-stackless-bytecode/tests/testsuite.rs b/external-crates/move/crates/move-stackless-bytecode/tests/testsuite.rs index a992a3726479c..9465f65b7f5ef 100644 --- a/external-crates/move/crates/move-stackless-bytecode/tests/testsuite.rs +++ b/external-crates/move/crates/move-stackless-bytecode/tests/testsuite.rs @@ -5,7 +5,7 @@ use anyhow::anyhow; use 
codespan_reporting::{diagnostic::Severity, term::termcolor::Buffer}; use move_command_line_common::testing::EXP_EXT; -use move_compiler::{diagnostics::WarningFilters, shared::PackagePaths}; +use move_compiler::{diagnostics::warning_filters::WarningFilters, shared::PackagePaths}; use move_model::{model::GlobalEnv, options::ModelBuilderOptions, run_model_builder_with_options}; use move_prover_test_utils::{baseline_test::verify_or_update_baseline, extract_test_directives}; use move_stackless_bytecode::{ diff --git a/external-crates/move/crates/move-stdlib/src/lib.rs b/external-crates/move/crates/move-stdlib/src/lib.rs index 5d714e3d3acd1..a823dca9fefe4 100644 --- a/external-crates/move/crates/move-stdlib/src/lib.rs +++ b/external-crates/move/crates/move-stdlib/src/lib.rs @@ -3,10 +3,8 @@ // SPDX-License-Identifier: Apache-2.0 use log::LevelFilter; -use move_command_line_common::{ - address::NumericalAddress, - files::{extension_equals, find_filenames, MOVE_EXTENSION}, -}; +use move_command_line_common::files::{extension_equals, find_filenames, MOVE_EXTENSION}; +use move_core_types::parsing::address::NumericalAddress; use std::{collections::BTreeMap, path::PathBuf}; #[cfg(test)] diff --git a/external-crates/move/crates/move-transactional-test-runner/src/framework.rs b/external-crates/move/crates/move-transactional-test-runner/src/framework.rs index 1294d1760b6a9..631870c045224 100644 --- a/external-crates/move/crates/move-transactional-test-runner/src/framework.rs +++ b/external-crates/move/crates/move-transactional-test-runner/src/framework.rs @@ -14,20 +14,22 @@ use clap::Parser; use move_binary_format::file_format::CompiledModule; use move_bytecode_source_map::{mapping::SourceMapping, source_map::SourceMap}; use move_command_line_common::{ - address::ParsedAddress, env::read_bool_env_var, files::{MOVE_EXTENSION, MOVE_IR_EXTENSION}, testing::{add_update_baseline_fix, format_diff, read_env_update_baseline, EXP_EXT}, - types::ParsedType, - values::{ParsableValue, ParsedValue}, }; use move_compiler::{ compiled_unit::AnnotatedCompiledUnit, - diagnostics::{Diagnostics, WarningFilters}, + diagnostics::{warning_filters::WarningFilters, Diagnostics}, editions::{Edition, Flavor}, shared::{files::MappedFiles, NumericalAddress, PackageConfig}, FullyCompiledProgram, }; +use move_core_types::parsing::{ + address::ParsedAddress, + types::ParsedType, + values::{ParsableValue, ParsedValue}, +}; use move_core_types::{ account_address::AccountAddress, identifier::{IdentStr, Identifier}, diff --git a/external-crates/move/crates/move-transactional-test-runner/src/tasks.rs b/external-crates/move/crates/move-transactional-test-runner/src/tasks.rs index c1e62c544f60b..f5247de2c86a7 100644 --- a/external-crates/move/crates/move-transactional-test-runner/src/tasks.rs +++ b/external-crates/move/crates/move-transactional-test-runner/src/tasks.rs @@ -6,14 +6,14 @@ use anyhow::{anyhow, bail, Result}; use clap::*; -use move_command_line_common::{ +use move_command_line_common::files::{MOVE_EXTENSION, MOVE_IR_EXTENSION}; +use move_compiler::shared::NumericalAddress; +use move_core_types::identifier::Identifier; +use move_core_types::parsing::{ address::ParsedAddress, - files::{MOVE_EXTENSION, MOVE_IR_EXTENSION}, types::ParsedType, values::{ParsableValue, ParsedValue}, }; -use move_compiler::shared::NumericalAddress; -use move_core_types::identifier::Identifier; use std::{convert::TryInto, fmt::Debug, path::Path, str::FromStr}; use tempfile::NamedTempFile; diff --git 
a/external-crates/move/crates/move-transactional-test-runner/src/vm_test_harness.rs b/external-crates/move/crates/move-transactional-test-runner/src/vm_test_harness.rs index f04769afce091..05b3b5d281926 100644 --- a/external-crates/move/crates/move-transactional-test-runner/src/vm_test_harness.rs +++ b/external-crates/move/crates/move-transactional-test-runner/src/vm_test_harness.rs @@ -15,10 +15,9 @@ use move_binary_format::{ errors::{Location, VMError, VMResult}, CompiledModule, }; -use move_command_line_common::{ - address::ParsedAddress, files::verify_and_create_named_address_mapping, -}; +use move_command_line_common::files::verify_and_create_named_address_mapping; use move_compiler::{editions::Edition, shared::PackagePaths, FullyCompiledProgram}; +use move_core_types::parsing::address::ParsedAddress; use move_core_types::{ account_address::AccountAddress, identifier::IdentStr, diff --git a/external-crates/move/crates/move-unit-test/Cargo.toml b/external-crates/move/crates/move-unit-test/Cargo.toml index d6a3d543ffcf5..e485019a2083a 100644 --- a/external-crates/move/crates/move-unit-test/Cargo.toml +++ b/external-crates/move/crates/move-unit-test/Cargo.toml @@ -51,4 +51,4 @@ name = "move_unit_test_testsuite" harness = false [features] -gas-profiler = [] +tracing = [] diff --git a/external-crates/move/crates/move-unit-test/src/lib.rs b/external-crates/move/crates/move-unit-test/src/lib.rs index 00d42c0219885..496782ee916b9 100644 --- a/external-crates/move/crates/move-unit-test/src/lib.rs +++ b/external-crates/move/crates/move-unit-test/src/lib.rs @@ -187,7 +187,7 @@ impl UnitTestingConfig { let (_, compiler) = diagnostics::unwrap_or_report_pass_diagnostics(&files, comments_and_compiler_res); - let (mut compiler, cfgir) = compiler.into_ast(); + let (compiler, cfgir) = compiler.into_ast(); let compilation_env = compiler.compilation_env(); let test_plan = unit_test::plan_builder::construct_test_plan(compilation_env, None, &cfgir); let mapped_files = compilation_env.mapped_files().clone(); diff --git a/external-crates/move/crates/move-unit-test/src/test_runner.rs b/external-crates/move/crates/move-unit-test/src/test_runner.rs index 42310d40cece8..1dae13f18203f 100644 --- a/external-crates/move/crates/move-unit-test/src/test_runner.rs +++ b/external-crates/move/crates/move-unit-test/src/test_runner.rs @@ -262,7 +262,7 @@ impl SharedTestingConfig { let mut session = move_vm.new_session_with_extensions(&self.starting_storage_state, extensions); let mut gas_meter = GasStatus::new(&self.cost_table, Gas::new(self.execution_bound)); - move_vm_profiler::gas_profiler_feature_enabled! { + move_vm_profiler::tracing_feature_enabled! 
{ use move_vm_profiler::GasProfiler; use move_vm_types::gas::GasMeter; gas_meter.set_profiler(GasProfiler::init_default_cfg( diff --git a/external-crates/move/crates/move-vm-config/Cargo.toml b/external-crates/move/crates/move-vm-config/Cargo.toml index e0447de95b2c4..50eb97a427134 100644 --- a/external-crates/move/crates/move-vm-config/Cargo.toml +++ b/external-crates/move/crates/move-vm-config/Cargo.toml @@ -12,4 +12,4 @@ move-binary-format.workspace = true once_cell.workspace = true [features] -gas-profiler = [] +tracing = [] diff --git a/external-crates/move/crates/move-vm-config/src/runtime.rs b/external-crates/move/crates/move-vm-config/src/runtime.rs index a3c763e760ed3..6743c7f9d298a 100644 --- a/external-crates/move/crates/move-vm-config/src/runtime.rs +++ b/external-crates/move/crates/move-vm-config/src/runtime.rs @@ -4,13 +4,13 @@ use crate::verifier::{VerifierConfig, DEFAULT_MAX_CONSTANT_VECTOR_LEN}; use move_binary_format::binary_config::BinaryConfig; use move_binary_format::file_format_common::VERSION_MAX; -#[cfg(feature = "gas-profiler")] +#[cfg(feature = "tracing")] use once_cell::sync::Lazy; -#[cfg(feature = "gas-profiler")] +#[cfg(feature = "tracing")] const MOVE_VM_PROFILER_ENV_VAR_NAME: &str = "MOVE_VM_PROFILE"; -#[cfg(feature = "gas-profiler")] +#[cfg(feature = "tracing")] static PROFILER_ENABLED: Lazy = Lazy::new(|| std::env::var(MOVE_VM_PROFILER_ENV_VAR_NAME).is_ok()); @@ -88,7 +88,7 @@ pub struct VMProfilerConfig { pub use_long_function_name: bool, } -#[cfg(feature = "gas-profiler")] +#[cfg(feature = "tracing")] impl std::default::Default for VMProfilerConfig { fn default() -> Self { Self { @@ -99,7 +99,7 @@ impl std::default::Default for VMProfilerConfig { } } -#[cfg(feature = "gas-profiler")] +#[cfg(feature = "tracing")] impl VMProfilerConfig { pub fn get_default_config_if_enabled() -> Option { if *PROFILER_ENABLED { diff --git a/external-crates/move/crates/move-vm-integration-tests/Cargo.toml b/external-crates/move/crates/move-vm-integration-tests/Cargo.toml index 2a74255a839f7..37a6ebfd42bbc 100644 --- a/external-crates/move/crates/move-vm-integration-tests/Cargo.toml +++ b/external-crates/move/crates/move-vm-integration-tests/Cargo.toml @@ -13,7 +13,6 @@ edition = "2021" [dependencies] anyhow.workspace = true -expect-test = "1.4.0" fail = { workspace = true, features = ["failpoints"] } tempfile.workspace = true memory-stats = "1.0.0" @@ -33,11 +32,11 @@ move-ir-to-bytecode.workspace = true [features] default = [] -gas-profiler = [ - "move-vm-config/gas-profiler", - "move-vm-runtime/gas-profiler", - "move-vm-profiler/gas-profiler", - "move-vm-test-utils/gas-profiler", +tracing = [ + "move-vm-config/tracing", + "move-vm-runtime/tracing", + "move-vm-profiler/tracing", + "move-vm-test-utils/tracing", ] [[bin]] diff --git a/external-crates/move/crates/move-vm-integration-tests/src/tests/instantiation_tests.rs b/external-crates/move/crates/move-vm-integration-tests/src/tests/instantiation_tests.rs index 29974b219d5f9..8892eef2a70b5 100644 --- a/external-crates/move/crates/move-vm-integration-tests/src/tests/instantiation_tests.rs +++ b/external-crates/move/crates/move-vm-integration-tests/src/tests/instantiation_tests.rs @@ -23,7 +23,7 @@ use move_core_types::{ language_storage::{ModuleId, StructTag, TypeTag}, vm_status::StatusCode, }; -#[cfg(feature = "gas-profiler")] +#[cfg(feature = "tracing")] use move_vm_profiler::GasProfiler; use move_vm_runtime::{ move_vm::MoveVM, @@ -33,7 +33,7 @@ use move_vm_test_utils::{ gas_schedule::{Gas, GasStatus, 
INITIAL_COST_SCHEDULE}, InMemoryStorage, }; -#[cfg(feature = "gas-profiler")] +#[cfg(feature = "tracing")] use move_vm_types::gas::GasMeter; use std::time::Instant; @@ -555,7 +555,7 @@ fn run_with_module( .into_iter() .map(|tag| session.load_type(&tag)) .collect::>>(); - move_vm_profiler::gas_profiler_feature_enabled! { + move_vm_profiler::tracing_feature_enabled! { gas.set_profiler(GasProfiler::init( &session.vm_config().profiler_config, entry_name.to_string(), diff --git a/external-crates/move/crates/move-vm-profiler/Cargo.toml b/external-crates/move/crates/move-vm-profiler/Cargo.toml index 89bdd1396c016..05cf0baadc301 100644 --- a/external-crates/move/crates/move-vm-profiler/Cargo.toml +++ b/external-crates/move/crates/move-vm-profiler/Cargo.toml @@ -15,4 +15,4 @@ tracing.workspace = true move-vm-config.workspace = true [features] -gas-profiler = ["move-vm-config/gas-profiler"] +tracing = ["move-vm-config/tracing"] diff --git a/external-crates/move/crates/move-vm-profiler/src/lib.rs b/external-crates/move/crates/move-vm-profiler/src/lib.rs index 3312383e1b052..3479e1d059223 100644 --- a/external-crates/move/crates/move-vm-profiler/src/lib.rs +++ b/external-crates/move/crates/move-vm-profiler/src/lib.rs @@ -4,7 +4,7 @@ use move_vm_config::runtime::VMProfilerConfig; use serde::Serialize; use std::collections::BTreeMap; -#[cfg(feature = "gas-profiler")] +#[cfg(feature = "tracing")] use tracing::info; #[derive(Debug, Clone, Serialize)] @@ -62,7 +62,7 @@ pub struct GasProfiler { finished: bool, } -#[cfg(feature = "gas-profiler")] +#[cfg(feature = "tracing")] impl GasProfiler { // Used by profiler viz tool const OPEN_FRAME_IDENT: &'static str = "O"; @@ -70,7 +70,7 @@ impl GasProfiler { const TOP_LEVEL_FRAME_NAME: &'static str = "root"; - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] pub fn init(config: &Option, name: String, start_gas: u64) -> Self { let mut prof = GasProfiler { exporter: "speedscope@1.15.2".to_string(), @@ -101,7 +101,7 @@ impl GasProfiler { prof } - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] pub fn init_default_cfg(name: String, start_gas: u64) -> Self { Self::init( &VMProfilerConfig::get_default_config_if_enabled(), @@ -110,22 +110,22 @@ impl GasProfiler { ) } - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] pub fn short_name(s: &String) -> String { s.split("::").last().unwrap_or(s).to_string() } - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] fn is_metered(&self) -> bool { (self.profiles[0].end_value != 0) && (self.start_gas != 0) } - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] fn start_gas(&self) -> u64 { self.start_gas } - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] fn add_frame( &mut self, frame_name: String, @@ -146,7 +146,7 @@ impl GasProfiler { } } - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] pub fn open_frame(&mut self, frame_name: String, metadata: String, gas_start: u64) { if self.config.is_none() || self.start_gas == 0 { return; @@ -162,7 +162,7 @@ impl GasProfiler { }); } - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] pub fn close_frame(&mut self, frame_name: String, metadata: String, gas_end: u64) { if self.config.is_none() || self.start_gas == 0 { return; @@ -178,7 +178,7 @@ impl GasProfiler { self.profiles[0].end_value = start - gas_end; } - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] pub fn to_file(&self) { use std::ffi::{OsStr, OsString}; use std::fs::File; @@ -218,7 +218,7 @@ impl GasProfiler 
{ info!("Gas profile written to file: {}", p.display()); } - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] pub fn finish(&mut self) { if self.finished { return; @@ -231,7 +231,7 @@ impl GasProfiler { } } -#[cfg(feature = "gas-profiler")] +#[cfg(feature = "tracing")] impl Drop for GasProfiler { fn drop(&mut self) { self.finish(); @@ -241,7 +241,7 @@ impl Drop for GasProfiler { #[macro_export] macro_rules! profile_open_frame { ($gas_meter:expr, $frame_name:expr) => { - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] { let gas_rem = $gas_meter.remaining_gas().into(); move_vm_profiler::profile_open_frame_impl!( @@ -256,7 +256,7 @@ macro_rules! profile_open_frame { #[macro_export] macro_rules! profile_open_frame_impl { ($profiler:expr, $frame_name:expr, $gas_rem:expr) => { - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] { if let Some(profiler) = $profiler { if let Some(config) = &profiler.config { @@ -275,7 +275,7 @@ macro_rules! profile_open_frame_impl { #[macro_export] macro_rules! profile_close_frame { ($gas_meter:expr, $frame_name:expr) => { - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] { let gas_rem = $gas_meter.remaining_gas().into(); move_vm_profiler::profile_close_frame_impl!( @@ -290,7 +290,7 @@ macro_rules! profile_close_frame { #[macro_export] macro_rules! profile_close_frame_impl { ($profiler:expr, $frame_name:expr, $gas_rem:expr) => { - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] { if let Some(profiler) = $profiler { if let Some(config) = &profiler.config { @@ -309,7 +309,7 @@ macro_rules! profile_close_frame_impl { #[macro_export] macro_rules! profile_open_instr { ($gas_meter:expr, $frame_name:expr) => { - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] { let gas_rem = $gas_meter.remaining_gas().into(); if let Some(profiler) = $gas_meter.get_profiler_mut() { @@ -326,7 +326,7 @@ macro_rules! profile_open_instr { #[macro_export] macro_rules! profile_close_instr { ($gas_meter:expr, $frame_name:expr) => { - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] { let gas_rem = $gas_meter.remaining_gas().into(); if let Some(profiler) = $gas_meter.get_profiler_mut() { @@ -343,39 +343,39 @@ macro_rules! profile_close_instr { #[macro_export] macro_rules! profile_dump_file { ($profiler:expr) => { - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] $profiler.to_file() }; } -#[cfg(feature = "gas-profiler")] +#[cfg(feature = "tracing")] #[macro_export] -macro_rules! gas_profiler_feature_enabled { +macro_rules! tracing_feature_enabled { ($($tt:tt)*) => { - if cfg!(feature = "gas-profiler") { + if cfg!(feature = "tracing") { $($tt)* } }; } -#[cfg(not(feature = "gas-profiler"))] +#[cfg(not(feature = "tracing"))] #[macro_export] -macro_rules! gas_profiler_feature_enabled { +macro_rules! tracing_feature_enabled { ( $( $tt:tt )* ) => {}; } -#[cfg(not(feature = "gas-profiler"))] +#[cfg(not(feature = "tracing"))] #[macro_export] -macro_rules! gas_profiler_feature_disabled { +macro_rules! tracing_feature_disabled { ($($tt:tt)*) => { - if !cfg!(feature = "gas-profiler") { + if !cfg!(feature = "tracing") { $($tt)* } }; } -#[cfg(feature = "gas-profiler")] +#[cfg(feature = "tracing")] #[macro_export] -macro_rules! gas_profiler_feature_disabled { +macro_rules! 
tracing_feature_disabled { ( $( $tt:tt )* ) => {}; } diff --git a/external-crates/move/crates/move-vm-runtime/Cargo.toml b/external-crates/move/crates/move-vm-runtime/Cargo.toml index 17e49de4c0a46..3089a93ce3d6d 100644 --- a/external-crates/move/crates/move-vm-runtime/Cargo.toml +++ b/external-crates/move/crates/move-vm-runtime/Cargo.toml @@ -39,11 +39,9 @@ move-compiler.workspace = true default = [] fuzzing = ["move-vm-types/fuzzing"] failpoints = ["fail/failpoints"] -# Enable tracing and debugging also for release builds. By default, it is only enabled for debug builds. -debugging = [] testing = [] lazy_natives = [] -gas-profiler = [ - "move-vm-config/gas-profiler", - "move-vm-profiler/gas-profiler", +tracing = [ + "move-vm-config/tracing", + "move-vm-profiler/tracing", ] diff --git a/external-crates/move/crates/move-vm-runtime/src/lib.rs b/external-crates/move/crates/move-vm-runtime/src/lib.rs index c69f0d150582d..c982a51ce1209 100644 --- a/external-crates/move/crates/move-vm-runtime/src/lib.rs +++ b/external-crates/move/crates/move-vm-runtime/src/lib.rs @@ -24,7 +24,7 @@ mod tracing; mod tracing2; // Only include debugging functionality in debug builds -#[cfg(any(debug_assertions, feature = "debugging"))] +#[cfg(any(debug_assertions, feature = "tracing"))] mod debug; #[cfg(test)] diff --git a/external-crates/move/crates/move-vm-runtime/src/loader.rs b/external-crates/move/crates/move-vm-runtime/src/loader.rs index bffc555dbbaba..6f843fd75aec6 100644 --- a/external-crates/move/crates/move-vm-runtime/src/loader.rs +++ b/external-crates/move/crates/move-vm-runtime/src/loader.rs @@ -2207,7 +2207,7 @@ impl Function { ) } - #[cfg(any(debug_assertions, feature = "debugging"))] + #[cfg(any(debug_assertions, feature = "tracing"))] pub(crate) fn pretty_short_string(&self) -> String { let id = &self.module; format!( diff --git a/external-crates/move/crates/move-vm-runtime/src/runtime.rs b/external-crates/move/crates/move-vm-runtime/src/runtime.rs index b3656f597a3f2..6801254fbe246 100644 --- a/external-crates/move/crates/move-vm-runtime/src/runtime.rs +++ b/external-crates/move/crates/move-vm-runtime/src/runtime.rs @@ -498,7 +498,7 @@ impl VMRuntime { gas_meter: &mut impl GasMeter, extensions: &mut NativeContextExtensions, ) -> VMResult { - move_vm_profiler::gas_profiler_feature_enabled! { + move_vm_profiler::tracing_feature_enabled! { use move_vm_profiler::GasProfiler; if gas_meter.get_profiler_mut().is_none() { gas_meter.set_profiler(GasProfiler::init_default_cfg( diff --git a/external-crates/move/crates/move-vm-runtime/src/session.rs b/external-crates/move/crates/move-vm-runtime/src/session.rs index b20694120a5b2..7dc5549a4f062 100644 --- a/external-crates/move/crates/move-vm-runtime/src/session.rs +++ b/external-crates/move/crates/move-vm-runtime/src/session.rs @@ -104,7 +104,7 @@ impl<'r, 'l, S: MoveResolver> Session<'r, 'l, S> { args: Vec>, gas_meter: &mut impl GasMeter, ) -> VMResult { - move_vm_profiler::gas_profiler_feature_enabled! { + move_vm_profiler::tracing_feature_enabled! { use move_vm_profiler::GasProfiler; if gas_meter.get_profiler_mut().is_none() { gas_meter.set_profiler(GasProfiler::init_default_cfg( @@ -137,7 +137,7 @@ impl<'r, 'l, S: MoveResolver> Session<'r, 'l, S> { gas_meter: &mut impl GasMeter, tracer: Option<&mut MoveTraceBuilder>, ) -> VMResult { - move_vm_profiler::gas_profiler_feature_enabled! { + move_vm_profiler::tracing_feature_enabled! 
{ use move_vm_profiler::GasProfiler; if gas_meter.get_profiler_mut().is_none() { gas_meter.set_profiler(GasProfiler::init_default_cfg( @@ -147,7 +147,7 @@ impl<'r, 'l, S: MoveResolver> Session<'r, 'l, S> { } } - let tracer = if cfg!(feature = "gas-profiler") { + let tracer = if cfg!(feature = "tracing") { tracer } else { None diff --git a/external-crates/move/crates/move-vm-runtime/src/tracing.rs b/external-crates/move/crates/move-vm-runtime/src/tracing.rs index a4c984c62a537..d2a9662f12b6f 100644 --- a/external-crates/move/crates/move-vm-runtime/src/tracing.rs +++ b/external-crates/move/crates/move-vm-runtime/src/tracing.rs @@ -2,10 +2,10 @@ // Copyright (c) The Move Contributors // SPDX-License-Identifier: Apache-2.0 -#[cfg(any(debug_assertions, feature = "debugging"))] +#[cfg(any(debug_assertions, feature = "tracing"))] use crate::debug::DebugContext; -#[cfg(any(debug_assertions, feature = "debugging"))] +#[cfg(any(debug_assertions, feature = "tracing"))] use ::{ move_binary_format::file_format::Bytecode, move_vm_types::values::Locals, @@ -20,31 +20,31 @@ use ::{ }, }; -#[cfg(any(debug_assertions, feature = "debugging"))] +#[cfg(any(debug_assertions, feature = "tracing"))] use crate::{ interpreter::Interpreter, loader::{Function, Loader}, }; -#[cfg(any(debug_assertions, feature = "debugging"))] +#[cfg(any(debug_assertions, feature = "tracing"))] const MOVE_VM_TRACING_ENV_VAR_NAME: &str = "MOVE_VM_TRACE"; -#[cfg(any(debug_assertions, feature = "debugging"))] +#[cfg(any(debug_assertions, feature = "tracing"))] const MOVE_VM_STEPPING_ENV_VAR_NAME: &str = "MOVE_VM_STEP"; -#[cfg(any(debug_assertions, feature = "debugging"))] +#[cfg(any(debug_assertions, feature = "tracing"))] static FILE_PATH: Lazy = Lazy::new(|| { env::var(MOVE_VM_TRACING_ENV_VAR_NAME).unwrap_or_else(|_| "move_vm_trace.trace".to_string()) }); -#[cfg(any(debug_assertions, feature = "debugging"))] +#[cfg(any(debug_assertions, feature = "tracing"))] static TRACING_ENABLED: Lazy = Lazy::new(|| env::var(MOVE_VM_TRACING_ENV_VAR_NAME).is_ok()); -#[cfg(any(debug_assertions, feature = "debugging"))] +#[cfg(any(debug_assertions, feature = "tracing"))] static DEBUGGING_ENABLED: Lazy = Lazy::new(|| env::var(MOVE_VM_STEPPING_ENV_VAR_NAME).is_ok()); -#[cfg(any(debug_assertions, feature = "debugging"))] +#[cfg(any(debug_assertions, feature = "tracing"))] static LOGGING_FILE: Lazy> = Lazy::new(|| { Mutex::new( OpenOptions::new() @@ -55,11 +55,11 @@ static LOGGING_FILE: Lazy> = Lazy::new(|| { ) }); -#[cfg(any(debug_assertions, feature = "debugging"))] +#[cfg(any(debug_assertions, feature = "tracing"))] static DEBUG_CONTEXT: Lazy> = Lazy::new(|| Mutex::new(DebugContext::new())); // Only include in debug builds -#[cfg(any(debug_assertions, feature = "debugging"))] +#[cfg(any(debug_assertions, feature = "tracing"))] pub(crate) fn trace( function_desc: &Function, locals: &Locals, @@ -93,7 +93,7 @@ pub(crate) fn trace( macro_rules! 
trace { ($function_desc:expr, $locals:expr, $pc:expr, $instr:tt, $resolver:expr, $interp:expr) => { // Only include this code in debug releases - #[cfg(any(debug_assertions, feature = "debugging"))] + #[cfg(any(debug_assertions, feature = "tracing"))] $crate::tracing::trace( &$function_desc, $locals, diff --git a/external-crates/move/crates/move-vm-runtime/src/tracing2/mod.rs b/external-crates/move/crates/move-vm-runtime/src/tracing2/mod.rs index 5a29145c8ba4f..fe06f916abf2e 100644 --- a/external-crates/move/crates/move-vm-runtime/src/tracing2/mod.rs +++ b/external-crates/move/crates/move-vm-runtime/src/tracing2/mod.rs @@ -1,9 +1,9 @@ pub(crate) mod tracer; -#[cfg(feature = "gas-profiler")] +#[cfg(feature = "tracing")] pub(crate) const TRACING_ENABLED: bool = true; -#[cfg(not(feature = "gas-profiler"))] +#[cfg(not(feature = "tracing"))] pub(crate) const TRACING_ENABLED: bool = false; #[macro_export] diff --git a/external-crates/move/crates/move-vm-test-utils/Cargo.toml b/external-crates/move/crates/move-vm-test-utils/Cargo.toml index a354e6dcc26ad..6b90a1df11b39 100644 --- a/external-crates/move/crates/move-vm-test-utils/Cargo.toml +++ b/external-crates/move/crates/move-vm-test-utils/Cargo.toml @@ -24,4 +24,4 @@ move-vm-profiler.workspace = true [features] default = [ ] tiered-gas = [] -gas-profiler = [] +tracing = [] diff --git a/external-crates/move/move-execution/v0/crates/move-vm-runtime/Cargo.toml b/external-crates/move/move-execution/v0/crates/move-vm-runtime/Cargo.toml index 4ef1818e16839..e47f97d101eef 100644 --- a/external-crates/move/move-execution/v0/crates/move-vm-runtime/Cargo.toml +++ b/external-crates/move/move-execution/v0/crates/move-vm-runtime/Cargo.toml @@ -41,7 +41,7 @@ failpoints = ["fail/failpoints"] debugging = [] testing = [] lazy_natives = [] -gas-profiler = [ - "move-vm-config/gas-profiler", - "move-vm-profiler/gas-profiler", +tracing = [ + "move-vm-config/tracing", + "move-vm-profiler/tracing", ] diff --git a/external-crates/move/move-execution/v0/crates/move-vm-runtime/src/interpreter.rs b/external-crates/move/move-execution/v0/crates/move-vm-runtime/src/interpreter.rs index 077ee45c803cd..86bc799896c51 100644 --- a/external-crates/move/move-execution/v0/crates/move-vm-runtime/src/interpreter.rs +++ b/external-crates/move/move-execution/v0/crates/move-vm-runtime/src/interpreter.rs @@ -20,8 +20,6 @@ use move_core_types::{ vm_status::{StatusCode, StatusType}, }; use move_vm_config::runtime::VMRuntimeLimitsConfig; -#[cfg(feature = "gas-profiler")] -use move_vm_profiler::GasProfiler; use move_vm_profiler::{ profile_close_frame, profile_close_instr, profile_open_frame, profile_open_instr, }; @@ -213,7 +211,7 @@ impl Interpreter { } ExitCode::Call(fh_idx) => { let func = resolver.function_from_handle(fh_idx); - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] let func_name = func.pretty_string(); profile_open_frame!(gas_meter, func_name.clone()); @@ -255,7 +253,7 @@ impl Interpreter { .instantiate_generic_function(idx, current_frame.ty_args()) .map_err(|e| set_err_info!(current_frame, e))?; let func = resolver.function_from_instantiation(idx); - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] let func_name = func.pretty_string(); profile_open_frame!(gas_meter, func_name.clone()); diff --git a/external-crates/move/move-execution/v1/crates/move-vm-runtime/Cargo.toml b/external-crates/move/move-execution/v1/crates/move-vm-runtime/Cargo.toml index 5fa8584679fb3..683e7c3229244 100644 --- 
a/external-crates/move/move-execution/v1/crates/move-vm-runtime/Cargo.toml +++ b/external-crates/move/move-execution/v1/crates/move-vm-runtime/Cargo.toml @@ -41,7 +41,7 @@ failpoints = ["fail/failpoints"] debugging = [] testing = [] lazy_natives = [] -gas-profiler = [ - "move-vm-config/gas-profiler", - "move-vm-profiler/gas-profiler", +tracing = [ + "move-vm-config/tracing", + "move-vm-profiler/tracing", ] diff --git a/external-crates/move/move-execution/v1/crates/move-vm-runtime/src/interpreter.rs b/external-crates/move/move-execution/v1/crates/move-vm-runtime/src/interpreter.rs index 0317a9216e4ff..6a6a4a263c347 100644 --- a/external-crates/move/move-execution/v1/crates/move-vm-runtime/src/interpreter.rs +++ b/external-crates/move/move-execution/v1/crates/move-vm-runtime/src/interpreter.rs @@ -20,8 +20,6 @@ use move_core_types::{ vm_status::{StatusCode, StatusType}, }; use move_vm_config::runtime::VMRuntimeLimitsConfig; -#[cfg(feature = "gas-profiler")] -use move_vm_profiler::GasProfiler; use move_vm_profiler::{ profile_close_frame, profile_close_instr, profile_open_frame, profile_open_instr, }; @@ -273,7 +271,7 @@ impl Interpreter { } ExitCode::Call(fh_idx) => { let func = resolver.function_from_handle(fh_idx); - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] let func_name = func.pretty_string(); profile_open_frame!(gas_meter, func_name.clone()); @@ -314,7 +312,7 @@ impl Interpreter { .instantiate_generic_function(idx, current_frame.ty_args()) .map_err(|e| set_err_info!(current_frame, e))?; let func = resolver.function_from_instantiation(idx); - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] let func_name = func.pretty_string(); profile_open_frame!(gas_meter, func_name.clone()); diff --git a/external-crates/move/move-execution/v1/crates/move-vm-runtime/src/runtime.rs b/external-crates/move/move-execution/v1/crates/move-vm-runtime/src/runtime.rs index 172628e30d6ae..2d4ff01b7308b 100644 --- a/external-crates/move/move-execution/v1/crates/move-vm-runtime/src/runtime.rs +++ b/external-crates/move/move-execution/v1/crates/move-vm-runtime/src/runtime.rs @@ -493,7 +493,7 @@ impl VMRuntime { gas_meter: &mut impl GasMeter, extensions: &mut NativeContextExtensions, ) -> VMResult { - move_vm_profiler::gas_profiler_feature_enabled! { + move_vm_profiler::tracing_feature_enabled! { use move_vm_profiler::GasProfiler; if gas_meter.get_profiler_mut().is_none() { gas_meter.set_profiler(GasProfiler::init_default_cfg( diff --git a/external-crates/move/move-execution/v1/crates/move-vm-runtime/src/session.rs b/external-crates/move/move-execution/v1/crates/move-vm-runtime/src/session.rs index c2a8dbf905dd5..37b2a49e41d0e 100644 --- a/external-crates/move/move-execution/v1/crates/move-vm-runtime/src/session.rs +++ b/external-crates/move/move-execution/v1/crates/move-vm-runtime/src/session.rs @@ -102,7 +102,7 @@ impl<'r, 'l, S: MoveResolver> Session<'r, 'l, S> { args: Vec>, gas_meter: &mut impl GasMeter, ) -> VMResult { - move_vm_profiler::gas_profiler_feature_enabled! { + move_vm_profiler::tracing_feature_enabled! 
{ use move_vm_profiler::GasProfiler; if gas_meter.get_profiler_mut().is_none() { gas_meter.set_profiler(GasProfiler::init_default_cfg( diff --git a/external-crates/move/move-execution/v2/crates/move-vm-runtime/Cargo.toml b/external-crates/move/move-execution/v2/crates/move-vm-runtime/Cargo.toml index b7c15e1bba673..cf4a79153a2da 100644 --- a/external-crates/move/move-execution/v2/crates/move-vm-runtime/Cargo.toml +++ b/external-crates/move/move-execution/v2/crates/move-vm-runtime/Cargo.toml @@ -41,7 +41,7 @@ failpoints = ["fail/failpoints"] debugging = [] testing = [] lazy_natives = [] -gas-profiler = [ - "move-vm-config/gas-profiler", - "move-vm-profiler/gas-profiler", +tracing = [ + "move-vm-config/tracing", + "move-vm-profiler/tracing", ] diff --git a/external-crates/move/move-execution/v2/crates/move-vm-runtime/src/interpreter.rs b/external-crates/move/move-execution/v2/crates/move-vm-runtime/src/interpreter.rs index 29cc3dc4e9bad..e116037ba8b64 100644 --- a/external-crates/move/move-execution/v2/crates/move-vm-runtime/src/interpreter.rs +++ b/external-crates/move/move-execution/v2/crates/move-vm-runtime/src/interpreter.rs @@ -21,8 +21,6 @@ use move_core_types::{ vm_status::{StatusCode, StatusType}, }; use move_vm_config::runtime::VMRuntimeLimitsConfig; -#[cfg(feature = "gas-profiler")] -use move_vm_profiler::GasProfiler; use move_vm_profiler::{ profile_close_frame, profile_close_instr, profile_open_frame, profile_open_instr, }; @@ -275,7 +273,7 @@ impl Interpreter { } ExitCode::Call(fh_idx) => { let func = resolver.function_from_handle(fh_idx); - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] let func_name = func.pretty_string(); profile_open_frame!(gas_meter, func_name.clone()); @@ -318,7 +316,7 @@ impl Interpreter { .instantiate_generic_function(idx, current_frame.ty_args()) .map_err(|e| set_err_info!(current_frame, e))?; let func = resolver.function_from_instantiation(idx); - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] let func_name = func.pretty_string(); profile_open_frame!(gas_meter, func_name.clone()); diff --git a/external-crates/move/move-execution/v2/crates/move-vm-runtime/src/runtime.rs b/external-crates/move/move-execution/v2/crates/move-vm-runtime/src/runtime.rs index db7df944ec7ce..c21d9cff9dcc1 100644 --- a/external-crates/move/move-execution/v2/crates/move-vm-runtime/src/runtime.rs +++ b/external-crates/move/move-execution/v2/crates/move-vm-runtime/src/runtime.rs @@ -483,7 +483,7 @@ impl VMRuntime { gas_meter: &mut impl GasMeter, extensions: &mut NativeContextExtensions, ) -> VMResult { - move_vm_profiler::gas_profiler_feature_enabled! { + move_vm_profiler::tracing_feature_enabled! { use move_vm_profiler::GasProfiler; if gas_meter.get_profiler_mut().is_none() { gas_meter.set_profiler(GasProfiler::init_default_cfg( diff --git a/external-crates/move/move-execution/v2/crates/move-vm-runtime/src/session.rs b/external-crates/move/move-execution/v2/crates/move-vm-runtime/src/session.rs index c2a8dbf905dd5..37b2a49e41d0e 100644 --- a/external-crates/move/move-execution/v2/crates/move-vm-runtime/src/session.rs +++ b/external-crates/move/move-execution/v2/crates/move-vm-runtime/src/session.rs @@ -102,7 +102,7 @@ impl<'r, 'l, S: MoveResolver> Session<'r, 'l, S> { args: Vec>, gas_meter: &mut impl GasMeter, ) -> VMResult { - move_vm_profiler::gas_profiler_feature_enabled! { + move_vm_profiler::tracing_feature_enabled! 
{ use move_vm_profiler::GasProfiler; if gas_meter.get_profiler_mut().is_none() { gas_meter.set_profiler(GasProfiler::init_default_cfg( diff --git a/external-crates/tests.sh b/external-crates/tests.sh index 7fa5a0531ff9a..5586c35becfea 100755 --- a/external-crates/tests.sh +++ b/external-crates/tests.sh @@ -4,4 +4,4 @@ cd move echo "Excluding prover Move tests" cargo nextest run -E '!package(move-prover) and !test(prove) and !test(run_all::simple_build_with_docs/args.txt) and !test(run_test::nested_deps_bad_parent/Move.toml)' --workspace --no-fail-fast echo "Running tracing-specific tests" -cargo nextest run -p move-cli --features gas-profiler +cargo nextest run -p move-cli --features tracing diff --git a/narwhal/executor/tests/consensus_integration_tests.rs b/narwhal/executor/tests/consensus_integration_tests.rs deleted file mode 100644 index 9bebf74c0e3c7..0000000000000 --- a/narwhal/executor/tests/consensus_integration_tests.rs +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright (c) Mysten Labs, Inc. -// SPDX-License-Identifier: Apache-2.0 -use bytes::Bytes; -use fastcrypto::hash::Hash; -use narwhal_executor::get_restored_consensus_output; -use narwhal_executor::MockExecutionState; -use primary::consensus::{ - Bullshark, Consensus, ConsensusMetrics, ConsensusRound, LeaderSchedule, LeaderSwapTable, -}; -use primary::NUM_SHUTDOWN_RECEIVERS; -use prometheus::Registry; -use std::collections::BTreeSet; -use std::sync::Arc; -use storage::NodeStorage; -use telemetry_subscribers::TelemetryGuards; -use test_utils::latest_protocol_version; -use test_utils::{cluster::Cluster, temp_dir, CommitteeFixture}; -use tokio::sync::watch; - -use types::{Certificate, PreSubscribedBroadcastSender, Round, TransactionProto}; - -#[tokio::test] -async fn test_recovery() { - // Create storage - let storage = NodeStorage::reopen(temp_dir(), None); - - let consensus_store = storage.consensus_store; - let certificate_store = storage.certificate_store; - - // Setup consensus - let fixture = CommitteeFixture::builder().build(); - let committee = fixture.committee(); - - // Make certificates for rounds 1 and 2. - let ids: Vec<_> = fixture.authorities().map(|a| a.id()).collect(); - let genesis = Certificate::genesis(&latest_protocol_version(), &committee) - .iter() - .map(|x| x.digest()) - .collect::>(); - let (mut certificates, next_parents) = test_utils::make_optimal_certificates( - &committee, - &latest_protocol_version(), - 1..=2, - &genesis, - &ids, - ); - - // Make two certificate (f+1) with round 3 to trigger the commits. - let (_, certificate) = test_utils::mock_certificate( - &committee, - &latest_protocol_version(), - ids[0], - 3, - next_parents.clone(), - ); - certificates.push_back(certificate); - let (_, certificate) = test_utils::mock_certificate( - &committee, - &latest_protocol_version(), - ids[1], - 3, - next_parents, - ); - certificates.push_back(certificate); - - // Spawn the consensus engine and sink the primary channel. 
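Aside on the rename threaded through the move-vm hunks above: the `gas-profiler` feature becomes `tracing` everywhere, but the gating mechanics are unchanged. Code that must disappear entirely stays behind `#[cfg(feature = "tracing")]`, while the `tracing_feature_enabled!`-style macros wrap their body in a `cfg!(feature = "tracing")` check so call sites need no attributes of their own. A minimal, self-contained sketch of that pattern follows; the macro and feature names here are illustrative, not the actual move-vm-profiler definitions.

```rust
// Sketch of the cfg-gated macro pattern, assuming a Cargo feature literally
// named "tracing" is declared on the defining crate.
macro_rules! if_tracing {
    ($($tt:tt)*) => {
        if cfg!(feature = "tracing") {
            $($tt)*
        }
    };
}

fn main() {
    // Built with `--features tracing` the branch runs; without it the
    // condition is a constant `false` and the block is optimized away.
    // Unlike an `#[cfg(...)]` attribute, the body is still type-checked.
    if_tracing! {
        println!("tracing feature is enabled");
    }
}
```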
- let (tx_waiter, rx_waiter) = test_utils::test_channel!(1); - let (tx_primary, mut rx_primary) = test_utils::test_channel!(1); - let (tx_output, mut rx_output) = test_utils::test_channel!(1); - let (tx_consensus_round_updates, _rx_consensus_round_updates) = - watch::channel(ConsensusRound::default()); - - let mut tx_shutdown = PreSubscribedBroadcastSender::new(NUM_SHUTDOWN_RECEIVERS); - - const GC_DEPTH: Round = 50; - const NUM_SUB_DAGS_PER_SCHEDULE: u64 = 100; - let metrics = Arc::new(ConsensusMetrics::new(&Registry::new())); - let bullshark = Bullshark::new( - committee.clone(), - consensus_store.clone(), - latest_protocol_version(), - metrics.clone(), - NUM_SUB_DAGS_PER_SCHEDULE, - LeaderSchedule::new(committee.clone(), LeaderSwapTable::default()), - ); - - let _consensus_handle = Consensus::spawn( - committee, - GC_DEPTH, - consensus_store.clone(), - certificate_store.clone(), - tx_shutdown.subscribe(), - rx_waiter, - tx_primary, - tx_consensus_round_updates, - tx_output, - bullshark, - metrics, - ); - tokio::spawn(async move { while rx_primary.recv().await.is_some() {} }); - - // Feed all certificates to the consensus. Only the last certificate should trigger - // commits, so the task should not block. - while let Some(certificate) = certificates.pop_front() { - // we store the certificates so we can enable the recovery - // mechanism later. - certificate_store.write(certificate.clone()).unwrap(); - tx_waiter.send(certificate).await.unwrap(); - } - - // Ensure the first 4 ordered certificates are from round 1 (they are the parents of the committed - // leader); then the leader's certificate should be committed. - let consensus_index_counter = 4; - let num_of_committed_certificates = 5; - - let committed_sub_dag = rx_output.recv().await.unwrap(); - let mut sequence = committed_sub_dag.certificates.into_iter(); - for i in 1..=num_of_committed_certificates { - let output = sequence.next().unwrap(); - - if i < 5 { - assert_eq!(output.round(), 1); - } else { - assert_eq!(output.round(), 2); - } - } - - // Now assume that we want to recover from a crash. We are testing all the recovery cases - // from having executed no certificates at all (or certificate with index = 0), up to - // have executed the last committed certificate - for last_executed_certificate_index in 0..consensus_index_counter { - let mut execution_state = MockExecutionState::new(); - execution_state - .expect_last_executed_sub_dag_index() - .times(1) - .returning(|| 1); - - let consensus_output = get_restored_consensus_output( - consensus_store.clone(), - certificate_store.clone(), - &execution_state, - ) - .await - .unwrap(); - - // we expect to have recovered all the certificates from the last commit. The Sui executor engine - // will not execute twice the same certificate. - assert_eq!(consensus_output.len(), 1); - assert!( - consensus_output[0].len() - >= (num_of_committed_certificates - last_executed_certificate_index) as usize - ); - } -} - -#[tokio::test] -async fn test_internal_consensus_output() { - // Enabled debug tracing so we can easily observe the - // nodes logs. 
- let _guard = setup_tracing(); - - let mut cluster = Cluster::new(None); - - // start the cluster - cluster.start(Some(4), Some(1), None).await; - - // get a client to send transactions - let worker_id = 0; - - let authority = cluster.authority(0); - let mut client = authority.new_transactions_client(&worker_id).await; - - // Subscribe to the transaction confirmation channel - let mut receiver = authority - .primary() - .await - .tx_transaction_confirmation - .subscribe(); - - // Create arbitrary transactions - let mut transactions = Vec::new(); - - const NUM_OF_TRANSACTIONS: u32 = 10; - for i in 0..NUM_OF_TRANSACTIONS { - let tx = string_transaction(i); - - // serialise and send - let tr = bcs::to_bytes(&tx).unwrap(); - let txn = TransactionProto { - transactions: vec![Bytes::from(tr)], - }; - client.submit_transaction(txn).await.unwrap(); - - transactions.push(tx); - } - - // wait for transactions to complete - loop { - let result = receiver.recv().await.unwrap(); - - // deserialise transaction - let output_transaction = bcs::from_bytes::(&result).unwrap(); - - // we always remove the first transaction and check with the one - // sequenced. We want the transactions to be sequenced in the - // same order as we post them. - let expected_transaction = transactions.remove(0); - - assert_eq!( - expected_transaction, output_transaction, - "Expected to have received transaction with same id. Ordering is important" - ); - - if transactions.is_empty() { - break; - } - } -} - -fn string_transaction(id: u32) -> String { - format!("test transaction:{id}") -} - -fn setup_tracing() -> TelemetryGuards { - // Setup tracing - let tracing_level = "debug"; - let network_tracing_level = "info"; - - let log_filter = format!("{tracing_level},h2={network_tracing_level},tower={network_tracing_level},hyper={network_tracing_level},tonic::transport={network_tracing_level}"); - - telemetry_subscribers::TelemetryConfig::new() - // load env variables - .with_env() - // load special log filter - .with_log_level(&log_filter) - .init() - .0 -} diff --git a/narwhal/primary/tests/causal_completion_tests.rs b/narwhal/primary/tests/causal_completion_tests.rs deleted file mode 100644 index d564f9ea39f4d..0000000000000 --- a/narwhal/primary/tests/causal_completion_tests.rs +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright (c) Mysten Labs, Inc. -// SPDX-License-Identifier: Apache-2.0 -use bytes::Bytes; -use std::time::Duration; -use test_utils::cluster::{setup_tracing, Cluster}; -use tracing::info; -use types::TransactionProto; - -type StringTransaction = String; - -#[ignore] -#[tokio::test] -async fn test_restore_from_disk() { - // Enabled debug tracing so we can easily observe the - // nodes logs. 
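The deleted `test_internal_consensus_output` above round-trips every transaction through BCS: it serialises a `String` with `bcs::to_bytes` before submission and deserialises each confirmation with `bcs::from_bytes`, asserting FIFO order against the front of the expected queue. A minimal sketch of that round-trip in isolation (requires the `bcs` crate; a plain `Vec<Vec<u8>>` stands in for the Narwhal submit/confirm channel, this is not the cluster API):

```rust
// BCS round-trip plus FIFO ordering check, mirroring the deleted test's loop.
fn main() {
    let sent: Vec<String> = (0..3).map(|i| format!("test transaction:{i}")).collect();

    // "Submit": serialise each transaction with BCS.
    let wire: Vec<Vec<u8>> = sent
        .iter()
        .map(|tx| bcs::to_bytes(tx).expect("String is BCS-serialisable"))
        .collect();

    // "Confirm": deserialise in arrival order and compare against the front
    // of the expected queue, as the deleted assertion did.
    let mut expected = sent.clone();
    for bytes in &wire {
        let confirmed: String = bcs::from_bytes(bytes).expect("valid BCS");
        assert_eq!(expected.remove(0), confirmed, "ordering must be preserved");
    }
}
```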
- let _guard = setup_tracing(); - - let mut cluster = Cluster::new(None); - - // start the cluster - cluster.start(Some(4), Some(1), None).await; - - let id = 0; - let client = cluster.authority(0).new_transactions_client(&id).await; - - // Subscribe to the transaction confirmation channel - let mut receiver = cluster - .authority(0) - .primary() - .await - .tx_transaction_confirmation - .subscribe(); - - // Create arbitrary transactions - let mut total_tx = 3; - for tx in [ - string_transaction(), - string_transaction(), - string_transaction(), - ] { - let mut c = client.clone(); - tokio::spawn(async move { - let tr = bcs::to_bytes(&tx).unwrap(); - let txn = TransactionProto { - transactions: vec![Bytes::from(tr)], - }; - - c.submit_transaction(txn).await.unwrap(); - }); - } - - // wait for transactions to complete - loop { - if let Ok(_result) = receiver.recv().await { - total_tx -= 1; - if total_tx < 1 { - break; - } - } - } - - // Now stop node 0 - cluster.stop_node(0).await; - - // Let other primaries advance and primary 0 releases its port. - tokio::time::sleep(Duration::from_secs(10)).await; - - // Now start the node 0 again - cluster.start_node(0, true, Some(1)).await; - - // Let the node recover - tokio::time::sleep(Duration::from_secs(2)).await; - - let node = cluster.authority(0); - - // Check the metrics to ensure the node was recovered from disk - let primary = node.primary().await; - - let node_recovered_state = - if let Some(metric) = primary.metric("recovered_consensus_state").await { - let value = metric.get_counter().get_value(); - info!("Found metric for recovered consensus state."); - - value > 0.0 - } else { - false - }; - - assert!(node_recovered_state, "Node did not recover state from disk"); -} - -fn string_transaction() -> StringTransaction { - StringTransaction::from("test transaction") -} - -#[ignore] -#[tokio::test] -async fn test_read_causal_signed_certificates() { - const CURRENT_ROUND_METRIC: &str = "current_round"; - - // Enabled debug tracing so we can easily observe the - // nodes logs. - let _guard = setup_tracing(); - - let mut cluster = Cluster::new(None); - - // start the cluster - cluster.start(Some(4), Some(1), None).await; - - // Let primaries advance little bit - tokio::time::sleep(Duration::from_secs(10)).await; - - // Ensure all nodes advanced - for authority in cluster.authorities().await { - if let Some(metric) = authority.primary().await.metric(CURRENT_ROUND_METRIC).await { - let value = metric.get_gauge().get_value(); - - info!("Metric -> {:?}", value); - - // If the current round is increasing then it means that the - // node starts catching up and is proposing. - assert!(value > 1.0, "Node didn't progress further than the round 1"); - } - } - - // Now stop node 0 - cluster.stop_node(0).await; - - // Let other primaries advance and primary 0 releases its port. - tokio::time::sleep(Duration::from_secs(10)).await; - - // Now start the validator 0 again - cluster.start_node(0, true, Some(1)).await; - - // Now check that the current round advances. Give the opportunity with a few - // iterations. If metric hasn't picked up then we know that node can't make - // progress. 
- let mut node_made_progress = false; - let node = cluster.authority(0).primary().await; - - for _ in 0..10 { - tokio::time::sleep(Duration::from_secs(1)).await; - - if let Some(metric) = node.metric(CURRENT_ROUND_METRIC).await { - let value = metric.get_gauge().get_value(); - info!("Metric -> {:?}", value); - - // If the current round is increasing then it means that the - // node starts catching up and is proposing. - if value > 1.0 { - node_made_progress = true; - break; - } - } - } - - assert!( - node_made_progress, - "Node 0 didn't make progress - causal completion didn't succeed" - ); -} diff --git a/narwhal/primary/tests/nodes_bootstrapping_tests.rs b/narwhal/primary/tests/nodes_bootstrapping_tests.rs deleted file mode 100644 index 373676bfe7cb1..0000000000000 --- a/narwhal/primary/tests/nodes_bootstrapping_tests.rs +++ /dev/null @@ -1,299 +0,0 @@ -// Copyright (c) Mysten Labs, Inc. -// SPDX-License-Identifier: Apache-2.0 -use bytes::Bytes; -use std::time::Duration; -use test_utils::cluster::{setup_tracing, Cluster}; -use types::TransactionProto; - -#[tokio::test(flavor = "current_thread", start_paused = true)] -async fn test_response_error_after_shutdown_internal_consensus() { - // Enabled debug tracing so we can easily observe the - // nodes logs. - let _guard = setup_tracing(); - - let delay = Duration::from_secs(10); // 10 seconds - - // A cluster of 4 nodes will be created, with internal consensus. - let cluster = Cluster::new(None); - - // ==== Start first authority ==== - let authority = cluster.authority(0); - authority.start(false, Some(1)).await; - - tokio::time::sleep(delay).await; - - authority.stop_all().await; - - tokio::time::sleep(delay).await; - - let worker_id = 0; - let mut client = authority.new_transactions_client(&worker_id).await; - - // Create a fake transaction - let tx_str = "test transaction".to_string(); - let tx = bcs::to_bytes(&tx_str).unwrap(); - let txn = TransactionProto { - transactions: vec![Bytes::from(tx)], - }; - - // Should fail submitting to consensus. - let Err(e) = client.submit_transaction(txn).await else { - panic!("Submitting transactions after Narwhal shutdown should fail!"); - }; - assert!(e.message().contains("tcp connect error:"), "Actual: {}", e); -} - -/// Nodes will be started in a staggered fashion. This is simulating -/// a real world scenario where nodes across validators will not start -/// in the same time. -#[ignore] -#[tokio::test] -async fn test_node_staggered_starts() { - // Enabled debug tracing so we can easily observe the - // nodes logs. - let _guard = setup_tracing(); - - let node_staggered_delay = Duration::from_secs(60 * 2); // 2 minutes - - // A cluster of 4 nodes will be created - let cluster = Cluster::new(None); - - // ==== Start first authority ==== - cluster.authority(0).start(false, Some(1)).await; - - tokio::time::sleep(node_staggered_delay).await; - - // No node should be able to commit, no reported round was expected - cluster.assert_progress(0, 0).await; - - // ==== Start second authority ==== - cluster.authority(1).start(false, Some(1)).await; - - tokio::time::sleep(node_staggered_delay).await; - - // No node should be able to commit, no reported round was expected - cluster.assert_progress(0, 0).await; - - // ==== Start third authority ==== - // Now 2f + 1 nodes are becoming available and we expect all the nodes to - // start making progress (advance in rounds). 
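The deleted causal-completion tests above detect liveness by reading the `current_round` Prometheus gauge and treating any value above 1.0 as progress, retrying for a bounded number of iterations with a sleep between reads. A small synchronous sketch of that bounded-poll pattern (standard library only; the closure stands in for the cluster's metric lookup, and all names are illustrative):

```rust
use std::{thread::sleep, time::Duration};

/// Polls `read_metric` up to `attempts` times, sleeping between reads, and
/// returns true as soon as the value exceeds `threshold`.
fn wait_for_progress(
    mut read_metric: impl FnMut() -> Option<f64>,
    threshold: f64,
    attempts: usize,
    pause: Duration,
) -> bool {
    for _ in 0..attempts {
        if let Some(value) = read_metric() {
            if value > threshold {
                return true;
            }
        }
        sleep(pause);
    }
    false
}

fn main() {
    // Stand-in metric source: pretend the round advances on the third read.
    let mut reads = 0u32;
    let made_progress = wait_for_progress(
        || {
            reads += 1;
            Some(if reads >= 3 { 2.0 } else { 1.0 })
        },
        1.0,
        10,
        Duration::from_millis(1),
    );
    assert!(made_progress, "node didn't progress further than round 1");
}
```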
- cluster.authority(2).start(false, Some(1)).await; - - tokio::time::sleep(node_staggered_delay).await; - - // We have only (f) unavailable nodes, so all should have made progress and committed at least after the first round - cluster.assert_progress(3, 2).await; - - // ==== Start fourth authority ==== - // Now 3f + 1 nodes are becoming available (the whole network) and all the nodes - // should make progress - cluster.authority(3).start(false, Some(1)).await; - - tokio::time::sleep(node_staggered_delay).await; - - // All nodes are available so all should have made progress and committed at least after the first round - cluster.assert_progress(4, 2).await; -} - -/// All the nodes have an outage at the same time, when they recover, the rounds begin to advance. -#[ignore] -#[tokio::test] -async fn test_full_outage_and_recovery() { - let _guard = setup_tracing(); - - let stop_and_start_delay = Duration::from_secs(12); - let node_advance_delay = Duration::from_secs(60); - - // A cluster of 4 nodes will be created - let mut cluster = Cluster::new(None); - - // ===== Start the cluster ==== - cluster.start(Some(4), Some(1), None).await; - - // Let the nodes advance a bit - tokio::time::sleep(node_advance_delay).await; - - // Stop all the nodes - cluster.authority(0).stop_all().await; - tokio::time::sleep(stop_and_start_delay).await; - - cluster.authority(1).stop_all().await; - tokio::time::sleep(stop_and_start_delay).await; - - cluster.authority(2).stop_all().await; - tokio::time::sleep(stop_and_start_delay).await; - - cluster.authority(3).stop_all().await; - tokio::time::sleep(stop_and_start_delay).await; - - // Start all the nodes - cluster.authority(0).start(true, Some(1)).await; - tokio::time::sleep(stop_and_start_delay).await; - - cluster.authority(1).start(true, Some(1)).await; - tokio::time::sleep(stop_and_start_delay).await; - - cluster.authority(2).start(true, Some(1)).await; - tokio::time::sleep(stop_and_start_delay).await; - - cluster.authority(3).start(true, Some(1)).await; - - // now wait a bit to give the opportunity to recover - tokio::time::sleep(node_advance_delay).await; - - // Ensure that nodes have made progress - cluster.assert_progress(4, 2).await; -} - -#[ignore] -#[tokio::test] -async fn test_second_node_restart() { - // Enabled debug tracing so we can easily observe the - // nodes logs. - let _guard = setup_tracing(); - - let restart_delay = Duration::from_secs(120); - let node_advance_delay = Duration::from_secs(60); - - // A cluster of 4 nodes will be created - let mut cluster = Cluster::new(None); - - // ===== Start the cluster ==== - cluster.start(Some(4), Some(1), None).await; - - // Let the nodes advance a bit - tokio::time::sleep(node_advance_delay).await; - - // Now restart node 2 with some delay between - cluster.authority(2).restart(true, restart_delay).await; - - // now wait a bit to give the opportunity to recover - tokio::time::sleep(node_advance_delay).await; - - // Ensure that nodes have made progress - cluster.assert_progress(4, 2).await; - - // Now restart node 3 with some delay between - cluster.authority(3).restart(true, restart_delay).await; - - // now wait a bit to give the opportunity to recover - tokio::time::sleep(node_advance_delay).await; - - // Ensure that nodes have made progress - cluster.assert_progress(4, 2).await; -} - -#[ignore] -#[tokio::test] -/// We are testing the loss of liveness of a healthy cluster. While 3f+1 nodes run -/// we are shutting down f+1 nodes. 
Then we are bringing the f+1 nodes back again -/// We expect the restarted nodes to be able to make new proposals, and all the nodes -/// should be able to propose from where they left of at last round, and the rounds should -/// all advance. -async fn test_loss_of_liveness_without_recovery() { - // Enabled debug tracing so we can easily observe the - // nodes logs. - let _guard = setup_tracing(); - - let node_advance_delay = Duration::from_secs(60); - - // A cluster of 4 nodes will be created - let mut cluster = Cluster::new(None); - - // ===== Start the cluster ==== - cluster.start(Some(4), Some(1), None).await; - - // Let the nodes advance a bit - tokio::time::sleep(node_advance_delay).await; - - // Ensure that nodes have made progress - cluster.assert_progress(4, 2).await; - - // Now stop node 2 & 3 - cluster.authority(2).stop_all().await; - cluster.authority(3).stop_all().await; - - // wait and fetch the latest commit round - tokio::time::sleep(node_advance_delay).await; - let rounds_1 = cluster.assert_progress(2, 0).await; - - // wait and fetch again the rounds - tokio::time::sleep(node_advance_delay).await; - let rounds_2 = cluster.assert_progress(2, 0).await; - - // We assert that nodes haven't advanced at all - assert_eq!(rounds_1, rounds_2); - - // Now bring up nodes - cluster.authority(2).start(true, Some(1)).await; - cluster.authority(3).start(true, Some(1)).await; - - // wait and fetch the latest commit round. All of them should have advanced and we allow a small - // threshold in case some node is faster than the others - tokio::time::sleep(node_advance_delay).await; - let rounds_3 = cluster.assert_progress(4, 2).await; - - // we test that nodes 0 & 1 have actually advanced in rounds compared to before. - assert!(rounds_3.get(&0) > rounds_2.get(&0)); - assert!(rounds_3.get(&1) > rounds_2.get(&1)); -} - -#[ignore] -#[tokio::test] -/// We are testing the loss of liveness of a healthy cluster. While 3f+1 nodes run -/// we are shutting down f+1 nodes one by one with some delay between them. -/// Then we are bringing the f+1 nodes back again. We expect the cluster to -/// recover and effectively make progress. -async fn test_loss_of_liveness_with_recovery() { - // Enabled debug tracing so we can easily observe the - // nodes logs. 
- let _guard = setup_tracing(); - - let node_advance_delay = Duration::from_secs(60); - - // A cluster of 4 nodes will be created - let mut cluster = Cluster::new(None); - - // ===== Start the cluster ==== - cluster.start(Some(4), Some(1), None).await; - - // Let the nodes advance a bit - tokio::time::sleep(node_advance_delay).await; - - // Ensure that nodes have made progress - cluster.assert_progress(4, 2).await; - - // Now stop node 2 - cluster.authority(2).stop_all().await; - - // allow other nodes to advance - tokio::time::sleep(node_advance_delay).await; - - // Now stop node 3 - cluster.authority(3).stop_all().await; - - // wait and fetch the latest commit round - tokio::time::sleep(node_advance_delay).await; - let rounds_1 = cluster.assert_progress(2, 0).await; - - // wait and fetch again the rounds - tokio::time::sleep(node_advance_delay).await; - let rounds_2 = cluster.assert_progress(2, 0).await; - - // We assert that nodes haven't advanced at all - assert_eq!(rounds_1, rounds_2); - - // Now bring up nodes - cluster.authority(2).start(true, Some(1)).await; - cluster.authority(3).start(true, Some(1)).await; - - // wait and fetch the latest commit round - tokio::time::sleep(node_advance_delay).await; - let rounds_3 = cluster.assert_progress(4, 2).await; - - let round_2_max = rounds_2.values().max().unwrap(); - assert!( - rounds_3.values().all(|v| v > round_2_max), - "All the nodes should have advanced more from the previous round" - ); -} diff --git a/narwhal/test-utils/src/cluster.rs b/narwhal/test-utils/src/cluster.rs index cd0e426f2a90d..45202094800e4 100644 --- a/narwhal/test-utils/src/cluster.rs +++ b/narwhal/test-utils/src/cluster.rs @@ -754,6 +754,7 @@ impl AuthorityDetails { .get(worker_id) .unwrap() .transactions_address, + None, ) .unwrap(); diff --git a/narwhal/worker/src/lib.rs b/narwhal/worker/src/lib.rs index cb453b3bc12b9..f74fa44aca4fc 100644 --- a/narwhal/worker/src/lib.rs +++ b/narwhal/worker/src/lib.rs @@ -13,7 +13,6 @@ mod batch_maker; mod client; mod handlers; mod quorum_waiter; -mod transactions_server; mod tx_validator; mod worker; diff --git a/narwhal/worker/src/tests/worker_tests.rs b/narwhal/worker/src/tests/worker_tests.rs deleted file mode 100644 index 99421109124da..0000000000000 --- a/narwhal/worker/src/tests/worker_tests.rs +++ /dev/null @@ -1,371 +0,0 @@ -// Copyright (c) 2021, Facebook, Inc. and its affiliates -// Copyright (c) Mysten Labs, Inc. 
-// SPDX-License-Identifier: Apache-2.0 -use super::*; -use crate::LocalNarwhalClient; -use crate::{metrics::initialise_metrics, TrivialTransactionValidator}; -use async_trait::async_trait; -use bytes::Bytes; -use fastcrypto::hash::Hash; -use futures::stream::FuturesOrdered; -use futures::StreamExt; -use primary::{CHANNEL_CAPACITY, NUM_SHUTDOWN_RECEIVERS}; -use prometheus::Registry; -use store::rocks; -use store::rocks::MetricConf; -use store::rocks::ReadWriteOptions; -use test_utils::{ - batch, latest_protocol_version, temp_dir, test_network, transaction, CommitteeFixture, -}; -use types::{ - BatchAPI, MockWorkerToPrimary, MockWorkerToWorker, PreSubscribedBroadcastSender, - TransactionProto, TransactionsClient, WorkerBatchMessage, WorkerToWorkerClient, -}; - -// A test validator that rejects every transaction / batch -#[derive(Clone)] -struct NilTxValidator; -#[async_trait] -impl TransactionValidator for NilTxValidator { - type Error = eyre::Report; - - fn validate(&self, _tx: &[u8]) -> Result<(), Self::Error> { - eyre::bail!("Invalid transaction"); - } - fn validate_batch( - &self, - _txs: &Batch, - _protocol_config: &ProtocolConfig, - ) -> Result<(), Self::Error> { - eyre::bail!("Invalid batch"); - } -} - -#[tokio::test] -async fn reject_invalid_clients_transactions() { - let fixture = CommitteeFixture::builder().randomize_ports(true).build(); - let committee = fixture.committee(); - let worker_cache = fixture.worker_cache(); - - let worker_id = 0; - let my_primary = fixture.authorities().next().unwrap(); - let myself = my_primary.worker(worker_id); - let public_key = my_primary.public_key(); - let client = NetworkClient::new_from_keypair(&my_primary.network_keypair()); - - let parameters = Parameters { - batch_size: 200, // Two transactions. - ..Parameters::default() - }; - - // Create a new test store. - let batch_store = rocks::DBMap::::open( - temp_dir(), - MetricConf::default(), - None, - Some("batches"), - &ReadWriteOptions::default(), - ) - .unwrap(); - - let registry = Registry::new(); - let metrics = initialise_metrics(®istry); - - let mut tx_shutdown = PreSubscribedBroadcastSender::new(NUM_SHUTDOWN_RECEIVERS); - - // Spawn a `Worker` instance with a reject-all validator. - Worker::spawn( - my_primary.authority().clone(), - myself.keypair(), - worker_id, - committee.clone(), - worker_cache.clone(), - latest_protocol_version(), - parameters, - NilTxValidator, - client, - batch_store, - metrics, - &mut tx_shutdown, - ); - - // Wait till other services have been able to start up - tokio::task::yield_now().await; - // Send enough transactions to create a batch. 
- let address = worker_cache - .worker(&public_key, &worker_id) - .unwrap() - .transactions; - let config = mysten_network::config::Config::new(); - let channel = config.connect_lazy(&address).unwrap(); - let mut client = TransactionsClient::new(channel); - let tx = transaction(); - let txn = TransactionProto { - transactions: vec![Bytes::from(tx.clone())], - }; - - // Check invalid transactions are rejected - let res = client.submit_transaction(txn).await; - assert!(res.is_err()); - - let worker_pk = worker_cache.worker(&public_key, &worker_id).unwrap().name; - - let batch = batch(&latest_protocol_version()); - let batch_message = WorkerBatchMessage { - batch: batch.clone(), - }; - - // setup network : impersonate a send from another worker - let another_primary = fixture.authorities().nth(2).unwrap(); - let another_worker = another_primary.worker(worker_id); - let network = test_network( - another_worker.keypair(), - &another_worker.info().worker_address, - ); - // ensure that the networks are connected - network - .connect(myself.info().worker_address.to_anemo_address().unwrap()) - .await - .unwrap(); - let peer = network.peer(PeerId(worker_pk.0.to_bytes())).unwrap(); - - // Check invalid batches are rejected - let res = WorkerToWorkerClient::new(peer) - .report_batch(batch_message) - .await; - assert!(res.is_err()); -} - -/// TODO: test both RemoteNarwhalClient and LocalNarwhalClient in the same test case. -#[tokio::test] -async fn handle_remote_clients_transactions() { - let fixture = CommitteeFixture::builder().randomize_ports(true).build(); - let committee = fixture.committee(); - let worker_cache = fixture.worker_cache(); - - let worker_id = 0; - let my_primary = fixture.authorities().next().unwrap(); - let myself = my_primary.worker(worker_id); - let authority_public_key = my_primary.public_key(); - let client = NetworkClient::new_from_keypair(&my_primary.network_keypair()); - - let parameters = Parameters { - batch_size: 200, // Two transactions. - ..Parameters::default() - }; - - // Create a new test store. - let batch_store = rocks::DBMap::::open( - temp_dir(), - MetricConf::default(), - None, - Some("batches"), - &ReadWriteOptions::default(), - ) - .unwrap(); - - let registry = Registry::new(); - let metrics = initialise_metrics(®istry); - - let mut tx_shutdown = PreSubscribedBroadcastSender::new(NUM_SHUTDOWN_RECEIVERS); - - // Spawn a `Worker` instance. - Worker::spawn( - my_primary.authority().clone(), - myself.keypair(), - worker_id, - committee.clone(), - worker_cache.clone(), - latest_protocol_version(), - parameters, - TrivialTransactionValidator, - client.clone(), - batch_store, - metrics, - &mut tx_shutdown, - ); - - // Spawn a network listener to receive our batch's digest. - let mut peer_networks = Vec::new(); - - // Create batches - let batch = batch(&latest_protocol_version()); - let batch_digest = batch.digest(); - - let (tx_await_batch, mut rx_await_batch) = test_utils::test_channel!(CHANNEL_CAPACITY); - let mut mock_primary_server = MockWorkerToPrimary::new(); - mock_primary_server - .expect_report_own_batch() - .withf(move |request| { - let message = request.body(); - - message.digest == batch_digest && message.worker_id == worker_id - }) - .times(1) - .returning(move |_| { - tx_await_batch.try_send(()).unwrap(); - Ok(anemo::Response::new(())) - }); - client.set_worker_to_primary_local_handler(Arc::new(mock_primary_server)); - - // Spawn enough workers' listeners to acknowledge our batches. 
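The deleted worker tests above stub the worker-to-primary interface with a mockall mock, constraining the expected call with `withf`, requiring exactly one invocation with `times(1)`, and supplying a canned response via `returning`. A minimal self-contained sketch of that expectation pattern (requires the `mockall` crate; the trait here is hypothetical, not the real `WorkerToPrimary` RPC surface):

```rust
use mockall::automock;

// Hypothetical trait standing in for the worker-to-primary reporting call.
#[automock]
trait ReportOwnBatch {
    fn report_own_batch(&self, digest: u64, worker_id: u32) -> bool;
}

fn main() {
    let mut mock = MockReportOwnBatch::new();
    mock.expect_report_own_batch()
        // Only accept the exact digest/worker pair we expect to see.
        .withf(|digest, worker_id| *digest == 42 && *worker_id == 0)
        // The deleted test also required exactly one call.
        .times(1)
        .returning(|_, _| true);

    assert!(mock.report_own_batch(42, 0));
}
```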
- for worker in fixture.authorities().skip(1).map(|a| a.worker(worker_id)) { - let mut mock_server = MockWorkerToWorker::new(); - mock_server - .expect_report_batch() - .returning(|_| Ok(anemo::Response::new(()))); - let routes = anemo::Router::new().add_rpc_service(WorkerToWorkerServer::new(mock_server)); - peer_networks.push(worker.new_network(routes)); - } - - // Wait till other services have been able to start up - tokio::task::yield_now().await; - // Send enough transactions to create a batch. - let address = worker_cache - .worker(&authority_public_key, &worker_id) - .unwrap() - .transactions; - let config = mysten_network::config::Config::new(); - let channel = config.connect_lazy(&address).unwrap(); - let client = TransactionsClient::new(channel); - - let join_handle = tokio::task::spawn(async move { - let mut fut_list = FuturesOrdered::new(); - for tx in batch.transactions() { - let txn = TransactionProto { - transactions: vec![Bytes::from(tx.clone())], - }; - - // Calls to submit_transaction are now blocking, so we need to drive them - // all at the same time, rather than sequentially. - let mut inner_client = client.clone(); - fut_list.push_back(async move { - inner_client.submit_transaction(txn).await.unwrap(); - }); - } - - // Drive all sending in parallel. - while fut_list.next().await.is_some() {} - }); - - // Ensure the primary received the batch's digest (ie. it did not panic). - rx_await_batch.recv().await.unwrap(); - - // Ensure sending ended. - assert!(join_handle.await.is_ok()); -} - -/// TODO: test both RemoteNarwhalClient and LocalNarwhalClient in the same test case. -#[tokio::test] -async fn handle_local_clients_transactions() { - let fixture = CommitteeFixture::builder().randomize_ports(true).build(); - let committee = fixture.committee(); - let worker_cache = fixture.worker_cache(); - - let worker_id = 0; - let my_primary = fixture.authorities().next().unwrap(); - let myself = my_primary.worker(worker_id); - let authority_public_key = my_primary.public_key(); - let client = NetworkClient::new_from_keypair(&my_primary.network_keypair()); - - let parameters = Parameters { - batch_size: 200, // Two transactions. - ..Parameters::default() - }; - - // Create a new test store. - let batch_store = rocks::DBMap::::open( - temp_dir(), - MetricConf::default(), - None, - Some("batches"), - &ReadWriteOptions::default(), - ) - .unwrap(); - - let registry = Registry::new(); - let metrics = initialise_metrics(®istry); - - let mut tx_shutdown = PreSubscribedBroadcastSender::new(NUM_SHUTDOWN_RECEIVERS); - - // Spawn a `Worker` instance. - Worker::spawn( - my_primary.authority().clone(), - myself.keypair(), - worker_id, - committee.clone(), - worker_cache.clone(), - latest_protocol_version(), - parameters, - TrivialTransactionValidator, - client.clone(), - batch_store, - metrics, - &mut tx_shutdown, - ); - - // Spawn a network listener to receive our batch's digest. 
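Because each `submit_transaction` call blocks until acknowledged, the deleted tests above push every submission into a `futures::stream::FuturesOrdered` and drain the stream, so the calls run concurrently while results still come back in submission order. A standalone sketch of that pattern (assumes a recent `futures` crate with `push_back` and tokio with the rt and time features; the sleeping futures stand in for the Narwhal client):

```rust
use futures::stream::{FuturesOrdered, StreamExt};
use std::time::Duration;

#[tokio::main]
async fn main() {
    let mut in_flight = FuturesOrdered::new();

    // Queue all "submissions" up front; they are polled concurrently.
    for i in 0u32..5 {
        in_flight.push_back(async move {
            // Stand-in for client.submit_transaction(...).await; later items
            // sleep less, so they finish first.
            tokio::time::sleep(Duration::from_millis(5 * (5 - i) as u64)).await;
            i
        });
    }

    // FuturesOrdered still yields results in the order they were pushed.
    let results: Vec<u32> = in_flight.collect().await;
    assert_eq!(results, vec![0, 1, 2, 3, 4]);
}
```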
- let mut peer_networks = Vec::new(); - - // Create batches - let batch = batch(&latest_protocol_version()); - let batch_digest = batch.digest(); - - let (tx_await_batch, mut rx_await_batch) = test_utils::test_channel!(CHANNEL_CAPACITY); - let mut mock_primary_server = MockWorkerToPrimary::new(); - mock_primary_server - .expect_report_own_batch() - .withf(move |request| { - let message = request.body(); - message.digest == batch_digest && message.worker_id == worker_id - }) - .times(1) - .returning(move |_| { - tx_await_batch.try_send(()).unwrap(); - Ok(anemo::Response::new(())) - }); - client.set_worker_to_primary_local_handler(Arc::new(mock_primary_server)); - - // Spawn enough workers' listeners to acknowledge our batches. - for worker in fixture.authorities().skip(1).map(|a| a.worker(worker_id)) { - let mut mock_server = MockWorkerToWorker::new(); - mock_server - .expect_report_batch() - .returning(|_| Ok(anemo::Response::new(()))); - let routes = anemo::Router::new().add_rpc_service(WorkerToWorkerServer::new(mock_server)); - peer_networks.push(worker.new_network(routes)); - } - - // Wait till other services have been able to start up - tokio::task::yield_now().await; - // Send enough transactions to create a batch. - let address = worker_cache - .worker(&authority_public_key, &worker_id) - .unwrap() - .transactions; - let client = LocalNarwhalClient::get_global(&address).unwrap().load(); - - let join_handle = tokio::task::spawn(async move { - let mut fut_list = FuturesOrdered::new(); - for txn in batch.transactions() { - // Calls to submit_transaction are now blocking, so we need to drive them - // all at the same time, rather than sequentially. - let inner_client = client.clone(); - fut_list.push_back(async move { - inner_client - .submit_transactions(vec![txn.clone()]) - .await - .unwrap(); - }); - } - - // Drive all sending in parallel. - while fut_list.next().await.is_some() {} - }); - - // Ensure the primary received the batch's digest (ie. it did not panic). - rx_await_batch.recv().await.unwrap(); - - // Ensure sending ended. - assert!(join_handle.await.is_ok()); -} diff --git a/narwhal/worker/src/transactions_server.rs b/narwhal/worker/src/transactions_server.rs deleted file mode 100644 index 0790083001c44..0000000000000 --- a/narwhal/worker/src/transactions_server.rs +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright (c) Mysten Labs, Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -use crate::client::LocalNarwhalClient; -use crate::metrics::WorkerEndpointMetrics; -use crate::TransactionValidator; -use async_trait::async_trait; -use futures::stream::FuturesUnordered; -use futures::StreamExt; -use mysten_metrics::metered_channel::Sender; -use mysten_metrics::{monitored_scope, spawn_logged_monitored_task}; -use mysten_network::server::Server; -use mysten_network::Multiaddr; -use std::sync::Arc; -use std::time::Duration; -use tokio::task::JoinHandle; -use tokio::time::{sleep, timeout}; -use tonic::{Request, Response, Status}; -use tracing::{error, info, warn}; -use types::{ - ConditionalBroadcastReceiver, Empty, Transaction, TransactionProto, Transactions, - TransactionsServer, TxResponse, -}; - -pub struct TxServer { - address: Multiaddr, - rx_shutdown: ConditionalBroadcastReceiver, - endpoint_metrics: WorkerEndpointMetrics, - local_client: Arc, - validator: V, -} - -impl TxServer { - #[must_use] - pub fn spawn( - address: Multiaddr, - rx_shutdown: ConditionalBroadcastReceiver, - endpoint_metrics: WorkerEndpointMetrics, - tx_batch_maker: Sender<(Vec, TxResponse)>, - validator: V, - ) -> JoinHandle<()> { - // create and initialize local Narwhal client. - let local_client = LocalNarwhalClient::new(tx_batch_maker); - LocalNarwhalClient::set_global(address.clone(), local_client.clone()); - - spawn_logged_monitored_task!( - Self { - address, - rx_shutdown, - endpoint_metrics, - local_client, - validator, - } - .run(), - "TxServer" - ) - } - - async fn run(mut self) { - const MAX_RETRIES: usize = 10; - const RETRY_BACKOFF: Duration = Duration::from_millis(1_000); - const GRACEFUL_SHUTDOWN_DURATION: Duration = Duration::from_millis(2_000); - - // create the handler - let tx_handler = TxReceiverHandler { - local_client: self.local_client.clone(), - validator: self.validator, - }; - - // now create the server - let mut retries = MAX_RETRIES; - let mut server: Server; - - loop { - match mysten_network::config::Config::new() - .server_builder_with_metrics(self.endpoint_metrics.clone()) - .add_service(TransactionsServer::new(tx_handler.clone())) - .bind(&self.address) - .await - { - Ok(s) => { - server = s; - break; - } - Err(err) => { - retries -= 1; - if retries == 0 { - panic!( - "Couldn't boot transactions server, permanently failed: {}", - err - ); - } - - error!( - "Couldn't boot transactions server at try {}, will wait {}s and retry: {}", - retries, - RETRY_BACKOFF.as_secs_f64(), - err - ); - - sleep(RETRY_BACKOFF).await; - } - } - } - - let shutdown_handle = server.take_cancel_handle().unwrap(); - - let server_handle = spawn_logged_monitored_task!(server.serve()); - - // wait to receive a shutdown signal - let _ = self.rx_shutdown.receiver.recv().await; - - // once do just gracefully signal the node to shutdown - shutdown_handle.send(()).unwrap(); - - // now wait until the handle completes or timeout if it takes long time - match timeout(GRACEFUL_SHUTDOWN_DURATION, server_handle).await { - Ok(_) => { - info!("Successfully shutting down gracefully transactions server"); - } - Err(err) => { - warn!( - "Time out while waiting to gracefully shutdown transactions server: {}", - err - ) - } - } - } -} - -/// Defines how the network receiver handles incoming transactions. 
-#[derive(Clone)] -pub(crate) struct TxReceiverHandler { - pub(crate) local_client: Arc, - pub(crate) validator: V, -} - -#[async_trait] -impl Transactions for TxReceiverHandler { - async fn submit_transaction( - &self, - request: Request, - ) -> Result, Status> { - let _scope = monitored_scope("SubmitTransaction"); - let transactions = request.into_inner().transactions; - - let validate_scope = monitored_scope("SubmitTransaction_ValidateTx"); - for transaction in &transactions { - if self.validator.validate(transaction.as_ref()).is_err() { - return Err(Status::invalid_argument("Invalid transaction")); - } - } - drop(validate_scope); - - // Send the transaction to Narwhal via the local client. - let submit_scope = monitored_scope("SubmitTransaction_SubmitTx"); - self.local_client - .submit_transactions(transactions.iter().map(|x| x.to_vec()).collect()) - .await - .map_err(|e| Status::internal(e.to_string()))?; - drop(submit_scope); - Ok(Response::new(Empty {})) - } - - async fn submit_transaction_stream( - &self, - request: Request>, - ) -> Result, Status> { - let mut transactions = request.into_inner(); - let mut requests = FuturesUnordered::new(); - - let _scope = monitored_scope("SubmitTransactionStream"); - while let Some(Ok(request)) = transactions.next().await { - let num_txns = request.transactions.len(); - if num_txns != 1 { - return Err(Status::invalid_argument(format!( - "Stream contains an invalid number of transactions: {num_txns}" - ))); - } - let txn = &request.transactions[0]; - let validate_scope = monitored_scope("SubmitTransactionStream_ValidateTx"); - if let Err(err) = self.validator.validate(txn.as_ref()) { - // If the transaction is invalid (often cryptographically), better to drop the client - return Err(Status::invalid_argument(format!( - "Stream contains an invalid transaction {err}" - ))); - } - drop(validate_scope); - // Send the transaction to Narwhal via the local client. - // Note that here we do not wait for a response because this would - // mean that we process only a single message from this stream at a - // time. Instead we gather them and resolve them once the stream is over. - let submit_scope = monitored_scope("SubmitTransactionStream_SubmitTx"); - requests.push(self.local_client.submit_transactions(vec![txn.to_vec()])); - drop(submit_scope); - } - - while let Some(result) = requests.next().await { - if let Err(e) = result { - return Err(Status::internal(e.to_string())); - } - } - - Ok(Response::new(Empty {})) - } -} diff --git a/narwhal/worker/src/worker.rs b/narwhal/worker/src/worker.rs index 63064a1b6c925..b38b80a755623 100644 --- a/narwhal/worker/src/worker.rs +++ b/narwhal/worker/src/worker.rs @@ -41,15 +41,10 @@ use types::{ PrimaryToWorkerServer, WorkerToWorkerServer, }; -#[cfg(test)] -#[path = "tests/worker_tests.rs"] -pub mod worker_tests; - /// The default channel capacity for each channel of the worker. pub const CHANNEL_CAPACITY: usize = 1_000; use crate::metrics::{Metrics, WorkerEndpointMetrics, WorkerMetrics}; -use crate::transactions_server::TxServer; pub struct Worker { /// This authority. @@ -440,19 +435,20 @@ impl Worker { } /// Spawn all tasks responsible to handle clients transactions. + // TODO: finish deleting this. It's partially deleted already and may not work right. 
fn handle_clients_transactions( &self, mut shutdown_receivers: Vec, node_metrics: Arc, channel_metrics: Arc, - endpoint_metrics: WorkerEndpointMetrics, - validator: impl TransactionValidator, + _endpoint_metrics: WorkerEndpointMetrics, + _validator: impl TransactionValidator, client: NetworkClient, network: anemo::Network, ) -> Vec> { info!("Starting handler for transactions"); - let (tx_batch_maker, rx_batch_maker) = channel_with_total( + let (_tx_batch_maker, rx_batch_maker) = channel_with_total( CHANNEL_CAPACITY, &channel_metrics.tx_batch_maker, &channel_metrics.tx_batch_maker_total, @@ -476,14 +472,6 @@ impl Worker { }) .unwrap_or(address); - let tx_server_handle = TxServer::spawn( - address.clone(), - shutdown_receivers.pop().unwrap(), - endpoint_metrics, - tx_batch_maker, - validator, - ); - // The transactions are sent to the `BatchMaker` that assembles them into batches. It then broadcasts // (in a reliable manner) the batches to all other workers that share the same `id` as us. Finally, it // gathers the 'cancel handlers' of the messages and send them to the `QuorumWaiter`. @@ -518,6 +506,6 @@ impl Worker { self.id, address ); - vec![batch_maker_handle, quorum_waiter_handle, tx_server_handle] + vec![batch_maker_handle, quorum_waiter_handle] } } diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 04a14ddddf926..2755915c29141 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -668,6 +668,9 @@ importers: '@mysten/sui': specifier: workspace:* version: link:../../sdk/typescript + '@noble/hashes': + specifier: ^1.4.0 + version: 1.4.0 '@radix-ui/react-dialog': specifier: ^1.1.1 version: 1.1.1(@types/react-dom@18.3.0)(@types/react@18.3.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) @@ -729,6 +732,9 @@ importers: '@tsconfig/docusaurus': specifier: ^2.0.3 version: 2.0.3 + '@types/node': + specifier: ^20.14.10 + version: 20.14.10 '@types/react': specifier: ^18.3.3 version: 18.3.3 @@ -1009,7 +1015,7 @@ importers: dependencies: '@mysten/dapp-kit': specifier: ^0.14.25 - version: 0.14.25(@tanstack/react-query@5.59.0(react@18.3.1))(@types/react-dom@18.3.0)(@types/react@18.3.3)(immer@9.0.21)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(typescript@5.5.3) + version: 0.14.25(@tanstack/react-query@5.59.0(react@18.3.1))(@types/react-dom@18.3.0)(@types/react@18.3.3)(babel-plugin-macros@3.1.0)(immer@9.0.21)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(typescript@5.5.3) '@mysten/sui': specifier: ^1.12.0 version: 1.12.0(typescript@5.5.3) @@ -1579,6 +1585,37 @@ importers: specifier: ^7.2.0 version: 7.2.0 + sdk/kms: + dependencies: + '@mysten/sui': + specifier: workspace:* + version: link:../typescript + '@noble/curves': + specifier: ^1.4.2 + version: 1.6.0 + '@noble/hashes': + specifier: ^1.4.0 + version: 1.5.0 + asn1-ts: + specifier: ^8.0.2 + version: 8.0.2 + aws4fetch: + specifier: ^1.0.20 + version: 1.0.20 + devDependencies: + '@mysten/build-scripts': + specifier: workspace:* + version: link:../build-scripts + '@types/node': + specifier: ^20.14.10 + version: 20.14.10 + typescript: + specifier: ^5.5.3 + version: 5.5.3 + vitest: + specifier: ^2.0.1 + version: 2.0.1(@types/node@20.14.10)(happy-dom@14.12.3)(jsdom@24.1.0)(lightningcss@1.27.0)(sass@1.77.6)(terser@5.31.1) + sdk/ledgerjs-hw-app-sui: dependencies: '@ledgerhq/hw-transport': @@ -4300,74 +4337,92 @@ packages: nanostores: ^0.9.0 || ^0.10.0 react: '>=18.0.0' - '@napi-rs/simple-git-android-arm-eabi@0.1.16': - resolution: {integrity: sha512-dbrCL0Pl5KZG7x7tXdtVsA5CO6At5ohDX3myf5xIYn9kN4jDFxsocl8bNt6Vb/hZQoJd8fI+k5VlJt+rFhbdVw==} + 
'@napi-rs/simple-git-android-arm-eabi@0.1.19': + resolution: {integrity: sha512-XryEH/hadZ4Duk/HS/HC/cA1j0RHmqUGey3MsCf65ZS0VrWMqChXM/xlTPWuY5jfCc/rPubHaqI7DZlbexnX/g==} engines: {node: '>= 10'} cpu: [arm] os: [android] - '@napi-rs/simple-git-android-arm64@0.1.16': - resolution: {integrity: sha512-xYz+TW5J09iK8SuTAKK2D5MMIsBUXVSs8nYp7HcMi8q6FCRO7yJj96YfP9PvKsc/k64hOyqGmL5DhCzY9Cu1FQ==} + '@napi-rs/simple-git-android-arm64@0.1.19': + resolution: {integrity: sha512-ZQ0cPvY6nV9p7zrR9ZPo7hQBkDAcY/CHj3BjYNhykeUCiSNCrhvwX+WEeg5on8M1j4d5jcI/cwVG2FslfiByUg==} engines: {node: '>= 10'} cpu: [arm64] os: [android] - '@napi-rs/simple-git-darwin-arm64@0.1.16': - resolution: {integrity: sha512-XfgsYqxhUE022MJobeiX563TJqyQyX4FmYCnqrtJwAfivESVeAJiH6bQIum8dDEYMHXCsG7nL8Ok0Dp8k2m42g==} + '@napi-rs/simple-git-darwin-arm64@0.1.19': + resolution: {integrity: sha512-viZB5TYgjA1vH+QluhxZo0WKro3xBA+1xSzYx8mcxUMO5gnAoUMwXn0ZO/6Zy6pai+aGae+cj6XihGnrBRu3Pg==} engines: {node: '>= 10'} cpu: [arm64] os: [darwin] - '@napi-rs/simple-git-darwin-x64@0.1.16': - resolution: {integrity: sha512-tkEVBhD6vgRCbeWsaAQqM3bTfpIVGeitamPPRVSbsq8qgzJ5Dx6ZedH27R7KSsA/uao7mZ3dsrNLXbu1Wy5MzA==} + '@napi-rs/simple-git-darwin-x64@0.1.19': + resolution: {integrity: sha512-6dNkzSNUV5X9rsVYQbpZLyJu4Gtkl2vNJ3abBXHX/Etk0ILG5ZasO3ncznIANZQpqcbn/QPHr49J2QYAXGoKJA==} engines: {node: '>= 10'} cpu: [x64] os: [darwin] - '@napi-rs/simple-git-linux-arm-gnueabihf@0.1.16': - resolution: {integrity: sha512-R6VAyNnp/yRaT7DV1Ao3r67SqTWDa+fNq2LrNy0Z8gXk2wB9ZKlrxFtLPE1WSpWknWtyRDLpRlsorh7Evk7+7w==} + '@napi-rs/simple-git-freebsd-x64@0.1.19': + resolution: {integrity: sha512-sB9krVIchzd20FjI2ZZ8FDsTSsXLBdnwJ6CpeVyrhXHnoszfcqxt49ocZHujAS9lMpXq7i2Nv1EXJmCy4KdhwA==} + engines: {node: '>= 10'} + cpu: [x64] + os: [freebsd] + + '@napi-rs/simple-git-linux-arm-gnueabihf@0.1.19': + resolution: {integrity: sha512-6HPn09lr9N1n5/XKfP8Np53g4fEXVxOFqNkS6rTH3Rm1lZHdazTRH62RggXLTguZwjcE+MvOLvoTIoR5kAS8+g==} engines: {node: '>= 10'} cpu: [arm] os: [linux] - '@napi-rs/simple-git-linux-arm64-gnu@0.1.16': - resolution: {integrity: sha512-LAGI0opFKw/HBMCV2qIBK3uWSEW9h4xd2ireZKLJy8DBPymX6NrWIamuxYNyCuACnFdPRxR4LaRFy4J5ZwuMdw==} + '@napi-rs/simple-git-linux-arm64-gnu@0.1.19': + resolution: {integrity: sha512-G0gISckt4cVDp3oh5Z6PV3GHJrJO6Z8bIS+9xA7vTtKdqB1i5y0n3cSFLlzQciLzhr+CajFD27doW4lEyErQ/Q==} engines: {node: '>= 10'} cpu: [arm64] os: [linux] - '@napi-rs/simple-git-linux-arm64-musl@0.1.16': - resolution: {integrity: sha512-I57Ph0F0Yn2KW93ep+V1EzKhACqX0x49vvSiapqIsdDA2PifdEWLc1LJarBolmK7NKoPqKmf6lAKKO9lhiZzkg==} + '@napi-rs/simple-git-linux-arm64-musl@0.1.19': + resolution: {integrity: sha512-OwTRF+H4IZYxmDFRi1IrLMfqbdIpvHeYbJl2X94NVsLVOY+3NUHvEzL3fYaVx5urBaMnIK0DD3wZLbcueWvxbA==} engines: {node: '>= 10'} cpu: [arm64] os: [linux] - '@napi-rs/simple-git-linux-x64-gnu@0.1.16': - resolution: {integrity: sha512-AZYYFY2V7hlcQASPEOWyOa3e1skzTct9QPzz0LiDM3f/hCFY/wBaU2M6NC5iG3d2Kr38heuyFS/+JqxLm5WaKA==} + '@napi-rs/simple-git-linux-powerpc64le-gnu@0.1.19': + resolution: {integrity: sha512-p7zuNNVyzpRvkCt2RIGv9FX/WPcPbZ6/FRUgUTZkA2WU33mrbvNqSi4AOqCCl6mBvEd+EOw5NU4lS9ORRJvAEg==} + engines: {node: '>= 10'} + cpu: [powerpc64le] + os: [linux] + + '@napi-rs/simple-git-linux-s390x-gnu@0.1.19': + resolution: {integrity: sha512-6N2vwJUPLiak8GLrS0a3is0gSb0UwI2CHOOqtvQxPmv+JVI8kn3vKiUscsktdDb0wGEPeZ8PvZs0y8UWix7K4g==} + engines: {node: '>= 10'} + cpu: [s390x] + os: [linux] + + '@napi-rs/simple-git-linux-x64-gnu@0.1.19': + resolution: {integrity: 
sha512-61YfeO1J13WK7MalLgP3QlV6of2rWnVw1aqxWkAgy/lGxoOFSJ4Wid6ANVCEZk4tJpPX/XNeneqkUz5xpeb2Cw==} engines: {node: '>= 10'} cpu: [x64] os: [linux] - '@napi-rs/simple-git-linux-x64-musl@0.1.16': - resolution: {integrity: sha512-9TyMcYSBJwjT8jwjY9m24BZbu7ozyWTjsmYBYNtK3B0Um1Ov6jthSNneLVvouQ6x+k3Ow+00TiFh6bvmT00r8g==} + '@napi-rs/simple-git-linux-x64-musl@0.1.19': + resolution: {integrity: sha512-cCTWNpMJnN3PrUBItWcs3dQKCydsIasbrS3laMzq8k7OzF93Zrp2LWDTPlLCO9brbBVpBzy2Qk5Xg9uAfe/Ukw==} engines: {node: '>= 10'} cpu: [x64] os: [linux] - '@napi-rs/simple-git-win32-arm64-msvc@0.1.16': - resolution: {integrity: sha512-uslJ1WuAHCYJWui6xjsyT47SjX6KOHDtClmNO8hqKz1pmDSNY7AjyUY8HxvD1lK9bDnWwc4JYhikS9cxCqHybw==} + '@napi-rs/simple-git-win32-arm64-msvc@0.1.19': + resolution: {integrity: sha512-sWavb1BjeLKKBA+PbTsRSSzVNfb7V/dOpaJvkgR5d2kWFn/AHmCZHSSj/3nyZdYf0BdDC+DIvqk3daAEZ6QMVw==} engines: {node: '>= 10'} cpu: [arm64] os: [win32] - '@napi-rs/simple-git-win32-x64-msvc@0.1.16': - resolution: {integrity: sha512-SoEaVeCZCDF1MP+M9bMSXsZWgEjk4On9GWADO5JOulvzR1bKjk0s9PMHwe/YztR9F0sJzrCxwtvBZowhSJsQPg==} + '@napi-rs/simple-git-win32-x64-msvc@0.1.19': + resolution: {integrity: sha512-FmNuPoK4+qwaSCkp8lm3sJlrxk374enW+zCE5ZksXlZzj/9BDJAULJb5QUJ7o9Y8A/G+d8LkdQLPBE2Jaxe5XA==} engines: {node: '>= 10'} cpu: [x64] os: [win32] - '@napi-rs/simple-git@0.1.16': - resolution: {integrity: sha512-C5wRPw9waqL2jk3jEDeJv+f7ScuO3N0a39HVdyFLkwKxHH4Sya4ZbzZsu2JLi6eEqe7RuHipHL6mC7B2OfYZZw==} + '@napi-rs/simple-git@0.1.19': + resolution: {integrity: sha512-jMxvwzkKzd3cXo2EB9GM2ic0eYo2rP/BS6gJt6HnWbsDO1O8GSD4k7o2Cpr2YERtMpGF/MGcDfsfj2EbQPtrXw==} engines: {node: '>= 10'} '@ndelangen/get-tarball@3.0.9': @@ -4433,10 +4488,18 @@ packages: '@noble/curves@1.4.2': resolution: {integrity: sha512-TavHr8qycMChk8UwMld0ZDRvatedkzWfH8IiaeGCfymOP5i0hSCozz9vHOL0nkwk7HRMlFnAiKpS2jrUmSybcw==} + '@noble/curves@1.6.0': + resolution: {integrity: sha512-TlaHRXDehJuRNR9TfZDNQ45mMEd5dwUwmicsafcIX4SsNiqnCHKjE/1alYPd/lDRVhxdhUAlv8uEhMCI5zjIJQ==} + engines: {node: ^14.21.3 || >=16} + '@noble/hashes@1.4.0': resolution: {integrity: sha512-V1JJ1WTRUqHHrOSh597hURcMqVKVGL/ea3kv0gSnEdsEZ0/+VyPghM1lMNGc00z7CIQorSvbKpuJkxvuHbvdbg==} engines: {node: '>= 16'} + '@noble/hashes@1.5.0': + resolution: {integrity: sha512-1j6kQFb7QRru7eKN3ZDvRcP13rugwdxZqCjbiAVZfIJwgj2A65UmT4TgARXGlXgnRkORLTDTrO19ZErt7+QXgA==} + engines: {node: ^14.21.3 || >=16} + '@nodelib/fs.scandir@2.1.5': resolution: {integrity: sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==} engines: {node: '>= 8'} @@ -6597,14 +6660,14 @@ packages: peerDependencies: react: ^18 || ^19 - '@tanstack/react-virtual@3.5.0': - resolution: {integrity: sha512-rtvo7KwuIvqK9zb0VZ5IL7fiJAEnG+0EiFZz8FUOs+2mhGqdGmjKIaT1XU7Zq0eFqL0jonLlhbayJI/J2SA/Bw==} + '@tanstack/react-virtual@3.10.8': + resolution: {integrity: sha512-VbzbVGSsZlQktyLrP5nxE+vE1ZR+U0NFAWPbJLoG2+DKPwd2D7dVICTVIIaYlJqX1ZCEnYDbaOpmMwbsyhBoIA==} peerDependencies: react: ^16.8.0 || ^17.0.0 || ^18.0.0 react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 - '@tanstack/virtual-core@3.5.0': - resolution: {integrity: sha512-KnPRCkQTyqhanNC0K63GBG3wA8I+D1fQuVnAvcBF8f13akOKeQp1gSbu6f77zCxhEk727iV5oQnbHLYzHrECLg==} + '@tanstack/virtual-core@3.10.8': + resolution: {integrity: sha512-PBu00mtt95jbKFi6Llk9aik8bnR3tR/oQP1o3TSi+iG//+Q2RTIzCEgKkHG8BB86kxMNW6O8wku+Lmi+QFR6jA==} '@testing-library/dom@10.3.1': resolution: {integrity: sha512-q/WL+vlXMpC0uXDyfsMtc1rmotzLV8Y0gq6q1gfrrDjQeHoeLrqHbxdPvPNAh1i+xuJl7+BezywcXArz7vLqKQ==} @@ -6835,8 +6898,8 @@ 
packages: '@types/mdast@3.0.15': resolution: {integrity: sha512-LnwD+mUEfxWMa1QpDraczIn6k0Ee3SMicuYSSzS6ZYl2gKS09EClnJYGd8Du6rfc5r/GZEk5o1mRb8TaTj03sQ==} - '@types/mdast@4.0.3': - resolution: {integrity: sha512-LsjtqsyF+d2/yFOYaN22dHZI1Cpwkrj+g06G8+qtUKlhovPW89YhqSnfKtMbkgmEtYpH2gydRNULd6y8mciAFg==} + '@types/mdast@4.0.4': + resolution: {integrity: sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==} '@types/mdx@2.0.13': resolution: {integrity: sha512-+OWZQfAYyio6YkJb3HLxDrvnx6SWWDbC0zVPfBRzUk0/nqoDyf6dNxQi3eArPe8rJ473nobTMQ/8Zk+LxJ+Yuw==} @@ -7708,6 +7771,9 @@ packages: asap@2.0.6: resolution: {integrity: sha512-BSHWgDSAiKs50o2Re8ppvp3seVHXSRM44cdSsT9FfNEUUZLOGWVCsiWaRPWM1Znn+mqZ1OfVZ3z3DWEzSp7hRA==} + asn1-ts@8.0.2: + resolution: {integrity: sha512-M9btvRJRhMhPsUFzAfuqkmQPaLLw1KZNl8xtIBpC5fvbAmlpgJcsLKMP/hxKMAUcH52UUTViEQ/cm6/whkYb+Q==} + asn1@0.2.6: resolution: {integrity: sha512-ix/FxPn0MDjeyJ7i/yoHGFt/EX6LyNbxSEhPPXODPL+KB0VPk86UYfL0lMdy+KCnv+fmvIzySwaK5COwqVbWTQ==} @@ -7741,8 +7807,8 @@ packages: resolution: {integrity: sha512-Z7tMw1ytTXt5jqMcOP+OQteU1VuNK9Y02uuJtKQ1Sv69jXQKKg5cibLwGJow8yzZP+eAc18EmLGPal0bp36rvQ==} engines: {node: '>=8'} - astring@1.8.6: - resolution: {integrity: sha512-ISvCdHdlTDlH5IpxQJIex7BWBywFWgjJSVdwst+/iQCoEYnyOaQ95+X1JGshuBjGp6nxKUy1jMgE3zPqN7fQdg==} + astring@1.9.0: + resolution: {integrity: sha512-LElXdjswlqjWrPpJFg1Fx4wpkOCxj1TDHlSV4PlaRxHGWko024xICaa97ZkMfs6DRKlCguiAI+rbXv5GWwXIkg==} hasBin: true async-limiter@1.0.1: @@ -7787,6 +7853,9 @@ packages: aws4@1.12.0: resolution: {integrity: sha512-NmWvPnx0F1SfrQbYwOi7OeaNGokp9XhzNioJ/CSBs8Qa4vxug81mhJEAVZwxXuBmYB5KDRfMq/F3RR0BIU7sWg==} + aws4fetch@1.0.20: + resolution: {integrity: sha512-/djoAN709iY65ETD6LKCtyyEI04XIBP5xVvfmNxsEP0uJB5tyaGBztSryRr4HqMStr9R06PisQE7m9zDTXKu6g==} + axe-core@4.9.1: resolution: {integrity: sha512-QbUdXJVTpvUTHU7871ppZkdOLBeGUKBQWHkHrvN2V9IQWGMt61zf3B45BtzjxEJzYuj0JBjBZP/hmYS/R9pmAw==} engines: {node: '>=4'} @@ -8592,8 +8661,8 @@ packages: peerDependencies: cytoscape: ^3.2.0 - cytoscape@3.29.2: - resolution: {integrity: sha512-2G1ycU28Nh7OHT9rkXRLpCDP30MKH1dXJORZuBhtEhEW7pKwgPi77ImqlCWinouyE1PNepIOGZBOrE84DG7LyQ==} + cytoscape@3.30.2: + resolution: {integrity: sha512-oICxQsjW8uSaRmn4UK/jkczKOqTrVqt5/1WL0POiJUT2EKNc9STM4hYFHv917yu55aTBMFNRzymlJhVAiWPCxw==} engines: {node: '>=0.10'} d3-array@2.12.1: @@ -8776,8 +8845,8 @@ packages: resolution: {integrity: sha512-fnULvOpxnC5/Vg3NCiWelDsLiUc9bRwAPs/+LfTLNvetFCtCTN+yQz15C/fs4AwX1R9K5GLtLfn8QW+dWisaAw==} engines: {node: '>=0.11'} - dayjs@1.11.11: - resolution: {integrity: sha512-okzr3f11N6WuqYtZSvm+F776mB41wRZMhKP+hc34YdW+KmtYYK9iqvHSwo2k9FEH3fhGXvOPV6yz2IcSrfRUDg==} + dayjs@1.11.13: + resolution: {integrity: sha512-oaMBel6gjolK862uaPQOVTA7q3TZhuSvuMQAAglQDOWYO9A91IrAOUJEyKVlqJlHE0vq5p5UXxzdPfMH/x6xNg==} de-indent@1.0.2: resolution: {integrity: sha512-e/1zu3xH5MQryN2zdVaF0OrdNLUbvWxzMbi+iNA6Bky7l1RoP8a2fIbRocyHclXt/arDrrR6lL3TqFD9pMQTsg==} @@ -9049,8 +9118,8 @@ packages: resolution: {integrity: sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==} engines: {node: '>= 4'} - dompurify@3.1.6: - resolution: {integrity: sha512-cTOAhc36AalkjtBpfG6O8JimdTMWNXjiePT2xQH/ppBGi/4uIpmj8eKyIkMJErXWARyINV/sB38yf8JCLF5pbQ==} + dompurify@3.1.7: + resolution: {integrity: sha512-VaTstWtsneJY8xzy7DekmYWEOZcmzIe3Qb3zPd4STve1OBTa+e+WmS1ITQec1fZYXI3HCsOZZiSMpG6oxoWMWQ==} domutils@2.8.0: resolution: {integrity: 
sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A==} @@ -9800,8 +9869,8 @@ packages: resolution: {integrity: sha512-45eNySEs7n692jLN+eHQ6zvC9e1cqu9Dq1PpDHTcWRri2HFEs8is8Anmp1RcIhYxA5TZYD6RuESG2jdj6nkDJQ==} engines: {node: '>=0.4.0'} - focus-visible@5.2.0: - resolution: {integrity: sha512-Rwix9pBtC1Nuy5wysTmKy+UjbDJpIfg8eHjw0rjZ1mX4GNLz1Bmd16uDpI3Gk1i70Fgcs8Csg2lPm8HULFg9DQ==} + focus-visible@5.2.1: + resolution: {integrity: sha512-8Bx950VD1bWTQJEH/AM6SpEk+SU55aVnp4Ujhuuxy3eMEBCRwBnTBnVXr9YAPvZL3/CNjCa8u4IWfNmEO53whA==} follow-redirects@1.15.6: resolution: {integrity: sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA==} @@ -10285,8 +10354,8 @@ packages: hast-util-from-html-isomorphic@2.0.0: resolution: {integrity: sha512-zJfpXq44yff2hmE0XmwEOzdWin5xwH+QIhMLOScpX91e/NSGPsAzNCvLQDIEPyO2TXi+lBmU6hjLIhV8MwP2kw==} - hast-util-from-html@2.0.1: - resolution: {integrity: sha512-RXQBLMl9kjKVNkJTIO6bZyb2n+cUH8LFaSSzo82jiLT6Tfc+Pt7VQCS+/h3YwG4jaNE2TA2sdJisGWR+aJrp0g==} + hast-util-from-html@2.0.3: + resolution: {integrity: sha512-CUSRHXyKjzHov8yKsQjGOElXy/3EKpyX56ELnkHH34vDVw1N1XSQ1ZcAvTyAPtGqLTuKP/uxM+aLkSPqF/EtMw==} hast-util-from-parse5@8.0.1: resolution: {integrity: sha512-Er/Iixbc7IEa7r/XLtuG52zoqn/b3Xng/w6aZQ0xGVxzhw5xUFxcRqdPzP6yFi/4HBYRaifaI5fQ1RH8n0ZeOQ==} @@ -10297,8 +10366,8 @@ packages: hast-util-parse-selector@4.0.0: resolution: {integrity: sha512-wkQCkSYoOGCRKERFWcxMVMOcYE2K1AaNLU8DXS9arxnLOUEWbOXKXiJUNzEpqZ3JOKpnha3jkFrumEjVliDe7A==} - hast-util-raw@9.0.3: - resolution: {integrity: sha512-ICWvVOF2fq4+7CMmtCPD5CM4QKjPbHpPotE6+8tDooV0ZuyJVUzHsrNX+O5NaRbieTf0F7FfeBOMAwi6Td0+yQ==} + hast-util-raw@9.0.4: + resolution: {integrity: sha512-LHE65TD2YiNsHD3YuXcKPHXPLuYh/gjp12mOfU8jxSrm1f/yJpsb0F/KKljS6U9LJoP0Ux+tCe8iJ2AsPzTdgA==} hast-util-to-estree@2.3.3: resolution: {integrity: sha512-ihhPIUPxN0v0w6M5+IiAZZrn0LH2uZomeWwhn7uP7avZC6TE7lIiEh2yBMPr5+zi1aUCXq6VoYRgs2Bw9xmycQ==} @@ -11071,8 +11140,8 @@ packages: engines: {node: '>=6'} hasBin: true - jsonc-parser@3.2.1: - resolution: {integrity: sha512-AilxAyFOAcK5wA1+LeaySVBrHsGQvUFCDWXKpZjzaL0PqW+xfBOttn8GNtWKFWqneyMZj41MWF9Kl6iPWLwgOA==} + jsonc-parser@3.3.1: + resolution: {integrity: sha512-HUgH65KyejrUFPvHFPbqOY0rsFip3Bo5wb4ngvdi1EpCYWUQDC5V+Y7mZws+DLkr4M//zQJoanu1SP+87Dv1oQ==} jsonfile@4.0.0: resolution: {integrity: sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg==} @@ -11104,8 +11173,8 @@ packages: jws@3.2.2: resolution: {integrity: sha512-YHlZCB6lMTllWDtSPHz/ZXTsi8S00usEV6v1tjq8tOUZzw7DpSDWVXjXDre6ed1w/pd495ODpHZYSdkRTsa0HA==} - katex@0.16.10: - resolution: {integrity: sha512-ZiqaC04tp2O5utMsl2TEZTXxa6WSC4yo0fv5ML++D3QZv/vx2Mct0mTlRx3O+uUkjfuAgOkzsCmq5MiUEsDDdA==} + katex@0.16.11: + resolution: {integrity: sha512-RQrI8rlHY92OLf3rho/Ts8i/XvjgguEjOkO1BEXcU3N8BqPpSzBNwV/G0Ukr+P/l3ivvJUE/Fa/CwbS6HesGNQ==} hasBin: true keyv@4.5.3: @@ -11408,8 +11477,8 @@ packages: resolution: {integrity: sha512-a54IwgWPaeBCAAsv13YgmALOF1elABB08FxO9i+r4VFk5Vl4pKokRPeX8u5TCgSsPi6ec1otfLjdOpVcgbpshg==} hasBin: true - markdown-table@3.0.3: - resolution: {integrity: sha512-Z1NL3Tb1M9wH4XESsCDEksWoKTdlUafKc4pt0GRwjUyXaCFZ+dc3g2erqB6zm3szA2IUSi7VnPI+o/9jnxh9hw==} + markdown-table@3.0.4: + resolution: {integrity: sha512-wiYz4+JrLyb/DqW2hkFJxP7Vd7JuTDm77fvbM8VfEQdmSMqcImWeeRbHwZjBjIFki/VaMK2BhFi7oUUZeM5bqw==} markdown-to-jsx@7.3.2: resolution: {integrity: 
sha512-B+28F5ucp83aQm+OxNrPkS8z0tMKaeHiy0lHJs3LqCyDQFtWuenaIrkaVTgAm1pf1AU85LXltva86hlaT17i8Q==} @@ -11474,8 +11543,8 @@ packages: mdast-util-to-hast@12.3.0: resolution: {integrity: sha512-pits93r8PhnIoU4Vy9bjW39M2jJ6/tdHyja9rrot9uujkN7UTU9SDnE6WNJz/IGyQk3XHX6yNNtrBH6cQzm8Hw==} - mdast-util-to-hast@13.1.0: - resolution: {integrity: sha512-/e2l/6+OdGp/FB+ctrJ9Avz71AN/GRH3oi/3KAx/kMnoUsD6q0woXlDT8lLEeViVKE7oZxE7RXzvO3T8kF2/sA==} + mdast-util-to-hast@13.2.0: + resolution: {integrity: sha512-QGYKEuUsYT9ykKBCMOEDLsU5JRObWQusAolFMeko/tYPufNkRffBAQjIE+99jbA87xv6FgmjLtwjh9wBWajwAA==} mdast-util-to-markdown@1.5.0: resolution: {integrity: sha512-bbv7TPv/WC49thZPg3jXuqzuvI45IL2EVAr/KxF0BSdHsU0ceFHOmwQn6evxAh1GaoK/6GQ1wp4R4oW2+LFL/A==} @@ -11526,8 +11595,8 @@ packages: resolution: {integrity: sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==} engines: {node: '>= 8'} - mermaid@10.9.0: - resolution: {integrity: sha512-swZju0hFox/B/qoLKK0rOxxgh8Cf7rJSfAUc1u8fezVihYMvrJAS45GzAxTVf4Q+xn9uMgitBcmWk7nWGXOs/g==} + mermaid@10.9.3: + resolution: {integrity: sha512-V80X1isSEvAewIL3xhmz/rVmc27CVljcsbWxkxlWJWY/1kQa4XOABqpDl2qQLGKzpKm6WbTfUEKImBlUfFYArw==} meros@1.3.0: resolution: {integrity: sha512-2BNGOimxEz5hmjUG2FwoxCt5HN7BXdaWyFqEwxPTrJzVdABtrL4TiHTcsWSFAxPQ/tOnEaQEJh3qWq71QRMY+w==} @@ -11888,8 +11957,8 @@ packages: react: '>=16.x <=18.x' react-dom: '>=16.x <=18.x' - next-seo@6.5.0: - resolution: {integrity: sha512-MfzUeWTN/x/rsKp/1n0213eojO97lIl0unxqbeCY+6pAucViHDA8GSLRRcXpgjsSmBxfCFdfpu7LXbt4ANQoNQ==} + next-seo@6.6.0: + resolution: {integrity: sha512-0VSted/W6XNtgAtH3D+BZrMLLudqfm0D5DYNJRXHcDgan/1ZF1tDFIsWrmvQlYngALyphPfZ3ZdOqlKpKdvG6w==} peerDependencies: next: ^8.1.1-canary.54 || >=9.0.0 react: '>=16.0.0' @@ -13314,8 +13383,8 @@ packages: resolution: {integrity: sha512-dQUtn90WanSNl+7mQKcXAgZxvUe7Z0SqXlgzv0za4LwiUhyzBC58yQO3liFoUgu8GiJVInAhJjkj1N0EtQ5nkQ==} hasBin: true - rehype-katex@7.0.0: - resolution: {integrity: sha512-h8FPkGE00r2XKU+/acgqwWUlyzve1IiOKwsEkg4pDL3k48PiE0Pt+/uLtVHDVkN1yA4iurZN6UES8ivHVEQV6Q==} + rehype-katex@7.0.1: + resolution: {integrity: sha512-OiM2wrZ/wuhKkigASodFoo8wimG3H12LWQaH8qSPVJn9apWKFSH3YOCtbKpBorTVw/eI7cuT21XBbvwEswbIOA==} rehype-pretty-code@0.9.11: resolution: {integrity: sha512-Eq90eCYXQJISktfRZ8PPtwc5SUyH6fJcxS8XOMnHPUQZBtC6RYo67gGlley9X2nR8vlniPj0/7oCDEYHKQa/oA==} @@ -13745,8 +13814,8 @@ packages: sonic-boom@3.3.0: resolution: {integrity: sha512-LYxp34KlZ1a2Jb8ZQgFCK3niIHzibdwtwNUWKg0qQRzsDoJ3Gfgkf8KdBTFU3SkejDEIlWwnSnpVdOZIhFMl/g==} - sort-keys@5.0.0: - resolution: {integrity: sha512-Pdz01AvCAottHTPQGzndktFNdbRA75BgOfeT1hH+AMnJFv8lynkPi42rfeEhpx1saTEI3YNMWxfqu0sFD1G8pw==} + sort-keys@5.1.0: + resolution: {integrity: sha512-aSbHV0DaBcr7u0PVHXzM6NbZNAtrr9sF6+Qfs9UUVG7Ll3jQ6hHi8F/xqIIcn2rvIVbr0v/2zyjSdwSV47AgLQ==} engines: {node: '>=12'} source-map-js@1.2.0: @@ -14006,8 +14075,8 @@ packages: babel-plugin-macros: optional: true - stylis@4.3.2: - resolution: {integrity: sha512-bhtUjWd/z6ltJiQwg0dUfxEJ+W+jdqQd8TbWLWyeIJHlnsqmGLRFFd8e5mA0AZi/zx90smXRlN66YMTcaSFifg==} + stylis@4.3.4: + resolution: {integrity: sha512-osIBl6BGUmSfDkyH2mB7EFvCJntXDrLhKjHTRj/rK6xLH0yuPrHULDRQzKokSOD4VoorhtKpfcfW1GAntu8now==} sucrase@3.35.0: resolution: {integrity: sha512-8EbVDiu9iN/nESwxeSxDKe0dunta1GOlHufmSSXxMD2z2/tMZpDMpvXQGsc+ajGo8y2uYUmixaSRUc/QPoQ0GA==} @@ -14840,8 +14909,8 @@ packages: resolution: {integrity: sha512-ZZKSmDAEFOijERBLkmYfJ+vmk3w+7hOLYDNkRCuRuMJGEmqYNCNLyBBFwWKVMhfwaEF3WOd0Zlw86U/WC/+nYw==} engines: {'0': node >=0.6.0} - 
vfile-location@5.0.2: - resolution: {integrity: sha512-NXPYyxyBSH7zB5U6+3uDdd6Nybz6o6/od9rk8bp9H8GR3L+cm/fC0uUTbqBmUTnMCUDslAGBOIKNfvvb+gGlDg==} + vfile-location@5.0.3: + resolution: {integrity: sha512-5yXvWDEgqeiYiBe1lbxYF7UMAIm/IcopxMHrMQDq3nvKcjPKIhZklUKL+AE7J7uApI4kwe2snsK+eI6UTj9EHg==} vfile-matter@3.0.1: resolution: {integrity: sha512-CAAIDwnh6ZdtrqAuxdElUqQRQDQgbbIrYtDYI8gCjXS1qQ+1XdLoK8FIZWxJwn0/I+BkSSZpar3SOgjemQz4fg==} @@ -14855,8 +14924,8 @@ packages: vfile@5.3.7: resolution: {integrity: sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==} - vfile@6.0.1: - resolution: {integrity: sha512-1bYqc7pt6NIADBJ98UiG0Bn/CHIVOoZ/IyEkqIruLg0mE1BKzkOXY2D6CSqQIcKqgadppE5lrxgWXJmXd7zZJw==} + vfile@6.0.3: + resolution: {integrity: sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==} vite-node@1.6.0: resolution: {integrity: sha512-de6HJgzC+TFzOu0NTC4RAIsyf/DY/ibWDYQUcuEA84EMHhcefTUGkjFHKKEJhQN4A+6I0u++kr3l36ZF2d7XRw==} @@ -15422,26 +15491,26 @@ snapshots: dependencies: '@amplitude/types': 1.10.2 '@amplitude/utils': 1.10.2 - tslib: 2.6.3 + tslib: 2.7.0 '@amplitude/node@1.10.2': dependencies: '@amplitude/identify': 1.10.2 '@amplitude/types': 1.10.2 '@amplitude/utils': 1.10.2 - tslib: 2.6.3 + tslib: 2.7.0 '@amplitude/plugin-page-view-tracking-browser@0.8.0': dependencies: '@amplitude/analytics-client-common': 0.7.0 '@amplitude/analytics-types': 0.20.0 - tslib: 2.6.0 + tslib: 2.7.0 '@amplitude/plugin-web-attribution-browser@0.7.0': dependencies: '@amplitude/analytics-client-common': 0.7.0 '@amplitude/analytics-types': 0.20.0 - tslib: 2.6.0 + tslib: 2.7.0 '@amplitude/types@1.10.2': {} @@ -15595,7 +15664,7 @@ snapshots: '@babel/core': 7.24.7 '@babel/helper-compilation-targets': 7.24.7 '@babel/helper-plugin-utils': 7.24.7 - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) lodash.debounce: 4.0.8 resolve: 1.22.8 semver: 7.6.2 @@ -15607,7 +15676,7 @@ snapshots: '@babel/core': 7.24.7 '@babel/helper-compilation-targets': 7.24.7 '@babel/helper-plugin-utils': 7.24.7 - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) lodash.debounce: 4.0.8 resolve: 1.22.8 transitivePeerDependencies: @@ -16660,7 +16729,7 @@ snapshots: '@babel/helper-split-export-declaration': 7.24.7 '@babel/parser': 7.24.7 '@babel/types': 7.24.7 - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) globals: 11.12.0 transitivePeerDependencies: - supports-color @@ -17119,7 +17188,7 @@ snapshots: '@devicefarmer/adbkit-monkey': 1.2.1 bluebird: 3.7.2 commander: 9.5.0 - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) node-forge: 1.3.1 split: 1.0.1 transitivePeerDependencies: @@ -17559,7 +17628,7 @@ snapshots: '@graphql-tools/utils': 10.0.13(graphql@16.9.0) '@graphql-typed-document-node/core': 3.2.0(graphql@16.9.0) graphql: 16.9.0 - tslib: 2.6.2 + tslib: 2.6.3 transitivePeerDependencies: - encoding - supports-color @@ -17570,7 +17639,7 @@ snapshots: '@graphql-tools/schema': 10.0.2(graphql@16.9.0) '@graphql-tools/utils': 10.0.13(graphql@16.9.0) graphql: 16.9.0 - tslib: 2.6.2 + tslib: 2.6.3 '@graphql-codegen/gql-tag-operations@4.0.4(graphql@16.9.0)': dependencies: @@ -17592,7 +17661,7 @@ snapshots: graphql: 16.9.0 import-from: 4.0.0 lodash: 4.17.21 - tslib: 2.6.2 + tslib: 2.6.3 '@graphql-codegen/plugin-helpers@5.0.4(graphql@16.9.0)': dependencies: @@ -17698,7 +17767,7 @@ snapshots: '@graphql-tools/utils': 10.0.13(graphql@16.9.0) '@whatwg-node/fetch': 0.9.16 graphql: 16.9.0 - tslib: 2.6.2 + tslib: 2.7.0 transitivePeerDependencies: - encoding @@ -17716,7 +17785,7 @@ 
snapshots: '@graphql-tools/utils': 10.0.13(graphql@16.9.0) globby: 11.1.0 graphql: 16.9.0 - tslib: 2.6.2 + tslib: 2.7.0 unixify: 1.0.0 transitivePeerDependencies: - supports-color @@ -17791,7 +17860,7 @@ snapshots: graphql: 16.9.0 is-glob: 4.0.3 micromatch: 4.0.7 - tslib: 2.6.2 + tslib: 2.7.0 unixify: 1.0.0 transitivePeerDependencies: - supports-color @@ -17804,7 +17873,7 @@ snapshots: '@graphql-tools/utils': 10.0.13(graphql@16.9.0) '@whatwg-node/fetch': 0.9.16 graphql: 16.9.0 - tslib: 2.6.2 + tslib: 2.7.0 value-or-promise: 1.0.12 transitivePeerDependencies: - '@types/node' @@ -17817,7 +17886,7 @@ snapshots: '@graphql-tools/utils': 10.0.13(graphql@16.9.0) globby: 11.1.0 graphql: 16.9.0 - tslib: 2.6.2 + tslib: 2.7.0 unixify: 1.0.0 '@graphql-tools/graphql-tag-pluck@8.2.0(graphql@16.9.0)': @@ -17845,7 +17914,7 @@ snapshots: '@graphql-tools/utils': 10.0.13(graphql@16.9.0) globby: 11.1.0 graphql: 16.9.0 - tslib: 2.6.2 + tslib: 2.7.0 unixify: 1.0.0 '@graphql-tools/load@8.0.1(graphql@16.9.0)': @@ -17854,13 +17923,13 @@ snapshots: '@graphql-tools/utils': 10.0.13(graphql@16.9.0) graphql: 16.9.0 p-limit: 3.1.0 - tslib: 2.6.2 + tslib: 2.7.0 '@graphql-tools/merge@9.0.1(graphql@16.9.0)': dependencies: '@graphql-tools/utils': 10.0.13(graphql@16.9.0) graphql: 16.9.0 - tslib: 2.6.2 + tslib: 2.7.0 '@graphql-tools/optimize@2.0.0(graphql@16.9.0)': dependencies: @@ -17875,7 +17944,7 @@ snapshots: '@types/json-stable-stringify': 1.0.36 '@whatwg-node/fetch': 0.9.16 chalk: 4.1.2 - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) dotenv: 16.4.1 graphql: 16.9.0 graphql-request: 6.1.0(graphql@16.9.0) @@ -17886,7 +17955,7 @@ snapshots: json-stable-stringify: 1.1.1 lodash: 4.17.21 scuid: 1.1.0 - tslib: 2.6.2 + tslib: 2.7.0 yaml-ast-parser: 0.0.43 transitivePeerDependencies: - '@types/node' @@ -17936,7 +18005,7 @@ snapshots: '@whatwg-node/fetch': 0.9.16 graphql: 16.9.0 isomorphic-ws: 5.0.0(ws@8.18.0) - tslib: 2.6.2 + tslib: 2.7.0 value-or-promise: 1.0.12 ws: 8.18.0 transitivePeerDependencies: @@ -17951,7 +18020,7 @@ snapshots: cross-inspect: 1.0.0 dset: 3.1.3 graphql: 16.9.0 - tslib: 2.6.2 + tslib: 2.7.0 '@graphql-tools/utils@10.3.1(graphql@16.9.0)': dependencies: @@ -17997,7 +18066,7 @@ snapshots: '@headlessui/react@1.7.19(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: - '@tanstack/react-virtual': 3.5.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@tanstack/react-virtual': 3.10.8(react-dom@18.3.1(react@18.3.1))(react@18.3.1) client-only: 0.0.1 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) @@ -18363,7 +18432,7 @@ snapshots: dependencies: bs58: 6.0.0 - '@mysten/dapp-kit@0.14.25(@tanstack/react-query@5.59.0(react@18.3.1))(@types/react-dom@18.3.0)(@types/react@18.3.3)(immer@9.0.21)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(typescript@5.5.3)': + '@mysten/dapp-kit@0.14.25(@tanstack/react-query@5.59.0(react@18.3.1))(@types/react-dom@18.3.0)(@types/react@18.3.3)(babel-plugin-macros@3.1.0)(immer@9.0.21)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(typescript@5.5.3)': dependencies: '@mysten/sui': 1.12.0(typescript@5.5.3) '@mysten/wallet-standard': 0.13.7(typescript@5.5.3) @@ -18429,52 +18498,64 @@ snapshots: nanostores: 0.10.3 react: 18.3.1 - '@napi-rs/simple-git-android-arm-eabi@0.1.16': + '@napi-rs/simple-git-android-arm-eabi@0.1.19': + optional: true + + '@napi-rs/simple-git-android-arm64@0.1.19': optional: true - '@napi-rs/simple-git-android-arm64@0.1.16': + '@napi-rs/simple-git-darwin-arm64@0.1.19': optional: true - '@napi-rs/simple-git-darwin-arm64@0.1.16': + 
'@napi-rs/simple-git-darwin-x64@0.1.19': optional: true - '@napi-rs/simple-git-darwin-x64@0.1.16': + '@napi-rs/simple-git-freebsd-x64@0.1.19': optional: true - '@napi-rs/simple-git-linux-arm-gnueabihf@0.1.16': + '@napi-rs/simple-git-linux-arm-gnueabihf@0.1.19': optional: true - '@napi-rs/simple-git-linux-arm64-gnu@0.1.16': + '@napi-rs/simple-git-linux-arm64-gnu@0.1.19': optional: true - '@napi-rs/simple-git-linux-arm64-musl@0.1.16': + '@napi-rs/simple-git-linux-arm64-musl@0.1.19': optional: true - '@napi-rs/simple-git-linux-x64-gnu@0.1.16': + '@napi-rs/simple-git-linux-powerpc64le-gnu@0.1.19': optional: true - '@napi-rs/simple-git-linux-x64-musl@0.1.16': + '@napi-rs/simple-git-linux-s390x-gnu@0.1.19': optional: true - '@napi-rs/simple-git-win32-arm64-msvc@0.1.16': + '@napi-rs/simple-git-linux-x64-gnu@0.1.19': optional: true - '@napi-rs/simple-git-win32-x64-msvc@0.1.16': + '@napi-rs/simple-git-linux-x64-musl@0.1.19': optional: true - '@napi-rs/simple-git@0.1.16': + '@napi-rs/simple-git-win32-arm64-msvc@0.1.19': + optional: true + + '@napi-rs/simple-git-win32-x64-msvc@0.1.19': + optional: true + + '@napi-rs/simple-git@0.1.19': optionalDependencies: - '@napi-rs/simple-git-android-arm-eabi': 0.1.16 - '@napi-rs/simple-git-android-arm64': 0.1.16 - '@napi-rs/simple-git-darwin-arm64': 0.1.16 - '@napi-rs/simple-git-darwin-x64': 0.1.16 - '@napi-rs/simple-git-linux-arm-gnueabihf': 0.1.16 - '@napi-rs/simple-git-linux-arm64-gnu': 0.1.16 - '@napi-rs/simple-git-linux-arm64-musl': 0.1.16 - '@napi-rs/simple-git-linux-x64-gnu': 0.1.16 - '@napi-rs/simple-git-linux-x64-musl': 0.1.16 - '@napi-rs/simple-git-win32-arm64-msvc': 0.1.16 - '@napi-rs/simple-git-win32-x64-msvc': 0.1.16 + '@napi-rs/simple-git-android-arm-eabi': 0.1.19 + '@napi-rs/simple-git-android-arm64': 0.1.19 + '@napi-rs/simple-git-darwin-arm64': 0.1.19 + '@napi-rs/simple-git-darwin-x64': 0.1.19 + '@napi-rs/simple-git-freebsd-x64': 0.1.19 + '@napi-rs/simple-git-linux-arm-gnueabihf': 0.1.19 + '@napi-rs/simple-git-linux-arm64-gnu': 0.1.19 + '@napi-rs/simple-git-linux-arm64-musl': 0.1.19 + '@napi-rs/simple-git-linux-powerpc64le-gnu': 0.1.19 + '@napi-rs/simple-git-linux-s390x-gnu': 0.1.19 + '@napi-rs/simple-git-linux-x64-gnu': 0.1.19 + '@napi-rs/simple-git-linux-x64-musl': 0.1.19 + '@napi-rs/simple-git-win32-arm64-msvc': 0.1.19 + '@napi-rs/simple-git-win32-x64-msvc': 0.1.19 '@ndelangen/get-tarball@3.0.9': dependencies: @@ -18515,8 +18596,14 @@ snapshots: dependencies: '@noble/hashes': 1.4.0 + '@noble/curves@1.6.0': + dependencies: + '@noble/hashes': 1.5.0 + '@noble/hashes@1.4.0': {} + '@noble/hashes@1.5.0': {} + '@nodelib/fs.scandir@2.1.5': dependencies: '@nodelib/fs.stat': 2.0.5 @@ -18552,7 +18639,7 @@ snapshots: dependencies: '@oclif/errors': 1.3.6 '@oclif/parser': 3.8.17 - debug: 4.3.5(supports-color@8.1.1) + debug: 4.3.7(supports-color@8.1.1) globby: 11.1.0 is-wsl: 2.2.0 tslib: 2.7.0 @@ -18566,7 +18653,7 @@ snapshots: debug: 4.3.5(supports-color@8.1.1) globby: 11.1.0 is-wsl: 2.2.0 - tslib: 2.6.3 + tslib: 2.7.0 transitivePeerDependencies: - supports-color @@ -18574,7 +18661,7 @@ snapshots: dependencies: '@oclif/errors': 1.3.6 '@oclif/parser': 3.8.17 - debug: 4.3.5(supports-color@8.1.1) + debug: 4.3.7(supports-color@8.1.1) globby: 11.1.0 is-wsl: 2.2.0 tslib: 2.7.0 @@ -18618,7 +18705,7 @@ snapshots: '@oclif/errors': 1.3.6 '@oclif/linewrap': 1.0.0 chalk: 4.1.2 - tslib: 2.6.3 + tslib: 2.7.0 '@oclif/plugin-autocomplete@0.3.0': dependencies: @@ -18693,69 +18780,73 @@ snapshots: '@open-draft/until@2.1.0': {} - 
'@parcel/bundler-default@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/bundler-default@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: '@parcel/diagnostic': 2.12.0 '@parcel/graph': 3.2.0 - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/rust': 2.12.0 '@parcel/utils': 2.12.0 nullthrows: 1.1.1 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/cache@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/cache@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: '@parcel/core': 2.12.0(@swc/helpers@0.5.5) '@parcel/fs': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/logger': 2.12.0 '@parcel/utils': 2.12.0 lmdb: 2.8.5 + transitivePeerDependencies: + - '@swc/helpers' '@parcel/codeframe@2.12.0': dependencies: chalk: 4.1.2 - '@parcel/compressor-raw@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/compressor-raw@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' '@parcel/config-default@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)(postcss@8.4.39)(relateurl@0.2.7)(terser@5.31.1)(typescript@5.5.3)': dependencies: - '@parcel/bundler-default': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/compressor-raw': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/bundler-default': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/compressor-raw': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/core': 2.12.0(@swc/helpers@0.5.5) - '@parcel/namer-default': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/optimizer-css': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/optimizer-htmlnano': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(postcss@8.4.39)(relateurl@0.2.7)(terser@5.31.1)(typescript@5.5.3) - '@parcel/optimizer-image': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/optimizer-svgo': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/namer-default': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/optimizer-css': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/optimizer-htmlnano': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)(postcss@8.4.39)(relateurl@0.2.7)(terser@5.31.1)(typescript@5.5.3) + '@parcel/optimizer-image': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/optimizer-svgo': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/optimizer-swc': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) - '@parcel/packager-css': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/packager-html': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/packager-js': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/packager-raw': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/packager-svg': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/packager-wasm': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/reporter-dev-server': 
2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/resolver-default': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/runtime-browser-hmr': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/runtime-js': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/runtime-react-refresh': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/runtime-service-worker': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/transformer-babel': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/transformer-css': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/transformer-html': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/transformer-image': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/packager-css': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/packager-html': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/packager-js': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/packager-raw': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/packager-svg': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/packager-wasm': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/reporter-dev-server': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/resolver-default': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/runtime-browser-hmr': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/runtime-js': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/runtime-react-refresh': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/runtime-service-worker': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/transformer-babel': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/transformer-css': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/transformer-html': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/transformer-image': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/transformer-js': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/transformer-json': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/transformer-postcss': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/transformer-posthtml': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/transformer-raw': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/transformer-react-refresh-wrap': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/transformer-svg': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/transformer-json': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/transformer-postcss': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/transformer-posthtml': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/transformer-raw': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/transformer-react-refresh-wrap': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/transformer-svg': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) transitivePeerDependencies: - '@swc/helpers' - cssnano @@ -18770,14 
+18861,14 @@ snapshots: '@parcel/core@2.12.0(@swc/helpers@0.5.5)': dependencies: '@mischnic/json-sourcemap': 0.1.1 - '@parcel/cache': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/cache': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/diagnostic': 2.12.0 '@parcel/events': 2.12.0 '@parcel/fs': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/graph': 3.2.0 '@parcel/logger': 2.12.0 '@parcel/package-manager': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/profiler': 2.12.0 '@parcel/rust': 2.12.0 '@parcel/source-map': 2.1.1 @@ -18828,13 +18919,14 @@ snapshots: dependencies: chalk: 4.1.2 - '@parcel/namer-default@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/namer-default@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: '@parcel/diagnostic': 2.12.0 - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) nullthrows: 1.1.1 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' '@parcel/node-resolver-core@3.3.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': dependencies: @@ -18848,10 +18940,10 @@ snapshots: transitivePeerDependencies: - '@parcel/core' - '@parcel/optimizer-css@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/optimizer-css@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: '@parcel/diagnostic': 2.12.0 - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/source-map': 2.1.1 '@parcel/utils': 2.12.0 browserslist: 4.23.1 @@ -18859,16 +18951,18 @@ snapshots: nullthrows: 1.1.1 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/optimizer-htmlnano@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(postcss@8.4.39)(relateurl@0.2.7)(terser@5.31.1)(typescript@5.5.3)': + '@parcel/optimizer-htmlnano@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)(postcss@8.4.39)(relateurl@0.2.7)(terser@5.31.1)(typescript@5.5.3)': dependencies: - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) htmlnano: 2.1.1(postcss@8.4.39)(relateurl@0.2.7)(svgo@2.8.0)(terser@5.31.1)(typescript@5.5.3) nullthrows: 1.1.1 posthtml: 0.16.6 svgo: 2.8.0 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - cssnano - postcss - purgecss @@ -18878,28 +18972,31 @@ snapshots: - typescript - uncss - '@parcel/optimizer-image@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/optimizer-image@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: '@parcel/core': 2.12.0(@swc/helpers@0.5.5) '@parcel/diagnostic': 2.12.0 - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/rust': 2.12.0 '@parcel/utils': 2.12.0 '@parcel/workers': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + transitivePeerDependencies: + - '@swc/helpers' - '@parcel/optimizer-svgo@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + 
'@parcel/optimizer-svgo@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: '@parcel/diagnostic': 2.12.0 - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/utils': 2.12.0 svgo: 2.8.0 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' '@parcel/optimizer-swc@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: '@parcel/diagnostic': 2.12.0 - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/source-map': 2.1.1 '@parcel/utils': 2.12.0 '@swc/core': 1.6.13(@swc/helpers@0.5.5) @@ -18923,31 +19020,33 @@ snapshots: transitivePeerDependencies: - '@swc/helpers' - '@parcel/packager-css@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/packager-css@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: '@parcel/diagnostic': 2.12.0 - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/source-map': 2.1.1 '@parcel/utils': 2.12.0 lightningcss: 1.27.0 nullthrows: 1.1.1 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/packager-html@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/packager-html@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/types': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/utils': 2.12.0 nullthrows: 1.1.1 posthtml: 0.16.6 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/packager-js@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/packager-js@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: '@parcel/diagnostic': 2.12.0 - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/rust': 2.12.0 '@parcel/source-map': 2.1.1 '@parcel/types': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) @@ -18956,33 +19055,38 @@ snapshots: nullthrows: 1.1.1 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/packager-raw@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/packager-raw@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/packager-svg@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/packager-svg@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/types': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/utils': 2.12.0 posthtml: 0.16.6 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/packager-wasm@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': 
+ '@parcel/packager-wasm@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/plugin@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/plugin@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: '@parcel/types': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' '@parcel/profiler@2.12.0': dependencies: @@ -18990,71 +19094,79 @@ snapshots: '@parcel/events': 2.12.0 chrome-trace-event: 1.0.4 - '@parcel/reporter-cli@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/reporter-cli@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/types': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/utils': 2.12.0 chalk: 4.1.2 term-size: 2.2.1 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/reporter-dev-server@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/reporter-dev-server@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/utils': 2.12.0 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/reporter-tracer@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/reporter-tracer@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/utils': 2.12.0 chrome-trace-event: 1.0.4 nullthrows: 1.1.1 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/resolver-default@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/resolver-default@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: '@parcel/node-resolver-core': 3.3.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/runtime-browser-hmr@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/runtime-browser-hmr@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/utils': 2.12.0 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/runtime-js@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/runtime-js@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: '@parcel/diagnostic': 2.12.0 - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/utils': 2.12.0 
nullthrows: 1.1.1 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/runtime-react-refresh@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/runtime-react-refresh@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/utils': 2.12.0 react-error-overlay: 6.0.9 react-refresh: 0.9.0 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/runtime-service-worker@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/runtime-service-worker@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/utils': 2.12.0 nullthrows: 1.1.1 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' '@parcel/rust@2.12.0': {} @@ -19062,10 +19174,10 @@ snapshots: dependencies: detect-libc: 1.0.3 - '@parcel/transformer-babel@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/transformer-babel@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: '@parcel/diagnostic': 2.12.0 - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/source-map': 2.1.1 '@parcel/utils': 2.12.0 browserslist: 4.23.1 @@ -19074,11 +19186,12 @@ snapshots: semver: 7.6.2 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/transformer-css@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/transformer-css@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: '@parcel/diagnostic': 2.12.0 - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/source-map': 2.1.1 '@parcel/utils': 2.12.0 browserslist: 4.23.1 @@ -19086,11 +19199,12 @@ snapshots: nullthrows: 1.1.1 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/transformer-html@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/transformer-html@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: '@parcel/diagnostic': 2.12.0 - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/rust': 2.12.0 nullthrows: 1.1.1 posthtml: 0.16.6 @@ -19100,20 +19214,23 @@ snapshots: srcset: 4.0.0 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/transformer-image@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/transformer-image@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: '@parcel/core': 2.12.0(@swc/helpers@0.5.5) - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/utils': 2.12.0 '@parcel/workers': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) nullthrows: 1.1.1 + transitivePeerDependencies: + - '@swc/helpers' '@parcel/transformer-js@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': dependencies: '@parcel/core': 2.12.0(@swc/helpers@0.5.5) '@parcel/diagnostic': 2.12.0 - '@parcel/plugin': 
2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/rust': 2.12.0 '@parcel/source-map': 2.1.1 '@parcel/utils': 2.12.0 @@ -19124,17 +19241,18 @@ snapshots: regenerator-runtime: 0.13.11 semver: 7.6.2 - '@parcel/transformer-json@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/transformer-json@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) json5: 2.2.3 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/transformer-postcss@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/transformer-postcss@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: '@parcel/diagnostic': 2.12.0 - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/rust': 2.12.0 '@parcel/utils': 2.12.0 clone: 2.1.2 @@ -19143,10 +19261,11 @@ snapshots: semver: 7.6.2 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/transformer-posthtml@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/transformer-posthtml@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/utils': 2.12.0 nullthrows: 1.1.1 posthtml: 0.16.6 @@ -19155,25 +19274,28 @@ snapshots: semver: 7.6.2 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/transformer-raw@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/transformer-raw@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/transformer-react-refresh-wrap@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/transformer-react-refresh-wrap@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/utils': 2.12.0 react-refresh: 0.9.0 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' - '@parcel/transformer-svg@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))': + '@parcel/transformer-svg@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: '@parcel/diagnostic': 2.12.0 - '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/plugin': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/rust': 2.12.0 nullthrows: 1.1.1 posthtml: 0.16.6 @@ -19182,10 +19304,11 @@ snapshots: semver: 7.6.2 transitivePeerDependencies: - '@parcel/core' + - '@swc/helpers' '@parcel/types@2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5)': dependencies: - '@parcel/cache': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/cache': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/diagnostic': 2.12.0 '@parcel/fs': 
2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/package-manager': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) @@ -20461,7 +20584,7 @@ snapshots: '@sentry/core': 7.61.0 '@sentry/types': 7.61.0 '@sentry/utils': 7.61.0 - tslib: 2.6.0 + tslib: 2.7.0 '@sentry/browser@7.59.2': dependencies: @@ -20512,7 +20635,7 @@ snapshots: dependencies: '@sentry/types': 7.61.0 '@sentry/utils': 7.61.0 - tslib: 2.6.0 + tslib: 2.7.0 '@sentry/hub@6.19.7': dependencies: @@ -20579,7 +20702,7 @@ snapshots: '@sentry/utils@7.61.0': dependencies: '@sentry/types': 7.61.0 - tslib: 2.6.0 + tslib: 2.7.0 '@sentry/webpack-plugin@1.20.0': dependencies: @@ -21328,7 +21451,7 @@ snapshots: '@storybook/react-docgen-typescript-plugin@1.0.6--canary.9.0c3f3b7.0(typescript@5.5.3)(webpack@5.92.1(@swc/core@1.6.13(@swc/helpers@0.5.5))(webpack-cli@5.1.4(webpack@5.92.1)))': dependencies: - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) endent: 2.1.0 find-cache-dir: 3.3.2 flat-cache: 3.2.0 @@ -21694,13 +21817,13 @@ snapshots: '@tanstack/query-core': 5.59.0 react: 18.3.1 - '@tanstack/react-virtual@3.5.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + '@tanstack/react-virtual@3.10.8(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': dependencies: - '@tanstack/virtual-core': 3.5.0 + '@tanstack/virtual-core': 3.10.8 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - '@tanstack/virtual-core@3.5.0': {} + '@tanstack/virtual-core@3.10.8': {} '@testing-library/dom@10.3.1': dependencies: @@ -21742,7 +21865,7 @@ snapshots: '@theguild/remark-mermaid@0.0.5(react@18.3.1)': dependencies: - mermaid: 10.9.0 + mermaid: 10.9.3 react: 18.3.1 unist-util-visit: 5.0.0 transitivePeerDependencies: @@ -21948,7 +22071,7 @@ snapshots: dependencies: '@types/unist': 2.0.10 - '@types/mdast@4.0.3': + '@types/mdast@4.0.4': dependencies: '@types/unist': 3.0.2 @@ -22093,7 +22216,7 @@ snapshots: '@typescript-eslint/scope-manager': 5.62.0 '@typescript-eslint/type-utils': 5.62.0(eslint@8.45.0)(typescript@5.5.3) '@typescript-eslint/utils': 5.62.0(eslint@8.45.0)(typescript@5.5.3) - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) eslint: 8.45.0 graphemer: 1.4.0 ignore: 5.3.1 @@ -22157,7 +22280,7 @@ snapshots: '@typescript-eslint/scope-manager': 5.62.0 '@typescript-eslint/types': 5.62.0 '@typescript-eslint/typescript-estree': 5.62.0(typescript@5.5.3) - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) eslint: 8.45.0 optionalDependencies: typescript: 5.5.3 @@ -22219,7 +22342,7 @@ snapshots: dependencies: '@typescript-eslint/typescript-estree': 5.62.0(typescript@5.5.3) '@typescript-eslint/utils': 5.62.0(eslint@8.45.0)(typescript@5.5.3) - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) eslint: 8.45.0 tsutils: 3.21.0(typescript@5.5.3) optionalDependencies: @@ -22243,7 +22366,7 @@ snapshots: dependencies: '@typescript-eslint/typescript-estree': 7.16.0(typescript@5.5.3) '@typescript-eslint/utils': 7.16.0(eslint@9.6.0)(typescript@5.5.3) - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) eslint: 9.6.0 ts-api-utils: 1.3.0(typescript@5.5.3) optionalDependencies: @@ -22265,7 +22388,7 @@ snapshots: dependencies: '@typescript-eslint/types': 5.33.1 '@typescript-eslint/visitor-keys': 5.33.1 - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) globby: 11.1.0 is-glob: 4.0.3 semver: 7.6.2 @@ -22279,7 +22402,7 @@ snapshots: dependencies: '@typescript-eslint/types': 5.62.0 '@typescript-eslint/visitor-keys': 5.62.0 - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) globby: 11.1.0 is-glob: 4.0.3 semver: 7.6.2 @@ -22322,7 +22445,7 @@ snapshots: 
dependencies: '@typescript-eslint/types': 8.0.0-alpha.30 '@typescript-eslint/visitor-keys': 8.0.0-alpha.30 - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) globby: 11.1.0 is-glob: 4.0.3 minimatch: 9.0.5 @@ -22851,19 +22974,19 @@ snapshots: agent-base@6.0.2: dependencies: - debug: 4.3.5(supports-color@8.1.1) + debug: 4.3.7(supports-color@8.1.1) transitivePeerDependencies: - supports-color agent-base@7.1.0: dependencies: - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) transitivePeerDependencies: - supports-color agent-base@7.1.1: dependencies: - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) transitivePeerDependencies: - supports-color @@ -22978,11 +23101,11 @@ snapshots: aria-hidden@1.2.3: dependencies: - tslib: 2.6.0 + tslib: 2.7.0 aria-hidden@1.2.4: dependencies: - tslib: 2.6.3 + tslib: 2.7.0 aria-query@4.2.2: dependencies: @@ -23079,6 +23202,10 @@ snapshots: asap@2.0.6: {} + asn1-ts@8.0.2: + dependencies: + tslib: 2.7.0 + asn1@0.2.6: dependencies: safer-buffer: 2.1.2 @@ -23113,7 +23240,7 @@ snapshots: astral-regex@2.0.0: {} - astring@1.8.6: {} + astring@1.9.0: {} async-limiter@1.0.1: {} @@ -23147,6 +23274,8 @@ snapshots: aws4@1.12.0: {} + aws4fetch@1.0.20: {} + axe-core@4.9.1: {} axios@0.26.1: @@ -24113,12 +24242,12 @@ snapshots: csstype@3.1.3: {} - cytoscape-cose-bilkent@4.1.0(cytoscape@3.29.2): + cytoscape-cose-bilkent@4.1.0(cytoscape@3.30.2): dependencies: cose-base: 1.0.3 - cytoscape: 3.29.2 + cytoscape: 3.30.2 - cytoscape@3.29.2: {} + cytoscape@3.30.2: {} d3-array@2.12.1: dependencies: @@ -24331,7 +24460,7 @@ snapshots: dependencies: '@babel/runtime': 7.24.7 - dayjs@1.11.11: {} + dayjs@1.11.13: {} de-indent@1.0.2: {} @@ -24359,9 +24488,11 @@ snapshots: optionalDependencies: supports-color: 8.1.1 - debug@4.3.7: + debug@4.3.7(supports-color@8.1.1): dependencies: ms: 2.1.3 + optionalDependencies: + supports-color: 8.1.1 decamelize@1.2.0: {} @@ -24482,7 +24613,7 @@ snapshots: detect-port@1.5.1: dependencies: address: 1.2.2 - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) transitivePeerDependencies: - supports-color @@ -24550,7 +24681,7 @@ snapshots: dependencies: domelementtype: 2.3.0 - dompurify@3.1.6: {} + dompurify@3.1.7: {} domutils@2.8.0: dependencies: @@ -24819,14 +24950,14 @@ snapshots: esbuild-register@3.4.2(esbuild@0.18.20): dependencies: - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) esbuild: 0.18.20 transitivePeerDependencies: - supports-color esbuild-register@3.5.0(esbuild@0.18.20): dependencies: - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) esbuild: 0.18.20 transitivePeerDependencies: - supports-color @@ -25227,7 +25358,7 @@ snapshots: ajv: 6.12.6 chalk: 4.1.2 cross-spawn: 7.0.3 - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) doctrine: 3.0.0 escape-string-regexp: 4.0.0 eslint-scope: 7.2.2 @@ -25388,7 +25519,7 @@ snapshots: estree-util-to-js@1.2.0: dependencies: '@types/estree-jsx': 1.0.5 - astring: 1.8.6 + astring: 1.9.0 source-map: 0.7.4 estree-util-value-to-estree@1.3.0: @@ -25708,7 +25839,7 @@ snapshots: flow-parser@0.212.0: {} - focus-visible@5.2.0: {} + focus-visible@5.2.1: {} follow-redirects@1.15.6: {} @@ -26175,12 +26306,12 @@ snapshots: graphql-tag@2.12.6(graphql@15.9.0): dependencies: graphql: 15.9.0 - tslib: 2.6.0 + tslib: 2.7.0 graphql-tag@2.12.6(graphql@16.9.0): dependencies: graphql: 16.9.0 - tslib: 2.6.0 + tslib: 2.7.0 graphql-ws@5.14.3(graphql@16.9.0): dependencies: @@ -26278,7 +26409,7 @@ snapshots: hash-obj@4.0.0: dependencies: is-obj: 3.0.0 - sort-keys: 5.0.0 + sort-keys: 5.1.0 type-fest: 1.4.0 hasown@2.0.2: @@ -26295,16 
+26426,16 @@ snapshots: dependencies: '@types/hast': 3.0.4 hast-util-from-dom: 5.0.0 - hast-util-from-html: 2.0.1 + hast-util-from-html: 2.0.3 unist-util-remove-position: 5.0.0 - hast-util-from-html@2.0.1: + hast-util-from-html@2.0.3: dependencies: '@types/hast': 3.0.4 devlop: 1.1.0 hast-util-from-parse5: 8.0.1 parse5: 7.1.2 - vfile: 6.0.1 + vfile: 6.0.3 vfile-message: 4.0.2 hast-util-from-parse5@8.0.1: @@ -26314,8 +26445,8 @@ snapshots: devlop: 1.1.0 hastscript: 8.0.0 property-information: 6.5.0 - vfile: 6.0.1 - vfile-location: 5.0.2 + vfile: 6.0.3 + vfile-location: 5.0.3 web-namespaces: 2.0.1 hast-util-is-element@3.0.0: @@ -26326,7 +26457,7 @@ snapshots: dependencies: '@types/hast': 3.0.4 - hast-util-raw@9.0.3: + hast-util-raw@9.0.4: dependencies: '@types/hast': 3.0.4 '@types/unist': 3.0.2 @@ -26334,11 +26465,11 @@ snapshots: hast-util-from-parse5: 8.0.1 hast-util-to-parse5: 8.0.0 html-void-elements: 3.0.0 - mdast-util-to-hast: 13.1.0 + mdast-util-to-hast: 13.2.0 parse5: 7.1.2 unist-util-position: 5.0.0 unist-util-visit: 5.0.0 - vfile: 6.0.1 + vfile: 6.0.3 web-namespaces: 2.0.1 zwitch: 2.0.4 @@ -26475,7 +26606,7 @@ snapshots: http-call@5.3.0: dependencies: content-type: 1.0.5 - debug: 4.3.5(supports-color@8.1.1) + debug: 4.3.7(supports-color@8.1.1) is-retry-allowed: 1.2.0 is-stream: 2.0.1 parse-json: 4.0.0 @@ -26494,14 +26625,14 @@ snapshots: http-proxy-agent@7.0.0: dependencies: agent-base: 7.1.0 - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) transitivePeerDependencies: - supports-color http-proxy-agent@7.0.2: dependencies: agent-base: 7.1.1 - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) transitivePeerDependencies: - supports-color @@ -26519,7 +26650,7 @@ snapshots: https-proxy-agent@4.0.0: dependencies: agent-base: 5.1.1 - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) transitivePeerDependencies: - supports-color @@ -26533,14 +26664,14 @@ snapshots: https-proxy-agent@7.0.2: dependencies: agent-base: 7.1.0 - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) transitivePeerDependencies: - supports-color https-proxy-agent@7.0.5: dependencies: agent-base: 7.1.1 - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) transitivePeerDependencies: - supports-color @@ -27138,7 +27269,7 @@ snapshots: json5@2.2.3: {} - jsonc-parser@3.2.1: {} + jsonc-parser@3.3.1: {} jsonfile@4.0.0: optionalDependencies: @@ -27189,7 +27320,7 @@ snapshots: jwa: 1.4.1 safe-buffer: 5.2.1 - katex@0.16.10: + katex@0.16.11: dependencies: commander: 8.3.0 @@ -27483,7 +27614,7 @@ snapshots: punycode.js: 2.3.1 uc.micro: 2.1.0 - markdown-table@3.0.3: {} + markdown-table@3.0.4: {} markdown-to-jsx@7.3.2(react@18.3.1): dependencies: @@ -27551,7 +27682,7 @@ snapshots: mdast-util-gfm-table@1.0.7: dependencies: '@types/mdast': 3.0.15 - markdown-table: 3.0.3 + markdown-table: 3.0.4 mdast-util-from-markdown: 1.3.1 mdast-util-to-markdown: 1.5.0 transitivePeerDependencies: @@ -27643,17 +27774,17 @@ snapshots: unist-util-position: 4.0.4 unist-util-visit: 4.1.2 - mdast-util-to-hast@13.1.0: + mdast-util-to-hast@13.2.0: dependencies: '@types/hast': 3.0.4 - '@types/mdast': 4.0.3 + '@types/mdast': 4.0.4 '@ungap/structured-clone': 1.2.0 devlop: 1.1.0 micromark-util-sanitize-uri: 2.0.0 trim-lines: 3.0.1 unist-util-position: 5.0.0 unist-util-visit: 5.0.0 - vfile: 6.0.1 + vfile: 6.0.3 mdast-util-to-markdown@1.5.0: dependencies: @@ -27706,25 +27837,25 @@ snapshots: merge2@1.4.1: {} - mermaid@10.9.0: + mermaid@10.9.3: dependencies: '@braintree/sanitize-url': 6.0.4 '@types/d3-scale': 4.0.8 '@types/d3-scale-chromatic': 3.0.3 - cytoscape: 
3.29.2 - cytoscape-cose-bilkent: 4.1.0(cytoscape@3.29.2) + cytoscape: 3.30.2 + cytoscape-cose-bilkent: 4.1.0(cytoscape@3.30.2) d3: 7.9.0 d3-sankey: 0.12.3 dagre-d3-es: 7.0.10 - dayjs: 1.11.11 - dompurify: 3.1.6 + dayjs: 1.11.13 + dompurify: 3.1.7 elkjs: 0.9.3 - katex: 0.16.10 + katex: 0.16.11 khroma: 2.1.0 lodash-es: 4.17.21 mdast-util-from-markdown: 1.3.1 non-layered-tidy-tree-layout: 2.0.2 - stylis: 4.3.2 + stylis: 4.3.4 ts-dedent: 2.2.0 uuid: 9.0.1 web-worker: 1.3.0 @@ -27817,7 +27948,7 @@ snapshots: micromark-extension-math@2.1.2: dependencies: '@types/katex': 0.16.7 - katex: 0.16.10 + katex: 0.16.11 micromark-factory-space: 1.1.0 micromark-util-character: 1.2.0 micromark-util-symbol: 1.1.0 @@ -28009,7 +28140,7 @@ snapshots: micromark@3.2.0: dependencies: '@types/debug': 4.1.12 - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) decode-named-character-reference: 1.0.2 micromark-core-commonmark: 1.1.0 micromark-factory-space: 1.1.0 @@ -28235,7 +28366,7 @@ snapshots: transitivePeerDependencies: - supports-color - next-seo@6.5.0(next@14.2.13(@babel/core@7.24.7)(@playwright/test@1.45.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.77.6))(react-dom@18.3.1(react@18.3.1))(react@18.3.1): + next-seo@6.6.0(next@14.2.13(@babel/core@7.24.7)(@playwright/test@1.45.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.77.6))(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: next: 14.2.13(@babel/core@7.24.7)(@playwright/test@1.45.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.77.6) react: 18.3.1 @@ -28281,12 +28412,12 @@ snapshots: clsx: 2.1.1 escape-string-regexp: 5.0.0 flexsearch: 0.7.43 - focus-visible: 5.2.0 + focus-visible: 5.2.1 git-url-parse: 13.1.1 intersection-observer: 0.12.2 match-sorter: 6.3.4 next: 14.2.13(@babel/core@7.24.7)(@playwright/test@1.45.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.77.6) - next-seo: 6.5.0(next@14.2.13(@babel/core@7.24.7)(@playwright/test@1.45.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.77.6))(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + next-seo: 6.6.0(next@14.2.13(@babel/core@7.24.7)(@playwright/test@1.45.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.77.6))(react-dom@18.3.1(react@18.3.1))(react@18.3.1) next-themes: 0.2.1(next@14.2.13(@babel/core@7.24.7)(@playwright/test@1.45.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.77.6))(react-dom@18.3.1(react@18.3.1))(react@18.3.1) nextra: 2.13.4(next@14.2.13(@babel/core@7.24.7)(@playwright/test@1.45.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.77.6))(react-dom@18.3.1(react@18.3.1))(react@18.3.1) react: 18.3.1 @@ -28299,21 +28430,21 @@ snapshots: '@headlessui/react': 1.7.19(react-dom@18.3.1(react@18.3.1))(react@18.3.1) '@mdx-js/mdx': 2.3.0 '@mdx-js/react': 2.3.0(react@18.3.1) - '@napi-rs/simple-git': 0.1.16 + '@napi-rs/simple-git': 0.1.19 '@theguild/remark-mermaid': 0.0.5(react@18.3.1) '@theguild/remark-npm2yarn': 0.2.1 clsx: 2.1.1 github-slugger: 2.0.0 graceful-fs: 4.2.11 gray-matter: 4.0.3 - katex: 0.16.10 + katex: 0.16.11 lodash.get: 4.4.2 next: 14.2.13(@babel/core@7.24.7)(@playwright/test@1.45.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.77.6) next-mdx-remote: 4.4.1(react-dom@18.3.1(react@18.3.1))(react@18.3.1) p-limit: 3.1.0 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - rehype-katex: 7.0.0 + rehype-katex: 7.0.1 rehype-pretty-code: 0.9.11(shiki@0.14.7) rehype-raw: 7.0.0 remark-gfm: 3.0.1 @@ -28680,9 +28811,9 @@ snapshots: '@parcel/fs': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) 
'@parcel/logger': 2.12.0 '@parcel/package-manager': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) - '@parcel/reporter-cli': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/reporter-dev-server': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) - '@parcel/reporter-tracer': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5)) + '@parcel/reporter-cli': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/reporter-dev-server': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) + '@parcel/reporter-tracer': 2.12.0(@parcel/core@2.12.0(@swc/helpers@0.5.5))(@swc/helpers@0.5.5) '@parcel/utils': 2.12.0 chalk: 4.1.2 commander: 7.2.0 @@ -29381,7 +29512,7 @@ snapshots: puppeteer-core@2.1.1: dependencies: '@types/mime-types': 2.1.1 - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) extract-zip: 1.7.0 https-proxy-agent: 4.0.0 mime: 2.6.0 @@ -29553,7 +29684,7 @@ snapshots: dependencies: react: 18.3.1 react-style-singleton: 2.2.1(@types/react@18.3.3)(react@18.3.1) - tslib: 2.6.3 + tslib: 2.7.0 optionalDependencies: '@types/react': 18.3.3 @@ -29570,7 +29701,7 @@ snapshots: react: 18.3.1 react-remove-scroll-bar: 2.3.6(@types/react@18.3.3)(react@18.3.1) react-style-singleton: 2.2.1(@types/react@18.3.3)(react@18.3.1) - tslib: 2.6.3 + tslib: 2.7.0 use-callback-ref: 1.3.0(@types/react@18.3.3)(react@18.3.1) use-sidecar: 1.1.2(@types/react@18.3.3)(react@18.3.1) optionalDependencies: @@ -29581,7 +29712,7 @@ snapshots: react: 18.3.1 react-remove-scroll-bar: 2.3.6(@types/react@18.3.3)(react@18.3.1) react-style-singleton: 2.2.1(@types/react@18.3.3)(react@18.3.1) - tslib: 2.6.3 + tslib: 2.7.0 use-callback-ref: 1.3.2(@types/react@18.3.3)(react@18.3.1) use-sidecar: 1.1.2(@types/react@18.3.3)(react@18.3.1) optionalDependencies: @@ -29775,15 +29906,15 @@ snapshots: dependencies: jsesc: 0.5.0 - rehype-katex@7.0.0: + rehype-katex@7.0.1: dependencies: '@types/hast': 3.0.4 '@types/katex': 0.16.7 hast-util-from-html-isomorphic: 2.0.0 hast-util-to-text: 4.0.2 - katex: 0.16.10 + katex: 0.16.11 unist-util-visit-parents: 6.0.1 - vfile: 6.0.1 + vfile: 6.0.3 rehype-pretty-code@0.9.11(shiki@0.14.7): dependencies: @@ -29795,8 +29926,8 @@ snapshots: rehype-raw@7.0.0: dependencies: '@types/hast': 3.0.4 - hast-util-raw: 9.0.3 - vfile: 6.0.1 + hast-util-raw: 9.0.4 + vfile: 6.0.3 relateurl@0.2.7: {} @@ -30256,7 +30387,7 @@ snapshots: shiki@0.14.7: dependencies: ansi-sequence-parser: 1.1.1 - jsonc-parser: 3.2.1 + jsonc-parser: 3.3.1 vscode-oniguruma: 1.7.0 vscode-textmate: 8.0.0 @@ -30336,7 +30467,7 @@ snapshots: dependencies: atomic-sleep: 1.0.0 - sort-keys@5.0.0: + sort-keys@5.1.0: dependencies: is-plain-obj: 4.1.0 @@ -30602,7 +30733,7 @@ snapshots: optionalDependencies: '@babel/core': 7.24.7 - stylis@4.3.2: {} + stylis@4.3.4: {} sucrase@3.35.0: dependencies: @@ -31500,10 +31631,10 @@ snapshots: core-util-is: 1.0.2 extsprintf: 1.3.0 - vfile-location@5.0.2: + vfile-location@5.0.3: dependencies: '@types/unist': 3.0.2 - vfile: 6.0.1 + vfile: 6.0.3 vfile-matter@3.0.1: dependencies: @@ -31528,16 +31659,15 @@ snapshots: unist-util-stringify-position: 3.0.3 vfile-message: 3.1.4 - vfile@6.0.1: + vfile@6.0.3: dependencies: '@types/unist': 3.0.2 - unist-util-stringify-position: 4.0.0 vfile-message: 4.0.2 vite-node@1.6.0(@types/node@20.14.10)(lightningcss@1.27.0)(sass@1.77.6)(terser@5.31.1): dependencies: cac: 6.7.14 - debug: 4.3.7 + debug: 4.3.7(supports-color@8.1.1) pathe: 1.1.2 picocolors: 1.0.1 vite: 5.3.3(@types/node@20.14.10)(lightningcss@1.27.0)(sass@1.77.6)(terser@5.31.1) @@ 
-31554,7 +31684,7 @@ snapshots: vite-node@2.0.1(@types/node@20.14.10)(lightningcss@1.27.0)(sass@1.77.6)(terser@5.31.1): dependencies: cac: 6.7.14 - debug: 4.3.5(supports-color@8.1.1) + debug: 4.3.7(supports-color@8.1.1) pathe: 1.1.2 picocolors: 1.0.1 vite: 5.3.3(@types/node@20.14.10)(lightningcss@1.27.0)(sass@1.77.6)(terser@5.31.1) @@ -31600,7 +31730,7 @@ snapshots: '@vitest/spy': 2.0.1 '@vitest/utils': 2.0.1 chai: 5.1.1 - debug: 4.3.5(supports-color@8.1.1) + debug: 4.3.7(supports-color@8.1.1) execa: 8.0.1 magic-string: 0.30.10 pathe: 1.1.2 diff --git a/pnpm-workspace.yaml b/pnpm-workspace.yaml index 908c7ee00ae94..9bb3667fe64c2 100644 --- a/pnpm-workspace.yaml +++ b/pnpm-workspace.yaml @@ -26,3 +26,4 @@ packages: - '!sdk/typescript/keypairs/secp256r1' - '!sdk/typescript/graphql/schemas/2024.1' - '!sdk/typescript/graphql/schemas/2024.4' + - '!sdk/kms/aws' diff --git a/sdk/build-scripts/src/utils/buildPackage.ts b/sdk/build-scripts/src/utils/buildPackage.ts index fec294a1d1176..2e578dc729aa4 100755 --- a/sdk/build-scripts/src/utils/buildPackage.ts +++ b/sdk/build-scripts/src/utils/buildPackage.ts @@ -60,6 +60,7 @@ async function buildCJS( entryPoints, outdir: 'dist/cjs', sourcemap: true, + outbase: 'src', ...buildOptions, }); await buildTypes('tsconfig.json'); @@ -90,6 +91,7 @@ async function buildESM( target: 'es2020', entryPoints, outdir: 'dist/esm', + outbase: 'src', sourcemap: true, ...buildOptions, diff --git a/sdk/create-dapp/CHANGELOG.md b/sdk/create-dapp/CHANGELOG.md index c3362f697c758..f3358068f6cfb 100644 --- a/sdk/create-dapp/CHANGELOG.md +++ b/sdk/create-dapp/CHANGELOG.md @@ -1,5 +1,13 @@ # @mysten/create-dapp +## 0.3.28 + +### Patch Changes + +- Updated dependencies [c24814b] + - @mysten/sui@1.14.0 + - @mysten/dapp-kit@0.14.28 + ## 0.3.27 ### Patch Changes diff --git a/sdk/create-dapp/package.json b/sdk/create-dapp/package.json index 7737c2db3379a..353c4f2e4b7bc 100644 --- a/sdk/create-dapp/package.json +++ b/sdk/create-dapp/package.json @@ -3,7 +3,7 @@ "author": "Mysten Labs ", "description": "A CLI for creating new Sui dApps", "homepage": "https://sdk.mystenlabs.com", - "version": "0.3.27", + "version": "0.3.28", "license": "Apache-2.0", "files": [ "CHANGELOG.md", diff --git a/sdk/dapp-kit/CHANGELOG.md b/sdk/dapp-kit/CHANGELOG.md index ca385c4a3fc06..9fcd1d3708f6c 100644 --- a/sdk/dapp-kit/CHANGELOG.md +++ b/sdk/dapp-kit/CHANGELOG.md @@ -1,5 +1,14 @@ # @mysten/dapp-kit +## 0.14.28 + +### Patch Changes + +- Updated dependencies [c24814b] + - @mysten/sui@1.14.0 + - @mysten/wallet-standard@0.13.9 + - @mysten/zksend@0.11.9 + ## 0.14.27 ### Patch Changes diff --git a/sdk/dapp-kit/package.json b/sdk/dapp-kit/package.json index d9481732ea330..86327f7f2e732 100644 --- a/sdk/dapp-kit/package.json +++ b/sdk/dapp-kit/package.json @@ -3,7 +3,7 @@ "author": "Mysten Labs ", "description": "A collection of React hooks and components for interacting with the Sui blockchain and wallets.", "homepage": "https://sdk.mystenlabs.com/typescript", - "version": "0.14.27", + "version": "0.14.28", "license": "Apache-2.0", "files": [ "CHANGELOG.md", diff --git a/sdk/deepbook-v3/CHANGELOG.md b/sdk/deepbook-v3/CHANGELOG.md index 3aee7f84ea116..50b3bdf8fcc33 100644 --- a/sdk/deepbook-v3/CHANGELOG.md +++ b/sdk/deepbook-v3/CHANGELOG.md @@ -1,5 +1,12 @@ # @mysten/deepbook-v3 +## 0.12.2 + +### Patch Changes + +- Updated dependencies [c24814b] + - @mysten/sui@1.14.0 + ## 0.12.1 ### Patch Changes diff --git a/sdk/deepbook-v3/package.json b/sdk/deepbook-v3/package.json index d4002e7ac6e76..99054fc7780a1 100644 
--- a/sdk/deepbook-v3/package.json +++ b/sdk/deepbook-v3/package.json @@ -2,7 +2,7 @@ "name": "@mysten/deepbook-v3", "author": "Mysten Labs ", "description": "Sui Deepbook SDK", - "version": "0.12.1", + "version": "0.12.2", "license": "Apache-2.0", "type": "commonjs", "main": "./dist/cjs/index.js", diff --git a/sdk/deepbook/CHANGELOG.md b/sdk/deepbook/CHANGELOG.md index 73608ddc7d768..a900fd1e38c4c 100644 --- a/sdk/deepbook/CHANGELOG.md +++ b/sdk/deepbook/CHANGELOG.md @@ -1,5 +1,12 @@ # @mysten/deepbook +## 0.8.23 + +### Patch Changes + +- Updated dependencies [c24814b] + - @mysten/sui@1.14.0 + ## 0.8.22 ### Patch Changes diff --git a/sdk/deepbook/package.json b/sdk/deepbook/package.json index 711d7b93b1fbf..10d4847acdfe0 100644 --- a/sdk/deepbook/package.json +++ b/sdk/deepbook/package.json @@ -2,7 +2,7 @@ "name": "@mysten/deepbook", "author": "Mysten Labs ", "description": "Sui Deepbook SDK", - "version": "0.8.22", + "version": "0.8.23", "license": "Apache-2.0", "type": "commonjs", "main": "./dist/cjs/index.js", diff --git a/sdk/enoki/CHANGELOG.md b/sdk/enoki/CHANGELOG.md index b4176c6e2d489..6bffef310266c 100644 --- a/sdk/enoki/CHANGELOG.md +++ b/sdk/enoki/CHANGELOG.md @@ -1,5 +1,13 @@ # @mysten/enoki +## 0.4.7 + +### Patch Changes + +- Updated dependencies [c24814b] + - @mysten/sui@1.14.0 + - @mysten/zklogin@0.7.24 + ## 0.4.6 ### Patch Changes diff --git a/sdk/enoki/package.json b/sdk/enoki/package.json index 5a72fedfc8f35..d6d4eb4c7e1a4 100644 --- a/sdk/enoki/package.json +++ b/sdk/enoki/package.json @@ -1,6 +1,6 @@ { "name": "@mysten/enoki", - "version": "0.4.6", + "version": "0.4.7", "description": "TODO: Description", "license": "Apache-2.0", "author": "Mysten Labs ", diff --git a/sdk/graphql-transport/CHANGELOG.md b/sdk/graphql-transport/CHANGELOG.md index d1746220f232a..f6d5e13a6d984 100644 --- a/sdk/graphql-transport/CHANGELOG.md +++ b/sdk/graphql-transport/CHANGELOG.md @@ -1,5 +1,12 @@ # @mysten/graphql-transport +## 0.2.25 + +### Patch Changes + +- Updated dependencies [c24814b] + - @mysten/sui@1.14.0 + ## 0.2.24 ### Patch Changes diff --git a/sdk/graphql-transport/package.json b/sdk/graphql-transport/package.json index 481d20b2dc86e..1518ad56194ce 100644 --- a/sdk/graphql-transport/package.json +++ b/sdk/graphql-transport/package.json @@ -1,6 +1,6 @@ { "name": "@mysten/graphql-transport", - "version": "0.2.24", + "version": "0.2.25", "description": "A GraphQL transport to allow SuiClient to work with RPC 2.0", "license": "Apache-2.0", "author": "Mysten Labs ", diff --git a/sdk/kiosk/CHANGELOG.md b/sdk/kiosk/CHANGELOG.md index 975703c2a7aa6..24d4204792caa 100644 --- a/sdk/kiosk/CHANGELOG.md +++ b/sdk/kiosk/CHANGELOG.md @@ -1,5 +1,13 @@ # @mysten/kiosk +## 0.9.23 + +### Patch Changes + +- 4166d71: Fix doc comment on `getKiosk` command +- Updated dependencies [c24814b] + - @mysten/sui@1.14.0 + ## 0.9.22 ### Patch Changes diff --git a/sdk/kiosk/package.json b/sdk/kiosk/package.json index 98e442b40e885..b9246f27aa1ed 100644 --- a/sdk/kiosk/package.json +++ b/sdk/kiosk/package.json @@ -2,7 +2,7 @@ "name": "@mysten/kiosk", "author": "Mysten Labs ", "description": "Sui Kiosk library", - "version": "0.9.22", + "version": "0.9.23", "license": "Apache-2.0", "type": "commonjs", "main": "./dist/cjs/index.js", diff --git a/sdk/kiosk/src/client/kiosk-client.ts b/sdk/kiosk/src/client/kiosk-client.ts index 23fe8312896af..0c740e05524a7 100644 --- a/sdk/kiosk/src/client/kiosk-client.ts +++ b/sdk/kiosk/src/client/kiosk-client.ts @@ -77,8 +77,8 @@ export class KioskClient { /** * Fetches the 
kiosk contents. - * @param kioskId The ID of the kiosk to fetch. - * @param options Optioal + * @param id The ID of the kiosk to fetch. + * @param options Optional to control the fetch behavior. * @returns */ async getKiosk({ id, options }: { id: string; options?: FetchKioskOptions }): Promise { diff --git a/sdk/kms/.env.example b/sdk/kms/.env.example new file mode 100644 index 0000000000000..f369a86efb003 --- /dev/null +++ b/sdk/kms/.env.example @@ -0,0 +1,4 @@ +export AWS_ACCESS_KEY_ID="" +export AWS_SECRET_ACCESS_KEY="" +export AWS_REGION="" +export AWS_KMS_KEY_ID="" diff --git a/sdk/kms/CHANGELOG.md b/sdk/kms/CHANGELOG.md new file mode 100644 index 0000000000000..dbbe361c4ce9c --- /dev/null +++ b/sdk/kms/CHANGELOG.md @@ -0,0 +1,15 @@ +# @mysten/kms + +## 0.0.3 + +### Patch Changes + +- 02c9e46: Fix exports on the bundled package + +## 0.0.2 + +### Patch Changes + +- b3f3925: Introduces @mysten/kms which initially exposes a Sui AWS KMS signer +- Updated dependencies [c24814b] + - @mysten/sui@1.14.0 diff --git a/sdk/kms/README.md b/sdk/kms/README.md new file mode 100644 index 0000000000000..ec6fda431acff --- /dev/null +++ b/sdk/kms/README.md @@ -0,0 +1,75 @@ +# Sui KMS Signers + +The Sui KMS Signers package provides a set of tools for securely signing transactions using Key +Management Services (KMS) like AWS KMS. + +## Table of Contents + +- [AWS KMS Signer](#aws-kms-signer) + - [Usage](#usage) + - [API](#api) + - [fromKeyId](#fromkeyid) + - [Parameters](#parameters) + - [Examples](#examples) + +## AWS KMS Signer + +The AWS KMS Signer allows you to leverage AWS's Key Management Service to sign Sui transactions. + +### Usage + +```typescript +import { AwsKmsSigner } from '@mysten/kms/aws'; + +const prepareSigner = async () => { + const { AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_REGION, AWS_KMS_KEY_ID } = process.env; + + return AwsKmsSigner.fromKeyId(AWS_KMS_KEY_ID, { + region: AWS_REGION, + accessKeyId: AWS_ACCESS_KEY_ID, + secretAccessKey: AWS_SECRET_ACCESS_KEY, + }); +}; +``` + +### API + +#### fromKeyId + +Create an AWS KMS signer from AWS Key ID and AWS credentials. This method initializes the signer +with the necessary AWS credentials and region information, allowing it to interact with AWS KMS to +perform cryptographic operations. + +##### Parameters + +- `keyId` + **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)** + The AWS KMS key ID. +- `options` + **[object](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object)** An + object containing AWS credentials and region. + - `region` + **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)** + The AWS region. + - `accessKeyId` + **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)** + The AWS access key ID. + - `secretAccessKey` + **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)** + The AWS secret access key. + +##### Examples + +```typescript +const signer = await AwsKmsSigner.fromKeyId('your-kms-key-id', { + region: 'us-west-2', + accessKeyId: 'your-access-key-id', + secretAccessKey: 'your-secret-access-key', +}); +``` + +Returns +**[Promise](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Promise)<[AwsKmsSigner](./src/aws/aws-kms-signer.ts)>** +An instance of AwsKmsSigner. 
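+
+Once constructed, the signer behaves like any other Sui `Signer`. The sketch below (adapted from
+this package's e2e test; the message text is illustrative) signs a personal message with the
+`signer` created above and verifies the signature against its public key:
+
+```typescript
+const message = new TextEncoder().encode('Hello, AWS KMS Signer!');
+
+// Sign the message with the KMS-backed key
+const { signature } = await signer.signPersonalMessage(message);
+
+// Verify the signature against the signer's public key
+const isValid = await signer.getPublicKey().verifyPersonalMessage(message, signature); // true
+```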
+ +**Notice**: AWS Signer requires Node >=20 due to dependency on `crypto` diff --git a/sdk/kms/aws/package.json b/sdk/kms/aws/package.json new file mode 100644 index 0000000000000..31e61fb81abe7 --- /dev/null +++ b/sdk/kms/aws/package.json @@ -0,0 +1,6 @@ +{ + "private": true, + "import": "../dist/esm/aws/index.js", + "main": "../dist/cjs/aws/index.js", + "sideEffects": false +} diff --git a/sdk/kms/package.json b/sdk/kms/package.json new file mode 100644 index 0000000000000..07e19974dd924 --- /dev/null +++ b/sdk/kms/package.json @@ -0,0 +1,59 @@ +{ + "name": "@mysten/kms", + "version": "0.0.3", + "description": "A collection of KMS signers for various cloud providers", + "license": "Apache-2.0", + "author": "Mysten Labs ", + "type": "commonjs", + "exports": { + "./aws": { + "import": "./dist/esm/aws/index.js", + "require": "./dist/cjs/aws/index.js" + } + }, + "sideEffects": false, + "files": [ + "CHANGELOG.md", + "LICENSE", + "README.md", + "aws", + "dist", + "src" + ], + "scripts": { + "clean": "rm -rf tsconfig.tsbuildinfo ./dist", + "build": "build-package", + "prepublishOnly": "pnpm build", + "prettier:check": "prettier -c --ignore-unknown .", + "prettier:fix": "prettier -w --ignore-unknown .", + "eslint:check": "eslint --max-warnings=0 .", + "eslint:fix": "pnpm run eslint:check --fix", + "lint": "pnpm run eslint:check && pnpm run prettier:check", + "lint:fix": "pnpm run eslint:fix && pnpm run prettier:fix", + "test": "vitest" + }, + "repository": { + "type": "git", + "url": "git+https://github.com/mystenlabs/sui.git" + }, + "bugs": { + "url": "https://github.com/mystenlabs/sui/issues" + }, + "homepage": "https://github.com/mystenlabs/sui#readme", + "devDependencies": { + "@mysten/build-scripts": "workspace:*", + "@types/node": "^20.14.10", + "typescript": "^5.5.3", + "vitest": "^2.0.1" + }, + "dependencies": { + "@mysten/sui": "workspace:*", + "@noble/curves": "^1.4.2", + "@noble/hashes": "^1.4.0", + "asn1-ts": "^8.0.2", + "aws4fetch": "^1.0.20" + }, + "engines": { + "node": ">=20" + } +} diff --git a/sdk/kms/src/aws/aws-client.ts b/sdk/kms/src/aws/aws-client.ts new file mode 100644 index 0000000000000..9da87d090582d --- /dev/null +++ b/sdk/kms/src/aws/aws-client.ts @@ -0,0 +1,131 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +import { Secp256k1PublicKey } from '@mysten/sui/keypairs/secp256k1'; +import { Secp256r1PublicKey } from '@mysten/sui/keypairs/secp256r1'; +import { fromBase64 } from '@mysten/sui/utils'; +import { ASN1Construction, ASN1TagClass, DERElement } from 'asn1-ts'; +import { AwsClient } from 'aws4fetch'; + +import { compressPublicKeyClamped } from './utils.js'; + +interface KmsCommands { + Sign: { + request: { + KeyId: string; + Message: string; + MessageType: 'RAW' | 'DIGEST'; + SigningAlgorithm: 'ECDSA_SHA_256'; + }; + response: { + KeyId: string; + KeyOrigin: string; + Signature: string; + SigningAlgorithm: string; + }; + }; + GetPublicKey: { + request: { KeyId: string }; + response: { + CustomerMasterKeySpec: string; + KeyId: string; + KeyOrigin: string; + KeySpec: string; + KeyUsage: string; + PublicKey: string; + SigningAlgorithms: string[]; + }; + }; +} + +export interface AwsClientOptions extends Partial[0]> {} + +export class AwsKmsClient extends AwsClient { + constructor(options: AwsClientOptions = {}) { + if (!options.accessKeyId || !options.secretAccessKey) { + throw new Error('AWS Access Key ID and Secret Access Key are required'); + } + + if (!options.region) { + throw new Error('Region is required'); + } + + super({ + region: options.region, + accessKeyId: options.accessKeyId, + secretAccessKey: options.secretAccessKey, + service: 'kms', + ...options, + }); + } + + async getPublicKey(keyId: string) { + const publicKeyResponse = await this.runCommand('GetPublicKey', { KeyId: keyId }); + + if (!publicKeyResponse.PublicKey) { + throw new Error('Public Key not found for the supplied `keyId`'); + } + + const publicKey = fromBase64(publicKeyResponse.PublicKey); + + const encodedData: Uint8Array = publicKey; + const derElement = new DERElement(); + derElement.fromBytes(encodedData); + + // Validate the ASN.1 structure of the public key + if ( + !( + derElement.tagClass === ASN1TagClass.universal && + derElement.construction === ASN1Construction.constructed + ) + ) { + throw new Error('Unexpected ASN.1 structure'); + } + + const components = derElement.components; + const publicKeyElement = components[1]; + + if (!publicKeyElement) { + throw new Error('Public Key not found in the DER structure'); + } + + const compressedKey = compressPublicKeyClamped(publicKeyElement.bitString); + + switch (publicKeyResponse.KeySpec) { + case 'ECC_NIST_P256': + return new Secp256r1PublicKey(compressedKey); + case 'ECC_SECG_P256K1': + return new Secp256k1PublicKey(compressedKey); + default: + throw new Error('Unsupported key spec: ' + publicKeyResponse.KeySpec); + } + } + + async runCommand( + command: T, + body: KmsCommands[T]['request'], + { + region = this.region!, + }: { + region?: string; + } = {}, + ): Promise { + if (!region) { + throw new Error('Region is required'); + } + + const res = await this.fetch(`https://kms.${region}.amazonaws.com/`, { + headers: { + 'Content-Type': 'application/x-amz-json-1.1', + 'X-Amz-Target': `TrentService.${command}`, + }, + body: JSON.stringify(body), + }); + + if (!res.ok) { + throw new Error(await res.text()); + } + + return res.json(); + } +} diff --git a/sdk/kms/src/aws/aws-kms-signer.ts b/sdk/kms/src/aws/aws-kms-signer.ts new file mode 100644 index 0000000000000..8480bee7861c2 --- /dev/null +++ b/sdk/kms/src/aws/aws-kms-signer.ts @@ -0,0 +1,147 @@ +// Copyright (c) Mysten Labs, Inc. 
+// SPDX-License-Identifier: Apache-2.0 +import type { PublicKey, SignatureFlag } from '@mysten/sui/cryptography'; +import { SIGNATURE_FLAG_TO_SCHEME, Signer } from '@mysten/sui/cryptography'; +import { fromBase64, toBase64 } from '@mysten/sui/utils'; +import { secp256r1 } from '@noble/curves/p256'; +import { secp256k1 } from '@noble/curves/secp256k1'; +import { DERElement } from 'asn1-ts'; + +import type { AwsClientOptions } from './aws-client.js'; +import { AwsKmsClient } from './aws-client.js'; + +/** + * Configuration options for initializing the AwsKmsSigner. + */ +export interface AwsKmsSignerOptions { + /** AWS KMS Key ID used for signing */ + kmsKeyId: string; + /** Options for setting up the AWS KMS client */ + client: AwsKmsClient; + /** Public key */ + publicKey: PublicKey; +} + +/** + * Aws KMS Signer integrates AWS Key Management Service (KMS) with the Sui blockchain + * to provide signing capabilities using AWS-managed cryptographic keys. + */ +export class AwsKmsSigner extends Signer { + #publicKey: PublicKey; + /** AWS KMS client instance */ + #client: AwsKmsClient; + /** AWS KMS Key ID used for signing */ + #kmsKeyId: string; + + /** + * Creates an instance of AwsKmsSigner. It's expected to call the static `fromKeyId` method to create an instance. + * For example: + * ``` + * const signer = await AwsKmsSigner.fromKeyId(keyId, options); + * ``` + * @throws Will throw an error if required AWS credentials or region are not provided. + */ + constructor({ kmsKeyId, client, publicKey }: AwsKmsSignerOptions) { + super(); + if (!kmsKeyId) throw new Error('KMS Key ID is required'); + + this.#client = client; + this.#kmsKeyId = kmsKeyId; + this.#publicKey = publicKey; + } + + /** + * Retrieves the key scheme used by this signer. + * @returns AWS supports only Secp256k1 and Secp256r1 schemes. + */ + getKeyScheme() { + return SIGNATURE_FLAG_TO_SCHEME[this.#publicKey.flag() as SignatureFlag]; + } + + /** + * Retrieves the public key associated with this signer. + * @returns The Secp256k1PublicKey instance. + * @throws Will throw an error if the public key has not been initialized. + */ + getPublicKey() { + return this.#publicKey; + } + + /** + * Signs the given data using AWS KMS. + * @param bytes - The data to be signed as a Uint8Array. + * @returns A promise that resolves to the signature as a Uint8Array. + * @throws Will throw an error if the public key is not initialized or if signing fails. + */ + async sign(bytes: Uint8Array): Promise { + const signResponse = await this.#client.runCommand('Sign', { + KeyId: this.#kmsKeyId, + Message: toBase64(bytes), + MessageType: 'RAW', + SigningAlgorithm: 'ECDSA_SHA_256', + }); + + // Concatenate the signature components into a compact form + return this.#getConcatenatedSignature(fromBase64(signResponse.Signature)); + } + + /** + * Synchronous signing is not supported by AWS KMS. + * @throws Always throws an error indicating synchronous signing is unsupported. + */ + signData(): never { + throw new Error('KMS Signer does not support sync signing'); + } + + /** + * Generates a concatenated signature from a DER-encoded signature. + * + * This signature format is consumable by Sui's `toSerializedSignature` method. + * + * @param signature - A `Uint8Array` representing the DER-encoded signature. + * @returns A `Uint8Array` containing the concatenated signature in compact form. + * + * @throws {Error} If the input signature is invalid or cannot be processed. 
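+   *
+   * For example (illustrative only): the DER input is a SEQUENCE of the two INTEGERs `r` and `s`;
+   * these are re-encoded as the fixed-width 64-byte `r || s` form, with `s` normalized to the
+   * lower half of the curve order, which is the compact form Sui expects.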
+ */ + #getConcatenatedSignature(signature: Uint8Array): Uint8Array { + if (!signature || signature.length === 0) { + throw new Error('Invalid signature'); + } + + // Initialize a DERElement to parse the DER-encoded signature + const derElement = new DERElement(); + derElement.fromBytes(signature); + + const [r, s] = derElement.toJSON() as [string, string]; + + switch (this.getKeyScheme()) { + case 'Secp256k1': + return new secp256k1.Signature(BigInt(r), BigInt(s)).normalizeS().toCompactRawBytes(); + case 'Secp256r1': + return new secp256r1.Signature(BigInt(r), BigInt(s)).normalizeS().toCompactRawBytes(); + } + + // Create a Secp256k1Signature using the extracted r and s values + const secp256k1Signature = new secp256k1.Signature(BigInt(r), BigInt(s)); + + // Normalize the signature and convert it to compact raw bytes + return secp256k1Signature.normalizeS().toCompactRawBytes(); + } + + /** + * Prepares the signer by fetching and setting the public key from AWS KMS. + * It is recommended to initialize an `AwsKmsSigner` instance using this function. + * @returns A promise that resolves once a `AwsKmsSigner` instance is prepared (public key is set). + */ + static async fromKeyId(keyId: string, options: AwsClientOptions) { + const client = new AwsKmsClient(options); + + const pubKey = await client.getPublicKey(keyId); + + return new AwsKmsSigner({ + kmsKeyId: keyId, + client, + publicKey: pubKey, + }); + } +} diff --git a/sdk/kms/src/aws/index.ts b/sdk/kms/src/aws/index.ts new file mode 100644 index 0000000000000..8902adb226c14 --- /dev/null +++ b/sdk/kms/src/aws/index.ts @@ -0,0 +1,9 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 +import type { AwsClientOptions } from './aws-client.js'; +import type { AwsKmsSignerOptions } from './aws-kms-signer.js'; +import { AwsKmsSigner } from './aws-kms-signer.js'; + +export { AwsKmsSigner }; + +export type { AwsKmsSignerOptions, AwsClientOptions }; diff --git a/sdk/kms/src/aws/utils.ts b/sdk/kms/src/aws/utils.ts new file mode 100644 index 0000000000000..43a2afcc7e0b0 --- /dev/null +++ b/sdk/kms/src/aws/utils.ts @@ -0,0 +1,65 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +/** The total number of bits in the DER bit string for the uncompressed public key. */ +export const DER_BIT_STRING_LENGTH = 520; + +/** The total number of bytes corresponding to the DER bit string length. */ +export const DER_BYTES_LENGTH = DER_BIT_STRING_LENGTH / 8; + +// Reference Specifications: +// https://datatracker.ietf.org/doc/html/rfc5480#section-2.2 +// https://www.secg.org/sec1-v2.pdf + +/** + * Converts an array of bits into a byte array. + * + * @param bitsArray - A `Uint8ClampedArray` representing the bits to convert. + * @returns A `Uint8Array` containing the corresponding bytes. + * + * @throws {Error} If the input array does not have the expected length. + */ +function bitsToBytes(bitsArray: Uint8ClampedArray): Uint8Array { + const bytes = new Uint8Array(DER_BYTES_LENGTH); + for (let i = 0; i < DER_BIT_STRING_LENGTH; i++) { + if (bitsArray[i] === 1) { + bytes[Math.floor(i / 8)] |= 1 << (7 - (i % 8)); + } + } + return bytes; +} + +/** + * Compresses an uncompressed public key into its compressed form. + * + * The uncompressed key must follow the DER bit string format as specified in [RFC 5480](https://datatracker.ietf.org/doc/html/rfc5480#section-2.2) + * and [SEC 1: Elliptic Curve Cryptography](https://www.secg.org/sec1-v2.pdf). 
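+ *
+ * For example (illustrative): the uncompressed SEC 1 encoding `0x04 || X || Y` (65 bytes) is
+ * reduced to `0x02 || X` when `Y` is even and to `0x03 || X` when `Y` is odd, which is the
+ * 33-byte form this helper returns.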
+ * + * @param uncompressedKey - A `Uint8ClampedArray` representing the uncompressed public key bits. + * @returns A `Uint8Array` containing the compressed public key. + * + * @throws {Error} If the uncompressed key has an unexpected length or does not start with the expected prefix. + */ +export function compressPublicKeyClamped(uncompressedKey: Uint8ClampedArray): Uint8Array { + if (uncompressedKey.length !== DER_BIT_STRING_LENGTH) { + throw new Error('Unexpected length for an uncompressed public key'); + } + + // Convert bits to bytes + const uncompressedBytes = bitsToBytes(uncompressedKey); + + // Ensure the public key starts with the standard uncompressed prefix 0x04 + if (uncompressedBytes[0] !== 0x04) { + throw new Error('Public key does not start with 0x04'); + } + + // Extract X-Coordinate (skip the first byte, which is the prefix 0x04) + const xCoord = uncompressedBytes.slice(1, 33); + + // Determine parity byte for Y coordinate based on the last byte + const yCoordLastByte = uncompressedBytes[64]; + const parityByte = yCoordLastByte % 2 === 0 ? 0x02 : 0x03; + + // Return the compressed public key consisting of the parity byte and X-coordinate + return new Uint8Array([parityByte, ...xCoord]); +} diff --git a/sdk/kms/tests/e2e-aws-kms.test.ts b/sdk/kms/tests/e2e-aws-kms.test.ts new file mode 100644 index 0000000000000..5eddbd57120df --- /dev/null +++ b/sdk/kms/tests/e2e-aws-kms.test.ts @@ -0,0 +1,46 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 +import { beforeAll, describe, expect, it } from 'vitest'; + +import { AwsKmsSigner } from '../src/aws/aws-kms-signer'; + +const { E2E_AWS_KMS_TEST_ENABLE } = process.env; + +describe.runIf(E2E_AWS_KMS_TEST_ENABLE)('Aws KMS signer E2E testing', () => { + let signer: AwsKmsSigner; + beforeAll(async () => { + const { AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_REGION, AWS_KMS_KEY_ID } = process.env; + + if (!AWS_ACCESS_KEY_ID || !AWS_SECRET_ACCESS_KEY || !AWS_REGION || !AWS_KMS_KEY_ID) { + throw new Error('Missing one or more required environment variables.'); + } + + signer = await AwsKmsSigner.fromKeyId(AWS_KMS_KEY_ID, { + region: AWS_REGION, + accessKeyId: AWS_ACCESS_KEY_ID, + secretAccessKey: AWS_SECRET_ACCESS_KEY, + }); + }); + + it('should retrieve the correct sui address', async () => { + // Get the public key + const publicKey = signer.getPublicKey(); + expect(publicKey.toSuiAddress()).toEqual( + '0x2bfc782b6bf66f305fdeb19a203386efee3e62bce3ceb9d3d53eafbe0b14a035', + ); + }); + + it('should sign a message and verify against pubkey', async () => { + // Define a test message + const testMessage = 'Hello, AWS KMS Signer!'; + const messageBytes = new TextEncoder().encode(testMessage); + + // Sign the test message + const { signature } = await signer.signPersonalMessage(messageBytes); + + // verify signature against pubkey + const publicKey = signer.getPublicKey(); + const isValid = await publicKey.verifyPersonalMessage(messageBytes, signature); + expect(isValid).toBe(true); + }); +}); diff --git a/sdk/kms/tsconfig.esm.json b/sdk/kms/tsconfig.esm.json new file mode 100644 index 0000000000000..5048bdf8ffc62 --- /dev/null +++ b/sdk/kms/tsconfig.esm.json @@ -0,0 +1,7 @@ +{ + "extends": "./tsconfig.json", + "compilerOptions": { + "module": "ESNext", + "outDir": "dist/esm" + } +} diff --git a/sdk/kms/tsconfig.json b/sdk/kms/tsconfig.json new file mode 100644 index 0000000000000..b4ed52cfe8a5e --- /dev/null +++ b/sdk/kms/tsconfig.json @@ -0,0 +1,11 @@ +{ + "extends": "../build-scripts/tsconfig.shared.json", + 
"include": ["src"], + "compilerOptions": { + "module": "CommonJS", + "outDir": "dist/cjs", + "isolatedModules": true, + "rootDir": "src" + }, + "references": [{ "path": "../typescript" }] +} diff --git a/sdk/kms/vitest.config.ts b/sdk/kms/vitest.config.ts new file mode 100644 index 0000000000000..07452fe1219c2 --- /dev/null +++ b/sdk/kms/vitest.config.ts @@ -0,0 +1,19 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +import { defineConfig } from 'vitest/config'; + +export default defineConfig({ + test: { + minWorkers: 1, + maxWorkers: 4, + hookTimeout: 1000000, + testTimeout: 1000000, + env: { + NODE_ENV: 'test', + }, + }, + resolve: { + alias: {}, + }, +}); diff --git a/sdk/suins-toolkit/CHANGELOG.md b/sdk/suins-toolkit/CHANGELOG.md index 62172a2cb5e31..e4e196552a66b 100644 --- a/sdk/suins-toolkit/CHANGELOG.md +++ b/sdk/suins-toolkit/CHANGELOG.md @@ -1,5 +1,12 @@ # @mysten/suins-toolkit +## 0.5.23 + +### Patch Changes + +- Updated dependencies [c24814b] + - @mysten/sui@1.14.0 + ## 0.5.22 ### Patch Changes diff --git a/sdk/suins-toolkit/package.json b/sdk/suins-toolkit/package.json index 60d0d8024ee98..6474d6ba6b32e 100644 --- a/sdk/suins-toolkit/package.json +++ b/sdk/suins-toolkit/package.json @@ -2,7 +2,7 @@ "name": "@mysten/suins-toolkit", "author": "Mysten Labs ", "description": "SuiNS TypeScript SDK", - "version": "0.5.22", + "version": "0.5.23", "license": "Apache-2.0", "type": "commonjs", "main": "./dist/cjs/index.js", diff --git a/sdk/typescript/CHANGELOG.md b/sdk/typescript/CHANGELOG.md index cc7a666518210..0dfddaa1fa2b5 100644 --- a/sdk/typescript/CHANGELOG.md +++ b/sdk/typescript/CHANGELOG.md @@ -1,5 +1,12 @@ # @mysten/sui.js +## 1.14.0 + +### Minor Changes + +- c24814b: Adds a custom header; 'Client-Request-Method' which will contain the method name used in + each outgoing jsonrpc request + ## 1.13.0 ### Minor Changes diff --git a/sdk/typescript/package.json b/sdk/typescript/package.json index 27dcf7f82960f..ee6af72ea0b2b 100644 --- a/sdk/typescript/package.json +++ b/sdk/typescript/package.json @@ -3,7 +3,7 @@ "author": "Mysten Labs ", "description": "Sui TypeScript API(Work in Progress)", "homepage": "https://sdk.mystenlabs.com", - "version": "1.13.0", + "version": "1.14.0", "license": "Apache-2.0", "sideEffects": false, "files": [ diff --git a/sdk/typescript/src/client/http-transport.ts b/sdk/typescript/src/client/http-transport.ts index 54309646ea1a4..b690316dbcfbe 100644 --- a/sdk/typescript/src/client/http-transport.ts +++ b/sdk/typescript/src/client/http-transport.ts @@ -95,6 +95,7 @@ export class SuiHTTPTransport implements SuiTransport { 'Client-Sdk-Type': 'typescript', 'Client-Sdk-Version': PACKAGE_VERSION, 'Client-Target-Api-Version': TARGETED_RPC_VERSION, + 'Client-Request-Method': input.method, ...this.#options.rpc?.headers, }, body: JSON.stringify({ diff --git a/sdk/typescript/src/version.ts b/sdk/typescript/src/version.ts index 204307cfc05e4..0181845015457 100644 --- a/sdk/typescript/src/version.ts +++ b/sdk/typescript/src/version.ts @@ -3,5 +3,5 @@ // This file is generated by genversion.mjs. Do not edit it directly. 
-export const PACKAGE_VERSION = '1.13.0'; -export const TARGETED_RPC_VERSION = '1.36.0'; +export const PACKAGE_VERSION = '1.14.0'; +export const TARGETED_RPC_VERSION = '1.37.0'; diff --git a/sdk/typescript/test/unit/client/http-transport.test.ts b/sdk/typescript/test/unit/client/http-transport.test.ts index 654a2756af5dd..cd3e54692be70 100644 --- a/sdk/typescript/test/unit/client/http-transport.test.ts +++ b/sdk/typescript/test/unit/client/http-transport.test.ts @@ -62,6 +62,7 @@ describe('SuiHTTPTransport', () => { 'Client-Sdk-Type': 'typescript', 'Client-Sdk-Version': PACKAGE_VERSION, 'Client-Target-Api-Version': TARGETED_RPC_VERSION, + 'Client-Request-Method': 'getAllBalances', }, method: 'POST', }); diff --git a/sdk/wallet-standard/CHANGELOG.md b/sdk/wallet-standard/CHANGELOG.md index 0e4aea6b9f07f..e80ab97c666ab 100644 --- a/sdk/wallet-standard/CHANGELOG.md +++ b/sdk/wallet-standard/CHANGELOG.md @@ -1,5 +1,12 @@ # @mysten/wallet-standard +## 0.13.9 + +### Patch Changes + +- Updated dependencies [c24814b] + - @mysten/sui@1.14.0 + ## 0.13.8 ### Patch Changes diff --git a/sdk/wallet-standard/package.json b/sdk/wallet-standard/package.json index f3dfcb12cc5b1..3e22c3d53a556 100644 --- a/sdk/wallet-standard/package.json +++ b/sdk/wallet-standard/package.json @@ -1,6 +1,6 @@ { "name": "@mysten/wallet-standard", - "version": "0.13.8", + "version": "0.13.9", "description": "A suite of standard utilities for implementing wallets based on the Wallet Standard.", "license": "Apache-2.0", "author": "Mysten Labs ", diff --git a/sdk/zklogin/CHANGELOG.md b/sdk/zklogin/CHANGELOG.md index 94e9b9c9fb2fe..32a37002033fa 100644 --- a/sdk/zklogin/CHANGELOG.md +++ b/sdk/zklogin/CHANGELOG.md @@ -1,5 +1,12 @@ # @mysten/zklogin +## 0.7.24 + +### Patch Changes + +- Updated dependencies [c24814b] + - @mysten/sui@1.14.0 + ## 0.7.23 ### Patch Changes diff --git a/sdk/zklogin/package.json b/sdk/zklogin/package.json index 381fb1eef207e..f7729c21718ff 100644 --- a/sdk/zklogin/package.json +++ b/sdk/zklogin/package.json @@ -1,6 +1,6 @@ { "name": "@mysten/zklogin", - "version": "0.7.23", + "version": "0.7.24", "description": "Utilities for interacting with zkLogin in Sui", "license": "Apache-2.0", "author": "Mysten Labs ", diff --git a/sdk/zksend/CHANGELOG.md b/sdk/zksend/CHANGELOG.md index 678a1a2129fde..301e41a6e75ee 100644 --- a/sdk/zksend/CHANGELOG.md +++ b/sdk/zksend/CHANGELOG.md @@ -1,5 +1,13 @@ # @mysten/zksend +## 0.11.9 + +### Patch Changes + +- Updated dependencies [c24814b] + - @mysten/sui@1.14.0 + - @mysten/wallet-standard@0.13.9 + ## 0.11.8 ### Patch Changes diff --git a/sdk/zksend/package.json b/sdk/zksend/package.json index c7becb82c936f..e4c5e88609d2d 100644 --- a/sdk/zksend/package.json +++ b/sdk/zksend/package.json @@ -1,6 +1,6 @@ { "name": "@mysten/zksend", - "version": "0.11.8", + "version": "0.11.9", "description": "TODO: Write Description", "license": "Apache-2.0", "author": "Mysten Labs ", diff --git a/sui-execution/Cargo.toml b/sui-execution/Cargo.toml index 6da2eba7cbf57..785a0e47929f2 100644 --- a/sui-execution/Cargo.toml +++ b/sui-execution/Cargo.toml @@ -51,16 +51,16 @@ petgraph = "0.5.1" [features] default = [] -gas-profiler = [ - "sui-adapter-latest/gas-profiler", - "sui-adapter-v0/gas-profiler", - "sui-adapter-v1/gas-profiler", - "sui-adapter-v2/gas-profiler", -# "sui-adapter-$CUT/gas-profiler", - "move-vm-runtime-v0/gas-profiler", - "move-vm-runtime-v1/gas-profiler", - "move-vm-runtime-latest/gas-profiler", - "move-vm-runtime-v2/gas-profiler", -# "move-vm-runtime-$CUT/gas-profiler", - 
"move-vm-config/gas-profiler", +tracing = [ + "sui-adapter-latest/tracing", + "sui-adapter-v0/tracing", + "sui-adapter-v1/tracing", + "sui-adapter-v2/tracing", +# "sui-adapter-$CUT/tracing", + "move-vm-runtime-v0/tracing", + "move-vm-runtime-v1/tracing", + "move-vm-runtime-latest/tracing", + "move-vm-runtime-v2/tracing", +# "move-vm-runtime-$CUT/tracing", + "move-vm-config/tracing", ] diff --git a/sui-execution/latest/sui-adapter/Cargo.toml b/sui-execution/latest/sui-adapter/Cargo.toml index 89507f02d64d3..5da6ec1bd45be 100644 --- a/sui-execution/latest/sui-adapter/Cargo.toml +++ b/sui-execution/latest/sui-adapter/Cargo.toml @@ -40,9 +40,9 @@ parking_lot.workspace = true move-package.workspace = true [features] -gas-profiler = [ - "sui-types/gas-profiler", - "move-vm-runtime/gas-profiler", - "move-vm-profiler/gas-profiler", - "move-vm-config/gas-profiler", +tracing = [ + "sui-types/tracing", + "move-vm-runtime/tracing", + "move-vm-profiler/tracing", + "move-vm-config/tracing", ] diff --git a/sui-execution/latest/sui-adapter/src/adapter.rs b/sui-execution/latest/sui-adapter/src/adapter.rs index c1856234639c7..f7fca14dcf6a2 100644 --- a/sui-execution/latest/sui-adapter/src/adapter.rs +++ b/sui-execution/latest/sui-adapter/src/adapter.rs @@ -4,7 +4,7 @@ pub use checked::*; #[sui_macros::with_checked_arithmetic] mod checked { - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] use move_vm_config::runtime::VMProfilerConfig; use std::path::PathBuf; use std::{collections::BTreeMap, sync::Arc}; @@ -44,9 +44,9 @@ mod checked { protocol_config: &ProtocolConfig, _enable_profiler: Option, ) -> Result { - #[cfg(not(feature = "gas-profiler"))] + #[cfg(not(feature = "tracing"))] let vm_profiler_config = None; - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] let vm_profiler_config = _enable_profiler.clone().map(|path| VMProfilerConfig { full_path: path, track_bytecode_instructions: false, diff --git a/sui-execution/latest/sui-adapter/src/programmable_transactions/context.rs b/sui-execution/latest/sui-adapter/src/programmable_transactions/context.rs index 0b8d483463a45..e41b3c09f7ea4 100644 --- a/sui-execution/latest/sui-adapter/src/programmable_transactions/context.rs +++ b/sui-execution/latest/sui-adapter/src/programmable_transactions/context.rs @@ -194,7 +194,7 @@ mod checked { // Set the profiler if in CLI #[skip_checked_arithmetic] - move_vm_profiler::gas_profiler_feature_enabled! { + move_vm_profiler::tracing_feature_enabled! { use move_vm_profiler::GasProfiler; use move_vm_types::gas::GasMeter; diff --git a/sui-execution/latest/sui-adapter/src/programmable_transactions/execution.rs b/sui-execution/latest/sui-adapter/src/programmable_transactions/execution.rs index 23a29dbc747a5..a43078c1cd283 100644 --- a/sui-execution/latest/sui-adapter/src/programmable_transactions/execution.rs +++ b/sui-execution/latest/sui-adapter/src/programmable_transactions/execution.rs @@ -642,10 +642,10 @@ mod checked { )]) } - fn check_compatibility<'a>( + fn check_compatibility( context: &ExecutionContext, existing_package: &MovePackage, - upgrading_modules: impl IntoIterator, + upgrading_modules: &[CompiledModule], policy: u8, ) -> Result<(), ExecutionError> { // Make sure this is a known upgrade policy. 
@@ -662,7 +662,26 @@ mod checked { invariant_violation!("Tried to normalize modules in existing package but failed") }; - let mut new_normalized = normalize_deserialized_modules(upgrading_modules.into_iter()); + let existing_modules_len = current_normalized.len(); + let upgrading_modules_len = upgrading_modules.len(); + let disallow_new_modules = context + .protocol_config + .disallow_new_modules_in_deps_only_packages() + && policy as u8 == UpgradePolicy::DEP_ONLY; + + if disallow_new_modules && existing_modules_len != upgrading_modules_len { + return Err(ExecutionError::new_with_source( + ExecutionErrorKind::PackageUpgradeError { + upgrade_error: PackageUpgradeError::IncompatibleUpgrade, + }, + format!( + "Existing package has {existing_modules_len} modules, but new package has \ + {upgrading_modules_len}. Adding or removing a module to a deps only package is not allowed." + ), + )); + } + + let mut new_normalized = normalize_deserialized_modules(upgrading_modules.iter()); for (name, cur_module) in current_normalized { let Some(new_module) = new_normalized.remove(&name) else { return Err(ExecutionError::new_with_source( @@ -676,6 +695,9 @@ mod checked { check_module_compatibility(&policy, &cur_module, &new_module)?; } + // If we disallow new modules double check that there are no modules left in `new_normalized`. + debug_assert!(!disallow_new_modules || new_normalized.is_empty()); + Ok(()) } diff --git a/sui-execution/latest/sui-move-natives/src/crypto/group_ops.rs b/sui-execution/latest/sui-move-natives/src/crypto/group_ops.rs index 93535741f7567..c94c3ed052fb5 100644 --- a/sui-execution/latest/sui-move-natives/src/crypto/group_ops.rs +++ b/sui-execution/latest/sui-move-natives/src/crypto/group_ops.rs @@ -42,6 +42,14 @@ fn is_msm_supported(context: &NativeContext) -> bool { .enable_group_ops_native_function_msm() } +fn is_uncompressed_g1_supported(context: &NativeContext) -> bool { + context + .extensions() + .get::() + .protocol_config + .uncompressed_g1_group_elements() +} + // Gas related structs and functions. #[derive(Clone)] @@ -86,6 +94,14 @@ pub struct GroupOpsCostParams { pub bls12381_msm_max_len: Option, // costs for decode, pairing, and encode output pub bls12381_pairing_cost: Option, + // costs for conversion to and from uncompressed form + pub bls12381_g1_to_uncompressed_g1_cost: Option, + pub bls12381_uncompressed_g1_to_g1_cost: Option, + // costs for sum of elements uncompressed form + pub bls12381_uncompressed_g1_sum_base_cost: Option, + pub bls12381_uncompressed_g1_sum_cost_per_term: Option, + // limit the number of terms in a sum + pub bls12381_uncompressed_g1_sum_max_terms: Option, } macro_rules! 
native_charge_gas_early_exit_option { @@ -109,6 +125,7 @@ enum Groups { BLS12381G1 = 1, BLS12381G2 = 2, BLS12381GT = 3, + BLS12381UncompressedG1 = 4, } impl Groups { @@ -118,6 +135,7 @@ impl Groups { 1 => Some(Groups::BLS12381G1), 2 => Some(Groups::BLS12381G2), 3 => Some(Groups::BLS12381GT), + 4 => Some(Groups::BLS12381UncompressedG1), _ => None, } } @@ -751,3 +769,149 @@ pub fn internal_pairing( Err(_) => Ok(NativeResult::err(cost, INVALID_INPUT_ERROR)), } } + +/*************************************************************************************************** + * native fun internal_convert + * Implementation of the Move native function `internal_convert(from_type:u8, to_type: u8, e: &vector): vector` + * gas cost: group_ops_bls12381_g1_from_uncompressed_cost / group_ops_bls12381_g1_from_compressed_cost + **************************************************************************************************/ +pub fn internal_convert( + context: &mut NativeContext, + ty_args: Vec, + mut args: VecDeque, +) -> PartialVMResult { + debug_assert!(ty_args.is_empty()); + debug_assert!(args.len() == 3); + + let cost = context.gas_used(); + + if !(is_uncompressed_g1_supported(context)) { + return Ok(NativeResult::err(cost, NOT_SUPPORTED_ERROR)); + } + + let e_ref = pop_arg!(args, VectorRef); + let e = e_ref.as_bytes_ref(); + let to_type = pop_arg!(args, u8); + let from_type = pop_arg!(args, u8); + + let cost_params = &context + .extensions() + .get::() + .group_ops_cost_params + .clone(); + + let result = match (Groups::from_u8(from_type), Groups::from_u8(to_type)) { + (Some(Groups::BLS12381UncompressedG1), Some(Groups::BLS12381G1)) => { + native_charge_gas_early_exit_option!( + context, + cost_params.bls12381_uncompressed_g1_to_g1_cost + ); + e.to_vec() + .try_into() + .map_err(|_| FastCryptoError::InvalidInput) + .map(bls::G1ElementUncompressed::from_trusted_byte_array) + .and_then(|e| bls::G1Element::try_from(&e)) + .map(|e| e.to_byte_array().to_vec()) + } + (Some(Groups::BLS12381G1), Some(Groups::BLS12381UncompressedG1)) => { + native_charge_gas_early_exit_option!( + context, + cost_params.bls12381_g1_to_uncompressed_g1_cost + ); + parse_trusted::(&e) + .map(|e| bls::G1ElementUncompressed::from(&e)) + .map(|e| e.into_byte_array().to_vec()) + } + _ => Err(FastCryptoError::InvalidInput), + }; + + match result { + Ok(bytes) => Ok(NativeResult::ok(cost, smallvec![Value::vector_u8(bytes)])), + // Since all Element are validated on construction, this error should never happen unless the requested type is wrong. 
+ Err(_) => Ok(NativeResult::err(cost, INVALID_INPUT_ERROR)), + } +} + +/*************************************************************************************************** + * native fun internal_sum + * Implementation of the Move native function `internal_sum(type:u8, terms: &vector>): vector` + * gas cost: group_ops_bls12381_g1_sum_of_uncompressed_base_cost + len(terms) * group_ops_bls12381_g1_sum_of_uncompressed_cost_per_term + **************************************************************************************************/ +pub fn internal_sum( + context: &mut NativeContext, + ty_args: Vec, + mut args: VecDeque, +) -> PartialVMResult { + debug_assert!(ty_args.is_empty()); + debug_assert!(args.len() == 2); + + let cost = context.gas_used(); + + if !(is_uncompressed_g1_supported(context)) { + return Ok(NativeResult::err(cost, NOT_SUPPORTED_ERROR)); + } + + let cost_params = &context + .extensions() + .get::() + .group_ops_cost_params + .clone(); + + // The input is a reference to a vector of vector's + let inputs = pop_arg!(args, VectorRef); + let group_type = pop_arg!(args, u8); + + let length = inputs + .len(&Type::Vector(Box::new(Type::U8)))? + .value_as::()?; + + let result = match Groups::from_u8(group_type) { + Some(Groups::BLS12381UncompressedG1) => { + let max_terms = cost_params + .bls12381_uncompressed_g1_sum_max_terms + .ok_or_else(|| { + PartialVMError::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR) + .with_message("Max number of terms is not set".to_string()) + })?; + + if length > max_terms { + return Ok(NativeResult::err(cost, INPUT_TOO_LONG_ERROR)); + } + + native_charge_gas_early_exit_option!( + context, + cost_params + .bls12381_uncompressed_g1_sum_base_cost + .and_then(|base| cost_params + .bls12381_uncompressed_g1_sum_cost_per_term + .map(|per_term| base + per_term * length.into())) + ); + + // Read the input vector + (0..length) + .map(|i| { + inputs + .borrow_elem(i as usize, &Type::Vector(Box::new(Type::U8))) + .and_then(Value::value_as::) + .map_err(|_| FastCryptoError::InvalidInput) + .and_then(|v| { + v.as_bytes_ref() + .to_vec() + .try_into() + .map_err(|_| FastCryptoError::InvalidInput) + }) + .map(bls::G1ElementUncompressed::from_trusted_byte_array) + }) + .collect::>>() + .and_then(|e| bls::G1ElementUncompressed::sum(&e)) + .map(|e| bls::G1ElementUncompressed::from(&e)) + .map(|e| e.into_byte_array().to_vec()) + } + _ => Err(FastCryptoError::InvalidInput), + }; + + match result { + Ok(bytes) => Ok(NativeResult::ok(cost, smallvec![Value::vector_u8(bytes)])), + Err(_) => Ok(NativeResult::err(cost, INVALID_INPUT_ERROR)), + } +} diff --git a/sui-execution/latest/sui-move-natives/src/lib.rs b/sui-execution/latest/sui-move-natives/src/lib.rs index e2f50dfe1b86a..193d2f1d51935 100644 --- a/sui-execution/latest/sui-move-natives/src/lib.rs +++ b/sui-execution/latest/sui-move-natives/src/lib.rs @@ -627,6 +627,21 @@ impl NativesCostTable { bls12381_pairing_cost: protocol_config .group_ops_bls12381_pairing_cost_as_option() .map(Into::into), + bls12381_g1_to_uncompressed_g1_cost: protocol_config + .group_ops_bls12381_g1_to_uncompressed_g1_cost_as_option() + .map(Into::into), + bls12381_uncompressed_g1_to_g1_cost: protocol_config + .group_ops_bls12381_uncompressed_g1_to_g1_cost_as_option() + .map(Into::into), + bls12381_uncompressed_g1_sum_base_cost: protocol_config + .group_ops_bls12381_uncompressed_g1_sum_base_cost_as_option() + .map(Into::into), + bls12381_uncompressed_g1_sum_cost_per_term: protocol_config + 
diff --git a/sui-execution/latest/sui-move-natives/src/lib.rs b/sui-execution/latest/sui-move-natives/src/lib.rs index e2f50dfe1b86a..193d2f1d51935 100644 --- a/sui-execution/latest/sui-move-natives/src/lib.rs +++ b/sui-execution/latest/sui-move-natives/src/lib.rs @@ -627,6 +627,21 @@ impl NativesCostTable { bls12381_pairing_cost: protocol_config .group_ops_bls12381_pairing_cost_as_option() .map(Into::into), + bls12381_g1_to_uncompressed_g1_cost: protocol_config + .group_ops_bls12381_g1_to_uncompressed_g1_cost_as_option() + .map(Into::into), + bls12381_uncompressed_g1_to_g1_cost: protocol_config + .group_ops_bls12381_uncompressed_g1_to_g1_cost_as_option() + .map(Into::into), + bls12381_uncompressed_g1_sum_base_cost: protocol_config + .group_ops_bls12381_uncompressed_g1_sum_base_cost_as_option() + .map(Into::into), + bls12381_uncompressed_g1_sum_cost_per_term: protocol_config + .group_ops_bls12381_uncompressed_g1_sum_cost_per_term_as_option() + .map(Into::into), + bls12381_uncompressed_g1_sum_max_terms: protocol_config + .group_ops_bls12381_uncompressed_g1_sum_max_terms_as_option() + .map(Into::into), }, vdf_cost_params: VDFCostParams { vdf_verify_cost: protocol_config @@ -890,6 +905,16 @@ pub fn all_natives(silent: bool, protocol_config: &ProtocolConfig) -> NativeFunc "internal_pairing", make_native!(group_ops::internal_pairing), ), + ( + "group_ops", + "internal_convert", + make_native!(group_ops::internal_convert), + ), + ( + "group_ops", + "internal_sum", + make_native!(group_ops::internal_sum), + ), ("object", "delete_impl", make_native!(object::delete_impl)), ("object", "borrow_uid", make_native!(object::borrow_uid)), ( diff --git a/sui-execution/v0/sui-adapter/Cargo.toml b/sui-execution/v0/sui-adapter/Cargo.toml index 2fff322014716..aec84a77b2d6d 100644 --- a/sui-execution/v0/sui-adapter/Cargo.toml +++ b/sui-execution/v0/sui-adapter/Cargo.toml @@ -40,9 +40,9 @@ parking_lot.workspace = true move-package.workspace = true [features] -gas-profiler = [ - "sui-types/gas-profiler", - "move-vm-runtime/gas-profiler", - "move-vm-profiler/gas-profiler", - "move-vm-config/gas-profiler", +tracing = [ + "sui-types/tracing", + "move-vm-runtime/tracing", + "move-vm-profiler/tracing", + "move-vm-config/tracing", ] diff --git a/sui-execution/v0/sui-adapter/src/adapter.rs b/sui-execution/v0/sui-adapter/src/adapter.rs index d95c13b547eb3..1874dca8f53e9 100644 --- a/sui-execution/v0/sui-adapter/src/adapter.rs +++ b/sui-execution/v0/sui-adapter/src/adapter.rs @@ -5,7 +5,7 @@ pub use checked::*; #[sui_macros::with_checked_arithmetic] mod checked { - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] use move_vm_config::runtime::VMProfilerConfig; use std::path::PathBuf; use std::{collections::BTreeMap, sync::Arc}; @@ -45,9 +45,9 @@ mod checked { protocol_config: &ProtocolConfig, _enable_profiler: Option<PathBuf>, ) -> Result<MoveVM, SuiError> { - #[cfg(not(feature = "gas-profiler"))] + #[cfg(not(feature = "tracing"))] let vm_profiler_config = None; - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] let vm_profiler_config = _enable_profiler.clone().map(|path| VMProfilerConfig { full_path: path, track_bytecode_instructions: false, diff --git a/sui-execution/v0/sui-adapter/src/programmable_transactions/context.rs b/sui-execution/v0/sui-adapter/src/programmable_transactions/context.rs index 1e7f02c9babb2..92667370b5fea 100644 --- a/sui-execution/v0/sui-adapter/src/programmable_transactions/context.rs +++ b/sui-execution/v0/sui-adapter/src/programmable_transactions/context.rs @@ -199,7 +199,7 @@ mod checked { // Set the profiler if in CLI #[skip_checked_arithmetic] - move_vm_profiler::gas_profiler_feature_enabled! { + move_vm_profiler::tracing_feature_enabled! { use move_vm_profiler::GasProfiler; use move_vm_types::gas::GasMeter;
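The hunks that follow repeat one mechanical change per execution version (v0, v1, v2): the Cargo feature `gas-profiler` becomes `tracing`, and every `#[cfg(feature = ...)]` gate and `gas_profiler_feature_enabled!` call site is renamed to `tracing`/`tracing_feature_enabled!` to match. The rename has to be exhaustive because cfg gates fail silently; a hypothetical sketch of the pattern (not code from this change set):

// Both branches must reference the new feature name ("tracing"); a stale
// #[cfg(feature = "gas-profiler")] gate would simply never be compiled in again.
#[cfg(feature = "tracing")]
fn profiler_enabled() -> bool {
    true
}

#[cfg(not(feature = "tracing"))]
fn profiler_enabled() -> bool {
    false
}

fn main() {
    println!("profiler enabled: {}", profiler_enabled());
}

Building the crate with `cargo build --features tracing` (assuming the feature is declared as in the Cargo.toml hunks) selects the first branch; omitting it selects the second.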
diff --git a/sui-execution/v1/sui-adapter/Cargo.toml b/sui-execution/v1/sui-adapter/Cargo.toml index 44a1409c61e5e..1ccf3b753f31f 100644 --- a/sui-execution/v1/sui-adapter/Cargo.toml +++ b/sui-execution/v1/sui-adapter/Cargo.toml @@ -39,9 +39,9 @@ parking_lot.workspace = true move-package.workspace = true [features] -gas-profiler = [ - "sui-types/gas-profiler", - "move-vm-runtime/gas-profiler", - "move-vm-profiler/gas-profiler", - "move-vm-config/gas-profiler", +tracing = [ + "sui-types/tracing", + "move-vm-runtime/tracing", + "move-vm-profiler/tracing", + "move-vm-config/tracing", ] diff --git a/sui-execution/v1/sui-adapter/src/adapter.rs b/sui-execution/v1/sui-adapter/src/adapter.rs index 8b72d30244853..68251237d683d 100644 --- a/sui-execution/v1/sui-adapter/src/adapter.rs +++ b/sui-execution/v1/sui-adapter/src/adapter.rs @@ -5,7 +5,7 @@ pub use checked::*; #[sui_macros::with_checked_arithmetic] mod checked { - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] use move_vm_config::runtime::VMProfilerConfig; use std::path::PathBuf; use std::{collections::BTreeMap, sync::Arc}; @@ -45,9 +45,9 @@ mod checked { protocol_config: &ProtocolConfig, _enable_profiler: Option<PathBuf>, ) -> Result<MoveVM, SuiError> { - #[cfg(not(feature = "gas-profiler"))] + #[cfg(not(feature = "tracing"))] let vm_profiler_config = None; - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] let vm_profiler_config = _enable_profiler.clone().map(|path| VMProfilerConfig { full_path: path, track_bytecode_instructions: false, diff --git a/sui-execution/v1/sui-adapter/src/programmable_transactions/context.rs b/sui-execution/v1/sui-adapter/src/programmable_transactions/context.rs index 434d7a68d7fbd..d95bf2ba907db 100644 --- a/sui-execution/v1/sui-adapter/src/programmable_transactions/context.rs +++ b/sui-execution/v1/sui-adapter/src/programmable_transactions/context.rs @@ -190,7 +190,7 @@ mod checked { // Set the profiler if feature is enabled #[skip_checked_arithmetic] - move_vm_profiler::gas_profiler_feature_enabled! { + move_vm_profiler::tracing_feature_enabled! 
{ use move_vm_profiler::GasProfiler; use move_vm_types::gas::GasMeter; diff --git a/sui-execution/v2/sui-adapter/Cargo.toml b/sui-execution/v2/sui-adapter/Cargo.toml index 3e3c2eb9f79b0..2b1288a29ab72 100644 --- a/sui-execution/v2/sui-adapter/Cargo.toml +++ b/sui-execution/v2/sui-adapter/Cargo.toml @@ -39,9 +39,9 @@ parking_lot.workspace = true move-package.workspace = true [features] -gas-profiler = [ - "sui-types/gas-profiler", - "move-vm-runtime/gas-profiler", - "move-vm-profiler/gas-profiler", - "move-vm-config/gas-profiler", +tracing = [ + "sui-types/tracing", + "move-vm-runtime/tracing", + "move-vm-profiler/tracing", + "move-vm-config/tracing", ] diff --git a/sui-execution/v2/sui-adapter/src/adapter.rs b/sui-execution/v2/sui-adapter/src/adapter.rs index 687494b9d57af..5c89316384f6a 100644 --- a/sui-execution/v2/sui-adapter/src/adapter.rs +++ b/sui-execution/v2/sui-adapter/src/adapter.rs @@ -4,7 +4,7 @@ pub use checked::*; #[sui_macros::with_checked_arithmetic] mod checked { - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] use move_vm_config::runtime::VMProfilerConfig; use std::path::PathBuf; use std::{collections::BTreeMap, sync::Arc}; @@ -44,9 +44,9 @@ mod checked { protocol_config: &ProtocolConfig, _enable_profiler: Option<PathBuf>, ) -> Result<MoveVM, SuiError> { - #[cfg(not(feature = "gas-profiler"))] + #[cfg(not(feature = "tracing"))] let vm_profiler_config = None; - #[cfg(feature = "gas-profiler")] + #[cfg(feature = "tracing")] let vm_profiler_config = _enable_profiler.clone().map(|path| VMProfilerConfig { full_path: path, track_bytecode_instructions: false, diff --git a/sui-execution/v2/sui-adapter/src/programmable_transactions/context.rs b/sui-execution/v2/sui-adapter/src/programmable_transactions/context.rs index c749d0eb8b078..7d92727099435 100644 --- a/sui-execution/v2/sui-adapter/src/programmable_transactions/context.rs +++ b/sui-execution/v2/sui-adapter/src/programmable_transactions/context.rs @@ -194,7 +194,7 @@ mod checked { // Set the profiler if in CLI #[skip_checked_arithmetic] - move_vm_profiler::gas_profiler_feature_enabled! { + move_vm_profiler::tracing_feature_enabled! { use move_vm_profiler::GasProfiler; use move_vm_types::gas::GasMeter; diff --git a/turbo.json b/turbo.json index 93d745bb34ec5..4c9570f0756ce 100644 --- a/turbo.json +++ b/turbo.json @@ -4,7 +4,8 @@ "lint": {}, "test": { "dependsOn": ["^build", "build"], - "outputs": ["coverage/**"] + "outputs": ["coverage/**"], + "env": ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION", "AWS_KMS_KEY_ID"] }, "dev": { "dependsOn": ["^build"],